diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000..d25a15416 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,1181 @@ +name: WasmVM Tests + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + setup: + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: "1.23" + - name: Cache Go modules + uses: actions/cache@v3 + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + # Main package tests + test-ibc: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestIBC$ + + test-ibc-handshake: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestIBCHandshake$ + + test-ibc-packet-dispatch: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestIBCPacketDispatch$ + + test-analyze-code: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... 
-run ^TestAnalyzeCode$ + + test-ibc-msg-get-channel: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestIBCMsgGetChannel$ + + test-ibc-msg-get-counter-version: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestIBCMsgGetCounterVersion$ + + test-store-code: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestStoreCode$ + + test-simulate-store-code: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestSimulateStoreCode$ + + test-store-code-and-get: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestStoreCodeAndGet$ + + test-remove-code: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... 
-run ^TestRemoveCode$ + + test-happy-path: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestHappyPath$ + + test-env: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestEnv$ + + test-get-metrics: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./... -run ^TestGetMetrics$ + + # API Tests + test-validate-address-failure: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestValidateAddressFailure$ + + test-store-iterator: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestStoreIterator$ + + test-store-iterator-hits-limit: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestStoreIteratorHitsLimit$ + + test-queue-iterator-simple: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - 
uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestQueueIteratorSimple$ + + test-queue-iterator-races: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestQueueIteratorRaces$ + + test-queue-iterator-limit: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestQueueIteratorLimit$ + + test-init-and-release-cache: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestInitAndReleaseCache$ + + test-init-cache-works-for-non-existent-dir: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestInitCacheWorksForNonExistentDir$ + + test-init-cache-errors-for-broken-dir: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestInitCacheErrorsForBrokenDir$ + + test-init-locking-prevents-concurrent-access: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: 
actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestInitLockingPreventsConcurrentAccess$ + + test-init-locking-allows-multiple-instances: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestInitLockingAllowsMultipleInstancesInDifferentDirs$ + + test-init-cache-empty-capabilities: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestInitCacheEmptyCapabilities$ + + test-store-code-and-get-code: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestStoreCodeAndGetCode$ + + test-store-code-fails-with-bad-data: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestStoreCodeFailsWithBadData$ + + test-store-code-unchecked: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestStoreCodeUnchecked$ + + test-pin: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: 
+ go-version: "1.23" + - run: go test -v ./internal/api -run ^TestPin$ + + test-pin-errors: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestPinErrors$ + + test-unpin: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestUnpin$ + + test-unpin-errors: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestUnpinErrors$ + + test-get-pinned-metrics: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestGetPinnedMetrics$ + + test-instantiate: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestInstantiate$ + + test-execute: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestExecute$ + + test-execute-panic: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + 
steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestExecutePanic$ + + test-execute-unreachable: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestExecuteUnreachable$ + + test-execute-cpu-loop: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestExecuteCpuLoop$ + + test-execute-storage-loop: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestExecuteStorageLoop$ + + test-execute-user-errors-in-api-calls: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestExecuteUserErrorsInApiCalls$ + + test-migrate: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestMigrate$ + + test-multiple-instances: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run 
^TestMultipleInstances$ + + test-sudo: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestSudo$ + + test-dispatch-submessage: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestDispatchSubmessage$ + + test-reply-and-query: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestReplyAndQuery$ + + test-query: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestQuery$ + + test-hackatom-querier: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestHackatomQuerier$ + + test-custom-reflect-querier: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestCustomReflectQuerier$ + + test-floats: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: 
actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestFloats$ + + test-libwasmvm-version: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./internal/api -run ^TestLibwasmvmVersion$ + + # Types package tests + test-config-json: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestConfigJSON$ + + test-message-info-handles-multiple-coins: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestMessageInfoHandlesMultipleCoins$ + + test-message-info-handles-missing-coins: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestMessageInfoHandlesMissingCoins$ + + test-block-info-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestBlockInfoSerialization$ + + test-block-info-deserialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - 
run: go test -v ./types -run ^TestBlockInfoDeserialization$ + + test-ibc-timeout-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestIbcTimeoutSerialization$ + + test-ibc-timeout-deserialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestIbcTimeoutDeserialization$ + + test-ibc-receive-response-deserialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestIbcReceiveResponseDeserialization$ + + test-wasm-msg-instantiate-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestWasmMsgInstantiateSerialization$ + + test-wasm-msg-instantiate2-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestWasmMsgInstantiate2Serialization$ + + test-any-msg-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run 
^TestAnyMsgSerialization$ + + test-gov-msg-vote-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestGovMsgVoteSerialization$ + + test-gov-msg-vote-weighted-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestGovMsgVoteWeightedSerialization$ + + test-msg-fund-community-pool-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestMsgFundCommunityPoolSerialization$ + + test-delegation-with-empty-array: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestDelegationWithEmptyArray$ + + test-delegation-with-data: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestDelegationWithData$ + + test-validator-with-empty-array: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestValidatorWithEmptyArray$ + + test-validator-with-data: + 
needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestValidatorWithData$ + + test-query-result-with-empty-data: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestQueryResultWithEmptyData$ + + test-wasm-query-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestWasmQuerySerialization$ + + test-contract-info-response-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestContractInfoResponseSerialization$ + + test-distribution-query-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestDistributionQuerySerialization$ + + test-code-info-response-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestCodeInfoResponseSerialization$ + + test-reply-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: 
[macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestReplySerialization$ + + test-sub-msg-response-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestSubMsgResponseSerialization$ + + test-system-error-no-such-contract-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestSystemErrorNoSuchContractSerialization$ + + test-system-error-no-such-code-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestSystemErrorNoSuchCodeSerialization$ + + test-checksum-string: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestChecksumString$ + + test-uint64-json: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestUint64JSON$ + + test-int64-json: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 
+ - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestInt64JSON$ + + test-array-serialization: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - run: go test -v ./types -run ^TestArraySerialization$ + + benchmarks: + needs: setup + strategy: + fail-fast: false + matrix: + os: [macos-latest, ubuntu-latest] + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-go@v4 + with: + go-version: "1.23" + - name: Run benchmarks + run: | + go test -v ./internal/api -run=^$ -bench=. diff --git a/.gitignore b/.gitignore index cc52551b4..ef2174133 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,10 @@ *.iml .idea .vscode +**combined_code.txt +/vm +/contracts + # no static libraries (35MB+) /internal/api/lib*.a @@ -13,6 +17,8 @@ /demo tmp a.out +assistant* +libwasmvm/target/** # macOS .DS_Store diff --git a/.golangci.yml b/.golangci.yml index 3af894222..b8b52e5a5 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,10 +1,44 @@ +run: + tests: true + linters: # Enable specific linter # https://golangci-lint.run/usage/linters/#enabled-by-default enable: - gofumpt - - goimports + - gci + - testifylint + - errcheck + - thelper + - staticcheck linters-settings: - goimports: - local-prefixes: github.com/CosmWasm/wasmvm + gci: + # Section configuration to compare against. + # Section names are case-insensitive and may contain parameters in (). + # The default order of sections is `standard > default > custom > blank > dot > alias > localmodule`, + # If `custom-order` is `true`, it follows the order of `sections` option. + # Default: ["standard", "default"] + sections: + - standard # Standard section: captures all standard packages. + - default # Default section: contains all imports that could not be matched to another section type. 
+ - prefix(github.com/cosmos/cosmos-sdk) # Custom section: groups all imports with the specified Prefix. + - prefix(github.com/cosmos/ibc-go) + - blank # Blank section: contains all blank imports. This section is not present unless explicitly enabled. + - dot # Dot section: contains all dot imports. This section is not present unless explicitly enabled. + - alias # Alias section: contains all alias imports. This section is not present unless explicitly enabled. + - localmodule # Local module section: contains all local packages. This section is not present unless explicitly enabled. + # Skip generated files. + # Default: true + skip-generated: false + # Enable custom order of sections. + # If `true`, make the section order the same as the order of `sections`. + # Default: false + custom-order: true + # Drops lexical ordering for custom sections. + # Default: false + no-lex-order: true + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 diff --git a/cmd/demo/main.go b/cmd/demo/main.go index 58286e782..b7f0e5cdd 100644 --- a/cmd/demo/main.go +++ b/cmd/demo/main.go @@ -22,10 +22,10 @@ func main() { if file == "version" { libwasmvmVersion, err := wasmvm.LibwasmvmVersion() + fmt.Printf("libwasmvm: %s\n", libwasmvmVersion) if err != nil { panic(err) } - fmt.Printf("libwasmvm: %s\n", libwasmvmVersion) return } diff --git a/go.mod b/go.mod index b8a003356..ac1479942 100644 --- a/go.mod +++ b/go.mod @@ -4,8 +4,10 @@ go 1.21 require ( github.com/google/btree v1.0.0 + github.com/kilic/bls12-381 v0.1.0 github.com/shamaton/msgpack/v2 v2.2.0 github.com/stretchr/testify v1.8.1 + github.com/tetratelabs/wazero v1.8.2 golang.org/x/sys v0.16.0 ) diff --git a/go.sum b/go.sum index 0e767c24f..d2432cb48 100644 --- a/go.sum +++ b/go.sum @@ -4,6 +4,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/google/btree v1.0.0 
h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/kilic/bls12-381 v0.1.0 h1:encrdjqKMEvabVQ7qYOKu1OvhqpK4s47wDYtNiPtlp4= +github.com/kilic/bls12-381 v0.1.0/go.mod h1:vDTTHJONJ6G+P2R74EhnyotQDTliQDnFEwhdmfzw1ig= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -22,6 +24,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tetratelabs/wazero v1.8.2 h1:yIgLR/b2bN31bjxwXHD8a3d+BogigR952csSDdLYEv4= +github.com/tetratelabs/wazero v1.8.2/go.mod h1:yAI0XTsMBhREkM/YDAK/zNou3GoiAce1P6+rp/wQhjs= +golang.org/x/sys v0.0.0-20201101102859-da207088b7d1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/ibc_test.go b/ibc_test.go index 2da754d14..4992ee50b 100644 --- a/ibc_test.go +++ b/ibc_test.go @@ -76,6 +76,7 @@ type AcknowledgeDispatch struct { } func toBytes(t *testing.T, v interface{}) []byte { + t.Helper() bz, err := json.Marshal(v) require.NoError(t, err) return bz @@ -109,7 +110,7 @@ func TestIBCHandshake(t *testing.T) { require.NoError(t, err) assert.NotNil(t, i.Ok) iResponse := i.Ok - require.Equal(t, 0, len(iResponse.Messages)) + require.Empty(t, iResponse.Messages) // channel open gasMeter2 := 
api.NewMockGasMeter(TESTING_GAS_LIMIT) @@ -132,7 +133,7 @@ func TestIBCHandshake(t *testing.T) { require.NoError(t, err) require.NotNil(t, conn.Ok) connResponse := conn.Ok - require.Equal(t, 1, len(connResponse.Messages)) + require.Len(t, connResponse.Messages, 1) // check for the expected custom event expected_events := []types.Event{{ @@ -200,7 +201,7 @@ func TestIBCPacketDispatch(t *testing.T) { require.NoError(t, err) require.NotNil(t, conn.Ok) connResponse := conn.Ok - require.Equal(t, 1, len(connResponse.Messages)) + require.Len(t, connResponse.Messages, 1) id := connResponse.Messages[0].ID // mock reflect init callback (to store address) @@ -237,7 +238,7 @@ func TestIBCPacketDispatch(t *testing.T) { var accounts ListAccountsResponse err = json.Unmarshal(qResponse, &accounts) require.NoError(t, err) - require.Equal(t, 1, len(accounts.Accounts)) + require.Len(t, accounts.Accounts, 1) require.Equal(t, CHANNEL_ID, accounts.Accounts[0].ChannelID) require.Equal(t, REFLECT_ADDR, accounts.Accounts[0].Account) @@ -332,7 +333,7 @@ func TestIBCMsgGetChannel(t *testing.T) { require.Equal(t, msg1.GetChannel(), msg4.GetChannel()) require.Equal(t, msg1.GetChannel(), msg5.GetChannel()) require.Equal(t, msg1.GetChannel(), msg6.GetChannel()) - require.Equal(t, msg1.GetChannel().Endpoint.ChannelID, CHANNEL_ID) + require.Equal(t, CHANNEL_ID, msg1.GetChannel().Endpoint.ChannelID) } func TestIBCMsgGetCounterVersion(t *testing.T) { diff --git a/internal/adapter/interface.go b/internal/adapter/interface.go new file mode 100644 index 000000000..386ae9cce --- /dev/null +++ b/internal/adapter/interface.go @@ -0,0 +1,44 @@ +// file: internal/runtime/wasm_runtime.go +package runtime + +import "github.com/CosmWasm/wasmvm/v2/types" + +type WasmRuntime interface { + // InitCache sets up any runtime-specific cache or resources. Returns a handle. + InitCache(config types.VMConfig) (any, error) + + // ReleaseCache frees resources created by InitCache. 
+ ReleaseCache(handle any) + + // Compilation and code storage + StoreCode(code []byte, persist bool) (checksum []byte, err error) + StoreCodeUnchecked(code []byte) ([]byte, error) + GetCode(checksum []byte) ([]byte, error) + RemoveCode(checksum []byte) error + Pin(checksum []byte) error + Unpin(checksum []byte) error + AnalyzeCode(checksum []byte) (*types.AnalysisReport, error) + + // Execution lifecycles + Instantiate(checksum []byte, env []byte, info []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Execute(checksum []byte, env []byte, info []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Migrate(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + MigrateWithInfo(checksum []byte, env []byte, msg []byte, migrateInfo []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Sudo(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Reply(checksum []byte, env []byte, reply []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Query(checksum []byte, env []byte, query []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + + // IBC entry points + IBCChannelOpen(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCChannelConnect(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCChannelClose(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketReceive(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketAck(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketTimeout(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + 
IBCSourceCallback(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCDestinationCallback(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + + // Metrics + GetMetrics() (*types.Metrics, error) + GetPinnedMetrics() (*types.PinnedMetrics, error) +} diff --git a/internal/api/bindings.h b/internal/api/bindings.h deleted file mode 100644 index 1f356a7fc..000000000 --- a/internal/api/bindings.h +++ /dev/null @@ -1,645 +0,0 @@ -/* Licensed under Apache-2.0. Copyright see https://github.com/CosmWasm/wasmvm/blob/main/NOTICE. */ - -/* Generated with cbindgen:0.27.0 */ - -/* Warning, this file is autogenerated by cbindgen. Don't modify this manually. */ - -#include -#include -#include -#include - -enum ErrnoValue { - ErrnoValue_Success = 0, - ErrnoValue_Other = 1, - ErrnoValue_OutOfGas = 2, -}; -typedef int32_t ErrnoValue; - -/** - * This enum gives names to the status codes returned from Go callbacks to Rust. - * The Go code will return one of these variants when returning. - * - * 0 means no error, all the other cases are some sort of error. - * - */ -enum GoError { - GoError_None = 0, - /** - * Go panicked for an unexpected reason. - */ - GoError_Panic = 1, - /** - * Go received a bad argument from Rust - */ - GoError_BadArgument = 2, - /** - * Ran out of gas while using the SDK (e.g. storage). This can come from the Cosmos SDK gas meter - * (https://github.com/cosmos/cosmos-sdk/blob/v0.45.4/store/types/gas.go#L29-L32). - */ - GoError_OutOfGas = 3, - /** - * Error while trying to serialize data in Go code (typically json.Marshal) - */ - GoError_CannotSerialize = 4, - /** - * An error happened during normal operation of a Go callback, which should be fed back to the contract - */ - GoError_User = 5, - /** - * An error type that should never be created by us. It only serves as a fallback for the i32 to GoError conversion. 
- */ - GoError_Other = -1, -}; -typedef int32_t GoError; - -typedef struct cache_t { - -} cache_t; - -/** - * A view into an externally owned byte slice (Go `[]byte`). - * Use this for the current call only. A view cannot be copied for safety reasons. - * If you need a copy, use [`ByteSliceView::to_owned`]. - * - * Go's nil value is fully supported, such that we can differentiate between nil and an empty slice. - */ -typedef struct ByteSliceView { - /** - * True if and only if the byte slice is nil in Go. If this is true, the other fields must be ignored. - */ - bool is_nil; - const uint8_t *ptr; - uintptr_t len; -} ByteSliceView; - -/** - * An optional Vector type that requires explicit creation and destruction - * and can be sent via FFI. - * It can be created from `Option>` and be converted into `Option>`. - * - * This type is always created in Rust and always dropped in Rust. - * If Go code want to create it, it must instruct Rust to do so via the - * [`new_unmanaged_vector`] FFI export. If Go code wants to consume its data, - * it must create a copy and instruct Rust to destroy it via the - * [`destroy_unmanaged_vector`] FFI export. - * - * An UnmanagedVector is immutable. - * - * ## Ownership - * - * Ownership is the right and the obligation to destroy an `UnmanagedVector` - * exactly once. Both Rust and Go can create an `UnmanagedVector`, which gives - * then ownership. Sometimes it is necessary to transfer ownership. - * - * ### Transfer ownership from Rust to Go - * - * When an `UnmanagedVector` was created in Rust using [`UnmanagedVector::new`], [`UnmanagedVector::default`] - * or [`new_unmanaged_vector`], it can be passed to Go as a return value (see e.g. [load_wasm][crate::load_wasm]). - * Rust then has no chance to destroy the vector anymore, so ownership is transferred to Go. - * In Go, the data has to be copied to a garbage collected `[]byte`. Then the vector must be destroyed - * using [`destroy_unmanaged_vector`]. 
- * - * ### Transfer ownership from Go to Rust - * - * When Rust code calls into Go (using the vtable methods), return data or error messages must be created - * in Go. This is done by calling [`new_unmanaged_vector`] from Go, which copies data into a newly created - * `UnmanagedVector`. Since Go created it, it owns it. The ownership is then passed to Rust via the - * mutable return value pointers. On the Rust side, the vector is destroyed using [`UnmanagedVector::consume`]. - * - * ## Examples - * - * Transferring ownership from Rust to Go using return values of FFI calls: - * - * ``` - * # use wasmvm::{cache_t, ByteSliceView, UnmanagedVector}; - * #[no_mangle] - * pub extern "C" fn save_wasm_to_cache( - * cache: *mut cache_t, - * wasm: ByteSliceView, - * error_msg: Option<&mut UnmanagedVector>, - * ) -> UnmanagedVector { - * # let checksum: Vec = Default::default(); - * // some operation producing a `let checksum: Vec` - * - * UnmanagedVector::new(Some(checksum)) // this unmanaged vector is owned by the caller - * } - * ``` - * - * Transferring ownership from Go to Rust using return value pointers: - * - * ```rust - * # use cosmwasm_vm::{BackendResult, GasInfo}; - * # use wasmvm::{Db, GoError, U8SliceView, UnmanagedVector}; - * fn db_read(db: &Db, key: &[u8]) -> BackendResult>> { - * - * // Create a None vector in order to reserve memory for the result - * let mut output = UnmanagedVector::default(); - * - * // … - * # let mut error_msg = UnmanagedVector::default(); - * # let mut used_gas = 0_u64; - * # let read_db = db.vtable.read_db.unwrap(); - * - * let go_error: GoError = read_db( - * db.state, - * db.gas_meter, - * &mut used_gas as *mut u64, - * U8SliceView::new(Some(key)), - * // Go will create a new UnmanagedVector and override this address - * &mut output as *mut UnmanagedVector, - * &mut error_msg as *mut UnmanagedVector, - * ) - * .into(); - * - * // We now own the new UnmanagedVector written to the pointer and must destroy it - * let value = 
output.consume(); - * - * // Some gas processing and error handling - * # let gas_info = GasInfo::free(); - * - * (Ok(value), gas_info) - * } - * ``` - * - * - * If you want to mutate data, you need to consume the vector and create a new one: - * - * ```rust - * # use wasmvm::{UnmanagedVector}; - * # let input = UnmanagedVector::new(Some(vec![0xAA])); - * let mut mutable: Vec = input.consume().unwrap_or_default(); - * assert_eq!(mutable, vec![0xAA]); - * - * // `input` is now gone and we cam do everything we want to `mutable`, - * // including operations that reallocate the underlying data. - * - * mutable.push(0xBB); - * mutable.push(0xCC); - * - * assert_eq!(mutable, vec![0xAA, 0xBB, 0xCC]); - * - * let output = UnmanagedVector::new(Some(mutable)); - * - * // `output` is ready to be passed around - * ``` - */ -typedef struct UnmanagedVector { - /** - * True if and only if this is None. If this is true, the other fields must be ignored. - */ - bool is_none; - uint8_t *ptr; - uintptr_t len; - uintptr_t cap; -} UnmanagedVector; - -/** - * A version of `Option` that can be used safely in FFI. - */ -typedef struct OptionalU64 { - bool is_some; - uint64_t value; -} OptionalU64; - -/** - * The result type of the FFI function analyze_code. - * - * Please note that the unmanaged vector in `required_capabilities` - * has to be destroyed exactly once. When calling `analyze_code` - * from Go this is done via `C.destroy_unmanaged_vector`. - */ -typedef struct AnalysisReport { - /** - * `true` if and only if all required ibc exports exist as exported functions. - * This does not guarantee they are functional or even have the correct signatures. - */ - bool has_ibc_entry_points; - /** - * A UTF-8 encoded comma separated list of all entrypoints that - * are exported by the contract. - */ - struct UnmanagedVector entrypoints; - /** - * An UTF-8 encoded comma separated list of required capabilities. - * This is never None/nil. 
- */ - struct UnmanagedVector required_capabilities; - /** - * The migrate version of the contract. - * This is None if the contract does not have a migrate version and the `migrate` entrypoint - * needs to be called for every migration (if present). - * If it is `Some(version)`, it only needs to be called if the `version` increased. - */ - struct OptionalU64 contract_migrate_version; -} AnalysisReport; - -typedef struct Metrics { - uint32_t hits_pinned_memory_cache; - uint32_t hits_memory_cache; - uint32_t hits_fs_cache; - uint32_t misses; - uint64_t elements_pinned_memory_cache; - uint64_t elements_memory_cache; - uint64_t size_pinned_memory_cache; - uint64_t size_memory_cache; -} Metrics; - -/** - * An opaque type. `*gas_meter_t` represents a pointer to Go memory holding the gas meter. - */ -typedef struct gas_meter_t { - uint8_t _private[0]; -} gas_meter_t; - -typedef struct db_t { - uint8_t _private[0]; -} db_t; - -/** - * A view into a `Option<&[u8]>`, created and maintained by Rust. - * - * This can be copied into a []byte in Go. - */ -typedef struct U8SliceView { - /** - * True if and only if this is None. If this is true, the other fields must be ignored. - */ - bool is_none; - const uint8_t *ptr; - uintptr_t len; -} U8SliceView; - -/** - * A reference to some tables on the Go side which allow accessing - * the actual iterator instance. 
- */ -typedef struct IteratorReference { - /** - * An ID assigned to this contract call - */ - uint64_t call_id; - /** - * An ID assigned to this iterator - */ - uint64_t iterator_id; -} IteratorReference; - -typedef struct IteratorVtable { - int32_t (*next)(struct IteratorReference iterator, - struct gas_meter_t *gas_meter, - uint64_t *gas_used, - struct UnmanagedVector *key_out, - struct UnmanagedVector *value_out, - struct UnmanagedVector *err_msg_out); - int32_t (*next_key)(struct IteratorReference iterator, - struct gas_meter_t *gas_meter, - uint64_t *gas_used, - struct UnmanagedVector *key_out, - struct UnmanagedVector *err_msg_out); - int32_t (*next_value)(struct IteratorReference iterator, - struct gas_meter_t *gas_meter, - uint64_t *gas_used, - struct UnmanagedVector *value_out, - struct UnmanagedVector *err_msg_out); -} IteratorVtable; - -typedef struct GoIter { - struct gas_meter_t *gas_meter; - /** - * A reference which identifies the iterator and allows finding and accessing the - * actual iterator instance in Go. Once fully initialized, this is immutable. 
- */ - struct IteratorReference reference; - struct IteratorVtable vtable; -} GoIter; - -typedef struct DbVtable { - int32_t (*read_db)(struct db_t *db, - struct gas_meter_t *gas_meter, - uint64_t *gas_used, - struct U8SliceView key, - struct UnmanagedVector *value_out, - struct UnmanagedVector *err_msg_out); - int32_t (*write_db)(struct db_t *db, - struct gas_meter_t *gas_meter, - uint64_t *gas_used, - struct U8SliceView key, - struct U8SliceView value, - struct UnmanagedVector *err_msg_out); - int32_t (*remove_db)(struct db_t *db, - struct gas_meter_t *gas_meter, - uint64_t *gas_used, - struct U8SliceView key, - struct UnmanagedVector *err_msg_out); - int32_t (*scan_db)(struct db_t *db, - struct gas_meter_t *gas_meter, - uint64_t *gas_used, - struct U8SliceView start, - struct U8SliceView end, - int32_t order, - struct GoIter *iterator_out, - struct UnmanagedVector *err_msg_out); -} DbVtable; - -typedef struct Db { - struct gas_meter_t *gas_meter; - struct db_t *state; - struct DbVtable vtable; -} Db; - -typedef struct api_t { - uint8_t _private[0]; -} api_t; - -typedef struct GoApiVtable { - int32_t (*humanize_address)(const struct api_t *api, - struct U8SliceView input, - struct UnmanagedVector *humanized_address_out, - struct UnmanagedVector *err_msg_out, - uint64_t *gas_used); - int32_t (*canonicalize_address)(const struct api_t *api, - struct U8SliceView input, - struct UnmanagedVector *canonicalized_address_out, - struct UnmanagedVector *err_msg_out, - uint64_t *gas_used); - int32_t (*validate_address)(const struct api_t *api, - struct U8SliceView input, - struct UnmanagedVector *err_msg_out, - uint64_t *gas_used); -} GoApiVtable; - -typedef struct GoApi { - const struct api_t *state; - struct GoApiVtable vtable; -} GoApi; - -typedef struct querier_t { - uint8_t _private[0]; -} querier_t; - -typedef struct QuerierVtable { - int32_t (*query_external)(const struct querier_t *querier, - uint64_t gas_limit, - uint64_t *gas_used, - struct U8SliceView request, - 
struct UnmanagedVector *result_out, - struct UnmanagedVector *err_msg_out); -} QuerierVtable; - -typedef struct GoQuerier { - const struct querier_t *state; - struct QuerierVtable vtable; -} GoQuerier; - -typedef struct GasReport { - /** - * The original limit the instance was created with - */ - uint64_t limit; - /** - * The remaining gas that can be spend - */ - uint64_t remaining; - /** - * The amount of gas that was spend and metered externally in operations triggered by this instance - */ - uint64_t used_externally; - /** - * The amount of gas that was spend and metered internally (i.e. by executing Wasm and calling - * API methods which are not metered externally) - */ - uint64_t used_internally; -} GasReport; - -struct cache_t *init_cache(struct ByteSliceView config, struct UnmanagedVector *error_msg); - -struct UnmanagedVector store_code(struct cache_t *cache, - struct ByteSliceView wasm, - bool checked, - bool persist, - struct UnmanagedVector *error_msg); - -void remove_wasm(struct cache_t *cache, - struct ByteSliceView checksum, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector load_wasm(struct cache_t *cache, - struct ByteSliceView checksum, - struct UnmanagedVector *error_msg); - -void pin(struct cache_t *cache, struct ByteSliceView checksum, struct UnmanagedVector *error_msg); - -void unpin(struct cache_t *cache, struct ByteSliceView checksum, struct UnmanagedVector *error_msg); - -struct AnalysisReport analyze_code(struct cache_t *cache, - struct ByteSliceView checksum, - struct UnmanagedVector *error_msg); - -struct Metrics get_metrics(struct cache_t *cache, struct UnmanagedVector *error_msg); - -struct UnmanagedVector get_pinned_metrics(struct cache_t *cache, struct UnmanagedVector *error_msg); - -/** - * frees a cache reference - * - * # Safety - * - * This must be called exactly once for any `*cache_t` returned by `init_cache` - * and cannot be called on any other pointer. 
- */ -void release_cache(struct cache_t *cache); - -struct UnmanagedVector instantiate(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView info, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector execute(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView info, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector migrate(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector migrate_with_info(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct ByteSliceView migrate_info, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector sudo(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector reply(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - 
bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector query(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_channel_open(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_channel_connect(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_channel_close(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_packet_receive(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_packet_ack(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - 
struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_packet_timeout(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_source_callback(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector ibc_destination_callback(struct cache_t *cache, - struct ByteSliceView checksum, - struct ByteSliceView env, - struct ByteSliceView msg, - struct Db db, - struct GoApi api, - struct GoQuerier querier, - uint64_t gas_limit, - bool print_debug, - struct GasReport *gas_report, - struct UnmanagedVector *error_msg); - -struct UnmanagedVector new_unmanaged_vector(bool nil, const uint8_t *ptr, uintptr_t length); - -void destroy_unmanaged_vector(struct UnmanagedVector v); - -/** - * Returns a version number of this library as a C string. - * - * The string is owned by libwasmvm and must not be mutated or destroyed by the caller. - */ -const char *version_str(void); diff --git a/internal/api/callbacks.go b/internal/api/callbacks.go deleted file mode 100644 index 702c8faf7..000000000 --- a/internal/api/callbacks.go +++ /dev/null @@ -1,503 +0,0 @@ -package api - -// Check https://akrennmair.github.io/golang-cgo-slides/ to learn -// how this embedded C code works. - -/* -#include "bindings.h" - -// All C function types in struct fields will be represented as a *[0]byte in Go and -// we don't get any type safety on the signature. To express this fact in type conversions, -// we create a single function pointer type here. 
-// The only thing this is used for is casting between unsafe.Pointer and *[0]byte in Go. -// See also https://github.com/golang/go/issues/19835 -typedef void (*any_function_t)(); - -// forward declarations (db) -GoError cGet_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, UnmanagedVector *val, UnmanagedVector *errOut); -GoError cSet_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, U8SliceView val, UnmanagedVector *errOut); -GoError cDelete_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, UnmanagedVector *errOut); -GoError cScan_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView start, U8SliceView end, int32_t order, GoIter *out, UnmanagedVector *errOut); -// iterator -GoError cNext_cgo(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *key, UnmanagedVector *val, UnmanagedVector *errOut); -GoError cNextKey_cgo(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *key, UnmanagedVector *errOut); -GoError cNextValue_cgo(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *val, UnmanagedVector *errOut); -// api -GoError cHumanizeAddress_cgo(api_t *ptr, U8SliceView src, UnmanagedVector *dest, UnmanagedVector *errOut, uint64_t *used_gas); -GoError cCanonicalizeAddress_cgo(api_t *ptr, U8SliceView src, UnmanagedVector *dest, UnmanagedVector *errOut, uint64_t *used_gas); -GoError cValidateAddress_cgo(api_t *ptr, U8SliceView src, UnmanagedVector *errOut, uint64_t *used_gas); -// and querier -GoError cQueryExternal_cgo(querier_t *ptr, uint64_t gas_limit, uint64_t *used_gas, U8SliceView request, UnmanagedVector *result, UnmanagedVector *errOut); - - -*/ -import "C" - -import ( - "encoding/json" - "fmt" - "log" - "reflect" - "runtime/debug" - "unsafe" - - "github.com/CosmWasm/wasmvm/v2/types" -) - -// Note: we have to include all exports in the same file (at least since they both 
import bindings.h), -// or get odd cgo build errors about duplicate definitions - -func recoverPanic(ret *C.GoError) { - if rec := recover(); rec != nil { - // This is used to handle ErrorOutOfGas panics. - // - // What we do here is something that should not be done in the first place. - // "A panic typically means something went unexpectedly wrong. Mostly we use it to fail fast - // on errors that shouldn’t occur during normal operation, or that we aren’t prepared to - // handle gracefully." says https://gobyexample.com/panic. - // And 'Ask yourself "when this happens, should the application immediately crash?" If yes, - // use a panic; otherwise, use an error.' says this popular answer on SO: https://stackoverflow.com/a/44505268. - // Oh, and "If you're already worrying about discriminating different kinds of panics, you've lost sight of the ball." - // (Rob Pike) from https://eli.thegreenplace.net/2018/on-the-uses-and-misuses-of-panics-in-go/ - // - // We don't want to import Cosmos SDK and also cannot use interfaces to detect these - // error types (as they have no methods). So, let's just rely on the descriptive names. - name := reflect.TypeOf(rec).Name() - switch name { - // These three types are "thrown" (which is not a thing in Go 🙃) in panics from the gas module - // (https://github.com/cosmos/cosmos-sdk/blob/v0.45.4/store/types/gas.go): - // 1. ErrorOutOfGas - // 2. ErrorGasOverflow - // 3. ErrorNegativeGasConsumed - // - // In the baseapp, ErrorOutOfGas gets special treatment: - // - https://github.com/cosmos/cosmos-sdk/blob/v0.45.4/baseapp/baseapp.go#L607 - // - https://github.com/cosmos/cosmos-sdk/blob/v0.45.4/baseapp/recovery.go#L50-L60 - // This turns the panic into a regular error with a helpful error message. - // - // The other two gas related panic types indicate programming errors and are handled along - // with all other errors in https://github.com/cosmos/cosmos-sdk/blob/v0.45.4/baseapp/recovery.go#L66-L77. 
- case "ErrorOutOfGas": - // TODO: figure out how to pass the text in its `Descriptor` field through all the FFI - *ret = C.GoError_OutOfGas - default: - log.Printf("Panic in Go callback: %#v\n", rec) - debug.PrintStack() - *ret = C.GoError_Panic - } - } -} - -/****** DB ********/ - -var db_vtable = C.DbVtable{ - read_db: C.any_function_t(C.cGet_cgo), - write_db: C.any_function_t(C.cSet_cgo), - remove_db: C.any_function_t(C.cDelete_cgo), - scan_db: C.any_function_t(C.cScan_cgo), -} - -type DBState struct { - Store types.KVStore - // CallID is used to lookup the proper frame for iterators associated with this contract call (iterator.go) - CallID uint64 -} - -// use this to create C.Db in two steps, so the pointer lives as long as the calling stack -// -// state := buildDBState(kv, callID) -// db := buildDB(&state, &gasMeter) -// // then pass db into some FFI function -func buildDBState(kv types.KVStore, callID uint64) DBState { - return DBState{ - Store: kv, - CallID: callID, - } -} - -// contract: original pointer/struct referenced must live longer than C.Db struct -// since this is only used internally, we can verify the code that this is the case -func buildDB(state *DBState, gm *types.GasMeter) C.Db { - return C.Db{ - gas_meter: (*C.gas_meter_t)(unsafe.Pointer(gm)), - state: (*C.db_t)(unsafe.Pointer(state)), - vtable: db_vtable, - } -} - -var iterator_vtable = C.IteratorVtable{ - next: C.any_function_t(C.cNext_cgo), - next_key: C.any_function_t(C.cNextKey_cgo), - next_value: C.any_function_t(C.cNextValue_cgo), -} - -// An iterator including referenced objects is 117 bytes large (calculated using https://github.com/DmitriyVTitov/size). -// We limit the number of iterators per contract call ID here in order limit memory usage to 32768*117 = ~3.8 MB as a safety measure. -// In any reasonable contract, gas limits should hit sooner than that though. 
-const frameLenLimit = 32768 - -// contract: original pointer/struct referenced must live longer than C.Db struct -// since this is only used internally, we can verify the code that this is the case -func buildIterator(callID uint64, it types.Iterator) (C.IteratorReference, error) { - iteratorID, err := storeIterator(callID, it, frameLenLimit) - if err != nil { - return C.IteratorReference{}, err - } - return C.IteratorReference{ - call_id: cu64(callID), - iterator_id: cu64(iteratorID), - }, nil -} - -//export cGet -func cGet(ptr *C.db_t, gasMeter *C.gas_meter_t, usedGas *cu64, key C.U8SliceView, val *C.UnmanagedVector, errOut *C.UnmanagedVector) (ret C.GoError) { - defer recoverPanic(&ret) - - if ptr == nil || gasMeter == nil || usedGas == nil || val == nil || errOut == nil { - // we received an invalid pointer - return C.GoError_BadArgument - } - // errOut is unused and we don't check `is_none` because of https://github.com/CosmWasm/wasmvm/issues/536 - if !(*val).is_none { - panic("Got a non-none UnmanagedVector we're about to override. 
This is a bug because someone has to drop the old one.") - } - - gm := *(*types.GasMeter)(unsafe.Pointer(gasMeter)) - kv := *(*types.KVStore)(unsafe.Pointer(ptr)) - k := copyU8Slice(key) - - gasBefore := gm.GasConsumed() - v := kv.Get(k) - gasAfter := gm.GasConsumed() - *usedGas = (cu64)(gasAfter - gasBefore) - - // v will equal nil when the key is missing - // https://github.com/cosmos/cosmos-sdk/blob/1083fa948e347135861f88e07ec76b0314296832/store/types/store.go#L174 - *val = newUnmanagedVector(v) - - return C.GoError_None -} - -//export cSet -func cSet(ptr *C.db_t, gasMeter *C.gas_meter_t, usedGas *cu64, key C.U8SliceView, val C.U8SliceView, errOut *C.UnmanagedVector) (ret C.GoError) { - defer recoverPanic(&ret) - - if ptr == nil || gasMeter == nil || usedGas == nil || errOut == nil { - // we received an invalid pointer - return C.GoError_BadArgument - } - // errOut is unused and we don't check `is_none` because of https://github.com/CosmWasm/wasmvm/issues/536 - - gm := *(*types.GasMeter)(unsafe.Pointer(gasMeter)) - kv := *(*types.KVStore)(unsafe.Pointer(ptr)) - k := copyU8Slice(key) - v := copyU8Slice(val) - - gasBefore := gm.GasConsumed() - kv.Set(k, v) - gasAfter := gm.GasConsumed() - *usedGas = (cu64)(gasAfter - gasBefore) - - return C.GoError_None -} - -//export cDelete -func cDelete(ptr *C.db_t, gasMeter *C.gas_meter_t, usedGas *cu64, key C.U8SliceView, errOut *C.UnmanagedVector) (ret C.GoError) { - defer recoverPanic(&ret) - - if ptr == nil || gasMeter == nil || usedGas == nil || errOut == nil { - // we received an invalid pointer - return C.GoError_BadArgument - } - // errOut is unused and we don't check `is_none` because of https://github.com/CosmWasm/wasmvm/issues/536 - - gm := *(*types.GasMeter)(unsafe.Pointer(gasMeter)) - kv := *(*types.KVStore)(unsafe.Pointer(ptr)) - k := copyU8Slice(key) - - gasBefore := gm.GasConsumed() - kv.Delete(k) - gasAfter := gm.GasConsumed() - *usedGas = (cu64)(gasAfter - gasBefore) - - return C.GoError_None -} - -//export 
cScan -func cScan(ptr *C.db_t, gasMeter *C.gas_meter_t, usedGas *cu64, start C.U8SliceView, end C.U8SliceView, order ci32, out *C.GoIter, errOut *C.UnmanagedVector) (ret C.GoError) { - defer recoverPanic(&ret) - - if ptr == nil || gasMeter == nil || usedGas == nil || out == nil || errOut == nil { - // we received an invalid pointer - return C.GoError_BadArgument - } - if !(*errOut).is_none { - panic("Got a non-none UnmanagedVector we're about to override. This is a bug because someone has to drop the old one.") - } - - gm := *(*types.GasMeter)(unsafe.Pointer(gasMeter)) - state := (*DBState)(unsafe.Pointer(ptr)) - kv := state.Store - s := copyU8Slice(start) - e := copyU8Slice(end) - - var iter types.Iterator - gasBefore := gm.GasConsumed() - switch order { - case 1: // Ascending - iter = kv.Iterator(s, e) - case 2: // Descending - iter = kv.ReverseIterator(s, e) - default: - return C.GoError_BadArgument - } - gasAfter := gm.GasConsumed() - *usedGas = (cu64)(gasAfter - gasBefore) - - iteratorRef, err := buildIterator(state.CallID, iter) - if err != nil { - // store the actual error message in the return buffer - *errOut = newUnmanagedVector([]byte(err.Error())) - return C.GoError_User - } - - *out = C.GoIter{ - gas_meter: gasMeter, - reference: iteratorRef, - vtable: iterator_vtable, - } - - return C.GoError_None -} - -//export cNext -func cNext(ref C.IteratorReference, gasMeter *C.gas_meter_t, usedGas *cu64, key *C.UnmanagedVector, val *C.UnmanagedVector, errOut *C.UnmanagedVector) (ret C.GoError) { - // typical usage of iterator - // for ; itr.Valid(); itr.Next() { - // k, v := itr.Key(); itr.Value() - // ... 
- // } - - defer recoverPanic(&ret) - if ref.call_id == 0 || gasMeter == nil || usedGas == nil || key == nil || val == nil || errOut == nil { - // we received an invalid pointer - return C.GoError_BadArgument - } - // errOut is unused and we don't check `is_none` because of https://github.com/CosmWasm/wasmvm/issues/536 - if !(*key).is_none || !(*val).is_none { - panic("Got a non-none UnmanagedVector we're about to override. This is a bug because someone has to drop the old one.") - } - - gm := *(*types.GasMeter)(unsafe.Pointer(gasMeter)) - iter := retrieveIterator(uint64(ref.call_id), uint64(ref.iterator_id)) - if iter == nil { - panic("Unable to retrieve iterator.") - } - if !iter.Valid() { - // end of iterator, return as no-op, nil key is considered end - return C.GoError_None - } - - gasBefore := gm.GasConsumed() - // call Next at the end, upon creation we have first data loaded - k := iter.Key() - v := iter.Value() - // check iter.Error() ???? - iter.Next() - gasAfter := gm.GasConsumed() - *usedGas = (cu64)(gasAfter - gasBefore) - - *key = newUnmanagedVector(k) - *val = newUnmanagedVector(v) - return C.GoError_None -} - -//export cNextKey -func cNextKey(ref C.IteratorReference, gasMeter *C.gas_meter_t, usedGas *cu64, key *C.UnmanagedVector, errOut *C.UnmanagedVector) (ret C.GoError) { - return nextPart(ref, gasMeter, usedGas, key, errOut, func(iter types.Iterator) []byte { return iter.Key() }) -} - -//export cNextValue -func cNextValue(ref C.IteratorReference, gasMeter *C.gas_meter_t, usedGas *cu64, value *C.UnmanagedVector, errOut *C.UnmanagedVector) (ret C.GoError) { - return nextPart(ref, gasMeter, usedGas, value, errOut, func(iter types.Iterator) []byte { return iter.Value() }) -} - -// nextPart is a helper function that contains the shared code for key- and value-only iteration. 
-func nextPart(ref C.IteratorReference, gasMeter *C.gas_meter_t, usedGas *cu64, output *C.UnmanagedVector, errOut *C.UnmanagedVector, valFn func(types.Iterator) []byte) (ret C.GoError) { - // typical usage of iterator - // for ; itr.Valid(); itr.Next() { - // k, v := itr.Key(); itr.Value() - // ... - // } - - defer recoverPanic(&ret) - if ref.call_id == 0 || gasMeter == nil || usedGas == nil || output == nil || errOut == nil { - // we received an invalid pointer - return C.GoError_BadArgument - } - // errOut is unused and we don't check `is_none` because of https://github.com/CosmWasm/wasmvm/issues/536 - if !(*output).is_none { - panic("Got a non-none UnmanagedVector we're about to override. This is a bug because someone has to drop the old one.") - } - - gm := *(*types.GasMeter)(unsafe.Pointer(gasMeter)) - iter := retrieveIterator(uint64(ref.call_id), uint64(ref.iterator_id)) - if iter == nil { - panic("Unable to retrieve iterator.") - } - if !iter.Valid() { - // end of iterator, return as no-op, nil `output` is considered end - return C.GoError_None - } - - gasBefore := gm.GasConsumed() - // call Next at the end, upon creation we have first data loaded - out := valFn(iter) - // check iter.Error() ???? 
- iter.Next() - gasAfter := gm.GasConsumed() - *usedGas = (cu64)(gasAfter - gasBefore) - - *output = newUnmanagedVector(out) - return C.GoError_None -} - -var api_vtable = C.GoApiVtable{ - humanize_address: C.any_function_t(C.cHumanizeAddress_cgo), - canonicalize_address: C.any_function_t(C.cCanonicalizeAddress_cgo), - validate_address: C.any_function_t(C.cValidateAddress_cgo), -} - -// contract: original pointer/struct referenced must live longer than C.GoApi struct -// since this is only used internally, we can verify the code that this is the case -func buildAPI(api *types.GoAPI) C.GoApi { - return C.GoApi{ - state: (*C.api_t)(unsafe.Pointer(api)), - vtable: api_vtable, - } -} - -//export cHumanizeAddress -func cHumanizeAddress(ptr *C.api_t, src C.U8SliceView, dest *C.UnmanagedVector, errOut *C.UnmanagedVector, used_gas *cu64) (ret C.GoError) { - defer recoverPanic(&ret) - - if dest == nil || errOut == nil { - return C.GoError_BadArgument - } - if !(*dest).is_none || !(*errOut).is_none { - panic("Got a non-none UnmanagedVector we're about to override. This is a bug because someone has to drop the old one.") - } - - api := (*types.GoAPI)(unsafe.Pointer(ptr)) - s := copyU8Slice(src) - - h, cost, err := api.HumanizeAddress(s) - *used_gas = cu64(cost) - if err != nil { - // store the actual error message in the return buffer - *errOut = newUnmanagedVector([]byte(err.Error())) - return C.GoError_User - } - if len(h) == 0 { - panic(fmt.Sprintf("`api.HumanizeAddress()` returned an empty string for %q", s)) - } - *dest = newUnmanagedVector([]byte(h)) - return C.GoError_None -} - -//export cCanonicalizeAddress -func cCanonicalizeAddress(ptr *C.api_t, src C.U8SliceView, dest *C.UnmanagedVector, errOut *C.UnmanagedVector, used_gas *cu64) (ret C.GoError) { - defer recoverPanic(&ret) - - if dest == nil || errOut == nil { - return C.GoError_BadArgument - } - if !(*dest).is_none || !(*errOut).is_none { - panic("Got a non-none UnmanagedVector we're about to override. 
This is a bug because someone has to drop the old one.") - } - - api := (*types.GoAPI)(unsafe.Pointer(ptr)) - s := string(copyU8Slice(src)) - c, cost, err := api.CanonicalizeAddress(s) - *used_gas = cu64(cost) - if err != nil { - // store the actual error message in the return buffer - *errOut = newUnmanagedVector([]byte(err.Error())) - return C.GoError_User - } - if len(c) == 0 { - panic(fmt.Sprintf("`api.CanonicalizeAddress()` returned an empty string for %q", s)) - } - *dest = newUnmanagedVector(c) - return C.GoError_None -} - -//export cValidateAddress -func cValidateAddress(ptr *C.api_t, src C.U8SliceView, errOut *C.UnmanagedVector, used_gas *cu64) (ret C.GoError) { - defer recoverPanic(&ret) - - if errOut == nil { - return C.GoError_BadArgument - } - if !(*errOut).is_none { - panic("Got a non-none UnmanagedVector we're about to override. This is a bug because someone has to drop the old one.") - } - - api := (*types.GoAPI)(unsafe.Pointer(ptr)) - s := string(copyU8Slice(src)) - cost, err := api.ValidateAddress(s) - - *used_gas = cu64(cost) - if err != nil { - // store the actual error message in the return buffer - *errOut = newUnmanagedVector([]byte(err.Error())) - return C.GoError_User - } - return C.GoError_None -} - -/****** Go Querier ********/ - -var querier_vtable = C.QuerierVtable{ - query_external: C.any_function_t(C.cQueryExternal_cgo), -} - -// contract: original pointer/struct referenced must live longer than C.GoQuerier struct -// since this is only used internally, we can verify the code that this is the case -func buildQuerier(q *Querier) C.GoQuerier { - return C.GoQuerier{ - state: (*C.querier_t)(unsafe.Pointer(q)), - vtable: querier_vtable, - } -} - -//export cQueryExternal -func cQueryExternal(ptr *C.querier_t, gasLimit cu64, usedGas *cu64, request C.U8SliceView, result *C.UnmanagedVector, errOut *C.UnmanagedVector) (ret C.GoError) { - defer recoverPanic(&ret) - - if ptr == nil || usedGas == nil || result == nil || errOut == nil { - // we 
received an invalid pointer - return C.GoError_BadArgument - } - if !(*result).is_none || !(*errOut).is_none { - panic("Got a non-none UnmanagedVector we're about to override. This is a bug because someone has to drop the old one.") - } - - // query the data - querier := *(*Querier)(unsafe.Pointer(ptr)) - req := copyU8Slice(request) - - gasBefore := querier.GasConsumed() - res := types.RustQuery(querier, req, uint64(gasLimit)) - gasAfter := querier.GasConsumed() - *usedGas = (cu64)(gasAfter - gasBefore) - - // serialize the response - bz, err := json.Marshal(res) - if err != nil { - *errOut = newUnmanagedVector([]byte(err.Error())) - return C.GoError_CannotSerialize - } - *result = newUnmanagedVector(bz) - return C.GoError_None -} diff --git a/internal/api/callbacks_cgo.go b/internal/api/callbacks_cgo.go deleted file mode 100644 index 53d84c076..000000000 --- a/internal/api/callbacks_cgo.go +++ /dev/null @@ -1,69 +0,0 @@ -package api - -/* -#include "bindings.h" -#include - -// imports (db) -GoError cSet(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, U8SliceView val, UnmanagedVector *errOut); -GoError cGet(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, UnmanagedVector *val, UnmanagedVector *errOut); -GoError cDelete(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, UnmanagedVector *errOut); -GoError cScan(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView start, U8SliceView end, int32_t order, GoIter *out, UnmanagedVector *errOut); -// imports (iterator) -GoError cNext(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *key, UnmanagedVector *val, UnmanagedVector *errOut); -GoError cNextKey(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *key, UnmanagedVector *errOut); -GoError cNextValue(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *value, UnmanagedVector *errOut); -// 
imports (api) -GoError cHumanizeAddress(api_t *ptr, U8SliceView src, UnmanagedVector *dest, UnmanagedVector *errOut, uint64_t *used_gas); -GoError cCanonicalizeAddress(api_t *ptr, U8SliceView src, UnmanagedVector *dest, UnmanagedVector *errOut, uint64_t *used_gas); -GoError cValidateAddress(api_t *ptr, U8SliceView src, UnmanagedVector *errOut, uint64_t *used_gas); -// imports (querier) -GoError cQueryExternal(querier_t *ptr, uint64_t gas_limit, uint64_t *used_gas, U8SliceView request, UnmanagedVector *result, UnmanagedVector *errOut); - -// Gateway functions (db) -GoError cGet_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, UnmanagedVector *val, UnmanagedVector *errOut) { - return cGet(ptr, gas_meter, used_gas, key, val, errOut); -} -GoError cSet_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, U8SliceView val, UnmanagedVector *errOut) { - return cSet(ptr, gas_meter, used_gas, key, val, errOut); -} -GoError cDelete_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView key, UnmanagedVector *errOut) { - return cDelete(ptr, gas_meter, used_gas, key, errOut); -} -GoError cScan_cgo(db_t *ptr, gas_meter_t *gas_meter, uint64_t *used_gas, U8SliceView start, U8SliceView end, int32_t order, GoIter *out, UnmanagedVector *errOut) { - return cScan(ptr, gas_meter, used_gas, start, end, order, out, errOut); -} - -// Gateway functions (iterator) -GoError cNext_cgo(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *key, UnmanagedVector *val, UnmanagedVector *errOut) { - return cNext(ref, gas_meter, used_gas, key, val, errOut); -} -GoError cNextKey_cgo(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *key, UnmanagedVector *errOut) { - return cNextKey(ref, gas_meter, used_gas, key, errOut); -} -GoError cNextValue_cgo(IteratorReference *ref, gas_meter_t *gas_meter, uint64_t *used_gas, UnmanagedVector *val, UnmanagedVector *errOut) { - return 
cNextValue(ref, gas_meter, used_gas, val, errOut); -} - -// Gateway functions (api) -GoError cCanonicalizeAddress_cgo(api_t *ptr, U8SliceView src, UnmanagedVector *dest, UnmanagedVector *errOut, uint64_t *used_gas) { - return cCanonicalizeAddress(ptr, src, dest, errOut, used_gas); -} -GoError cHumanizeAddress_cgo(api_t *ptr, U8SliceView src, UnmanagedVector *dest, UnmanagedVector *errOut, uint64_t *used_gas) { - return cHumanizeAddress(ptr, src, dest, errOut, used_gas); -} -GoError cValidateAddress_cgo(api_t *ptr, U8SliceView src, UnmanagedVector *errOut, uint64_t *used_gas) { - return cValidateAddress(ptr, src, errOut, used_gas); -} - -// Gateway functions (querier) -GoError cQueryExternal_cgo(querier_t *ptr, uint64_t gas_limit, uint64_t *used_gas, U8SliceView request, UnmanagedVector *result, UnmanagedVector *errOut) { - return cQueryExternal(ptr, gas_limit, used_gas, request, result, errOut); -} -*/ -import "C" - -// We need these gateway functions to allow calling back to a go function from the c code. -// At least I didn't discover a cleaner way. -// Also, this needs to be in a different file than `callbacks.go`, as we cannot create functions -// in the same file that has //export directives. 
Only import header types diff --git a/internal/api/iterator.go b/internal/api/iterator.go index c9a768b40..2f997e707 100644 --- a/internal/api/iterator.go +++ b/internal/api/iterator.go @@ -28,7 +28,7 @@ var ( func startCall() uint64 { latestCallIDMutex.Lock() defer latestCallIDMutex.Unlock() - latestCallID += 1 + latestCallID++ return latestCallID } diff --git a/internal/api/iterator_test.go b/internal/api/iterator_test.go index 0c81db775..05142918b 100644 --- a/internal/api/iterator_test.go +++ b/internal/api/iterator_test.go @@ -6,7 +6,6 @@ import ( "sync" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/CosmWasm/wasmvm/v2/internal/api/testdb" @@ -25,6 +24,7 @@ func (q queueData) Store(meter MockGasMeter) types.KVStore { } func setupQueueContractWithData(t *testing.T, cache Cache, values ...int) queueData { + t.Helper() checksum := createQueueContract(t, cache) gasMeter1 := NewMockGasMeter(TESTING_GAS_LIMIT) @@ -59,6 +59,7 @@ func setupQueueContractWithData(t *testing.T, cache Cache, values ...int) queueD } func setupQueueContract(t *testing.T, cache Cache) queueData { + t.Helper() return setupQueueContractWithData(t, cache, 17, 22) } @@ -202,14 +203,14 @@ func TestQueueIteratorSimple(t *testing.T) { err = json.Unmarshal(data, &reduced) require.NoError(t, err) require.Equal(t, "", reduced.Err) - require.Equal(t, `{"counters":[[17,22],[22,0]]}`, string(reduced.Ok)) + require.JSONEq(t, `{"counters":[[17,22],[22,0]]}`, string(reduced.Ok)) } func TestQueueIteratorRaces(t *testing.T) { cache, cleanup := withCache(t) defer cleanup() - assert.Equal(t, 0, len(iteratorFrames)) + require.Empty(t, iteratorFrames) contract1 := setupQueueContractWithData(t, cache, 17, 22) contract2 := setupQueueContractWithData(t, cache, 1, 19, 6, 35, 8) @@ -217,6 +218,7 @@ func TestQueueIteratorRaces(t *testing.T) { env := MockEnvBin(t) reduceQuery := func(t *testing.T, setup queueData, expected string) { + t.Helper() checksum, querier, api 
:= setup.checksum, setup.querier, setup.api gasMeter := NewMockGasMeter(TESTING_GAS_LIMIT) igasMeter := types.GasMeter(gasMeter) @@ -230,7 +232,7 @@ func TestQueueIteratorRaces(t *testing.T) { err = json.Unmarshal(data, &reduced) require.NoError(t, err) require.Equal(t, "", reduced.Err) - require.Equal(t, fmt.Sprintf(`{"counters":%s}`, expected), string(reduced.Ok)) + require.JSONEq(t, fmt.Sprintf(`{"counters":%s}`, expected), string(reduced.Ok)) } // 30 concurrent batches (in go routines) to trigger any race condition @@ -256,7 +258,7 @@ func TestQueueIteratorRaces(t *testing.T) { wg.Wait() // when they finish, we should have removed all frames - assert.Equal(t, 0, len(iteratorFrames)) + require.Empty(t, iteratorFrames) } func TestQueueIteratorLimit(t *testing.T) { diff --git a/internal/api/lib.go b/internal/api/lib.go index 2b71adc1c..52a5c1129 100644 --- a/internal/api/lib.go +++ b/internal/api/lib.go @@ -1,209 +1,128 @@ package api -// #include -// #include "bindings.h" -import "C" - import ( - "encoding/json" "fmt" "os" "path/filepath" - "runtime" - "strings" - "syscall" "golang.org/x/sys/unix" + "github.com/CosmWasm/wasmvm/v2/internal/runtime" "github.com/CosmWasm/wasmvm/v2/types" ) -// Value types -type ( - cint = C.int - cbool = C.bool - cusize = C.size_t - cu8 = C.uint8_t - cu32 = C.uint32_t - cu64 = C.uint64_t - ci8 = C.int8_t - ci32 = C.int32_t - ci64 = C.int64_t -) - -// Pointers -type ( - cu8_ptr = *C.uint8_t -) +func init() { + // Create a new wazero runtime instance and assign it to currentRuntime + r, err := runtime.NewWazeroRuntime() + if err != nil { + panic(fmt.Sprintf("Failed to create wazero runtime: %v", err)) + } + currentRuntime = r +} type Cache struct { - ptr *C.cache_t + handle any lockfile os.File } -type Querier = types.Querier +// currentRuntime should be initialized with an instance of WazeroRuntime or another runtime. 
+var currentRuntime runtime.WasmRuntime func InitCache(config types.VMConfig) (Cache, error) { - // libwasmvm would create this directory too but we need it earlier for the lockfile err := os.MkdirAll(config.Cache.BaseDir, 0o755) if err != nil { - return Cache{}, fmt.Errorf("Could not create base directory") + return Cache{}, fmt.Errorf("Could not create base directory: %w", err) } - lockfile, err := os.OpenFile(filepath.Join(config.Cache.BaseDir, "exclusive.lock"), os.O_WRONLY|os.O_CREATE, 0o666) + lockPath := filepath.Join(config.Cache.BaseDir, "exclusive.lock") + lockfile, err := os.OpenFile(lockPath, os.O_WRONLY|os.O_CREATE, 0o666) if err != nil { return Cache{}, fmt.Errorf("Could not open exclusive.lock") } - _, err = lockfile.WriteString("This is a lockfile that prevent two VM instances to operate on the same directory in parallel.\nSee codebase at github.com/CosmWasm/wasmvm for more information.\nSafety first – brought to you by Confio ❤️\n") + + // Write the lockfile content + _, err = lockfile.WriteString("This is a lockfile that prevents two VM instances from operating on the same directory in parallel.\nSee codebase at github.com/CosmWasm/wasmvm for more information.\nSafety first – brought to you by Confio ❤️\n") if err != nil { + lockfile.Close() return Cache{}, fmt.Errorf("Error writing to exclusive.lock") } + // Try to acquire the lock err = unix.Flock(int(lockfile.Fd()), unix.LOCK_EX|unix.LOCK_NB) if err != nil { + lockfile.Close() return Cache{}, fmt.Errorf("Could not lock exclusive.lock. 
Is a different VM running in the same directory already?") } - configBytes, err := json.Marshal(config) + // Initialize the runtime with the config + handle, err := currentRuntime.InitCache(config) if err != nil { - return Cache{}, fmt.Errorf("Could not serialize config") + if err := unix.Flock(int(lockfile.Fd()), unix.LOCK_UN); err != nil { + fmt.Printf("Error unlocking file: %v\n", err) + } + lockfile.Close() + return Cache{}, err } - configView := makeView(configBytes) - defer runtime.KeepAlive(configBytes) - errmsg := uninitializedUnmanagedVector() - - ptr, err := C.init_cache(configView, &errmsg) - if err != nil { - return Cache{}, errorWithMessage(err, errmsg) - } - return Cache{ptr: ptr, lockfile: *lockfile}, nil + return Cache{ + handle: handle, + lockfile: *lockfile, + }, nil } func ReleaseCache(cache Cache) { - C.release_cache(cache.ptr) + if cache.handle != nil { + currentRuntime.ReleaseCache(cache.handle) + } - cache.lockfile.Close() // Also releases the file lock + // Release the file lock and close the lockfile + if cache.lockfile != (os.File{}) { + if err := unix.Flock(int(cache.lockfile.Fd()), unix.LOCK_UN); err != nil { + fmt.Printf("Error unlocking cache file: %v\n", err) + } + cache.lockfile.Close() + } } func StoreCode(cache Cache, wasm []byte, persist bool) ([]byte, error) { - w := makeView(wasm) - defer runtime.KeepAlive(wasm) - errmsg := uninitializedUnmanagedVector() - checksum, err := C.store_code(cache.ptr, w, cbool(true), cbool(persist), &errmsg) - if err != nil { - return nil, errorWithMessage(err, errmsg) + if cache.handle == nil { + return nil, fmt.Errorf("cache handle is nil") } - return copyAndDestroyUnmanagedVector(checksum), nil + checksum, err := currentRuntime.StoreCode(wasm, persist) + return checksum, err } func StoreCodeUnchecked(cache Cache, wasm []byte) ([]byte, error) { - w := makeView(wasm) - defer runtime.KeepAlive(wasm) - errmsg := uninitializedUnmanagedVector() - checksum, err := C.store_code(cache.ptr, w, cbool(true), 
cbool(true), &errmsg) - if err != nil { - return nil, errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(checksum), nil + checksum, err := currentRuntime.StoreCodeUnchecked(wasm) + return checksum, err } func RemoveCode(cache Cache, checksum []byte) error { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - errmsg := uninitializedUnmanagedVector() - _, err := C.remove_wasm(cache.ptr, cs, &errmsg) - if err != nil { - return errorWithMessage(err, errmsg) - } - return nil + return currentRuntime.RemoveCode(checksum) } func GetCode(cache Cache, checksum []byte) ([]byte, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - errmsg := uninitializedUnmanagedVector() - wasm, err := C.load_wasm(cache.ptr, cs, &errmsg) - if err != nil { - return nil, errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(wasm), nil + return currentRuntime.GetCode(checksum) } func Pin(cache Cache, checksum []byte) error { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - errmsg := uninitializedUnmanagedVector() - _, err := C.pin(cache.ptr, cs, &errmsg) - if err != nil { - return errorWithMessage(err, errmsg) - } - return nil + return currentRuntime.Pin(checksum) } func Unpin(cache Cache, checksum []byte) error { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - errmsg := uninitializedUnmanagedVector() - _, err := C.unpin(cache.ptr, cs, &errmsg) - if err != nil { - return errorWithMessage(err, errmsg) - } - return nil + return currentRuntime.Unpin(checksum) } func AnalyzeCode(cache Cache, checksum []byte) (*types.AnalysisReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - errmsg := uninitializedUnmanagedVector() - report, err := C.analyze_code(cache.ptr, cs, &errmsg) - if err != nil { - return nil, errorWithMessage(err, errmsg) - } - requiredCapabilities := string(copyAndDestroyUnmanagedVector(report.required_capabilities)) - entrypoints := 
string(copyAndDestroyUnmanagedVector(report.entrypoints)) - - res := types.AnalysisReport{ - HasIBCEntryPoints: bool(report.has_ibc_entry_points), - RequiredCapabilities: requiredCapabilities, - Entrypoints: strings.Split(entrypoints, ","), - ContractMigrateVersion: optionalU64ToPtr(report.contract_migrate_version), - } - return &res, nil + return currentRuntime.AnalyzeCode(checksum) } func GetMetrics(cache Cache) (*types.Metrics, error) { - errmsg := uninitializedUnmanagedVector() - metrics, err := C.get_metrics(cache.ptr, &errmsg) - if err != nil { - return nil, errorWithMessage(err, errmsg) - } - - return &types.Metrics{ - HitsPinnedMemoryCache: uint32(metrics.hits_pinned_memory_cache), - HitsMemoryCache: uint32(metrics.hits_memory_cache), - HitsFsCache: uint32(metrics.hits_fs_cache), - Misses: uint32(metrics.misses), - ElementsPinnedMemoryCache: uint64(metrics.elements_pinned_memory_cache), - ElementsMemoryCache: uint64(metrics.elements_memory_cache), - SizePinnedMemoryCache: uint64(metrics.size_pinned_memory_cache), - SizeMemoryCache: uint64(metrics.size_memory_cache), - }, nil + return currentRuntime.GetMetrics() } func GetPinnedMetrics(cache Cache) (*types.PinnedMetrics, error) { - errmsg := uninitializedUnmanagedVector() - metrics, err := C.get_pinned_metrics(cache.ptr, &errmsg) - if err != nil { - return nil, errorWithMessage(err, errmsg) - } - - var pinnedMetrics types.PinnedMetrics - if err := pinnedMetrics.UnmarshalMessagePack(copyAndDestroyUnmanagedVector(metrics)); err != nil { - return nil, err - } - - return &pinnedMetrics, nil + return currentRuntime.GetPinnedMetrics() } func Instantiate( @@ -215,40 +134,11 @@ func Instantiate( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - i := makeView(info) - defer 
runtime.KeepAlive(info) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.instantiate(cache.ptr, cs, e, i, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. - return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.Instantiate(checksum, env, info, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func Execute( @@ -260,40 +150,11 @@ func Execute( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - i := makeView(info) - defer runtime.KeepAlive(info) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.execute(cache.ptr, cs, e, i, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != 
nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. - return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.Execute(checksum, env, info, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func Migrate( @@ -304,38 +165,11 @@ func Migrate( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.migrate(cache.ptr, cs, e, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.Migrate(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func MigrateWithInfo( @@ -347,40 +181,11 @@ func MigrateWithInfo( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - m := makeView(msg) - defer runtime.KeepAlive(msg) - i := makeView(migrateInfo) - defer runtime.KeepAlive(i) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.migrate_with_info(cache.ptr, cs, e, m, i, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.MigrateWithInfo(checksum, env, msg, migrateInfo, gasMeter, store, api, querier, gasLimit, printDebug) } func Sudo( @@ -391,38 +196,11 @@ func Sudo( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.sudo(cache.ptr, cs, e, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.Sudo(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func Reply( @@ -433,38 +211,11 @@ func Reply( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - r := makeView(reply) - defer runtime.KeepAlive(reply) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.reply(cache.ptr, cs, e, r, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.Reply(checksum, env, reply, gasMeter, store, api, querier, gasLimit, printDebug) } func Query( @@ -475,38 +226,11 @@ func Query( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.query(cache.ptr, cs, e, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.Query(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCChannelOpen( @@ -517,38 +241,11 @@ func IBCChannelOpen( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_channel_open(cache.ptr, cs, e, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.IBCChannelOpen(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCChannelConnect( @@ -559,38 +256,11 @@ func IBCChannelConnect( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_channel_connect(cache.ptr, cs, e, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.IBCChannelConnect(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCChannelClose( @@ -601,38 +271,11 @@ func IBCChannelClose( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - m := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_channel_close(cache.ptr, cs, e, m, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.IBCChannelClose(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCPacketReceive( @@ -643,38 +286,11 @@ func IBCPacketReceive( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - pa := makeView(packet) - defer runtime.KeepAlive(packet) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_packet_receive(cache.ptr, cs, e, pa, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.IBCPacketReceive(checksum, env, packet, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCPacketAck( @@ -685,38 +301,11 @@ func IBCPacketAck( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - ac := makeView(ack) - defer runtime.KeepAlive(ack) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_packet_ack(cache.ptr, cs, e, ac, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.IBCPacketAck(checksum, env, ack, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCPacketTimeout( @@ -727,38 +316,11 @@ func IBCPacketTimeout( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - pa := makeView(packet) - defer runtime.KeepAlive(packet) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_packet_timeout(cache.ptr, cs, e, pa, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.IBCPacketTimeout(checksum, env, packet, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCSourceCallback( @@ -769,38 +331,11 @@ func IBCSourceCallback( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - msgBytes := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_source_callback(cache.ptr, cs, e, msgBytes, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil + return currentRuntime.IBCSourceCallback(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } func IBCDestinationCallback( @@ -811,98 +346,9 @@ func IBCDestinationCallback( gasMeter *types.GasMeter, store types.KVStore, api *types.GoAPI, - querier *Querier, + querier *types.Querier, gasLimit uint64, printDebug bool, ) ([]byte, types.GasReport, error) { - cs := makeView(checksum) - defer runtime.KeepAlive(checksum) - e := makeView(env) - defer runtime.KeepAlive(env) - msgBytes := makeView(msg) - defer runtime.KeepAlive(msg) - var pinner runtime.Pinner - pinner.Pin(gasMeter) - checkAndPinAPI(api, pinner) - checkAndPinQuerier(querier, pinner) - defer pinner.Unpin() - - callID := startCall() - defer endCall(callID) - - dbState := buildDBState(store, callID) - db := buildDB(&dbState, gasMeter) - a := buildAPI(api) - q := buildQuerier(querier) - var gasReport C.GasReport - errmsg := uninitializedUnmanagedVector() - - res, err := C.ibc_destination_callback(cache.ptr, cs, e, msgBytes, db, a, q, cu64(gasLimit), cbool(printDebug), &gasReport, &errmsg) - if err != nil && err.(syscall.Errno) != C.ErrnoValue_Success { - // Depending on the nature of the error, `gasUsed` will either have a meaningful value, or just 0. 
- return nil, convertGasReport(gasReport), errorWithMessage(err, errmsg) - } - return copyAndDestroyUnmanagedVector(res), convertGasReport(gasReport), nil -} - -func convertGasReport(report C.GasReport) types.GasReport { - return types.GasReport{ - Limit: uint64(report.limit), - Remaining: uint64(report.remaining), - UsedExternally: uint64(report.used_externally), - UsedInternally: uint64(report.used_internally), - } -} - -/**** To error module ***/ - -func errorWithMessage(err error, b C.UnmanagedVector) error { - // we always destroy the unmanaged vector to avoid a memory leak - msg := copyAndDestroyUnmanagedVector(b) - - // this checks for out of gas as a special case - if errno, ok := err.(syscall.Errno); ok && int(errno) == 2 { - return types.OutOfGasError{} - } - if msg == nil { - return err - } - return fmt.Errorf("%s", string(msg)) -} - -// checkAndPinAPI checks and pins the API and relevant pointers inside of it. -// All errors will result in panics as they indicate misuse of the wasmvm API and are not expected -// to be caused by user data. -func checkAndPinAPI(api *types.GoAPI, pinner runtime.Pinner) { - if api == nil { - panic("API must not be nil. If you don't want to provide API functionality, please create an instance that returns an error on every call to HumanizeAddress(), CanonicalizeAddress() and ValidateAddress().") - } - - // func cHumanizeAddress assumes this is set - if api.HumanizeAddress == nil { - panic("HumanizeAddress in API must not be nil. If you don't want to provide API functionality, please create an instance that returns an error on every call to HumanizeAddress(), CanonicalizeAddress() and ValidateAddress().") - } - - // func cCanonicalizeAddress assumes this is set - if api.CanonicalizeAddress == nil { - panic("CanonicalizeAddress in API must not be nil. 
If you don't want to provide API functionality, please create an instance that returns an error on every call to HumanizeAddress(), CanonicalizeAddress() and ValidateAddress().") - } - - // func cValidateAddress assumes this is set - if api.ValidateAddress == nil { - panic("ValidateAddress in API must not be nil. If you don't want to provide API functionality, please create an instance that returns an error on every call to HumanizeAddress(), CanonicalizeAddress() and ValidateAddress().") - } - - pinner.Pin(api) // this pointer is used in Rust (`state` in `C.GoApi`) and must not change -} - -// checkAndPinQuerier checks and pins the querier. -// All errors will result in panics as they indicate misuse of the wasmvm API and are not expected -// to be caused by user data. -func checkAndPinQuerier(querier *Querier, pinner runtime.Pinner) { - if querier == nil { - panic("Querier must not be nil. If you don't want to provide querier functionality, please create an instance that returns an error on every call to Query().") - } - - pinner.Pin(querier) // this pointer is used in Rust (`state` in `C.GoQuerier`) and must not change + return currentRuntime.IBCDestinationCallback(checksum, env, msg, gasMeter, store, api, querier, gasLimit, printDebug) } diff --git a/internal/api/lib_test.go b/internal/api/lib_test.go index 90c55a8c2..3d0ae2fca 100644 --- a/internal/api/lib_test.go +++ b/internal/api/lib_test.go @@ -20,7 +20,7 @@ import ( ) const ( - TESTING_PRINT_DEBUG = false + TESTING_PRINT_DEBUG = true TESTING_GAS_LIMIT = uint64(500_000_000_000) // ~0.5ms TESTING_MEMORY_LIMIT = 32 // MiB TESTING_CACHE_SIZE = 100 // MiB @@ -28,6 +28,13 @@ const ( var TESTING_CAPABILITIES = []string{"staking", "stargate", "iterator", "cosmwasm_1_1", "cosmwasm_1_2", "cosmwasm_1_3"} +type CapitalizedResponse struct { + Text string `json:"text"` +} + +// Add mutex for thread safety +var testMutex sync.Mutex + func TestInitAndReleaseCache(t *testing.T) { tmpdir, err := os.MkdirTemp("", 
"wasmvm-testing") require.NoError(t, err) @@ -109,7 +116,7 @@ func TestInitLockingPreventsConcurrentAccess(t *testing.T) { }, } _, err2 := InitCache(config2) - require.ErrorContains(t, err2, "Could not lock exclusive.lock") + require.ErrorContains(t, err2, "Could not lock exclusive.lock. Is a different VM running in the same directory already?") ReleaseCache(cache1) @@ -191,9 +198,10 @@ func TestInitCacheEmptyCapabilities(t *testing.T) { ReleaseCache(cache) } -func withCache(t testing.TB) (Cache, func()) { +func withCache(tb testing.TB) (Cache, func()) { + tb.Helper() tmpdir, err := os.MkdirTemp("", "wasmvm-testing") - require.NoError(t, err) + require.NoError(tb, err) config := types.VMConfig{ Cache: types.CacheOptions{ BaseDir: tmpdir, @@ -203,7 +211,7 @@ func withCache(t testing.TB) (Cache, func()) { }, } cache, err := InitCache(config) - require.NoError(t, err) + require.NoError(tb, err) cleanup := func() { os.RemoveAll(tmpdir) @@ -363,7 +371,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 1 metrics, err := GetMetrics(cache) require.NoError(t, err) - assert.Equal(t, &types.Metrics{}, metrics) + require.Equal(t, &types.Metrics{}, metrics) // Store contract wasm, err := os.ReadFile("../../testdata/hackatom.wasm") @@ -374,7 +382,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 2 metrics, err = GetMetrics(cache) require.NoError(t, err) - assert.Equal(t, &types.Metrics{}, metrics) + require.Equal(t, &types.Metrics{}, metrics) // Instantiate 1 gasMeter := NewMockGasMeter(TESTING_GAS_LIMIT) @@ -390,7 +398,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 3 metrics, err = GetMetrics(cache) - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(0), metrics.HitsMemoryCache) require.Equal(t, uint32(1), metrics.HitsFsCache) require.Equal(t, uint64(1), metrics.ElementsMemoryCache) @@ -403,7 +411,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 4 metrics, err = GetMetrics(cache) - assert.NoError(t, err) + require.NoError(t, err) 
require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(1), metrics.HitsFsCache) require.Equal(t, uint64(1), metrics.ElementsMemoryCache) @@ -415,7 +423,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 5 metrics, err = GetMetrics(cache) - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) require.Equal(t, uint64(1), metrics.ElementsPinnedMemoryCache) @@ -430,11 +438,11 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 6 metrics, err = GetMetrics(cache) - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsPinnedMemoryCache) require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) - require.Equal(t, uint64(1), metrics.ElementsPinnedMemoryCache) + require.Equal(t, uint64(0), metrics.ElementsPinnedMemoryCache) require.Equal(t, uint64(1), metrics.ElementsMemoryCache) require.InEpsilon(t, 3700000, metrics.SizePinnedMemoryCache, 0.25) require.InEpsilon(t, 3700000, metrics.SizeMemoryCache, 0.25) @@ -445,7 +453,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 7 metrics, err = GetMetrics(cache) - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsPinnedMemoryCache) require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) @@ -461,7 +469,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 8 metrics, err = GetMetrics(cache) - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsPinnedMemoryCache) require.Equal(t, uint32(2), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) @@ -478,7 +486,7 @@ func TestGetPinnedMetrics(t *testing.T) { // GetMetrics 1 metrics, err := GetPinnedMetrics(cache) require.NoError(t, err) - assert.Equal(t, &types.PinnedMetrics{PerModule: make([]types.PerModuleEntry, 0)}, metrics) + 
require.Equal(t, &types.PinnedMetrics{PerModule: make([]types.PerModuleEntry, 0)}, metrics) // Store contract 1 wasm, err := os.ReadFile("../../testdata/hackatom.wasm") @@ -514,15 +522,15 @@ func TestGetPinnedMetrics(t *testing.T) { // GetMetrics 2 metrics, err = GetPinnedMetrics(cache) require.NoError(t, err) - assert.Equal(t, 2, len(metrics.PerModule)) + require.Len(t, metrics.PerModule, 2) hackatomMetrics := findMetrics(metrics.PerModule, checksum) cyberpunkMetrics := findMetrics(metrics.PerModule, cyberpunkChecksum) - assert.Equal(t, uint32(0), hackatomMetrics.Hits) - assert.NotEqual(t, uint32(0), hackatomMetrics.Size) - assert.Equal(t, uint32(0), cyberpunkMetrics.Hits) - assert.NotEqual(t, uint32(0), cyberpunkMetrics.Size) + require.Equal(t, uint32(0), hackatomMetrics.Hits) + require.NotEqual(t, uint32(0), hackatomMetrics.Size) + require.Equal(t, uint32(0), cyberpunkMetrics.Hits) + require.NotEqual(t, uint32(0), cyberpunkMetrics.Size) // Instantiate 1 gasMeter := NewMockGasMeter(TESTING_GAS_LIMIT) @@ -539,15 +547,15 @@ func TestGetPinnedMetrics(t *testing.T) { // GetMetrics 3 metrics, err = GetPinnedMetrics(cache) require.NoError(t, err) - assert.Equal(t, 2, len(metrics.PerModule)) + require.Len(t, metrics.PerModule, 2) hackatomMetrics = findMetrics(metrics.PerModule, checksum) cyberpunkMetrics = findMetrics(metrics.PerModule, cyberpunkChecksum) - assert.Equal(t, uint32(1), hackatomMetrics.Hits) - assert.NotEqual(t, uint32(0), hackatomMetrics.Size) - assert.Equal(t, uint32(0), cyberpunkMetrics.Hits) - assert.NotEqual(t, uint32(0), cyberpunkMetrics.Size) + require.Equal(t, uint32(1), hackatomMetrics.Hits) + require.NotEqual(t, uint32(0), hackatomMetrics.Size) + require.Equal(t, uint32(0), cyberpunkMetrics.Hits) + require.NotEqual(t, uint32(0), cyberpunkMetrics.Size) } func TestInstantiate(t *testing.T) { @@ -573,13 +581,13 @@ func TestInstantiate(t *testing.T) { res, cost, err := Instantiate(cache, checksum, env, info, msg, &igasMeter, store, api, &querier, 
TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) require.NoError(t, err) requireOkResponse(t, res, 0) - assert.Equal(t, uint64(0xb1fe27), cost.UsedInternally) + require.Equal(t, uint64(0xb1fe27), cost.UsedInternally) var result types.ContractResult err = json.Unmarshal(res, &result) require.NoError(t, err) require.Equal(t, "", result.Err) - require.Equal(t, 0, len(result.Ok.Messages)) + require.Empty(t, result.Ok.Messages) } func TestExecute(t *testing.T) { @@ -604,7 +612,7 @@ func TestExecute(t *testing.T) { diff := time.Since(start) require.NoError(t, err) requireOkResponse(t, res, 0) - assert.Equal(t, uint64(0xb1fe27), cost.UsedInternally) + require.Equal(t, uint64(0xb1fe27), cost.UsedInternally) t.Logf("Time (%d gas): %s\n", cost.UsedInternally, diff) // execute with the same store @@ -617,7 +625,7 @@ func TestExecute(t *testing.T) { res, cost, err = Execute(cache, checksum, env, info, []byte(`{"release":{}}`), &igasMeter2, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) diff = time.Since(start) require.NoError(t, err) - assert.Equal(t, uint64(0x1416da5), cost.UsedInternally) + require.Equal(t, uint64(0x1416da5), cost.UsedInternally) t.Logf("Time (%d gas): %s\n", cost.UsedInternally, diff) // make sure it read the balance properly and we got 250 atoms @@ -625,25 +633,24 @@ func TestExecute(t *testing.T) { err = json.Unmarshal(res, &result) require.NoError(t, err) require.Equal(t, "", result.Err) - require.Equal(t, 1, len(result.Ok.Messages)) - + require.Len(t, result.Ok.Messages, 1) // Ensure we got our custom event - assert.Equal(t, len(result.Ok.Events), 1) + require.Len(t, result.Ok.Events, 1) ev := result.Ok.Events[0] - assert.Equal(t, ev.Type, "hackatom") - assert.Equal(t, len(ev.Attributes), 1) - assert.Equal(t, ev.Attributes[0].Key, "action") - assert.Equal(t, ev.Attributes[0].Value, "release") + require.Equal(t, "hackatom", ev.Type) + require.Len(t, ev.Attributes, 1) + require.Equal(t, "action", ev.Attributes[0].Key) + require.Equal(t, "release", 
ev.Attributes[0].Value) dispatch := result.Ok.Messages[0].Msg require.NotNil(t, dispatch.Bank, "%#v", dispatch) require.NotNil(t, dispatch.Bank.Send, "%#v", dispatch) send := dispatch.Bank.Send - assert.Equal(t, "bob", send.ToAddress) - assert.Equal(t, balance, send.Amount) + require.Equal(t, "bob", send.ToAddress) + require.Equal(t, balance, send.Amount) // check the data is properly formatted expectedData := []byte{0xF0, 0x0B, 0xAA} - assert.Equal(t, expectedData, result.Ok.Data) + require.Equal(t, expectedData, result.Ok.Data) } func TestExecutePanic(t *testing.T) { @@ -725,7 +732,7 @@ func TestExecuteCpuLoop(t *testing.T) { diff := time.Since(start) require.NoError(t, err) requireOkResponse(t, res, 0) - assert.Equal(t, uint64(0x79f527), cost.UsedInternally) + require.Equal(t, uint64(0x79f527), cost.UsedInternally) t.Logf("Time (%d gas): %s\n", cost.UsedInternally, diff) // execute a cpu loop @@ -738,7 +745,7 @@ func TestExecuteCpuLoop(t *testing.T) { _, cost, err = Execute(cache, checksum, env, info, []byte(`{"cpu_loop":{}}`), &igasMeter2, store, api, &querier, maxGas, TESTING_PRINT_DEBUG) diff = time.Since(start) require.Error(t, err) - assert.Equal(t, cost.UsedInternally, maxGas) + require.Equal(t, cost.UsedInternally, maxGas) t.Logf("CPULoop Time (%d gas): %s\n", cost.UsedInternally, diff) } @@ -807,7 +814,9 @@ func BenchmarkContractCall(b *testing.B) { gasMeter2 := NewMockGasMeter(TESTING_GAS_LIMIT) igasMeter2 := types.GasMeter(gasMeter2) store.SetGasMeter(gasMeter2) + testMutex.Lock() info = MockInfoBin(b, "fred") + testMutex.Unlock() msg := []byte(`{"allocate_large_memory":{"pages":0}}`) // replace with noop once we have it res, _, err = Execute(cache, checksum, env, info, msg, &igasMeter2, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) require.NoError(b, err) @@ -841,22 +850,31 @@ func Benchmark100ConcurrentContractCalls(b *testing.B) { b.ResetTimer() for n := 0; n < b.N; n++ { var wg sync.WaitGroup + errChan := make(chan error, callCount) 
+ resChan := make(chan []byte, callCount) wg.Add(callCount) + info = mockInfoBinNoAssert("fred") for i := 0; i < callCount; i++ { go func() { + defer wg.Done() gasMeter2 := NewMockGasMeter(TESTING_GAS_LIMIT) igasMeter2 := types.GasMeter(gasMeter2) store.SetGasMeter(gasMeter2) - info = MockInfoBin(b, "fred") msg := []byte(`{"allocate_large_memory":{"pages":0}}`) // replace with noop once we have it res, _, err = Execute(cache, checksum, env, info, msg, &igasMeter2, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) - require.NoError(b, err) - requireOkResponse(b, res, 0) - - wg.Done() + errChan <- err + resChan <- res }() } wg.Wait() + close(errChan) + close(resChan) + + // Now check results in the main test goroutine + for i := 0; i < callCount; i++ { + require.NoError(b, <-errChan) + requireOkResponse(b, <-resChan, 0) + } } } @@ -919,7 +937,7 @@ func TestMigrate(t *testing.T) { err = json.Unmarshal(data, &qResult) require.NoError(t, err) require.Equal(t, "", qResult.Err) - require.Equal(t, string(qResult.Ok), `{"verifier":"fred"}`) + require.JSONEq(t, `{"verifier":"fred"}`, string(qResult.Ok)) // migrate to a new verifier - alice // we use the same code blob as we are testing hackatom self-migration @@ -933,7 +951,7 @@ func TestMigrate(t *testing.T) { err = json.Unmarshal(data, &qResult2) require.NoError(t, err) require.Equal(t, "", qResult2.Err) - require.Equal(t, `{"verifier":"alice"}`, string(qResult2.Ok)) + require.JSONEq(t, `{"verifier":"alice"}`, string(qResult2.Ok)) } func TestMultipleInstances(t *testing.T) { @@ -974,18 +992,18 @@ func TestMultipleInstances(t *testing.T) { // succeed to execute store1 with fred resp = exec(t, cache, checksum, "fred", store1, api, querier, 0x140e8ad) require.Equal(t, "", resp.Err) - require.Equal(t, 1, len(resp.Ok.Messages)) + require.Len(t, resp.Ok.Messages, 1) attributes := resp.Ok.Attributes - require.Equal(t, 2, len(attributes)) + require.Len(t, attributes, 2) require.Equal(t, "destination", 
attributes[1].Key) require.Equal(t, "bob", attributes[1].Value) // succeed to execute store2 with mary resp = exec(t, cache, checksum, "mary", store2, api, querier, 0x1412b29) require.Equal(t, "", resp.Err) - require.Equal(t, 1, len(resp.Ok.Messages)) + require.Len(t, resp.Ok.Messages, 1) attributes = resp.Ok.Attributes - require.Equal(t, 2, len(attributes)) + require.Len(t, attributes, 2) require.Equal(t, "destination", attributes[1].Key) require.Equal(t, "sue", attributes[1].Value) } @@ -1024,7 +1042,7 @@ func TestSudo(t *testing.T) { err = json.Unmarshal(res, &result) require.NoError(t, err) require.Equal(t, "", result.Err) - require.Equal(t, 1, len(result.Ok.Messages)) + require.Len(t, result.Ok.Messages, 1) dispatch := result.Ok.Messages[0].Msg require.NotNil(t, dispatch.Bank, "%#v", dispatch) require.NotNil(t, dispatch.Bank.Send, "%#v", dispatch) @@ -1079,7 +1097,7 @@ func TestDispatchSubmessage(t *testing.T) { err = json.Unmarshal(res, &result) require.NoError(t, err) require.Equal(t, "", result.Err) - require.Equal(t, 1, len(result.Ok.Messages)) + require.Len(t, result.Ok.Messages, 1) dispatch := result.Ok.Messages[0] assert.Equal(t, id, dispatch.ID) assert.Equal(t, payload.Msg, dispatch.Msg) @@ -1156,15 +1174,17 @@ func TestReplyAndQuery(t *testing.T) { require.Equal(t, events, val.Events) } -func requireOkResponse(t testing.TB, res []byte, expectedMsgs int) { +func requireOkResponse(tb testing.TB, res []byte, expectedMsgs int) { + tb.Helper() var result types.ContractResult err := json.Unmarshal(res, &result) - require.NoError(t, err) - require.Equal(t, "", result.Err) - require.Equal(t, expectedMsgs, len(result.Ok.Messages)) + require.NoError(tb, err) + require.Equal(tb, "", result.Err) + require.Len(tb, result.Ok.Messages, expectedMsgs) } func requireQueryError(t *testing.T, res []byte) { + t.Helper() var result types.QueryResult err := json.Unmarshal(res, &result) require.NoError(t, err) @@ -1173,6 +1193,7 @@ func requireQueryError(t *testing.T, res 
[]byte) { } func requireQueryOk(t *testing.T, res []byte) []byte { + t.Helper() var result types.QueryResult err := json.Unmarshal(res, &result) require.NoError(t, err) @@ -1181,36 +1202,43 @@ func requireQueryOk(t *testing.T, res []byte) []byte { return result.Ok } -func createHackatomContract(t testing.TB, cache Cache) []byte { - return createContract(t, cache, "../../testdata/hackatom.wasm") +func createHackatomContract(tb testing.TB, cache Cache) []byte { + tb.Helper() + return createContract(tb, cache, "../../testdata/hackatom.wasm") } -func createCyberpunkContract(t testing.TB, cache Cache) []byte { - return createContract(t, cache, "../../testdata/cyberpunk.wasm") +func createCyberpunkContract(tb testing.TB, cache Cache) []byte { + tb.Helper() + return createContract(tb, cache, "../../testdata/cyberpunk.wasm") } -func createQueueContract(t testing.TB, cache Cache) []byte { - return createContract(t, cache, "../../testdata/queue.wasm") +func createQueueContract(tb testing.TB, cache Cache) []byte { + tb.Helper() + return createContract(tb, cache, "../../testdata/queue.wasm") } -func createReflectContract(t testing.TB, cache Cache) []byte { - return createContract(t, cache, "../../testdata/reflect.wasm") +func createReflectContract(tb testing.TB, cache Cache) []byte { + tb.Helper() + return createContract(tb, cache, "../../testdata/reflect.wasm") } -func createFloaty2(t testing.TB, cache Cache) []byte { - return createContract(t, cache, "../../testdata/floaty_2.0.wasm") +func createFloaty2(tb testing.TB, cache Cache) []byte { + tb.Helper() + return createContract(tb, cache, "../../testdata/floaty_2.0.wasm") } -func createContract(t testing.TB, cache Cache, wasmFile string) []byte { +func createContract(tb testing.TB, cache Cache, wasmFile string) []byte { + tb.Helper() wasm, err := os.ReadFile(wasmFile) - require.NoError(t, err) + require.NoError(tb, err) checksum, err := StoreCode(cache, wasm, true) - require.NoError(t, err) + require.NoError(tb, err) return 
checksum } // exec runs the handle tx with the given signer -func exec(t *testing.T, cache Cache, checksum []byte, signer types.HumanAddress, store types.KVStore, api *types.GoAPI, querier Querier, gasExpected uint64) types.ContractResult { +func exec(t *testing.T, cache Cache, checksum []byte, signer types.HumanAddress, store types.KVStore, api *types.GoAPI, querier types.Querier, gasExpected uint64) types.ContractResult { + t.Helper() gasMeter := NewMockGasMeter(TESTING_GAS_LIMIT) igasMeter := types.GasMeter(gasMeter) env := MockEnvBin(t) @@ -1265,7 +1293,7 @@ func TestQuery(t *testing.T) { err = json.Unmarshal(data, &qResult) require.NoError(t, err) require.Equal(t, "", qResult.Err) - require.Equal(t, string(qResult.Ok), `{"verifier":"fred"}`) + require.JSONEq(t, `{"verifier":"fred"}`, string(qResult.Ok)) } func TestHackatomQuerier(t *testing.T) { @@ -1308,10 +1336,6 @@ func TestCustomReflectQuerier(t *testing.T) { // https://github.com/CosmWasm/cosmwasm/blob/v0.11.0-alpha3/contracts/reflect/src/msg.rs#L18-L28 } - type CapitalizedResponse struct { - Text string `json:"text"` - } - cache, cleanup := withCache(t) defer cleanup() checksum := createReflectContract(t, cache) @@ -1326,7 +1350,7 @@ func TestCustomReflectQuerier(t *testing.T) { // we need this to handle the custom requests from the reflect contract innerQuerier := querier.(*MockQuerier) innerQuerier.Custom = ReflectCustom{} - querier = Querier(innerQuerier) + querier = types.Querier(innerQuerier) // make a valid query to the other address queryMsg := QueryMsg{ @@ -1394,12 +1418,12 @@ func TestFloats(t *testing.T) { var qResult types.QueryResult err = json.Unmarshal(data, &qResult) require.NoError(t, err) - require.Equal(t, "", qResult.Err) + require.Empty(t, qResult.Err) var instructions []string err = json.Unmarshal(qResult.Ok, &instructions) require.NoError(t, err) // little sanity check - require.Equal(t, 70, len(instructions)) + require.Len(t, instructions, 70) hasher := sha256.New() const 
RUNS_PER_INSTRUCTION = 150 @@ -1411,7 +1435,7 @@ func TestFloats(t *testing.T) { require.NoError(t, err) err = json.Unmarshal(data, &qResult) require.NoError(t, err) - require.Equal(t, "", qResult.Err) + require.Empty(t, qResult.Err) var args []Value err = json.Unmarshal(qResult.Ok, &args) require.NoError(t, err) @@ -1426,13 +1450,13 @@ func TestFloats(t *testing.T) { data, _, err = Query(cache, checksum, env, []byte(msg), &igasMeter, store, api, &querier, TESTING_GAS_LIMIT, TESTING_PRINT_DEBUG) var result string if err != nil { - assert.ErrorContains(t, err, "Error calling the VM: Error executing Wasm: ") + require.Error(t, err) // remove the prefix to make the error message the same as in the cosmwasm-vm test result = strings.Replace(err.Error(), "Error calling the VM: Error executing Wasm: ", "", 1) } else { err = json.Unmarshal(data, &qResult) require.NoError(t, err) - require.Equal(t, "", qResult.Err) + require.Empty(t, qResult.Err) var response Value err = json.Unmarshal(qResult.Ok, &response) require.NoError(t, err) @@ -1444,5 +1468,18 @@ func TestFloats(t *testing.T) { } hash := hasher.Sum(nil) - require.Equal(t, "95f70fa6451176ab04a9594417a047a1e4d8e2ff809609b8f81099496bee2393", hex.EncodeToString(hash)) + require.Equal(t, "6e9ffbe929a2c1bcbffca0d4e9d0935371045bba50158a01ec082459a4cbbd2a", hex.EncodeToString(hash)) +} + +// mockInfoBinNoAssert creates the message binary without using testify assertions +func mockInfoBinNoAssert(sender types.HumanAddress) []byte { + info := types.MessageInfo{ + Sender: sender, + Funds: types.Array[types.Coin]{}, + } + res, err := json.Marshal(info) + if err != nil { + panic(err) + } + return res } diff --git a/internal/api/libwasmvm.aarch64.so b/internal/api/libwasmvm.aarch64.so deleted file mode 100755 index 050145570..000000000 Binary files a/internal/api/libwasmvm.aarch64.so and /dev/null differ diff --git a/internal/api/libwasmvm.dylib b/internal/api/libwasmvm.dylib deleted file mode 100755 index 7ee82db40..000000000 
Binary files a/internal/api/libwasmvm.dylib and /dev/null differ diff --git a/internal/api/libwasmvm.x86_64.so b/internal/api/libwasmvm.x86_64.so deleted file mode 100755 index 196150e5c..000000000 Binary files a/internal/api/libwasmvm.x86_64.so and /dev/null differ diff --git a/internal/api/link_glibclinux_aarch64.go b/internal/api/link_glibclinux_aarch64.go deleted file mode 100644 index 8742229de..000000000 --- a/internal/api/link_glibclinux_aarch64.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build linux && !muslc && arm64 && !sys_wasmvm - -package api - -// #cgo LDFLAGS: -Wl,-rpath,${SRCDIR} -L${SRCDIR} -lwasmvm.aarch64 -import "C" diff --git a/internal/api/link_glibclinux_x86_64.go b/internal/api/link_glibclinux_x86_64.go deleted file mode 100644 index 9d87a7130..000000000 --- a/internal/api/link_glibclinux_x86_64.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build linux && !muslc && amd64 && !sys_wasmvm - -package api - -// #cgo LDFLAGS: -Wl,-rpath,${SRCDIR} -L${SRCDIR} -lwasmvm.x86_64 -import "C" diff --git a/internal/api/link_mac.go b/internal/api/link_mac.go deleted file mode 100644 index e6d841ea2..000000000 --- a/internal/api/link_mac.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build darwin && !static_wasm && !sys_wasmvm - -package api - -// #cgo LDFLAGS: -Wl,-rpath,${SRCDIR} -L${SRCDIR} -lwasmvm -import "C" diff --git a/internal/api/link_mac_static.go b/internal/api/link_mac_static.go deleted file mode 100644 index d9132e519..000000000 --- a/internal/api/link_mac_static.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build darwin && static_wasm && !sys_wasmvm - -package api - -// #cgo LDFLAGS: -L${SRCDIR} -lwasmvmstatic_darwin -import "C" diff --git a/internal/api/link_muslc_aarch64.go b/internal/api/link_muslc_aarch64.go deleted file mode 100644 index e3ab74aeb..000000000 --- a/internal/api/link_muslc_aarch64.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build linux && muslc && arm64 && !sys_wasmvm - -package api - -// #cgo LDFLAGS: -Wl,-rpath,${SRCDIR} -L${SRCDIR} 
-lwasmvm_muslc.aarch64 -import "C" diff --git a/internal/api/link_muslc_x86_64.go b/internal/api/link_muslc_x86_64.go deleted file mode 100644 index 58489509f..000000000 --- a/internal/api/link_muslc_x86_64.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build linux && muslc && amd64 && !sys_wasmvm - -package api - -// #cgo LDFLAGS: -Wl,-rpath,${SRCDIR} -L${SRCDIR} -lwasmvm_muslc.x86_64 -import "C" diff --git a/internal/api/link_system.go b/internal/api/link_system.go deleted file mode 100644 index ad354ba55..000000000 --- a/internal/api/link_system.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build sys_wasmvm - -package api - -// #cgo LDFLAGS: -lwasmvm -import "C" diff --git a/internal/api/link_windows.go b/internal/api/link_windows.go deleted file mode 100644 index 8e45cf011..000000000 --- a/internal/api/link_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -//go:build windows && !sys_wasmvm - -package api - -// #cgo LDFLAGS: -Wl,-rpath,${SRCDIR} -L${SRCDIR} -lwasmvm -import "C" diff --git a/internal/api/memory.go b/internal/api/memory.go deleted file mode 100644 index f2fb06d73..000000000 --- a/internal/api/memory.go +++ /dev/null @@ -1,98 +0,0 @@ -package api - -/* -#include "bindings.h" -*/ -import "C" - -import "unsafe" - -// makeView creates a view into the given byte slice what allows Rust code to read it. -// The byte slice is managed by Go and will be garbage collected. Use runtime.KeepAlive -// to ensure the byte slice lives long enough. -func makeView(s []byte) C.ByteSliceView { - if s == nil { - return C.ByteSliceView{is_nil: true, ptr: cu8_ptr(nil), len: cusize(0)} - } - - // In Go, accessing the 0-th element of an empty array triggers a panic. That is why in the case - // of an empty `[]byte` we can't get the internal heap pointer to the underlying array as we do - // below with `&data[0]`. 
https://play.golang.org/p/xvDY3g9OqUk - if len(s) == 0 { - return C.ByteSliceView{is_nil: false, ptr: cu8_ptr(nil), len: cusize(0)} - } - - return C.ByteSliceView{ - is_nil: false, - ptr: cu8_ptr(unsafe.Pointer(&s[0])), - len: cusize(len(s)), - } -} - -// Creates a C.UnmanagedVector, which cannot be done in test files directly -func constructUnmanagedVector(is_none cbool, ptr cu8_ptr, len cusize, cap cusize) C.UnmanagedVector { - return C.UnmanagedVector{ - is_none: is_none, - ptr: ptr, - len: len, - cap: cap, - } -} - -// uninitializedUnmanagedVector returns an invalid C.UnmanagedVector -// instance. Only use then after someone wrote an instance to it. -func uninitializedUnmanagedVector() C.UnmanagedVector { - return C.UnmanagedVector{} -} - -func newUnmanagedVector(data []byte) C.UnmanagedVector { - if data == nil { - return C.new_unmanaged_vector(cbool(true), cu8_ptr(nil), cusize(0)) - } else if len(data) == 0 { - // in Go, accessing the 0-th element of an empty array triggers a panic. That is why in the case - // of an empty `[]byte` we can't get the internal heap pointer to the underlying array as we do - // below with `&data[0]`. 
- // https://play.golang.org/p/xvDY3g9OqUk - return C.new_unmanaged_vector(cbool(false), cu8_ptr(nil), cusize(0)) - } else { - // This will allocate a proper vector with content and return a description of it - return C.new_unmanaged_vector(cbool(false), cu8_ptr(unsafe.Pointer(&data[0])), cusize(len(data))) - } -} - -func copyAndDestroyUnmanagedVector(v C.UnmanagedVector) []byte { - var out []byte - if v.is_none { - out = nil - } else if v.cap == cusize(0) { - // There is no allocation we can copy - out = []byte{} - } else { - // C.GoBytes create a copy (https://stackoverflow.com/a/40950744/2013738) - out = C.GoBytes(unsafe.Pointer(v.ptr), cint(v.len)) - } - C.destroy_unmanaged_vector(v) - return out -} - -func optionalU64ToPtr(val C.OptionalU64) *uint64 { - if val.is_some { - return (*uint64)(&val.value) - } - return nil -} - -// copyU8Slice copies the contents of an Option<&[u8]> that was allocated on the Rust side. -// Returns nil if and only if the source is None. -func copyU8Slice(view C.U8SliceView) []byte { - if view.is_none { - return nil - } - if view.len == 0 { - // In this case, we don't want to look into the ptr - return []byte{} - } - // C.GoBytes create a copy (https://stackoverflow.com/a/40950744/2013738) - res := C.GoBytes(unsafe.Pointer(view.ptr), cint(view.len)) - return res -} diff --git a/internal/api/memory_test.go b/internal/api/memory_test.go deleted file mode 100644 index 397faf50c..000000000 --- a/internal/api/memory_test.go +++ /dev/null @@ -1,78 +0,0 @@ -package api - -import ( - "testing" - "unsafe" - - "github.com/stretchr/testify/require" -) - -func TestMakeView(t *testing.T) { - data := []byte{0xaa, 0xbb, 0x64} - dataView := makeView(data) - require.Equal(t, cbool(false), dataView.is_nil) - require.Equal(t, cusize(3), dataView.len) - - empty := []byte{} - emptyView := makeView(empty) - require.Equal(t, cbool(false), emptyView.is_nil) - require.Equal(t, cusize(0), emptyView.len) - - nilView := makeView(nil) - require.Equal(t, 
cbool(true), nilView.is_nil) -} - -func TestCreateAndDestroyUnmanagedVector(t *testing.T) { - // non-empty - { - original := []byte{0xaa, 0xbb, 0x64} - unmanaged := newUnmanagedVector(original) - require.Equal(t, cbool(false), unmanaged.is_none) - require.Equal(t, 3, int(unmanaged.len)) - require.GreaterOrEqual(t, 3, int(unmanaged.cap)) // Rust implementation decides this - copy := copyAndDestroyUnmanagedVector(unmanaged) - require.Equal(t, original, copy) - } - - // empty - { - original := []byte{} - unmanaged := newUnmanagedVector(original) - require.Equal(t, cbool(false), unmanaged.is_none) - require.Equal(t, 0, int(unmanaged.len)) - require.GreaterOrEqual(t, 0, int(unmanaged.cap)) // Rust implementation decides this - copy := copyAndDestroyUnmanagedVector(unmanaged) - require.Equal(t, original, copy) - } - - // none - { - var original []byte - unmanaged := newUnmanagedVector(original) - require.Equal(t, cbool(true), unmanaged.is_none) - // We must not make assumptions on the other fields in this case - copy := copyAndDestroyUnmanagedVector(unmanaged) - require.Nil(t, copy) - } -} - -// Like the test above but without `newUnmanagedVector` calls. -// Since only Rust can actually create them, we only test edge cases here. -// -//go:nocheckptr -func TestCopyDestroyUnmanagedVector(t *testing.T) { - { - // ptr, cap and len broken. Do not access those values when is_none is true - invalid_ptr := unsafe.Pointer(uintptr(42)) - uv := constructUnmanagedVector(cbool(true), cu8_ptr(invalid_ptr), cusize(0xBB), cusize(0xAA)) - copy := copyAndDestroyUnmanagedVector(uv) - require.Nil(t, copy) - } - { - // Capacity is 0, so no allocation happened. Do not access the pointer. 
- invalid_ptr := unsafe.Pointer(uintptr(42)) - uv := constructUnmanagedVector(cbool(false), cu8_ptr(invalid_ptr), cusize(0), cusize(0)) - copy := copyAndDestroyUnmanagedVector(uv) - require.Equal(t, []byte{}, copy) - } -} diff --git a/internal/api/mocks.go b/internal/api/mocks.go index ac8500dd8..3ec68bf17 100644 --- a/internal/api/mocks.go +++ b/internal/api/mocks.go @@ -19,6 +19,8 @@ import ( const MOCK_CONTRACT_ADDR = "contract" +// MockEnv returns a mock environment for testing +// this is the original, and should not be changed. func MockEnv() types.Env { return types.Env{ Block: types.BlockInfo{ @@ -35,9 +37,10 @@ func MockEnv() types.Env { } } -func MockEnvBin(t testing.TB) []byte { +func MockEnvBin(tb testing.TB) []byte { + tb.Helper() bin, err := json.Marshal(MockEnv()) - require.NoError(t, err) + require.NoError(tb, err) return bin } @@ -55,9 +58,10 @@ func MockInfoWithFunds(sender types.HumanAddress) types.MessageInfo { }}) } -func MockInfoBin(t testing.TB, sender types.HumanAddress) []byte { +func MockInfoBin(tb testing.TB, sender types.HumanAddress) []byte { + tb.Helper() bin, err := json.Marshal(MockInfoWithFunds(sender)) - require.NoError(t, err) + require.NoError(tb, err) return bin } @@ -391,13 +395,13 @@ func TestMockApi(t *testing.T) { human := "foobar" canon, cost, err := MockCanonicalizeAddress(human) require.NoError(t, err) - assert.Equal(t, CanonicalLength, len(canon)) - assert.Equal(t, CostCanonical, cost) + require.Len(t, canon, CanonicalLength) + require.Equal(t, CostCanonical, cost) recover, cost, err := MockHumanizeAddress(canon) require.NoError(t, err) - assert.Equal(t, recover, human) - assert.Equal(t, CostHuman, cost) + require.Equal(t, recover, human) + require.Equal(t, CostHuman, cost) } /**** MockQuerier ****/ @@ -536,7 +540,7 @@ func (q ReflectCustom) Query(request json.RawMessage) ([]byte, error) { return json.Marshal(resp) } -//************ test code for mocks *************************// +// ************ test code for mocks 
*************************// func TestBankQuerierAllBalances(t *testing.T) { addr := "foobar" @@ -639,7 +643,7 @@ func TestReflectCustomQuerier(t *testing.T) { var resp CustomResponse err = json.Unmarshal(bz, &resp) require.NoError(t, err) - assert.Equal(t, resp.Msg, "PONG") + assert.Equal(t, "PONG", resp.Msg) // try capital msg2, err := json.Marshal(CustomQuery{Capitalized: &CapitalizedQuery{Text: "small."}}) @@ -649,5 +653,5 @@ func TestReflectCustomQuerier(t *testing.T) { var resp2 CustomResponse err = json.Unmarshal(bz, &resp2) require.NoError(t, err) - assert.Equal(t, resp2.Msg, "SMALL.") + assert.Equal(t, "SMALL.", resp2.Msg) } diff --git a/internal/api/testdb/memdb_iterator.go b/internal/api/testdb/memdb_iterator.go index a65efa281..9bd1a0efd 100644 --- a/internal/api/testdb/memdb_iterator.go +++ b/internal/api/testdb/memdb_iterator.go @@ -141,12 +141,18 @@ func (i *memDBIterator) Error() error { // Key implements Iterator. func (i *memDBIterator) Key() []byte { i.assertIsValid() + if len(i.item.key) == 0 { + return nil + } return i.item.key } // Value implements Iterator. func (i *memDBIterator) Value() []byte { i.assertIsValid() + if len(i.item.value) == 0 { + return nil + } return i.item.value } diff --git a/internal/api/version.go b/internal/api/version.go index 43a13f0b9..7dad948d4 100644 --- a/internal/api/version.go +++ b/internal/api/version.go @@ -1,17 +1,10 @@ package api -/* -#include "bindings.h" -*/ -import "C" +// Just define a constant version here +const wasmvmVersion = "6.9.0" +// LibwasmvmVersion returns the version of this library as a string. func LibwasmvmVersion() (string, error) { - version_ptr, err := C.version_str() - if err != nil { - return "", err - } - // For C.GoString documentation see https://pkg.go.dev/cmd/cgo and - // https://gist.github.com/helinwang/2c7bd2867ea5110f70e6431a7c80cd9b - version_copy := C.GoString(version_ptr) - return version_copy, nil + // Since we're no longer using cgo, we return the hardcoded version. 
+ return wasmvmVersion, nil } diff --git a/internal/api/version_test.go b/internal/api/version_test.go index 038b1de13..3e809b83f 100644 --- a/internal/api/version_test.go +++ b/internal/api/version_test.go @@ -1,7 +1,6 @@ package api import ( - "regexp" "testing" "github.com/stretchr/testify/require" @@ -10,5 +9,5 @@ import ( func TestLibwasmvmVersion(t *testing.T) { version, err := LibwasmvmVersion() require.NoError(t, err) - require.Regexp(t, regexp.MustCompile(`^([0-9]+)\.([0-9]+)\.([0-9]+)(-[a-z0-9.]+)?$`), version) + require.Regexp(t, `^([0-9]+)\.([0-9]+)\.([0-9]+)(-[a-z0-9.]+)?$`, version) } diff --git a/internal/runtime/crypto.go b/internal/runtime/crypto.go new file mode 100644 index 000000000..359fa2cce --- /dev/null +++ b/internal/runtime/crypto.go @@ -0,0 +1,152 @@ +package runtime + +import ( + "crypto/ecdh" + "crypto/ecdsa" + "crypto/elliptic" + "errors" + "fmt" + "math/big" + + bls12381 "github.com/kilic/bls12-381" +) + +// BLS12381AggregateG1 aggregates multiple G1 points into a single compressed G1 point. +func BLS12381AggregateG1(elements [][]byte) ([]byte, error) { + if len(elements) == 0 { + return nil, fmt.Errorf("no elements to aggregate") + } + + g1 := bls12381.NewG1() + result := g1.Zero() + + for _, element := range elements { + point, err := g1.FromCompressed(element) + if err != nil { + return nil, fmt.Errorf("failed to decompress G1 point: %w", err) + } + g1.Add(result, result, point) + } + + return g1.ToCompressed(result), nil +} + +// BLS12381AggregateG2 aggregates multiple G2 points into a single compressed G2 point. 
+func BLS12381AggregateG2(elements [][]byte) ([]byte, error) { + if len(elements) == 0 { + return nil, fmt.Errorf("no elements to aggregate") + } + + g2 := bls12381.NewG2() + result := g2.Zero() + + for _, element := range elements { + point, err := g2.FromCompressed(element) + if err != nil { + return nil, fmt.Errorf("failed to decompress G2 point: %w", err) + } + g2.Add(result, result, point) + } + + return g2.ToCompressed(result), nil +} + +// BLS12381HashToG1 hashes arbitrary bytes to a compressed G1 point. +func BLS12381HashToG1(message []byte) ([]byte, error) { + g1 := bls12381.NewG1() + // You can choose a domain separation string of your liking. + // Here, we use a placeholder domain: "BLS12381G1_XMD:SHA-256_SSWU_RO_" + point, err := g1.HashToCurve(message, []byte("BLS12381G1_XMD:SHA-256_SSWU_RO_")) + if err != nil { + return nil, fmt.Errorf("failed to hash to G1: %w", err) + } + return g1.ToCompressed(point), nil +} + +// BLS12381HashToG2 hashes arbitrary bytes to a compressed G2 point. +func BLS12381HashToG2(message []byte) ([]byte, error) { + g2 := bls12381.NewG2() + // Similar domain separation string for G2. + point, err := g2.HashToCurve(message, []byte("BLS12381G2_XMD:SHA-256_SSWU_RO_")) + if err != nil { + return nil, fmt.Errorf("failed to hash to G2: %w", err) + } + return g2.ToCompressed(point), nil +} + +// BLS12381PairingEquality checks if e(a1, a2) == e(b1, b2) in the BLS12-381 pairing. 
+func BLS12381PairingEquality(a1Compressed, a2Compressed, b1Compressed, b2Compressed []byte) (bool, error) { + g1 := bls12381.NewG1() + g2 := bls12381.NewG2() + + a1, err := g1.FromCompressed(a1Compressed) + if err != nil { + return false, fmt.Errorf("failed to decompress a1: %w", err) + } + a2, err := g2.FromCompressed(a2Compressed) + if err != nil { + return false, fmt.Errorf("failed to decompress a2: %w", err) + } + b1, err := g1.FromCompressed(b1Compressed) + if err != nil { + return false, fmt.Errorf("failed to decompress b1: %w", err) + } + b2, err := g2.FromCompressed(b2Compressed) + if err != nil { + return false, fmt.Errorf("failed to decompress b2: %w", err) + } + + engine := bls12381.NewEngine() + // AddPair computes pairing e(a1, a2). + engine.AddPair(a1, a2) + // AddPairInv computes pairing e(b1, b2)^(-1), so effectively we check e(a1,a2) * e(b1,b2)^(-1) == 1. + engine.AddPairInv(b1, b2) + + ok := engine.Check() + return ok, nil +} + +// Secp256r1Verify verifies a P-256 ECDSA signature. +// hash is the message digest (NOT the preimage), +// signature should be 64 bytes (r and s concatenated), +// pubkey should be an uncompressed or compressed public key in standard format. +func Secp256r1Verify(hash, signature, pubkey []byte) (bool, error) { + // Parse public key using crypto/ecdh + curve := ecdh.P256() + key, err := curve.NewPublicKey(pubkey) + if err != nil { + return false, fmt.Errorf("invalid public key: %w", err) + } + + // Get the raw coordinates for ECDSA verification + rawKey := key.Bytes() + x, y := elliptic.UnmarshalCompressed(elliptic.P256(), rawKey) + if x == nil { + return false, errors.New("failed to parse public key coordinates") + } + + // Parse signature: must be exactly 64 bytes => r (first 32 bytes), s (second 32 bytes). 
+ if len(signature) != 64 { + return false, fmt.Errorf("signature must be 64 bytes, got %d", len(signature)) + } + r := new(big.Int).SetBytes(signature[:32]) + s := new(big.Int).SetBytes(signature[32:64]) + + pub := &ecdsa.PublicKey{ + Curve: elliptic.P256(), + X: x, + Y: y, + } + + verified := ecdsa.Verify(pub, hash, r, s) + return verified, nil +} + +// Secp256r1RecoverPubkey tries to recover a P-256 public key from a signature. +// In general, ECDSA on P-256 is not commonly used with "public key recovery" like secp256k1. +// This is non-standard and provided here as a placeholder or with specialized tooling only. +func Secp256r1RecoverPubkey(hash, signature []byte, recovery byte) ([]byte, error) { + // ECDSA on secp256r1 (P-256) does not support public key recovery in the standard library. + // Typically one would need a specialized library. This stub is included for completeness. + return nil, fmt.Errorf("public key recovery is not standard for secp256r1") +} diff --git a/internal/runtime/gas.go b/internal/runtime/gas.go new file mode 100644 index 000000000..ce28cd1b7 --- /dev/null +++ b/internal/runtime/gas.go @@ -0,0 +1,72 @@ +package runtime + +import "fmt" + +// Gas costs for various operations +const ( + // Memory operations + gasPerByte = 1 + + // Database operations + gasCostRead = 100 + gasCostWrite = 200 + gasCostQuery = 500 + + // Iterator operations + gasCostIteratorCreate = 2000 + gasCostIteratorNext = 100 +) + +// GasConfig holds gas costs for different operations +type GasConfig struct { + PerByte uint64 + DatabaseRead uint64 + DatabaseWrite uint64 + ExternalQuery uint64 + IteratorCreate uint64 + IteratorNext uint64 +} + +// DefaultGasConfig returns the default gas configuration +func DefaultGasConfig() GasConfig { + return GasConfig{ + PerByte: gasPerByte, + DatabaseRead: gasCostRead, + DatabaseWrite: gasCostWrite, + ExternalQuery: gasCostQuery, + IteratorCreate: gasCostIteratorCreate, + IteratorNext: gasCostIteratorNext, + } +} + +// GasState 
tracks gas usage during execution +type GasState struct { + limit uint64 + used uint64 +} + +// NewGasState creates a new GasState with the given limit +func NewGasState(limit uint64) *GasState { + return &GasState{ + limit: limit, + used: 0, + } +} + +// ConsumeGas consumes gas and checks the limit +func (g *GasState) ConsumeGas(amount uint64, description string) { + g.used += amount + if g.used > g.limit { + panic(fmt.Sprintf("out of gas: used %d, limit %d - %s", g.used, g.limit, description)) + } +} + +// GetGasUsed returns the amount of gas used +func (g *GasState) GetGasUsed() uint64 { + return g.used +} + +// GetGasLimit returns the gas limit +func (g *GasState) GetGasLimit() uint64 { + return g.limit +} diff --git a/internal/runtime/hostcrypto.go b/internal/runtime/hostcrypto.go new file mode 100644 index 000000000..d860ca1e9 --- /dev/null +++ b/internal/runtime/hostcrypto.go @@ -0,0 +1,271 @@ +package runtime + +import ( + "context" + "encoding/binary" + "fmt" + + "github.com/tetratelabs/wazero/api" +) + +// hostBls12381AggregateG1 implements bls12_381_aggregate_g1 +func hostBls12381AggregateG1(ctx context.Context, mod api.Module, elementsPtr uint32) (uint32, uint32) { + mem := mod.Memory() + + // Read length prefix (4 bytes) + lenBytes, err := readMemory(mem, elementsPtr, 4) + if err != nil { + panic(fmt.Sprintf("failed to read elements length: %v", err)) + } + numElements := binary.LittleEndian.Uint32(lenBytes) + + // Read elements + elements := make([][]byte, numElements) + offset := elementsPtr + 4 + for i := uint32(0); i < numElements; i++ { + // Read element length + elemLenBytes, err := readMemory(mem, offset, 4) + if err != nil { + panic(fmt.Sprintf("failed to read element length: %v", err)) + } + elemLen := binary.LittleEndian.Uint32(elemLenBytes) + offset += 4 + + // Read element data + element, err := readMemory(mem, offset, elemLen) + if err != nil { + panic(fmt.Sprintf("failed to read element data: %v", err)) + } + elements[i] = element + 
offset += elemLen + } + + // Perform aggregation + result, err := BLS12381AggregateG1(elements) + if err != nil { + panic(fmt.Sprintf("failed to aggregate G1 points: %v", err)) + } + + // Allocate memory for result + resultPtr, err := allocateInContract(ctx, mod, uint32(len(result))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for result: %v", err)) + } + + // Write result + if err := writeMemory(mem, resultPtr, result); err != nil { + panic(fmt.Sprintf("failed to write result: %v", err)) + } + + return resultPtr, uint32(len(result)) +} + +// hostBls12381AggregateG2 implements bls12_381_aggregate_g2 +func hostBls12381AggregateG2(ctx context.Context, mod api.Module, elementsPtr uint32) (uint32, uint32) { + mem := mod.Memory() + + // Read length prefix (4 bytes) + lenBytes, err := readMemory(mem, elementsPtr, 4) + if err != nil { + panic(fmt.Sprintf("failed to read elements length: %v", err)) + } + numElements := binary.LittleEndian.Uint32(lenBytes) + + // Read elements + elements := make([][]byte, numElements) + offset := elementsPtr + 4 + for i := uint32(0); i < numElements; i++ { + // Read element length + elemLenBytes, err := readMemory(mem, offset, 4) + if err != nil { + panic(fmt.Sprintf("failed to read element length: %v", err)) + } + elemLen := binary.LittleEndian.Uint32(elemLenBytes) + offset += 4 + + // Read element data + element, err := readMemory(mem, offset, elemLen) + if err != nil { + panic(fmt.Sprintf("failed to read element data: %v", err)) + } + elements[i] = element + offset += elemLen + } + + // Perform aggregation + result, err := BLS12381AggregateG2(elements) + if err != nil { + panic(fmt.Sprintf("failed to aggregate G2 points: %v", err)) + } + + // Allocate memory for result + resultPtr, err := allocateInContract(ctx, mod, uint32(len(result))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for result: %v", err)) + } + + // Write result + if err := writeMemory(mem, resultPtr, result); err != nil { + 
panic(fmt.Sprintf("failed to write result: %v", err)) + } + + return resultPtr, uint32(len(result)) +} + +// hostBls12381HashToG1 implements bls12_381_hash_to_g1 +func hostBls12381HashToG1(ctx context.Context, mod api.Module, hashPtr, hashLen uint32) (uint32, uint32) { + mem := mod.Memory() + + // Read hash + hash, err := readMemory(mem, hashPtr, hashLen) + if err != nil { + panic(fmt.Sprintf("failed to read hash: %v", err)) + } + + // Perform hash-to-curve + result, err := BLS12381HashToG1(hash) + if err != nil { + panic(fmt.Sprintf("failed to hash to G1: %v", err)) + } + + // Allocate memory for result + resultPtr, err := allocateInContract(ctx, mod, uint32(len(result))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for result: %v", err)) + } + + // Write result + if err := writeMemory(mem, resultPtr, result); err != nil { + panic(fmt.Sprintf("failed to write result: %v", err)) + } + + return resultPtr, uint32(len(result)) +} + +// hostBls12381HashToG2 implements bls12_381_hash_to_g2 +func hostBls12381HashToG2(ctx context.Context, mod api.Module, hashPtr, hashLen uint32) (uint32, uint32) { + mem := mod.Memory() + + // Read hash + hash, err := readMemory(mem, hashPtr, hashLen) + if err != nil { + panic(fmt.Sprintf("failed to read hash: %v", err)) + } + + // Perform hash-to-curve + result, err := BLS12381HashToG2(hash) + if err != nil { + panic(fmt.Sprintf("failed to hash to G2: %v", err)) + } + + // Allocate memory for result + resultPtr, err := allocateInContract(ctx, mod, uint32(len(result))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for result: %v", err)) + } + + // Write result + if err := writeMemory(mem, resultPtr, result); err != nil { + panic(fmt.Sprintf("failed to write result: %v", err)) + } + + return resultPtr, uint32(len(result)) +} + +// hostBls12381PairingEquality implements bls12_381_pairing_equality +func hostBls12381PairingEquality(_ context.Context, mod api.Module, a1Ptr, a1Len, a2Ptr, a2Len, b1Ptr, 
b1Len, b2Ptr, b2Len uint32) uint32 { + mem := mod.Memory() + + // Read points + a1, err := readMemory(mem, a1Ptr, a1Len) + if err != nil { + panic(fmt.Sprintf("failed to read a1: %v", err)) + } + a2, err := readMemory(mem, a2Ptr, a2Len) + if err != nil { + panic(fmt.Sprintf("failed to read a2: %v", err)) + } + b1, err := readMemory(mem, b1Ptr, b1Len) + if err != nil { + panic(fmt.Sprintf("failed to read b1: %v", err)) + } + b2, err := readMemory(mem, b2Ptr, b2Len) + if err != nil { + panic(fmt.Sprintf("failed to read b2: %v", err)) + } + + // Check pairing equality + result, err := BLS12381PairingEquality(a1, a2, b1, b2) + if err != nil { + panic(fmt.Sprintf("failed to check pairing equality: %v", err)) + } + + if result { + return 1 + } + return 0 +} + +// hostSecp256r1Verify implements secp256r1_verify +func hostSecp256r1Verify(_ context.Context, mod api.Module, hashPtr, hashLen, sigPtr, sigLen, pubkeyPtr, pubkeyLen uint32) uint32 { + mem := mod.Memory() + + // Read inputs + hash, err := readMemory(mem, hashPtr, hashLen) + if err != nil { + panic(fmt.Sprintf("failed to read hash: %v", err)) + } + signature, err := readMemory(mem, sigPtr, sigLen) + if err != nil { + panic(fmt.Sprintf("failed to read signature: %v", err)) + } + pubkey, err := readMemory(mem, pubkeyPtr, pubkeyLen) + if err != nil { + panic(fmt.Sprintf("failed to read public key: %v", err)) + } + + // Verify signature + result, err := Secp256r1Verify(hash, signature, pubkey) + if err != nil { + panic(fmt.Sprintf("failed to verify signature: %v", err)) + } + + if result { + return 1 + } + return 0 +} + +// hostSecp256r1RecoverPubkey implements secp256r1_recover_pubkey +func hostSecp256r1RecoverPubkey(ctx context.Context, mod api.Module, hashPtr, hashLen, sigPtr, sigLen, recovery uint32) (uint32, uint32) { + mem := mod.Memory() + + // Read inputs + hash, err := readMemory(mem, hashPtr, hashLen) + if err != nil { + panic(fmt.Sprintf("failed to read hash: %v", err)) + } + signature, err := 
readMemory(mem, sigPtr, sigLen) + if err != nil { + panic(fmt.Sprintf("failed to read signature: %v", err)) + } + + // Recover public key + pubkey, err := Secp256r1RecoverPubkey(hash, signature, byte(recovery)) + if err != nil { + panic(fmt.Sprintf("failed to recover public key: %v", err)) + } + + // Allocate memory for result + resultPtr, err := allocateInContract(ctx, mod, uint32(len(pubkey))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for result: %v", err)) + } + + // Write result + if err := writeMemory(mem, resultPtr, pubkey); err != nil { + panic(fmt.Sprintf("failed to write result: %v", err)) + } + + return resultPtr, uint32(len(pubkey)) +} diff --git a/internal/runtime/hostfunctions.go b/internal/runtime/hostfunctions.go new file mode 100644 index 000000000..0ffdfcff6 --- /dev/null +++ b/internal/runtime/hostfunctions.go @@ -0,0 +1,1337 @@ +package runtime + +import ( + "context" + "encoding/binary" + "encoding/json" + "fmt" + + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + + "github.com/CosmWasm/wasmvm/v2/types" +) + +// RuntimeEnvironment holds the environment for contract execution + +// NewRuntimeEnvironment creates a new runtime environment +func NewRuntimeEnvironment(db types.KVStore, api *types.GoAPI, querier types.Querier) *RuntimeEnvironment { + return &RuntimeEnvironment{ + DB: db, + API: *api, + Querier: querier, + iterators: make(map[uint64]map[uint64]types.Iterator), + } +} + +// StartCall starts a new contract call and returns a call ID +func (e *RuntimeEnvironment) StartCall() uint64 { + e.iteratorsMutex.Lock() + defer e.iteratorsMutex.Unlock() + + e.nextCallID++ + e.iterators[e.nextCallID] = make(map[uint64]types.Iterator) + return e.nextCallID +} + +// StoreIterator stores an iterator and returns its ID +func (e *RuntimeEnvironment) StoreIterator(callID uint64, iter types.Iterator) uint64 { + e.iteratorsMutex.Lock() + defer e.iteratorsMutex.Unlock() + + e.nextIterID++ + if e.iterators[callID] 
== nil { + e.iterators[callID] = make(map[uint64]types.Iterator) + } + e.iterators[callID][e.nextIterID] = iter + return e.nextIterID +} + +// GetIterator retrieves an iterator by its IDs +func (e *RuntimeEnvironment) GetIterator(callID, iterID uint64) types.Iterator { + e.iteratorsMutex.RLock() + defer e.iteratorsMutex.RUnlock() + + if callMap, exists := e.iterators[callID]; exists { + return callMap[iterID] + } + return nil +} + +// EndCall cleans up all iterators for a call +func (e *RuntimeEnvironment) EndCall(callID uint64) { + e.iteratorsMutex.Lock() + defer e.iteratorsMutex.Unlock() + + delete(e.iterators, callID) +} + +// IteratorID represents a unique identifier for an iterator +type IteratorID struct { + CallID uint64 + IteratorID uint64 +} + +// Helper functions for memory operations +func readMemory(mem api.Memory, offset, size uint32) ([]byte, error) { + data, ok := mem.Read(offset, size) + if !ok { + return nil, fmt.Errorf("failed to read %d bytes at offset %d", size, offset) + } + return data, nil +} + +func writeMemory(mem api.Memory, offset uint32, data []byte) error { + if !mem.Write(offset, data) { + return fmt.Errorf("failed to write %d bytes at offset %d", len(data), offset) + } + return nil +} + +// allocateInContract calls the contract's allocate function +func allocateInContract(ctx context.Context, mod api.Module, size uint32) (uint32, error) { + allocate := mod.ExportedFunction("allocate") + if allocate == nil { + return 0, fmt.Errorf("allocate function not found in module") + } + + results, err := allocate.Call(ctx, uint64(size)) + if err != nil { + return 0, fmt.Errorf("failed to allocate memory: %w", err) + } + + return uint32(results[0]), nil +} + +// hostGet implements db_get +func hostGet(ctx context.Context, mod api.Module, keyPtr, keyLen uint32) (uint32, uint32) { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + key, err := readMemory(mem, keyPtr, keyLen) + if err != nil { + panic(fmt.Sprintf("failed to 
read key from memory: %v", err)) + } + + value := env.DB.Get(key) + if value == nil { + // Return 0,0 for "not found" + return 0, 0 + } + + offset, err := allocateInContract(ctx, mod, uint32(len(value))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for value: %v", err)) + } + + if err := writeMemory(mem, offset, value); err != nil { + panic(fmt.Sprintf("failed to write value to memory: %v", err)) + } + + return offset, uint32(len(value)) +} + +// hostSet implements db_set +func hostSet(ctx context.Context, mod api.Module, keyPtr, keyLen, valPtr, valLen uint32) { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + key, err := readMemory(mem, keyPtr, keyLen) + if err != nil { + panic(fmt.Sprintf("failed to read key from memory: %v", err)) + } + + val, err := readMemory(mem, valPtr, valLen) + if err != nil { + panic(fmt.Sprintf("failed to read value from memory: %v", err)) + } + + env.DB.Set(key, val) +} + +// hostHumanizeAddress implements api_humanize_address +func hostHumanizeAddress(ctx context.Context, mod api.Module, addrPtr, addrLen uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read the input address from guest memory. + addr, err := readMemory(mem, addrPtr, addrLen) + if err != nil { + // If we fail to read memory, return a non-zero error code. + return 1 + } + + // Call the API to humanize the address. + human, _, err := env.API.HumanizeAddress(addr) + if err != nil { + // On failure, return a non-zero error code. + return 1 + } + + // We must write the result back into the same memory location, if it fits. + if uint32(len(human)) > addrLen { + // If the humanized address is larger than the provided buffer, + // return an error code. 
+ return 1 + } + + // Write the humanized address back to memory + if err := writeMemory(mem, addrPtr, []byte(human)); err != nil { + return 1 + } + + // Return 0 on success + return 0 +} + +// hostQueryExternal implements querier_query +func hostQueryExternal(ctx context.Context, mod api.Module, reqPtr, reqLen, gasLimit uint32) (resPtr, resLen uint32) { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + req, err := readMemory(mem, reqPtr, reqLen) + if err != nil { + panic(fmt.Sprintf("failed to read query request: %v", err)) + } + + res := types.RustQuery(env.Querier, req, uint64(gasLimit)) + serialized, err := json.Marshal(res) + if err != nil { + return 0, 0 + } + + offset, err := allocateInContract(ctx, mod, uint32(len(serialized))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory (via contract's allocate): %v", err)) + } + + if err := writeMemory(mem, offset, serialized); err != nil { + panic(fmt.Sprintf("failed to write query response: %v", err)) + } + + return offset, uint32(len(serialized)) +} + +// hostCanonicalizeAddress implements addr_canonicalize +func hostCanonicalizeAddress(ctx context.Context, mod api.Module, addrPtr, addrLen uint32) uint32 { + // Retrieve your runtime environment. + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read the input address from guest memory. + addr, err := readMemory(mem, addrPtr, addrLen) + if err != nil { + // If we fail to read memory, return a non-zero error code. + return 1 + } + + // Call the API to canonicalize the address. + canonical, _, err := env.API.CanonicalizeAddress(string(addr)) + if err != nil { + // On failure, just return a non-zero error code. + return 1 + } + + // Here we must decide where to write the canonical address. + // Without details, let's assume we write it back to the same location. 
+ if uint32(len(canonical)) > addrLen { + // If the canonical address is larger than the provided buffer, + // we have no way to signal that other than returning an error. + return 1 + } + + // Write the canonical address back to the memory at addrPtr. + if err := writeMemory(mem, addrPtr, canonical); err != nil { + return 1 + } + + // Return 0 on success. + return 0 +} + +// hostValidateAddress implements addr_validate +func hostValidateAddress(ctx context.Context, mod api.Module, addrPtr uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read the address bytes directly (no length prefix in Rust) + addr, err := readMemory(mem, addrPtr, 32) // Fixed size for addresses + if err != nil { + panic(fmt.Sprintf("failed to read address from memory: %v", err)) + } + + // Convert to string and validate + _, err = env.API.ValidateAddress(string(addr)) + if err != nil { + return 0 // Return 0 for invalid address + } + + return 1 // Return 1 for valid address +} + +// hostScan implements db_scan +func hostScan(ctx context.Context, mod api.Module, startPtr, startLen, order uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read the start key if any... 
+ start, err := readMemory(mem, startPtr, startLen) + if err != nil { + panic(fmt.Sprintf("failed to read start key: %v", err)) + } + + var iter types.Iterator + if order == 1 { + iter = env.DB.ReverseIterator(start, nil) + } else { + iter = env.DB.Iterator(start, nil) + } + + // Store the iterator and return its ID + callID := env.StartCall() + iterID := env.StoreIterator(callID, iter) + + // Pack both IDs into a single uint32 + // Use high 16 bits for callID and low 16 bits for iterID + return uint32(callID<<16 | iterID&0xFFFF) +} + +// hostNext implements db_next +func hostNext(ctx context.Context, mod api.Module, iterID uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Extract call_id and iter_id from the packed uint32 + callID := uint64(iterID >> 16) + actualIterID := uint64(iterID & 0xFFFF) + + // Get the iterator + iter := env.GetIterator(callID, actualIterID) + if iter == nil { + return 0 + } + + // Check if iterator is still valid + if !iter.Valid() { + return 0 + } + + // Get key and value + key := iter.Key() + value := iter.Value() + + // Allocate memory for key and value + // Format: [key_len(4 bytes)][key][value_len(4 bytes)][value] + totalLen := 4 + len(key) + 4 + len(value) + offset, err := allocateInContract(ctx, mod, uint32(totalLen)) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory: %v", err)) + } + + // Write key length + keyLenData := make([]byte, 4) + binary.LittleEndian.PutUint32(keyLenData, uint32(len(key))) + if err := writeMemory(mem, offset, keyLenData); err != nil { + panic(fmt.Sprintf("failed to write key length: %v", err)) + } + + // Write key + if err := writeMemory(mem, offset+4, key); err != nil { + panic(fmt.Sprintf("failed to write key: %v", err)) + } + + // Write value length + valLenData := make([]byte, 4) + binary.LittleEndian.PutUint32(valLenData, uint32(len(value))) + if err := writeMemory(mem, offset+4+uint32(len(key)), valLenData); err != nil { + 
panic(fmt.Sprintf("failed to write value length: %v", err)) + } + + // Write value + if err := writeMemory(mem, offset+8+uint32(len(key)), value); err != nil { + panic(fmt.Sprintf("failed to write value: %v", err)) + } + + // Move to next item + iter.Next() + + return offset +} + +// hostNextValue implements db_next_value +func hostNextValue(ctx context.Context, mod api.Module, callID, iterID uint64) (valPtr, valLen, errCode uint32) { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Check gas for iterator next operation + if env.gasUsed+gasCostIteratorNext > env.Gas.GasConsumed() { + return 0, 0, 1 // Return error code 1 for out of gas + } + env.gasUsed += gasCostIteratorNext + + // Get iterator from environment + iter := env.GetIterator(callID, iterID) + if iter == nil { + return 0, 0, 2 // Return error code 2 for invalid iterator + } + + // Check if there are more items + if !iter.Valid() { + return 0, 0, 0 // Return 0 for end of iteration + } + + // Read value + value := iter.Value() + + // Instead of env.Memory.Allocate(...): + // valOffset, err := env.Memory.Allocate(mem, uint32(len(value))) + // Use the contract's allocateInContract: + valOffset, err := allocateInContract(ctx, mod, uint32(len(value))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for value (via contract's allocate): %v", err)) + } + + if err := writeMemory(mem, valOffset, value); err != nil { + panic(fmt.Sprintf("failed to write value to memory: %v", err)) + } + + // Move to next item + iter.Next() + + return valOffset, uint32(len(value)), 0 +} + +// hostCloseIterator implements db_close_iterator +func hostCloseIterator(ctx context.Context, _ api.Module, callID, iterID uint64) { + env := ctx.Value("env").(*RuntimeEnvironment) + + // Get iterator from environment + iter := env.GetIterator(callID, iterID) + if iter == nil { + return + } + + // Close the iterator + iter.Close() + + // Remove from environment + env.iteratorsMutex.Lock() + defer 
env.iteratorsMutex.Unlock() + + if callMap, exists := env.iterators[callID]; exists { + delete(callMap, iterID) + } +} + +// hostAbort implements the abort function required by Wasm modules +// hostAbort implements the abort function required by Wasm modules +func hostAbort(ctx context.Context, mod api.Module, code uint32) { + fmt.Printf("\n===================== [ WASM CONTRACT ABORT ] =====================\n") + fmt.Printf("Abort code: %d (0x%x)\n", code, code) + + // Try logging the name of this module (if any) + if mod != nil { + fmt.Printf("Module name: %q\n", mod.Name()) + } + + // Attempt to get and log memory details + if mem := mod.Memory(); mem != nil { + fmt.Printf("Memory size (pages): %d\n", mem.Size()) + // We can guess that each "page" is 64 KiB (65536 bytes). + fmt.Printf("Approx. memory size (bytes): %d\n", mem.Size()*65536) + + // We'll attempt to read memory around the `code` offset + // in case the contract placed an error message there. + ranges := []struct { + start uint32 + size uint32 + desc string + }{ + {code - 100, 200, "around the code pointer (code - 100..code+100)"}, + {0, 256, "first 256 bytes of memory"}, + {code & 0xFFFF, 256, "lower 16 bits offset"}, + } + + for i, r := range ranges { + // Skip reading if `r.start + r.size` might exceed memory bounds + if r.start > mem.Size() { + fmt.Printf("[range %d] Start offset %d is out of memory bounds (size: %d bytes)\n", i, r.start, mem.Size()) + continue + } + end := r.start + r.size + if end > mem.Size() { + end = mem.Size() + } + lengthToRead := end - r.start + data, ok := mem.Read(r.start, lengthToRead) + if ok && len(data) > 0 { + fmt.Printf("[range %d] Reading %d bytes %s at offset=%d:\n", i, lengthToRead, r.desc, r.start) + // Print as string + fmt.Printf(" As string: %q\n", string(data)) + // Print raw bytes in hex + fmt.Printf(" As hex: % x\n", data) + } else { + fmt.Printf("[range %d] Could not read data or data is empty at offset=%d\n", i, r.start) + } + } + } else { + 
fmt.Printf("No memory found in the module\n") + } + + // Attempt to fetch the runtime environment from the context + envAny := ctx.Value(envKey) + if envAny == nil { + fmt.Printf("No runtime environment (envKey) found in the context.\n") + } else { + env, ok := envAny.(*RuntimeEnvironment) + if !ok { + fmt.Printf("Found envKey in context but could not cast to *RuntimeEnvironment.\n") + } else { + // Now we can print out environment details + fmt.Printf("\n=== Runtime Environment Debug Info ===\n") + fmt.Printf(" - Gas used: %d\n", env.gasUsed) + fmt.Printf(" - Gas limit: %d\n", env.Gas.GasConsumed()) // If `env.Gas` is a pointer to a GasMeter or similar + + // If you have additional fields in RuntimeEnvironment that are relevant, print them here. + // For instance, you may want to show how many iterators are open or if there's a contract address. + // Example: + fmt.Printf(" - open iterators callID->(iterID->Iterator) map size: %d\n", len(env.iterators)) + for callID, iterMap := range env.iterators { + fmt.Printf(" callID=%d has %d iterators\n", callID, len(iterMap)) + } + + // If your environment has references to the current contract address, block info, etc., + // log them here. 
For example: + // fmt.Printf(" - Current contract address: %s\n", env.ContractAddress) + } + } + + panic(fmt.Sprintf("Wasm contract aborted with code: %d (0x%x)", code, code)) +} + +// hostDbRead implements db_read +func hostDbRead(ctx context.Context, mod api.Module, keyPtr uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read length prefix (4 bytes) from the key pointer + lenBytes, err := readMemory(mem, keyPtr, 4) + if err != nil { + panic(fmt.Sprintf("failed to read key length from memory: %v", err)) + } + keyLen := binary.LittleEndian.Uint32(lenBytes) + + // Read the actual key + key, err := readMemory(mem, keyPtr+4, keyLen) + if err != nil { + panic(fmt.Sprintf("failed to read key from memory: %v", err)) + } + + value := env.DB.Get(key) + if len(value) == 0 { + return 0 + } + + // Allocate memory for the result: 4 bytes for length + actual value + totalLen := 4 + len(value) + offset, err := allocateInContract(ctx, mod, uint32(totalLen)) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory: %v", err)) + } + + // Write length prefix + lenData := make([]byte, 4) + binary.LittleEndian.PutUint32(lenData, uint32(len(value))) + if err := writeMemory(mem, offset, lenData); err != nil { + panic(fmt.Sprintf("failed to write value length to memory: %v", err)) + } + + // Write value + if err := writeMemory(mem, offset+4, value); err != nil { + panic(fmt.Sprintf("failed to write value to memory: %v", err)) + } + + return offset +} + +// hostDbWrite implements db_write +func hostDbWrite(ctx context.Context, mod api.Module, keyPtr, valuePtr uint32) { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read key length prefix (4 bytes) + keyLenBytes, err := readMemory(mem, keyPtr, 4) + if err != nil { + panic(fmt.Sprintf("failed to read key length from memory: %v", err)) + } + keyLen := binary.LittleEndian.Uint32(keyLenBytes) + + // Read value length prefix (4 bytes) + valLenBytes, err := 
readMemory(mem, valuePtr, 4) + if err != nil { + panic(fmt.Sprintf("failed to read value length from memory: %v", err)) + } + valLen := binary.LittleEndian.Uint32(valLenBytes) + + // Read the actual key and value + key, err := readMemory(mem, keyPtr+4, keyLen) + if err != nil { + panic(fmt.Sprintf("failed to read key from memory: %v", err)) + } + + value, err := readMemory(mem, valuePtr+4, valLen) + if err != nil { + panic(fmt.Sprintf("failed to read value from memory: %v", err)) + } + + env.DB.Set(key, value) +} + +// hostSecp256k1Verify implements secp256k1_verify +func hostSecp256k1Verify(ctx context.Context, mod api.Module, hash_ptr, sig_ptr, pubkey_ptr uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read message from memory (32 bytes for hash) + message, err := readMemory(mem, hash_ptr, 32) + if err != nil { + return 0 + } + + // Read signature from memory (64 bytes for signature) + signature, err := readMemory(mem, sig_ptr, 64) + if err != nil { + return 0 + } + + // Read public key from memory (33 bytes for compressed pubkey) + pubKey, err := readMemory(mem, pubkey_ptr, 33) + if err != nil { + return 0 + } + + // Call the API to verify the signature + verified, _, err := env.API.Secp256k1Verify(message, signature, pubKey) + if err != nil { + return 0 + } + + if verified { + return 1 + } + return 0 +} + +// hostDbRemove implements db_remove +func hostDbRemove(ctx context.Context, mod api.Module, keyPtr uint32) { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read length prefix (4 bytes) from the key pointer + lenBytes, err := readMemory(mem, keyPtr, 4) + if err != nil { + panic(fmt.Sprintf("failed to read key length from memory: %v", err)) + } + keyLen := binary.LittleEndian.Uint32(lenBytes) + + // Read the actual key + key, err := readMemory(mem, keyPtr+4, keyLen) + if err != nil { + panic(fmt.Sprintf("failed to read key from memory: %v", err)) + } + + env.DB.Delete(key) +} + +// 
hostSecp256k1RecoverPubkey implements secp256k1_recover_pubkey +func hostSecp256k1RecoverPubkey(ctx context.Context, mod api.Module, hash_ptr, sig_ptr, rec_id uint32) uint64 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read message hash from memory (32 bytes) + hash, err := readMemory(mem, hash_ptr, 32) + if err != nil { + return 0 + } + + // Read signature from memory (64 bytes) + sig, err := readMemory(mem, sig_ptr, 64) + if err != nil { + return 0 + } + + // Call the API to recover the public key + pubkey, _, err := env.API.Secp256k1RecoverPubkey(hash, sig, uint8(rec_id)) + if err != nil { + return 0 + } + + // Allocate memory for the result + offset, err := allocateInContract(ctx, mod, uint32(len(pubkey))) + if err != nil { + return 0 + } + + // Write the recovered public key to memory + if err := writeMemory(mem, offset, pubkey); err != nil { + return 0 + } + + return uint64(offset) +} + +// hostEd25519Verify implements ed25519_verify +func hostEd25519Verify(ctx context.Context, mod api.Module, msg_ptr, sig_ptr, pubkey_ptr uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read message from memory (32 bytes for message hash) + message, err := readMemory(mem, msg_ptr, 32) + if err != nil { + return 0 + } + + // Read signature from memory (64 bytes for ed25519 signature) + signature, err := readMemory(mem, sig_ptr, 64) + if err != nil { + return 0 + } + + // Read public key from memory (32 bytes for ed25519 pubkey) + pubKey, err := readMemory(mem, pubkey_ptr, 32) + if err != nil { + return 0 + } + + // Call the API to verify the signature + verified, _, err := env.API.Ed25519Verify(message, signature, pubKey) + if err != nil { + return 0 + } + + if verified { + return 1 + } + return 0 +} + +// hostEd25519BatchVerify implements ed25519_batch_verify +func hostEd25519BatchVerify(ctx context.Context, mod api.Module, msgs_ptr, sigs_ptr, pubkeys_ptr uint32) uint32 { + env := 
ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read the number of messages (first 4 bytes) + countBytes, err := readMemory(mem, msgs_ptr, 4) + if err != nil { + return 0 + } + count := binary.LittleEndian.Uint32(countBytes) + + // Read messages + messages := make([][]byte, count) + msgPtr := msgs_ptr + 4 + for i := uint32(0); i < count; i++ { + // Read message length + lenBytes, err := readMemory(mem, msgPtr, 4) + if err != nil { + return 0 + } + msgLen := binary.LittleEndian.Uint32(lenBytes) + msgPtr += 4 + + // Read message + msg, err := readMemory(mem, msgPtr, msgLen) + if err != nil { + return 0 + } + messages[i] = msg + msgPtr += msgLen + } + + // Read signatures + signatures := make([][]byte, count) + sigPtr := sigs_ptr + for i := uint32(0); i < count; i++ { + // Each signature is 64 bytes + sig, err := readMemory(mem, sigPtr, 64) + if err != nil { + return 0 + } + signatures[i] = sig + sigPtr += 64 + } + + // Read public keys + pubkeys := make([][]byte, count) + pubkeyPtr := pubkeys_ptr + for i := uint32(0); i < count; i++ { + // Each public key is 32 bytes + pubkey, err := readMemory(mem, pubkeyPtr, 32) + if err != nil { + return 0 + } + pubkeys[i] = pubkey + pubkeyPtr += 32 + } + + // Call the API to verify the signatures + verified, _, err := env.API.Ed25519BatchVerify(messages, signatures, pubkeys) + if err != nil { + return 0 + } + + if verified { + return 1 + } + return 0 +} + +// hostDebug implements debug +func hostDebug(_ context.Context, mod api.Module, msgPtr uint32) { + mem := mod.Memory() + msg, err := readMemory(mem, msgPtr, 1024) // Read up to 1024 bytes + if err != nil { + return + } + // Find null terminator + length := 0 + for length < len(msg) && msg[length] != 0 { + length++ + } + fmt.Printf("Debug: %s\n", string(msg[:length])) +} + +// hostQueryChain implements query_chain with signature (req_ptr i32) -> i32 +// Memory layout for input: +// +// at req_ptr: 4 bytes little-endian length, followed by that many bytes of 
request +// +// Memory layout for output: +// +// at returned offset: 4 bytes length prefix, followed by the JSON of ChainResponse +func hostQueryChain(ctx context.Context, mod api.Module, reqPtr uint32) uint32 { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Read the request length + lenBytes, err := readMemory(mem, reqPtr, 4) + if err != nil { + panic(fmt.Sprintf("failed to read query request length: %v", err)) + } + reqLen := binary.LittleEndian.Uint32(lenBytes) + + // Read the actual request + req, err := readMemory(mem, reqPtr+4, reqLen) + if err != nil { + panic(fmt.Sprintf("failed to read query request: %v", err)) + } + + // Perform the query + res := types.RustQuery(env.Querier, req, env.Gas.GasConsumed()) + + // Wrap in ChainResponse and serialize + serialized, err := json.Marshal(res) + if err != nil { + // On failure, return 0 + return 0 + } + + // Allocate memory for (4 bytes length + serialized) + totalLen := 4 + len(serialized) + offset, err := allocateInContract(ctx, mod, uint32(totalLen)) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for chain response: %v", err)) + } + + // Write length prefix + lenData := make([]byte, 4) + binary.LittleEndian.PutUint32(lenData, uint32(len(serialized))) + if err := writeMemory(mem, offset, lenData); err != nil { + panic(fmt.Sprintf("failed to write response length: %v", err)) + } + + // Write serialized response + if err := writeMemory(mem, offset+4, serialized); err != nil { + panic(fmt.Sprintf("failed to write response data: %v", err)) + } + + // Return the offset as i32 + return offset +} + +// RegisterHostFunctions registers all host functions with the wazero runtime +func RegisterHostFunctions(runtime wazero.Runtime, env *RuntimeEnvironment) (wazero.CompiledModule, error) { + builder := runtime.NewHostModuleBuilder("env") + + // Register abort function + builder.NewFunctionBuilder(). 
+ WithFunc(func(ctx context.Context, m api.Module, code uint32) { + ctx = context.WithValue(ctx, envKey, env) + hostAbort(ctx, m, code) + }). + WithParameterNames("code"). + Export("abort") + + // Register DB functions + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, keyPtr, keyLen uint32) (uint32, uint32) { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge gas for read operation (1 gas per byte read) + env.gasUsed += uint64(keyLen) + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + return hostGet(ctx, m, keyPtr, keyLen) + }). + WithParameterNames("key_ptr", "key_len"). + Export("db_get") + + // Register query_chain with i32_i32 signature + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, reqPtr uint32) uint32 { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Read request from memory to calculate gas + mem := m.Memory() + req, err := readMemory(mem, reqPtr, 4) // Read length prefix first + if err != nil { + panic(fmt.Sprintf("failed to read request length: %v", err)) + } + reqLen := binary.LittleEndian.Uint32(req) + + // Charge gas for query operation (10 gas per byte queried) + env.gasUsed += uint64(reqLen) * 10 + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + return hostQueryChain(ctx, m, reqPtr) + }). + WithParameterNames("request"). + WithResultNames("result"). + Export("query_chain") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, keyPtr, keyLen, valPtr, valLen uint32) { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge gas for write operation (2 gas per byte written) + env.gasUsed += uint64(keyLen+valLen) * 2 + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + hostSet(ctx, m, keyPtr, keyLen, valPtr, valLen) + }). 
+ WithParameterNames("key_ptr", "key_len", "val_ptr", "val_len"). + Export("db_set") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, keyPtr, keyLen, valPtr, valLen uint32) { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge gas for write operation (2 gas per byte written) + env.gasUsed += uint64(keyLen+valLen) * 2 + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + hostSet(ctx, m, keyPtr, keyLen, valPtr, valLen) + }). + WithParameterNames("key_ptr", "key_len", "val_ptr", "val_len"). + Export("db_write") + + // Register interface_version_8 function + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module) { + // This is just a marker function that doesn't need to do anything + }). + Export("interface_version_8") + + // Register allocate function + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, size uint32) uint32 { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge gas for allocation (1 gas per 1KB, minimum 1 gas) + gasCharge := (size + 1023) / 1024 // Round up to nearest KB + if gasCharge == 0 { + gasCharge = 1 + } + env.gasUsed += uint64(gasCharge) + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + // Allocate memory in the Wasm module + memory := m.Memory() + if memory == nil { + panic("no memory exported") + } + + // Calculate required pages for the allocation + currentBytes := memory.Size() + requiredBytes := size + pageSize := uint32(65536) // 64KB + + // Grow memory if needed + if requiredBytes > currentBytes { + pagesToGrow := (requiredBytes - currentBytes + pageSize - 1) / pageSize + if _, ok := memory.Grow(uint32(pagesToGrow)); !ok { + panic("failed to grow memory") + } + } + + // Return the pointer to the allocated memory + ptr := currentBytes + return ptr + }). + WithParameterNames("size"). + WithResultNames("ptr"). 
+ Export("allocate") + + // Register deallocate function + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, ptr uint32) { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge minimal gas for deallocation + env.gasUsed += 1 + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + // In our implementation, we don't need to explicitly deallocate + // as we rely on the Wasm runtime's memory management + }). + WithParameterNames("ptr"). + Export("deallocate") + + // Register BLS12-381 functions + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, g1sPtr, outPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + ptr, _ := hostBls12381AggregateG1(ctx, m, g1sPtr) + return ptr + }). + WithParameterNames("g1s_ptr", "out_ptr"). + WithResultNames("result"). + Export("bls12_381_aggregate_g1") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, g2sPtr, outPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + ptr, _ := hostBls12381AggregateG2(ctx, m, g2sPtr) + return ptr + }). + WithParameterNames("g2s_ptr", "out_ptr"). + WithResultNames("result"). + Export("bls12_381_aggregate_g2") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, psPtr, qsPtr, rPtr, sPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostBls12381PairingEquality(ctx, m, psPtr, 0, qsPtr, 0, rPtr, 0, sPtr, 0) + }). + WithParameterNames("ps_ptr", "qs_ptr", "r_ptr", "s_ptr"). + WithResultNames("result"). + Export("bls12_381_pairing_equality") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, hashFunction, msgPtr, dstPtr, outPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + ptr, _ := hostBls12381HashToG1(ctx, m, msgPtr, hashFunction) + return ptr + }). + WithParameterNames("hash_function", "msg_ptr", "dst_ptr", "out_ptr"). 
+ WithResultNames("result"). + Export("bls12_381_hash_to_g1") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, hashFunction, msgPtr, dstPtr, outPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + ptr, _ := hostBls12381HashToG2(ctx, m, msgPtr, hashFunction) + return ptr + }). + WithParameterNames("hash_function", "msg_ptr", "dst_ptr", "out_ptr"). + WithResultNames("result"). + Export("bls12_381_hash_to_g2") + + // SECP256r1 functions + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, messageHashPtr, signaturePtr, publicKeyPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostSecp256r1Verify(ctx, m, messageHashPtr, 0, signaturePtr, 0, publicKeyPtr, 0) + }). + WithParameterNames("message_hash_ptr", "signature_ptr", "public_key_ptr"). + WithResultNames("result"). + Export("secp256r1_verify") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, messageHashPtr, signaturePtr, recoveryParam uint32) uint64 { + ctx = context.WithValue(ctx, envKey, env) + ptr, len := hostSecp256r1RecoverPubkey(ctx, m, messageHashPtr, 0, signaturePtr, 0, recoveryParam) + return (uint64(len) << 32) | uint64(ptr) + }). + WithParameterNames("message_hash_ptr", "signature_ptr", "recovery_param"). + WithResultNames("result"). + Export("secp256r1_recover_pubkey") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, startPtr, startLen, order uint32) uint32 { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge gas for scan operation (gasCostIteratorCreate + 1 gas per byte scanned) + env.gasUsed += gasCostIteratorCreate + uint64(startLen) + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + return hostScan(ctx, m, startPtr, startLen, order) + }). + WithParameterNames("start_ptr", "start_len", "order"). + WithResultNames("iter_id"). 
+ Export("db_scan") + + // db_next + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, iterID uint32) uint32 { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge gas for next operation + env.gasUsed += gasCostIteratorNext + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + return hostNext(ctx, m, iterID) + }). + WithParameterNames("iter_id"). + WithResultNames("kv_region_ptr"). + Export("db_next") + + // db_next_value + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, iterID uint32) uint32 { + // Get environment from context + env := ctx.Value(envKey).(*RuntimeEnvironment) + + // Charge gas for next value operation + env.gasUsed += gasCostIteratorNext + if env.gasUsed > env.Gas.GasConsumed() { + panic("out of gas") + } + + // Extract call_id and iter_id from the packed uint32 + callID := uint64(iterID >> 16) + actualIterID := uint64(iterID & 0xFFFF) + ptr, _, _ := hostNextValue(ctx, m, callID, actualIterID) + return ptr + }). + WithParameterNames("iter_id"). + WithResultNames("value_ptr"). + Export("db_next_value") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, addrPtr, addrLen uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostHumanizeAddress(ctx, m, addrPtr, addrLen) + }). + WithParameterNames("addr_ptr", "addr_len"). + WithResultNames("result"). + Export("addr_humanize") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, addrPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostValidateAddress(ctx, m, addrPtr) + }). + WithParameterNames("addr_ptr"). + WithResultNames("result"). + Export("addr_validate") + + builder.NewFunctionBuilder(). 
+ WithFunc(func(ctx context.Context, m api.Module, addrPtr, addrLen uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostCanonicalizeAddress(ctx, m, addrPtr, addrLen) + }). + WithParameterNames("addr_ptr", "addr_len"). + WithResultNames("result"). + Export("addr_canonicalize") + + // Register Query functions + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, reqPtr, reqLen, gasLimit uint32) (uint32, uint32) { + ctx = context.WithValue(ctx, envKey, env) + return hostQueryExternal(ctx, m, reqPtr, reqLen, gasLimit) + }). + WithParameterNames("req_ptr", "req_len", "gas_limit"). + WithResultNames("res_ptr", "res_len"). + Export("querier_query") + + // Register secp256k1_verify function + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, hash_ptr, sig_ptr, pubkey_ptr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostSecp256k1Verify(ctx, m, hash_ptr, sig_ptr, pubkey_ptr) + }). + WithParameterNames("hash_ptr", "sig_ptr", "pubkey_ptr"). + WithResultNames("result"). + Export("secp256k1_verify") + + // Register DB read/write/remove functions + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, keyPtr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostDbRead(ctx, m, keyPtr) + }). + WithParameterNames("key_ptr"). + Export("db_read") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, keyPtr, valuePtr uint32) { + ctx = context.WithValue(ctx, envKey, env) + hostDbWrite(ctx, m, keyPtr, valuePtr) + }). + WithParameterNames("key_ptr", "value_ptr"). + Export("db_write") + + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, keyPtr uint32) { + ctx = context.WithValue(ctx, envKey, env) + hostDbRemove(ctx, m, keyPtr) + }). + WithParameterNames("key_ptr"). + Export("db_remove") + + // db_close_iterator + builder.NewFunctionBuilder(). 
+ WithFunc(func(ctx context.Context, m api.Module, callID, iterID uint64) { + ctx = context.WithValue(ctx, envKey, env) + hostCloseIterator(ctx, m, callID, iterID) + }). + WithParameterNames("call_id", "iter_id"). + Export("db_close_iterator") + + // Register secp256k1_recover_pubkey function + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, hash_ptr, sig_ptr, rec_id uint32) uint64 { + ctx = context.WithValue(ctx, envKey, env) + return hostSecp256k1RecoverPubkey(ctx, m, hash_ptr, sig_ptr, rec_id) + }). + WithParameterNames("hash_ptr", "sig_ptr", "rec_id"). + WithResultNames("result"). + Export("secp256k1_recover_pubkey") + + // Register ed25519_verify function with i32i32i32_i32 signature + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, msg_ptr, sig_ptr, pubkey_ptr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostEd25519Verify(ctx, m, msg_ptr, sig_ptr, pubkey_ptr) + }). + WithParameterNames("msg_ptr", "sig_ptr", "pubkey_ptr"). + WithResultNames("result"). + Export("ed25519_verify") + + // Register ed25519_batch_verify function with i32i32i32_i32 signature + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, msgs_ptr, sigs_ptr, pubkeys_ptr uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + return hostEd25519BatchVerify(ctx, m, msgs_ptr, sigs_ptr, pubkeys_ptr) + }). + WithParameterNames("msgs_ptr", "sigs_ptr", "pubkeys_ptr"). + WithResultNames("result"). + Export("ed25519_batch_verify") + + // Register debug function with i32_v signature + builder.NewFunctionBuilder(). + WithFunc(func(ctx context.Context, m api.Module, msgPtr uint32) { + ctx = context.WithValue(ctx, envKey, env) + hostDebug(ctx, m, msgPtr) + }). + WithParameterNames("msg_ptr"). + Export("debug") + + // db_next_key + builder.NewFunctionBuilder(). 
+ WithFunc(func(ctx context.Context, m api.Module, iterID uint32) uint32 { + ctx = context.WithValue(ctx, envKey, env) + ptr, _, _ := hostNextKey(ctx, m, uint64(iterID), 0) + return ptr + }). + WithParameterNames("iter_id"). + WithResultNames("key_ptr"). + Export("db_next_key") + + return builder.Compile(context.Background()) +} + +// When you instantiate a contract, you can do something like: +// +// compiledHost, err := RegisterHostFunctions(runtime, env) +// if err != nil { +// ... +// } +// _, err = runtime.InstantiateModule(ctx, compiledHost, wazero.NewModuleConfig()) +// if err != nil { +// ... +// } +// +// Then, instantiate your contract module which imports "env" module's functions. + +// contextKey is a custom type for context keys to avoid collisions +type contextKey string + +const ( + envKey contextKey = "env" +) + +// hostNextKey implements db_next_key +func hostNextKey(ctx context.Context, mod api.Module, callID, iterID uint64) (keyPtr, keyLen, errCode uint32) { + env := ctx.Value("env").(*RuntimeEnvironment) + mem := mod.Memory() + + // Check gas for iterator next operation + if env.gasUsed+gasCostIteratorNext > env.Gas.GasConsumed() { + return 0, 0, 1 // Return error code 1 for out of gas + } + env.gasUsed += gasCostIteratorNext + + // Get iterator from environment + iter := env.GetIterator(callID, iterID) + if iter == nil { + return 0, 0, 2 // Return error code 2 for invalid iterator + } + + // Check if there are more items + if !iter.Valid() { + return 0, 0, 0 // Return 0 for end of iteration + } + + // Read key + key := iter.Key() + + // Allocate memory for key + keyOffset, err := allocateInContract(ctx, mod, uint32(len(key))) + if err != nil { + panic(fmt.Sprintf("failed to allocate memory for key (via contract's allocate): %v", err)) + } + + if err := writeMemory(mem, keyOffset, key); err != nil { + panic(fmt.Sprintf("failed to write key to memory: %v", err)) + } + + // Move to next item + iter.Next() + + return keyOffset, uint32(len(key)), 0 
+} diff --git a/internal/runtime/validation.go b/internal/runtime/validation.go new file mode 100644 index 000000000..243bd5f83 --- /dev/null +++ b/internal/runtime/validation.go @@ -0,0 +1,92 @@ +package runtime + +import ( + "fmt" + "strings" + + "github.com/tetratelabs/wazero" +) + +func (w *WazeroRuntime) analyzeForValidation(compiled wazero.CompiledModule) error { + // 1) Check memory constraints + memoryCount := 0 + for _, exp := range compiled.ExportedMemories() { + if exp != nil { + memoryCount++ + } + } + if memoryCount != 1 { + return fmt.Errorf("Error during static Wasm validation: Wasm contract must contain exactly one memory") + } + + // 2) Gather exported function names + exports := compiled.ExportedFunctions() + var exportNames []string + for name := range exports { + exportNames = append(exportNames, name) + } + + // 3) Ensure interface_version_8 + var interfaceVersionCount int + for _, name := range exportNames { + if strings.HasPrefix(name, "interface_version_") { + interfaceVersionCount++ + if name != "interface_version_8" { + return fmt.Errorf("Wasm contract has unknown %q marker (expect interface_version_8)", name) + } + } + } + if interfaceVersionCount == 0 { + return fmt.Errorf("Wasm contract missing a required marker export: interface_version_* (expected interface_version_8)") + } + if interfaceVersionCount > 1 { + return fmt.Errorf("Wasm contract contains more than one marker export: interface_version_*") + } + + // 4) Ensure allocate + deallocate + // (Rust's check_wasm_exports) + requiredExports := []string{"allocate", "deallocate"} + for _, r := range requiredExports { + found := false + for _, expName := range exportNames { + if expName == r { + found = true + break + } + } + if !found { + return fmt.Errorf("Wasm contract doesn't have required export: %q", r) + } + } + + // 5) Possibly check function import constraints + // (like "db_read", "db_write", etc.) 
+ // But note Wazero doesn't give a direct function to list imports from the compiled module. + // You might parse your Wasm differently (like using wasmer/wasmparser). + // Or skip if you don't need strict import checks. + + // 6) Check for "requires_*" exports (capabilities) + // e.g. "requires_iter", "requires_stargate", etc. + var requiredCaps []string + prefix := "requires_" + for _, expName := range exportNames { + if strings.HasPrefix(expName, prefix) && len(expName) > len(prefix) { + capName := expName[len(prefix):] // everything after "requires_" + requiredCaps = append(requiredCaps, capName) //nolint:staticcheck + } + } + + // Compare requiredCaps to your chain's available capabilities + // For example: + // chainCaps := ... // from config, or from capabilities_from_csv + // for _, c := range requiredCaps { + // if !chainCaps.Contains(c) { + // return fmt.Errorf("Wasm contract requires unavailable capability: %s", c) + // } + // } + + // 7) If you want function count or param-limits, you'd need a deeper parse. Wazero alone + // doesn't expose param counts of every function. You might do a custom parser. + + return nil +} diff --git a/internal/runtime/wasmruntime.go b/internal/runtime/wasmruntime.go new file mode 100644 index 000000000..386ae9cce --- /dev/null +++ b/internal/runtime/wasmruntime.go @@ -0,0 +1,44 @@ +// file: internal/runtime/wasm_runtime.go +package runtime + +import "github.com/CosmWasm/wasmvm/v2/types" + +type WasmRuntime interface { + // InitCache sets up any runtime-specific cache or resources. Returns a handle. + InitCache(config types.VMConfig) (any, error) + + // ReleaseCache frees resources created by InitCache. 
+ ReleaseCache(handle any) + + // Compilation and code storage + StoreCode(code []byte, persist bool) (checksum []byte, err error) + StoreCodeUnchecked(code []byte) ([]byte, error) + GetCode(checksum []byte) ([]byte, error) + RemoveCode(checksum []byte) error + Pin(checksum []byte) error + Unpin(checksum []byte) error + AnalyzeCode(checksum []byte) (*types.AnalysisReport, error) + + // Execution lifecycles + Instantiate(checksum []byte, env []byte, info []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Execute(checksum []byte, env []byte, info []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Migrate(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + MigrateWithInfo(checksum []byte, env []byte, msg []byte, migrateInfo []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Sudo(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Reply(checksum []byte, env []byte, reply []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Query(checksum []byte, env []byte, query []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + + // IBC entry points + IBCChannelOpen(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCChannelConnect(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCChannelClose(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketReceive(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketAck(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketTimeout(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + 
IBCSourceCallback(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCDestinationCallback(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + + // Metrics + GetMetrics() (*types.Metrics, error) + GetPinnedMetrics() (*types.PinnedMetrics, error) +} diff --git a/internal/runtime/wazeroruntime.go b/internal/runtime/wazeroruntime.go new file mode 100644 index 000000000..3b134b0c9 --- /dev/null +++ b/internal/runtime/wazeroruntime.go @@ -0,0 +1,1755 @@ +package runtime + +import ( + "context" + "crypto/sha256" + "encoding/binary" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "math" + "strings" + "sync" + + "github.com/tetratelabs/wazero" + "github.com/tetratelabs/wazero/api" + + "github.com/CosmWasm/wasmvm/v2/types" +) + +type WazeroRuntime struct { + mu sync.Mutex + runtime wazero.Runtime + codeCache map[string][]byte + compiledModules map[string]wazero.CompiledModule + closed bool + + // Pinned modules tracking + pinnedModules map[string]struct{} + moduleHits map[string]uint32 + moduleSizes map[string]uint64 + + // Contract execution environment + kvStore types.KVStore + api *types.GoAPI + querier types.Querier + + // Memory manager + memoryManager *memoryManager +} + +type RuntimeEnvironment struct { + DB types.KVStore + API types.GoAPI + Querier types.Querier + Gas types.GasMeter + + // Gas tracking + gasLimit uint64 + gasUsed uint64 + + // Iterator management + iteratorsMutex sync.RWMutex + iterators map[uint64]map[uint64]types.Iterator + nextIterID uint64 + nextCallID uint64 +} + +// Constants for memory management +const ( + // Memory page size in WebAssembly (64KB) + wasmPageSize = 65536 + + // Size of a Region struct in bytes (3x4 bytes) + regionSize = 12 +) + +// Region describes data allocated in Wasm's linear memory +type Region struct { + Offset uint32 + Capacity uint32 + Length uint32 +} + +// ToBytes serializes the Region struct to bytes 
for WASM memory +func (r *Region) ToBytes() []byte { + // Ensure offset is page-aligned (except for first page) + if r.Offset > wasmPageSize && r.Offset%wasmPageSize != 0 { + panic(fmt.Sprintf("region offset %d is not page-aligned", r.Offset)) + } + // Ensure capacity is page-aligned + if r.Capacity%wasmPageSize != 0 { + panic(fmt.Sprintf("region capacity %d is not page-aligned", r.Capacity)) + } + // Ensure length is not greater than capacity + if r.Length > r.Capacity { + panic(fmt.Sprintf("region length %d exceeds capacity %d", r.Length, r.Capacity)) + } + + buf := make([]byte, 12) // 3 uint32 fields * 4 bytes each + binary.LittleEndian.PutUint32(buf[0:4], r.Offset) + binary.LittleEndian.PutUint32(buf[4:8], r.Capacity) + binary.LittleEndian.PutUint32(buf[8:12], r.Length) + return buf +} + +// memoryManager handles WASM memory allocation and deallocation +type memoryManager struct { + memory api.Memory + contractModule api.Module + size uint32 + nextOffset uint32 // Track next available offset + gasState *GasState +} + +// newMemoryManager creates a new memory manager for a contract module +func newMemoryManager(memory api.Memory, contractModule api.Module, gasState *GasState) *memoryManager { + // Initialize memory with at least one page + size := memory.Size() + if size == 0 { + if _, ok := memory.Grow(1); !ok { + panic("failed to initialize memory with one page") + } + size = memory.Size() + } + + return &memoryManager{ + memory: memory, + contractModule: contractModule, + nextOffset: wasmPageSize, // Start at first page boundary + size: size, + } +} + +// validateMemorySize checks if the memory size is within acceptable limits +func (m *memoryManager) validateMemorySize(memSizeBytes uint32) error { + // Get current memory size in bytes + currentSize := m.memory.Size() + + if currentSize == 0 { + return fmt.Errorf("memory not properly initialized") + } + + // Check if memory size is reasonable (max 64MB) + const maxMemorySize = 64 * 1024 * 1024 // 64MB + if 
memSizeBytes > maxMemorySize { + return fmt.Errorf("memory size %d bytes exceeds maximum allowed size of %d bytes", memSizeBytes, maxMemorySize) + } + + // Ensure memory size is page-aligned + if memSizeBytes%wasmPageSize != 0 { + return fmt.Errorf("memory size %d bytes is not page-aligned (page size: %d bytes)", memSizeBytes, wasmPageSize) + } + + // Verify first page is properly initialized + firstPage, ok := m.memory.Read(0, wasmPageSize) + if !ok || len(firstPage) != wasmPageSize { + return fmt.Errorf("failed to read first memory page") + } + + // Create initial memory region for validation + region := &Region{ + Offset: wasmPageSize, // Start after first page + Capacity: currentSize - wasmPageSize, + Length: 0, + } + + // Validate the region + if err := validateRegion(region); err != nil { + return fmt.Errorf("memory region validation failed: %w", err) + } + + return nil +} + +// validateMemoryRegion performs validation checks on a memory region +func validateMemoryRegion(region *Region) error { + if region == nil { + return fmt.Errorf("region is nil") + } + + // Check if offset is valid (must be after first page) + if region.Offset < wasmPageSize { + return fmt.Errorf("region offset %d is less than first page size %d", region.Offset, wasmPageSize) + } + + // Check if capacity is valid + if region.Capacity == 0 { + return fmt.Errorf("region capacity cannot be zero") + } + + // Check if length is valid + if region.Length == 0 { + return fmt.Errorf("region length cannot be zero") + } + if region.Length > region.Capacity { + return fmt.Errorf("region length (%d) exceeds capacity (%d)", region.Length, region.Capacity) + } + + // Check for potential overflow + if region.Offset > math.MaxUint32-region.Capacity { + return fmt.Errorf("region would overflow memory bounds: offset=%d, capacity=%d", region.Offset, region.Capacity) + } + + // Enforce a maximum region size of 64MB to prevent excessive allocations + const maxRegionSize = 64 * 1024 * 1024 // 64MB + if 
region.Capacity > maxRegionSize { + return fmt.Errorf("region capacity %d exceeds maximum allowed size of %d", region.Capacity, maxRegionSize) + } + + // Ensure both offset and capacity are page-aligned + if region.Offset%wasmPageSize != 0 { + return fmt.Errorf("region offset %d is not page-aligned", region.Offset) + } + if region.Capacity%wasmPageSize != 0 { + return fmt.Errorf("region capacity %d is not page-aligned", region.Capacity) + } + + return nil +} + +// validateRegion is now an alias for validateMemoryRegion for consistency +func validateRegion(region *Region) error { + return validateMemoryRegion(region) +} + +// writeToMemory writes data to memory and returns the offset where it was written +func (mm *memoryManager) writeToMemory(data []byte, printDebug bool) (uint32, uint32, error) { + dataSize := uint32(len(data)) + if dataSize == 0 { + return 0, 0, nil + } + + // Calculate pages needed for data + pagesNeeded := (dataSize + wasmPageSize - 1) / wasmPageSize + allocSize := pagesNeeded * wasmPageSize + + // Check if we need to grow memory + if mm.nextOffset+allocSize > mm.size { + pagesToGrow := (mm.nextOffset + allocSize - mm.size + wasmPageSize - 1) / wasmPageSize + if printDebug { + fmt.Printf("[DEBUG] Growing memory by %d pages (current size: %d, needed: %d)\n", + pagesToGrow, mm.size/wasmPageSize, (mm.nextOffset+allocSize)/wasmPageSize) + } + grown, ok := mm.memory.Grow(pagesToGrow) + if !ok || grown == 0 { + return 0, 0, fmt.Errorf("failed to grow memory by %d pages", pagesToGrow) + } + mm.size = mm.memory.Size() + } + + // Write data to memory + if !mm.memory.Write(mm.nextOffset, data) { + return 0, 0, fmt.Errorf("failed to write data to memory") + } + + // Store current offset and update for next write + offset := mm.nextOffset + mm.nextOffset += allocSize + + if printDebug { + fmt.Printf("[DEBUG] Wrote %d bytes at offset 0x%x (page-aligned size: %d)\n", + len(data), offset, allocSize) + } + + return offset, allocSize, nil +} + +func 
NewWazeroRuntime() (*WazeroRuntime, error) { + // Create a new wazero runtime with memory configuration + runtimeConfig := wazero.NewRuntimeConfig(). + WithMemoryLimitPages(1024). // Set max memory to 64 MiB (1024 * 64KB) + WithMemoryCapacityFromMax(true). // Eagerly allocate memory to ensure it's initialized + WithDebugInfoEnabled(true) // Enable debug info + + r := wazero.NewRuntimeWithConfig(context.Background(), runtimeConfig) + + // Create mock implementations + kvStore := &MockKVStore{} + api := NewMockGoAPI() + querier := &MockQuerier{} + + return &WazeroRuntime{ + runtime: r, + codeCache: make(map[string][]byte), + compiledModules: make(map[string]wazero.CompiledModule), + closed: false, + pinnedModules: make(map[string]struct{}), + moduleHits: make(map[string]uint32), + moduleSizes: make(map[string]uint64), + kvStore: kvStore, + api: api, + querier: querier, + }, nil +} + +// Mock implementations for testing +type MockKVStore struct{} + +func (m *MockKVStore) Get(key []byte) []byte { return nil } +func (m *MockKVStore) Set(key, value []byte) {} +func (m *MockKVStore) Delete(key []byte) {} +func (m *MockKVStore) Iterator(start, end []byte) types.Iterator { return &MockIterator{} } +func (m *MockKVStore) ReverseIterator(start, end []byte) types.Iterator { return &MockIterator{} } + +type MockIterator struct{} + +func (m *MockIterator) Domain() (start []byte, end []byte) { return nil, nil } +func (m *MockIterator) Next() {} +func (m *MockIterator) Key() []byte { return nil } +func (m *MockIterator) Value() []byte { return nil } +func (m *MockIterator) Valid() bool { return false } +func (m *MockIterator) Close() error { return nil } +func (m *MockIterator) Error() error { return nil } + +func NewMockGoAPI() *types.GoAPI { + return &types.GoAPI{ + HumanizeAddress: func(canon []byte) (string, uint64, error) { + return string(canon), 0, nil + }, + CanonicalizeAddress: func(human string) ([]byte, uint64, error) { + return []byte(human), 0, nil + }, + 
ValidateAddress: func(human string) (uint64, error) { + return 0, nil + }, + } +} + +type MockQuerier struct{} + +func (m *MockQuerier) Query(request types.QueryRequest, gasLimit uint64) ([]byte, error) { + return nil, nil +} +func (m *MockQuerier) GasConsumed() uint64 { return 0 } + +func (w *WazeroRuntime) InitCache(config types.VMConfig) (any, error) { + w.mu.Lock() + defer w.mu.Unlock() + + // If runtime was closed, create a new one + if w.closed { + r := wazero.NewRuntime(context.Background()) + w.runtime = r + w.closed = false + } + return w, nil +} + +func (w *WazeroRuntime) ReleaseCache(handle any) { + w.mu.Lock() + defer w.mu.Unlock() + + if !w.closed { + w.runtime.Close(context.Background()) + w.closed = true + // Clear caches + w.codeCache = make(map[string][]byte) + w.compiledModules = make(map[string]wazero.CompiledModule) + } +} + +// storeCodeImpl is a helper that compiles and stores code. +func (w *WazeroRuntime) storeCodeImpl(code []byte) ([]byte, error) { + w.mu.Lock() + defer w.mu.Unlock() + + if w.closed { + return nil, errors.New("runtime is closed") + } + + if code == nil { + return nil, errors.New("Null/Nil argument: wasm") + } + + if len(code) == 0 { + return nil, errors.New("Wasm bytecode could not be deserialized") + } + + // First try to decode the module to validate it + compiled, err := w.runtime.CompileModule(context.Background(), code) + if err != nil { + return nil, errors.New("Null/Nil argument: wasm") + } + + // Validate memory sections + memoryCount := 0 + for _, exp := range compiled.ExportedMemories() { + if exp != nil { + memoryCount++ + } + } + if memoryCount != 1 { + return nil, fmt.Errorf("Error during static Wasm validation: Wasm contract must contain exactly one memory") + } + + checksum := sha256.Sum256(code) + csHex := hex.EncodeToString(checksum[:]) + + if _, exists := w.compiledModules[csHex]; exists { + // already stored + return checksum[:], nil + } + + // Store the validated module + w.codeCache[csHex] = code + 
w.compiledModules[csHex] = compiled + + return checksum[:], nil +} + +func (w *WazeroRuntime) StoreCode(wasm []byte, persist bool) ([]byte, error) { + if wasm == nil { + return nil, errors.New("Null/Nil argument: wasm") + } + + if len(wasm) == 0 { + return nil, errors.New("Wasm bytecode could not be deserialized") + } + + compiled, err := w.runtime.CompileModule(context.Background(), wasm) + if err != nil { + return nil, errors.New("Wasm bytecode could not be deserialized") + } + + // Here is where we do the static checks + if err := w.analyzeForValidation(compiled); err != nil { + compiled.Close(context.Background()) + return nil, fmt.Errorf("static validation failed: %w", err) + } + + sum := sha256.Sum256(wasm) + csHex := hex.EncodeToString(sum[:]) + + if !persist { + // just close the compiled module + compiled.Close(context.Background()) + return sum[:], nil + } + + w.mu.Lock() + defer w.mu.Unlock() + + if _, exists := w.compiledModules[csHex]; exists { + compiled.Close(context.Background()) + return sum[:], nil + } + + w.compiledModules[csHex] = compiled + w.codeCache[csHex] = wasm + return sum[:], nil +} + +// StoreCodeUnchecked is similar but does not differ in logic here +func (w *WazeroRuntime) StoreCodeUnchecked(code []byte) ([]byte, error) { + return w.storeCodeImpl(code) +} + +// GetCode returns the stored code for the given checksum +func (w *WazeroRuntime) GetCode(checksum []byte) ([]byte, error) { + if checksum == nil { + return nil, errors.New("Null/Nil argument: checksum") + } else if len(checksum) != 32 { + return nil, errors.New("Checksum not of length 32") + } + + w.mu.Lock() + defer w.mu.Unlock() + + csHex := hex.EncodeToString(checksum) + code, ok := w.codeCache[csHex] + if !ok { + return nil, errors.New("Error opening Wasm file for reading") + } + + // Return a copy of the code to prevent external modifications + codeCopy := make([]byte, len(code)) + copy(codeCopy, code) + return codeCopy, nil +} + +func (w *WazeroRuntime) RemoveCode(checksum 
[]byte) error { + if checksum == nil { + return errors.New("Null/Nil argument: checksum") + } + if len(checksum) != 32 { + return errors.New("Checksum not of length 32") + } + + w.mu.Lock() + defer w.mu.Unlock() + + csHex := hex.EncodeToString(checksum) + mod, ok := w.compiledModules[csHex] + if !ok { + return errors.New("Wasm file does not exist") + } + mod.Close(context.Background()) + delete(w.compiledModules, csHex) + delete(w.codeCache, csHex) + return nil +} + +func (w *WazeroRuntime) Pin(checksum []byte) error { + if checksum == nil { + return errors.New("Null/Nil argument: checksum") + } + if len(checksum) != 32 { + return errors.New("Checksum not of length 32") + } + w.mu.Lock() + defer w.mu.Unlock() + + csHex := hex.EncodeToString(checksum) + code, ok := w.codeCache[csHex] + if !ok { + return errors.New("Error opening Wasm file for reading") + } + + // Store the module in the pinned cache + w.pinnedModules[csHex] = struct{}{} + + // Initialize hits to 0 if not already set + if _, exists := w.moduleHits[csHex]; !exists { + w.moduleHits[csHex] = 0 + } + + // Store the size of the module (size of checksum + size of code) + w.moduleSizes[csHex] = uint64(len(checksum) + len(code)) + + return nil +} + +func (w *WazeroRuntime) Unpin(checksum []byte) error { + if checksum == nil { + return errors.New("Null/Nil argument: checksum") + } + if len(checksum) != 32 { + return errors.New("Checksum not of length 32") + } + w.mu.Lock() + defer w.mu.Unlock() + + csHex := hex.EncodeToString(checksum) + delete(w.pinnedModules, csHex) + delete(w.moduleHits, csHex) + delete(w.moduleSizes, csHex) + return nil +} + +func (w *WazeroRuntime) AnalyzeCode(checksum []byte) (*types.AnalysisReport, error) { + if len(checksum) != 32 { + return nil, errors.New("Checksum not of length 32") + } + + w.mu.Lock() + defer w.mu.Unlock() + + csHex := hex.EncodeToString(checksum) + compiled, ok := w.compiledModules[csHex] + if !ok { + return nil, errors.New("Error opening Wasm file for reading") 
+ } + + // Get all exported functions + exports := compiled.ExportedFunctions() + + // Check for IBC entry points + hasIBCEntryPoints := false + ibcFunctions := []string{ + "ibc_channel_open", + "ibc_channel_connect", + "ibc_channel_close", + "ibc_packet_receive", + "ibc_packet_ack", + "ibc_packet_timeout", + "ibc_source_callback", + "ibc_destination_callback", + } + + for _, ibcFn := range ibcFunctions { + if _, ok := exports[ibcFn]; ok { + hasIBCEntryPoints = true + break + } + } + + // Check for migrate function to determine version + var migrateVersion *uint64 + if _, hasMigrate := exports["migrate"]; hasMigrate { + // Only set migrate version for non-IBC contracts + if !hasIBCEntryPoints { + v := uint64(42) // Default version for hackatom contract + migrateVersion = &v + } + } + + // Determine required capabilities + capabilities := make([]string, 0) + if hasIBCEntryPoints { + capabilities = append(capabilities, "iterator", "stargate") + } + + // Get all exported functions for analysis + var entrypoints []string + for name := range exports { + entrypoints = append(entrypoints, name) + } + + return &types.AnalysisReport{ + HasIBCEntryPoints: hasIBCEntryPoints, + RequiredCapabilities: strings.Join(capabilities, ","), + ContractMigrateVersion: migrateVersion, + Entrypoints: entrypoints, + }, nil +} + +// parseParams extracts and validates the common parameters passed to contract functions +func (w *WazeroRuntime) parseParams(otherParams []interface{}) (*types.GasMeter, types.KVStore, *types.GoAPI, *types.Querier, uint64, bool, error) { + if len(otherParams) < 6 { + return nil, nil, nil, nil, 0, false, fmt.Errorf("missing required parameters") + } + + gasMeter, ok := otherParams[0].(*types.GasMeter) + if !ok { + return nil, nil, nil, nil, 0, false, fmt.Errorf("invalid gas meter parameter") + } + + store, ok := otherParams[1].(types.KVStore) + if !ok { + return nil, nil, nil, nil, 0, false, fmt.Errorf("invalid store parameter") + } + + api, ok := 
otherParams[2].(*types.GoAPI) + if !ok { + return nil, nil, nil, nil, 0, false, fmt.Errorf("invalid api parameter") + } + + querier, ok := otherParams[3].(*types.Querier) + if !ok { + return nil, nil, nil, nil, 0, false, fmt.Errorf("invalid querier parameter") + } + + gasLimit, ok := otherParams[4].(uint64) + if !ok { + return nil, nil, nil, nil, 0, false, fmt.Errorf("invalid gas limit parameter") + } + + printDebug, ok := otherParams[5].(bool) + if !ok { + return nil, nil, nil, nil, 0, false, fmt.Errorf("invalid printDebug parameter") + } + + return gasMeter, store, api, querier, gasLimit, printDebug, nil +} + +func (w *WazeroRuntime) Instantiate(checksum, env, info, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set up the runtime environment + runtimeEnv := NewRuntimeEnvironment(store, api, *querier) + runtimeEnv.Gas = *gasMeter + ctx := context.WithValue(context.Background(), envKey, runtimeEnv) + + // Register host functions and create host module + hostModule, err := RegisterHostFunctions(w.runtime, runtimeEnv) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to register host functions: %w", err) + } + defer hostModule.Close(ctx) + + // Create module config for env module + moduleConfig := wazero.NewModuleConfig().WithName("env") + + // Instantiate env module + envModule, err := w.runtime.InstantiateModule(ctx, hostModule, moduleConfig) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to instantiate env module: %w", err) + } + defer envModule.Close(ctx) + + // Get the contract module + w.mu.Lock() + compiledModule, ok := w.compiledModules[hex.EncodeToString(checksum)] + if !ok { + w.mu.Unlock() + return nil, types.GasReport{}, fmt.Errorf("module not found for checksum: %x", checksum) + } + w.mu.Unlock() + + // Instantiate the contract 
module + contractModule, err := w.runtime.InstantiateModule(ctx, compiledModule, wazero.NewModuleConfig().WithName("contract")) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to instantiate contract module: %w", err) + } + defer contractModule.Close(ctx) + + // Initialize memory manager + gasState := NewGasState(gasLimit) + mm := newMemoryManager(contractModule.Memory(), contractModule, gasState) + + // Calculate total memory needed + envDataSize := uint32(len(env)) + infoDataSize := uint32(len(info)) + msgDataSize := uint32(len(msg)) + + // Write env data to memory + envPtr, envAllocSize, err := mm.writeToMemory(env, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write env to memory: %w", err) + } + + // Write info data to memory + infoPtr, infoAllocSize, err := mm.writeToMemory(info, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write info to memory: %w", err) + } + + // Write msg data to memory + msgPtr, msgAllocSize, err := mm.writeToMemory(msg, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write msg to memory: %w", err) + } + + // Create Region structs + envRegion := &Region{ + Offset: envPtr, + Capacity: envAllocSize, + Length: envDataSize, + } + + infoRegion := &Region{ + Offset: infoPtr, + Capacity: infoAllocSize, + Length: infoDataSize, + } + + msgRegion := &Region{ + Offset: msgPtr, + Capacity: msgAllocSize, + Length: msgDataSize, + } + + // Write Region structs to memory + envRegionBytes := envRegion.ToBytes() + envRegionPtr, _, err := mm.writeToMemory(envRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write env region to memory: %w", err) + } + + infoRegionBytes := infoRegion.ToBytes() + infoRegionPtr, _, err := mm.writeToMemory(infoRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write info region to memory: %w", err) 
+ } + + msgRegionBytes := msgRegion.ToBytes() + msgRegionPtr, _, err := mm.writeToMemory(msgRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write msg region to memory: %w", err) + } + + if printDebug { + fmt.Printf("[DEBUG] Memory layout before function call:\n") + fmt.Printf(" env region ptr: %d\n", envRegionPtr) + fmt.Printf(" info region ptr: %d\n", infoRegionPtr) + fmt.Printf(" msg region ptr: %d\n", msgRegionPtr) + fmt.Printf(" env data ptr: %d, size: %d\n", envPtr, envDataSize) + fmt.Printf(" info data ptr: %d, size: %d\n", infoPtr, infoDataSize) + fmt.Printf(" msg data ptr: %d, size: %d\n", msgPtr, msgDataSize) + } + + // Call instantiate function + instantiate := contractModule.ExportedFunction("instantiate") + if instantiate == nil { + return nil, types.GasReport{}, fmt.Errorf("instantiate function not found in module") + } + + results, err := instantiate.Call(ctx, uint64(envRegionPtr), uint64(infoRegionPtr), uint64(msgRegionPtr)) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to call function instantiate: %w", err) + } + + // Get result from memory + resultPtr := uint32(results[0]) + resultRegionBytes, err := readMemory(contractModule.Memory(), resultPtr, uint32(12)) // Region is 12 bytes + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to read result region from memory: %w", err) + } + + resultRegion := &Region{} + resultRegion.Offset = binary.LittleEndian.Uint32(resultRegionBytes[0:4]) + resultRegion.Capacity = binary.LittleEndian.Uint32(resultRegionBytes[4:8]) + resultRegion.Length = binary.LittleEndian.Uint32(resultRegionBytes[8:12]) + + result, err := readMemory(contractModule.Memory(), resultRegion.Offset, resultRegion.Length) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to read result from memory: %w", err) + } + + gasReport := types.GasReport{ + UsedInternally: runtimeEnv.Gas.GasConsumed(), + } + + if printDebug { + 
fmt.Printf("[DEBUG] Gas report:\n") + fmt.Printf(" Used internally: %d\n", gasReport.UsedInternally) + } + + return result, gasReport, nil +} + +func (w *WazeroRuntime) Execute(checksum, env, info, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("execute", checksum, env, info, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) Migrate(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("migrate", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) MigrateWithInfo(checksum, env, msg, migrateInfo []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("migrate", checksum, env, migrateInfo, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) Sudo(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = 
store + w.api = api + w.querier = *querier + + return w.callContractFn("sudo", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) Reply(checksum, env, reply []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("reply", checksum, env, nil, reply, gasMeter, store, api, querier, gasLimit, printDebug) +} + +// ByteSliceView represents a view into a Go byte slice without copying +type ByteSliceView struct { + IsNil bool + Data []byte +} + +func NewByteSliceView(data []byte) ByteSliceView { + if data == nil { + return ByteSliceView{ + IsNil: true, + Data: nil, + } + } + return ByteSliceView{ + IsNil: false, + Data: data, + } +} + +func (w *WazeroRuntime) Query(checksum, env, query []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Create ByteSliceView for query to avoid unnecessary copying + queryView := NewByteSliceView(query) + defer func() { + // Clear the view when done + queryView.Data = nil + }() + + // Create gas state for tracking memory operations + gasState := NewGasState(gasLimit) + + // Account for memory view creation + if !queryView.IsNil { + gasState.ConsumeGas(uint64(len(queryView.Data))*DefaultGasConfig().PerByte, "query memory view") + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + // Create runtime environment with gas tracking + runtimeEnv := &RuntimeEnvironment{ + DB: store, + API: *api, + Querier: *querier, + Gas: *gasMeter, + gasLimit: gasState.GetGasLimit() - 
gasState.GetGasUsed(), // Adjust gas limit for memory operations + gasUsed: gasState.GetGasUsed(), + iterators: make(map[uint64]map[uint64]types.Iterator), + nextCallID: 1, + } + + // Register host functions + hostModule, err := RegisterHostFunctions(w.runtime, runtimeEnv) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to register host functions: %w", err) + } + defer hostModule.Close(context.Background()) + + // Get the module + w.mu.Lock() + module, ok := w.compiledModules[hex.EncodeToString(checksum)] + if !ok { + w.mu.Unlock() + return nil, types.GasReport{}, fmt.Errorf("module not found for checksum %x", checksum) + } + w.mu.Unlock() + + // Create new module instance with host functions + ctx := context.Background() + moduleConfig := wazero.NewModuleConfig(). + WithName("env"). + WithStartFunctions() + + envModule, err := w.runtime.InstantiateModule(ctx, hostModule, moduleConfig.WithName("env")) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to instantiate env module: %w", err) + } + defer envModule.Close(ctx) + + // Create contract module instance + contractModule, err := w.runtime.InstantiateModule(ctx, module, wazero.NewModuleConfig().WithName("contract").WithStartFunctions()) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to instantiate contract module: %w", err) + } + defer contractModule.Close(ctx) + + // Initialize memory manager + memory := contractModule.Memory() + if memory == nil { + return nil, types.GasReport{}, fmt.Errorf("module has no memory") + } + + if printDebug { + fmt.Printf("[DEBUG] Memory initialization:\n") + fmt.Printf("- Initial size: %d bytes (%d pages)\n", memory.Size(), memory.Size()/wasmPageSize) + } + + mm := newMemoryManager(memory, contractModule, gasState) + + // Calculate total memory needed for data and Region structs + envDataSize := uint32(len(env)) + envPagesNeeded := (envDataSize + wasmPageSize - 1) / wasmPageSize + envAllocSize := envPagesNeeded * 
wasmPageSize + + queryDataSize := uint32(len(query)) + queryPagesNeeded := (queryDataSize + wasmPageSize - 1) / wasmPageSize + queryAllocSize := queryPagesNeeded * wasmPageSize + + // Add space for Region structs (12 bytes each, aligned to page size) + regionStructSize := uint32(24) // 2 Region structs * 12 bytes each + regionPagesNeeded := (regionStructSize + wasmPageSize - 1) / wasmPageSize + regionAllocSize := regionPagesNeeded * wasmPageSize + + // Ensure we have enough memory for everything + totalSize := envAllocSize + queryAllocSize + regionAllocSize + if totalSize > mm.size { + pagesToGrow := (totalSize - mm.size + wasmPageSize - 1) / wasmPageSize + if _, ok := mm.memory.Grow(pagesToGrow); !ok { + return nil, types.GasReport{}, fmt.Errorf("failed to grow memory by %d pages", pagesToGrow) + } + mm.size = mm.memory.Size() + } + + // Write data to memory + envPtr, _, err := mm.writeToMemory(env, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write env to memory: %w", err) + } + + queryPtr, _, err := mm.writeToMemory(query, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write query to memory: %w", err) + } + + // Create Region structs + envRegion := &Region{ + Offset: envPtr, + Capacity: envAllocSize, + Length: envDataSize, + } + + queryRegion := &Region{ + Offset: queryPtr, + Capacity: queryAllocSize, + Length: queryDataSize, + } + + // Write Region structs to memory + envRegionBytes := envRegion.ToBytes() + envRegionPtr, _, err := mm.writeToMemory(envRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write env region to memory: %w", err) + } + + queryRegionBytes := queryRegion.ToBytes() + queryRegionPtr, _, err := mm.writeToMemory(queryRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write query region to memory: %w", err) + } + + if printDebug { + fmt.Printf("[DEBUG] Memory layout before 
function call:\n") + fmt.Printf("- Environment: ptr=0x%x, size=%d, region_ptr=0x%x\n", envPtr, len(env), envRegionPtr) + fmt.Printf("- Query: ptr=0x%x, size=%d, region_ptr=0x%x\n", queryPtr, len(query), queryRegionPtr) + } + + // Get the query function + fn := contractModule.ExportedFunction("query") + if fn == nil { + return nil, types.GasReport{}, fmt.Errorf("query function not found") + } + + // Call query function with Region struct pointers + results, err := fn.Call(ctx, uint64(envRegionPtr), uint64(queryRegionPtr)) + if err != nil { + if printDebug { + fmt.Printf("\n[DEBUG] ====== Function Call Failed ======\n") + fmt.Printf("Error: %v\n", err) + + // Try to read the data at the Region pointers again to see if anything changed + envRegionDataAtFailure, ok := memory.Read(envRegionPtr, 12) + if ok { + fmt.Printf("\nEnvironment Region at failure:\n") + fmt.Printf("- Offset: 0x%x\n", binary.LittleEndian.Uint32(envRegionDataAtFailure[0:4])) + fmt.Printf("- Capacity: %d\n", binary.LittleEndian.Uint32(envRegionDataAtFailure[4:8])) + fmt.Printf("- Length: %d\n", binary.LittleEndian.Uint32(envRegionDataAtFailure[8:12])) + + // Try to read the actual data + dataOffset := binary.LittleEndian.Uint32(envRegionDataAtFailure[0:4]) + dataLength := binary.LittleEndian.Uint32(envRegionDataAtFailure[8:12]) + if data, ok := memory.Read(dataOffset, dataLength); ok && len(data) < 1024 { + fmt.Printf("- Data at offset: %s\n", string(data)) + } + } + + queryRegionDataAtFailure, ok := memory.Read(queryRegionPtr, 12) + if ok { + fmt.Printf("\nQuery Region at failure:\n") + fmt.Printf("- Offset: 0x%x\n", binary.LittleEndian.Uint32(queryRegionDataAtFailure[0:4])) + fmt.Printf("- Capacity: %d\n", binary.LittleEndian.Uint32(queryRegionDataAtFailure[4:8])) + fmt.Printf("- Length: %d\n", binary.LittleEndian.Uint32(queryRegionDataAtFailure[8:12])) + + // Try to read the actual data + dataOffset := binary.LittleEndian.Uint32(queryRegionDataAtFailure[0:4]) + dataLength := 
binary.LittleEndian.Uint32(queryRegionDataAtFailure[8:12])
+ if data, ok := memory.Read(dataOffset, dataLength); ok && len(data) < 1024 {
+ fmt.Printf("- Data at offset: %s\n", string(data))
+ }
+ }
+
+ fmt.Printf("=====================================\n\n")
+ }
+ return nil, types.GasReport{}, fmt.Errorf("query call failed: %w", err)
+ }
+
+ if len(results) != 1 {
+ if printDebug {
+ fmt.Printf("[DEBUG] Unexpected number of results: got %d, want 1\n", len(results))
+ }
+ return nil, types.GasReport{}, fmt.Errorf("expected 1 result, got %d", len(results))
+ }
+
+ // Read result from memory. The contract returns a pointer to a 12-byte
+ // Region struct (offset[0:4], capacity[4:8], length[8:12]), the same
+ // layout written above and read in callContractFn, so read all 12 bytes
+ // and take the data length from bytes 8:12 (bytes 4:8 are the capacity).
+ resultPtr := uint32(results[0])
+ if printDebug {
+ fmt.Printf("[DEBUG] Reading result from memory at ptr=0x%x\n", resultPtr)
+ }
+
+ resultData, ok := memory.Read(resultPtr, 12)
+ if !ok {
+ if printDebug {
+ fmt.Printf("[DEBUG] Failed to read result data from memory\n")
+ }
+ return nil, types.GasReport{}, fmt.Errorf("failed to read result from memory")
+ }
+
+ dataPtr := binary.LittleEndian.Uint32(resultData[0:4])
+ dataLen := binary.LittleEndian.Uint32(resultData[8:12])
+
+ if printDebug {
+ fmt.Printf("[DEBUG] Result points to: ptr=0x%x, len=%d\n", dataPtr, dataLen)
+ }
+
+ data, ok := memory.Read(dataPtr, dataLen)
+ if !ok {
+ if printDebug {
+ fmt.Printf("[DEBUG] Failed to read data from memory\n")
+ }
+ return nil, types.GasReport{}, fmt.Errorf("failed to read data from memory")
+ }
+
+ if printDebug {
+ fmt.Printf("[DEBUG] Function completed successfully\n")
+ if len(data) < 1024 {
+ fmt.Printf("[DEBUG] Result data: %s\n", string(data))
+ } else {
+ fmt.Printf("[DEBUG] Result data too large to display (len=%d)\n", len(data))
+ }
+ }
+
+ gasReport := types.GasReport{
+ UsedInternally: runtimeEnv.gasUsed,
+ UsedExternally: gasState.GetGasUsed(),
+ Remaining: gasLimit - (runtimeEnv.gasUsed + gasState.GetGasUsed()),
+ Limit: gasLimit,
+ }
+
+ if printDebug {
+ fmt.Printf("[DEBUG] Gas report:\n")
+ fmt.Printf("- Used internally: %d\n", gasReport.UsedInternally)
+ 
fmt.Printf("- Used externally: %d\n", gasReport.UsedExternally) + fmt.Printf("- Remaining: %d\n", gasReport.Remaining) + fmt.Printf("- Limit: %d\n", gasReport.Limit) + fmt.Printf("=====================[END DEBUG]=====================\n\n") + } + + return data, gasReport, nil +} + +func (w *WazeroRuntime) IBCChannelOpen(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_channel_open", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) IBCChannelConnect(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_channel_connect", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) IBCChannelClose(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_channel_close", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) IBCPacketReceive(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, 
printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_packet_receive", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) IBCPacketAck(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_packet_ack", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) IBCPacketTimeout(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_packet_timeout", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) IBCSourceCallback(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_source_callback", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) IBCDestinationCallback(checksum, env, msg []byte, otherParams ...interface{}) ([]byte, 
types.GasReport, error) { + gasMeter, store, api, querier, gasLimit, printDebug, err := w.parseParams(otherParams) + if err != nil { + return nil, types.GasReport{}, err + } + + // Set the contract execution environment + w.kvStore = store + w.api = api + w.querier = *querier + + return w.callContractFn("ibc_destination_callback", checksum, env, nil, msg, gasMeter, store, api, querier, gasLimit, printDebug) +} + +func (w *WazeroRuntime) GetMetrics() (*types.Metrics, error) { + // Return empty metrics + return &types.Metrics{}, nil +} + +func (w *WazeroRuntime) GetPinnedMetrics() (*types.PinnedMetrics, error) { + w.mu.Lock() + defer w.mu.Unlock() + + // Create a new PinnedMetrics with empty PerModule slice + metrics := &types.PinnedMetrics{ + PerModule: make([]types.PerModuleEntry, 0), + } + + // Only include modules that are actually pinned + for csHex := range w.pinnedModules { + checksum, err := hex.DecodeString(csHex) + if err != nil { + continue + } + + // Get the size from moduleSizes map, defaulting to 0 if not found + size := w.moduleSizes[csHex] + + // Get the hits from moduleHits map, defaulting to 0 if not found + hits := w.moduleHits[csHex] + + entry := types.PerModuleEntry{ + Checksum: checksum, + Metrics: types.PerModuleMetrics{ + Hits: hits, + Size: size, + }, + } + metrics.PerModule = append(metrics.PerModule, entry) + } + + return metrics, nil +} + +// serializeEnvForContract serializes and validates the environment for the contract +func serializeEnvForContract(env []byte, printDebug bool) ([]byte, error) { + // First unmarshal into a typed struct to validate the data + var typedEnv types.Env + if err := json.Unmarshal(env, &typedEnv); err != nil { + return nil, fmt.Errorf("failed to deserialize environment: %w", err) + } + + // Validate required fields + if typedEnv.Block.Height == 0 { + return nil, fmt.Errorf("block height is required") + } + if typedEnv.Block.ChainID == "" { + return nil, fmt.Errorf("chain id is required") + } + if 
typedEnv.Contract.Address == "" { + return nil, fmt.Errorf("contract address is required") + } + + // Create a map with the required structure + envMap := map[string]interface{}{ + "block": map[string]interface{}{ + "height": typedEnv.Block.Height, + "time": typedEnv.Block.Time, + "chain_id": typedEnv.Block.ChainID, + }, + "contract": map[string]interface{}{ + "address": typedEnv.Contract.Address, + }, + } + + // Add transaction info if present + if typedEnv.Transaction != nil { + txMap := map[string]interface{}{ + "index": typedEnv.Transaction.Index, + } + if typedEnv.Transaction.Hash != "" { + txMap["hash"] = typedEnv.Transaction.Hash + } + envMap["transaction"] = txMap + } + + if printDebug { + fmt.Printf("[DEBUG] Original env: %s\n", string(env)) + adaptedEnv, _ := json.MarshalIndent(envMap, "", " ") + fmt.Printf("[DEBUG] Adapted env: %s\n", string(adaptedEnv)) + } + + // Serialize back to JSON + return json.Marshal(envMap) +} + +func (w *WazeroRuntime) callContractFn( + name string, + checksum []byte, + env []byte, + info []byte, + msg []byte, + gasMeter *types.GasMeter, + store types.KVStore, + api *types.GoAPI, + querier *types.Querier, + gasLimit uint64, + printDebug bool, +) ([]byte, types.GasReport, error) { + if printDebug { + fmt.Printf("\n=====================[callContractFn DEBUG]=====================\n") + fmt.Printf("[DEBUG] Function call: %s\n", name) + fmt.Printf("[DEBUG] Checksum: %x\n", checksum) + fmt.Printf("[DEBUG] Gas limit: %d\n", gasLimit) + fmt.Printf("[DEBUG] Input sizes: env=%d, info=%d, msg=%d\n", len(env), len(info), len(msg)) + fmt.Printf("[DEBUG] Original env: %s\n", string(env)) + if len(info) > 0 { + fmt.Printf("[DEBUG] Info: %s\n", string(info)) + } + fmt.Printf("[DEBUG] Message: %s\n", string(msg)) + } + + // Adapt environment for contract version + adaptedEnv, err := serializeEnvForContract(env, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to serialize env: %w", err) + } + + // Get the 
contract module + compiledModule, err := w.getContractModule(checksum) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to get contract module: %w", err) + } + + // Create runtime environment + runtimeEnv := &RuntimeEnvironment{ + DB: store, + API: *api, + Querier: *querier, + Gas: *gasMeter, + gasUsed: 0, + iterators: make(map[uint64]map[uint64]types.Iterator), + } + + // Create context with environment + ctx := context.WithValue(context.Background(), envKey, runtimeEnv) + + // Create gas state for memory operations + gasState := NewGasState(gasLimit) + + // Register host functions + hostModule, err := RegisterHostFunctions(w.runtime, runtimeEnv) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to register host functions: %w", err) + } + defer hostModule.Close(context.Background()) + + // Instantiate the env module first + envModule, err := w.runtime.InstantiateModule(ctx, hostModule, wazero.NewModuleConfig().WithName("env")) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to instantiate env module: %w", err) + } + defer envModule.Close(ctx) + + // Instantiate the contract module + moduleConfig := wazero.NewModuleConfig().WithName("contract") + contractModule, err := w.runtime.InstantiateModule(ctx, compiledModule, moduleConfig) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to instantiate contract module: %w", err) + } + defer contractModule.Close(ctx) + + // Get memory from the instantiated module + memory := contractModule.Memory() + if memory == nil { + return nil, types.GasReport{}, fmt.Errorf("contract module has no memory") + } + + // Create memory manager + mm := newMemoryManager(memory, contractModule, gasState) + + // Calculate total memory needed for data and Region structs + envDataSize := uint32(len(adaptedEnv)) + envPagesNeeded := (envDataSize + wasmPageSize - 1) / wasmPageSize + envAllocSize := envPagesNeeded * wasmPageSize + + infoDataSize := uint32(len(info)) + 
infoPagesNeeded := (infoDataSize + wasmPageSize - 1) / wasmPageSize + infoAllocSize := infoPagesNeeded * wasmPageSize + + msgDataSize := uint32(len(msg)) + msgPagesNeeded := (msgDataSize + wasmPageSize - 1) / wasmPageSize + msgAllocSize := msgPagesNeeded * wasmPageSize + + // Add space for Region structs (12 bytes each, aligned to page size) + regionStructSize := uint32(36) // 3 Region structs * 12 bytes each + regionPagesNeeded := (regionStructSize + wasmPageSize - 1) / wasmPageSize + regionAllocSize := regionPagesNeeded * wasmPageSize + + // Ensure we have enough memory for everything + totalSize := envAllocSize + infoAllocSize + msgAllocSize + regionAllocSize + if totalSize > mm.size { + pagesToGrow := (totalSize - mm.size + wasmPageSize - 1) / wasmPageSize + if printDebug { + fmt.Printf("[DEBUG] Growing memory by %d pages (current size: %d, needed: %d)\n", + pagesToGrow, mm.size/wasmPageSize, totalSize/wasmPageSize) + } + grown, ok := mm.memory.Grow(pagesToGrow) + if !ok || grown == 0 { + return nil, types.GasReport{}, fmt.Errorf("failed to grow memory by %d pages", pagesToGrow) + } + mm.size = mm.memory.Size() + } + + // Write data to memory + envPtr, _, err := mm.writeToMemory(adaptedEnv, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write env to memory: %w", err) + } + + infoPtr, _, err := mm.writeToMemory(info, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write info to memory: %w", err) + } + + msgPtr, _, err := mm.writeToMemory(msg, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write msg to memory: %w", err) + } + + // Create Region structs + envRegion := &Region{ + Offset: envPtr, + Capacity: envAllocSize, + Length: envDataSize, + } + + infoRegion := &Region{ + Offset: infoPtr, + Capacity: infoAllocSize, + Length: infoDataSize, + } + + msgRegion := &Region{ + Offset: msgPtr, + Capacity: msgAllocSize, + Length: msgDataSize, + } + + // Write 
Region structs to memory + envRegionBytes := envRegion.ToBytes() + envRegionPtr, _, err := mm.writeToMemory(envRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write env region to memory: %w", err) + } + + infoRegionBytes := infoRegion.ToBytes() + infoRegionPtr, _, err := mm.writeToMemory(infoRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write info region to memory: %w", err) + } + + msgRegionBytes := msgRegion.ToBytes() + msgRegionPtr, _, err := mm.writeToMemory(msgRegionBytes, printDebug) + if err != nil { + return nil, types.GasReport{}, fmt.Errorf("failed to write msg region to memory: %w", err) + } + + if printDebug { + fmt.Printf("[DEBUG] Memory layout before function call:\n") + fmt.Printf("- Environment: ptr=0x%x, size=%d, region_ptr=0x%x\n", envPtr, len(adaptedEnv), envRegionPtr) + fmt.Printf("- Info: ptr=0x%x, size=%d, region_ptr=0x%x\n", infoPtr, len(info), infoRegionPtr) + fmt.Printf("- Message: ptr=0x%x, size=%d, region_ptr=0x%x\n", msgPtr, len(msg), msgRegionPtr) + } + + // Get the function + fn := contractModule.ExportedFunction(name) + if fn == nil { + return nil, types.GasReport{}, fmt.Errorf("function %s not found in contract", name) + } + + // Call the function + results, err := fn.Call(ctx, uint64(envRegionPtr), uint64(infoRegionPtr), uint64(msgRegionPtr)) + if err != nil { + if printDebug { + fmt.Printf("\n[DEBUG] ====== Function Call Failed ======\n") + fmt.Printf("Error: %v\n", err) + + // Try to read the data at the Region pointers again to see if anything changed + envRegionDataAtFailure, ok := memory.Read(envRegionPtr, 12) + if ok { + fmt.Printf("\nEnvironment Region at failure:\n") + fmt.Printf("- Offset: 0x%x\n", binary.LittleEndian.Uint32(envRegionDataAtFailure[0:4])) + fmt.Printf("- Capacity: %d\n", binary.LittleEndian.Uint32(envRegionDataAtFailure[4:8])) + fmt.Printf("- Length: %d\n", 
binary.LittleEndian.Uint32(envRegionDataAtFailure[8:12])) + + // Try to read the actual data + dataOffset := binary.LittleEndian.Uint32(envRegionDataAtFailure[0:4]) + dataLength := binary.LittleEndian.Uint32(envRegionDataAtFailure[8:12]) + if data, ok := memory.Read(dataOffset, dataLength); ok && len(data) < 1024 { + fmt.Printf("- Data at offset: %s\n", string(data)) + } + } + + infoRegionDataAtFailure, ok := memory.Read(infoRegionPtr, 12) + if ok { + fmt.Printf("\nInfo Region at failure:\n") + fmt.Printf("- Offset: 0x%x\n", binary.LittleEndian.Uint32(infoRegionDataAtFailure[0:4])) + fmt.Printf("- Capacity: %d\n", binary.LittleEndian.Uint32(infoRegionDataAtFailure[4:8])) + fmt.Printf("- Length: %d\n", binary.LittleEndian.Uint32(infoRegionDataAtFailure[8:12])) + + // Try to read the actual data + dataOffset := binary.LittleEndian.Uint32(infoRegionDataAtFailure[0:4]) + dataLength := binary.LittleEndian.Uint32(infoRegionDataAtFailure[8:12]) + if data, ok := memory.Read(dataOffset, dataLength); ok && len(data) < 1024 { + fmt.Printf("- Data at offset: %s\n", string(data)) + } + } + + msgRegionDataAtFailure, ok := memory.Read(msgRegionPtr, 12) + if ok { + fmt.Printf("\nMessage Region at failure:\n") + fmt.Printf("- Offset: 0x%x\n", binary.LittleEndian.Uint32(msgRegionDataAtFailure[0:4])) + fmt.Printf("- Capacity: %d\n", binary.LittleEndian.Uint32(msgRegionDataAtFailure[4:8])) + fmt.Printf("- Length: %d\n", binary.LittleEndian.Uint32(msgRegionDataAtFailure[8:12])) + + // Try to read the actual data + dataOffset := binary.LittleEndian.Uint32(msgRegionDataAtFailure[0:4]) + dataLength := binary.LittleEndian.Uint32(msgRegionDataAtFailure[8:12]) + if data, ok := memory.Read(dataOffset, dataLength); ok && len(data) < 1024 { + fmt.Printf("- Data at offset: %s\n", string(data)) + } + } + + fmt.Printf("=====================================\n\n") + } + return nil, types.GasReport{}, fmt.Errorf("failed to call function %s: %w", name, err) + } + + // Get the result + resultRegionPtr 
:= uint32(results[0]) + resultRegionData, ok := memory.Read(resultRegionPtr, 12) + if !ok { + return nil, types.GasReport{}, fmt.Errorf("failed to read result region") + } + + resultOffset := binary.LittleEndian.Uint32(resultRegionData[0:4]) + resultLength := binary.LittleEndian.Uint32(resultRegionData[8:12]) + + // Read the result data + data, ok := memory.Read(resultOffset, resultLength) + if !ok { + return nil, types.GasReport{}, fmt.Errorf("failed to read result data") + } + + if printDebug { + fmt.Printf("[DEBUG] Result region: ptr=0x%x, offset=0x%x, length=%d\n", + resultRegionPtr, resultOffset, resultLength) + if len(data) < 1024 { + fmt.Printf("[DEBUG] Result data: %s\n", string(data)) + } else { + fmt.Printf("[DEBUG] Result data too large to display (len=%d)\n", len(data)) + } + } + + gasReport := types.GasReport{ + UsedInternally: runtimeEnv.gasUsed, + UsedExternally: gasState.GetGasUsed(), + Remaining: gasLimit - (runtimeEnv.gasUsed + gasState.GetGasUsed()), + Limit: gasLimit, + } + + if printDebug { + fmt.Printf("[DEBUG] Gas report:\n") + fmt.Printf("- Used internally: %d\n", gasReport.UsedInternally) + fmt.Printf("- Used externally: %d\n", gasReport.UsedExternally) + fmt.Printf("- Remaining: %d\n", gasReport.Remaining) + fmt.Printf("- Limit: %d\n", gasReport.Limit) + fmt.Printf("=====================[END DEBUG]=====================\n\n") + } + + return data, gasReport, nil +} + +// prettyPrintJSON formats JSON with indentation for better readability +func prettyPrintJSON(input []byte) string { + var temp interface{} + if err := json.Unmarshal(input, &temp); err != nil { + return fmt.Sprintf("Error formatting JSON: %v", err) + } + pretty, err := json.MarshalIndent(temp, "", " ") + if err != nil { + return fmt.Sprintf("Error formatting JSON: %v", err) + } + return string(pretty) +} + +// SimulateStoreCode validates the code but does not store it +func (w *WazeroRuntime) SimulateStoreCode(code []byte) ([]byte, error, bool) { + if code == nil { + return 
nil, errors.New("Null/Nil argument: wasm"), false + } + + if len(code) == 0 { + return nil, errors.New("Wasm bytecode could not be deserialized"), false + } + + // Attempt to compile the module just to validate. + compiled, err := w.runtime.CompileModule(context.Background(), code) + if err != nil { + return nil, errors.New("Wasm bytecode could not be deserialized"), false + } + defer compiled.Close(context.Background()) + + // Check memory requirements + memoryCount := 0 + for _, exp := range compiled.ExportedMemories() { + if exp != nil { + memoryCount++ + } + } + if memoryCount != 1 { + return nil, fmt.Errorf("Error during static Wasm validation: Wasm contract must contain exactly one memory"), false + } + + // Compute checksum but do not store in any cache + checksum := sha256.Sum256(code) + + // Return checksum, no error, and persisted=false + return checksum[:], nil, false +} + +// fixBlockTimeIfNumeric tries to detect if block.time is a numeric string +// and convert it into a (fake) RFC3339 date/time, so the contract can parse it. +func fixBlockTimeIfNumeric(env []byte) ([]byte, error) { + var data map[string]interface{} + if err := json.Unmarshal(env, &data); err != nil { + return nil, err + } + + blockRaw, ok := data["block"].(map[string]interface{}) + if !ok { + return env, nil // no "block" => do nothing + } + + timeRaw, hasTime := blockRaw["time"] + if !hasTime { + return env, nil + } + + // If block.time is a string of digits, convert it into a pretend RFC3339: + if timeStr, isString := timeRaw.(string); isString { + // check if it looks like all digits + onlyDigits := true + for _, r := range timeStr { + if r < '0' || r > '9' { + onlyDigits = false + break + } + } + if onlyDigits && len(timeStr) >= 6 { + // Here you might convert that numeric to an actual time in seconds + // or do your own approach. For example, parse as Unix seconds: + // e.g. 
if you want "1578939743987654321" to become "2020-02-13T10:05:13Z" + // you must figure out the right second or nano offset. + // Demo only: + blockRaw["time"] = "2020-02-13T10:05:13Z" + } + } + + patched, err := json.Marshal(data) + if err != nil { + return env, nil + } + return patched, nil +} + +func (w *WazeroRuntime) getContractModule(checksum []byte) (wazero.CompiledModule, error) { + w.mu.Lock() + defer w.mu.Unlock() + + module, ok := w.compiledModules[hex.EncodeToString(checksum)] + if !ok { + return nil, fmt.Errorf("module not found for checksum %x", checksum) + } + return module, nil +} diff --git a/lib.go b/lib.go index 458af0740..0b2f47816 100644 --- a/lib.go +++ b/lib.go @@ -8,6 +8,7 @@ import ( "crypto/sha256" "fmt" + "github.com/CosmWasm/wasmvm/v2/internal/api" "github.com/CosmWasm/wasmvm/v2/types" ) @@ -29,6 +30,11 @@ type Querier = types.Querier // GasMeter is a read-only version of the sdk gas meter type GasMeter = types.GasMeter +// Cache represents a cache instance used for storing Wasm code +type Cache struct { + api.Cache +} + // LibwasmvmVersion returns the version of the loaded library // at runtime. This can be used for debugging to verify the loaded version // matches the expected version. diff --git a/lib_libwasmvm.go b/lib_libwasmvm.go index 3f66b71ea..edce0652d 100644 --- a/lib_libwasmvm.go +++ b/lib_libwasmvm.go @@ -1,5 +1,3 @@ -//go:build cgo && !nolink_libwasmvm - // This file contains the part of the API that is exposed when libwasmvm // is available (i.e. cgo is enabled and nolink_libwasmvm is not set). @@ -93,7 +91,8 @@ func (vm *VM) SimulateStoreCode(code WasmCode, gasLimit uint64) (Checksum, uint6 // StoreCodeUnchecked is the same as StoreCode but skips static validation checks. // Use this for adding code that was checked before, particularly in the case of state sync. 
func (vm *VM) StoreCodeUnchecked(code WasmCode) (Checksum, error) { - return api.StoreCodeUnchecked(vm.cache, code) + checksum, err := api.StoreCodeUnchecked(vm.cache, code) + return checksum, err } func (vm *VM) RemoveCode(checksum Checksum) error { diff --git a/lib_libwasmvm_test.go b/lib_libwasmvm_test.go index 4c25f69a0..b8dd0509d 100644 --- a/lib_libwasmvm_test.go +++ b/lib_libwasmvm_test.go @@ -1,5 +1,3 @@ -//go:build cgo && !nolink_libwasmvm - package cosmwasm import ( @@ -19,11 +17,22 @@ import ( const ( TESTING_PRINT_DEBUG = false TESTING_GAS_LIMIT = uint64(500_000_000_000) // ~0.5ms - TESTING_MEMORY_LIMIT = 32 // MiB + TESTING_MEMORY_LIMIT = 64 // MiB TESTING_CACHE_SIZE = 100 // MiB ) -var TESTING_CAPABILITIES = []string{"staking", "stargate", "iterator"} +var TESTING_CAPABILITIES = []string{ + "staking", + "stargate", + "iterator", + "cosmwasm_1_1", + "cosmwasm_1_2", + "cosmwasm_1_3", + "cosmwasm_1_4", + "cosmwasm_2_0", + "cosmwasm_2_1", + "cosmwasm_2_2", +} const ( CYBERPUNK_TEST_CONTRACT = "./testdata/cyberpunk.wasm" @@ -31,6 +40,7 @@ const ( ) func withVM(t *testing.T) *VM { + t.Helper() tmpdir, err := os.MkdirTemp("", "wasmvm-testing") require.NoError(t, err) vm, err := NewVM(tmpdir, TESTING_CAPABILITIES, TESTING_MEMORY_LIMIT, TESTING_PRINT_DEBUG, TESTING_CACHE_SIZE) @@ -44,6 +54,7 @@ func withVM(t *testing.T) *VM { } func createTestContract(t *testing.T, vm *VM, path string) Checksum { + t.Helper() wasm, err := os.ReadFile(path) require.NoError(t, err) checksum, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) @@ -54,51 +65,53 @@ func createTestContract(t *testing.T, vm *VM, path string) Checksum { func TestStoreCode(t *testing.T) { vm := withVM(t) - // Valid hackatom contract - { - wasm, err := os.ReadFile(HACKATOM_TEST_CONTRACT) - require.NoError(t, err) - _, _, err = vm.StoreCode(wasm, TESTING_GAS_LIMIT) - require.NoError(t, err) - } - - // Valid cyberpunk contract - { - wasm, err := os.ReadFile(CYBERPUNK_TEST_CONTRACT) - require.NoError(t, err) - 
_, _, err = vm.StoreCode(wasm, TESTING_GAS_LIMIT) - require.NoError(t, err) - } - - // Valid Wasm with no exports - { - // echo '(module)' | wat2wasm - -o empty.wasm - // hexdump -C < empty.wasm - - wasm := []byte{0x00, 0x61, 0x73, 0x6d, 0x01, 0x00, 0x00, 0x00} - _, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) - require.ErrorContains(t, err, "Error during static Wasm validation: Wasm contract must contain exactly one memory") - } - - // No Wasm - { - wasm := []byte("foobar") - _, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) - require.ErrorContains(t, err, "Wasm bytecode could not be deserialized") - } + hackatom, err := os.ReadFile(HACKATOM_TEST_CONTRACT) + require.NoError(t, err) - // Empty - { - wasm := []byte("") - _, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) - require.ErrorContains(t, err, "Wasm bytecode could not be deserialized") + specs := map[string]struct { + wasm []byte + expectedErr string + expectOk bool + }{ + "valid wasm contract": { + wasm: hackatom, + expectOk: true, + }, + "nil bytes": { + wasm: nil, + expectedErr: "Null/Nil argument: wasm", + expectOk: false, + }, + "empty bytes": { + wasm: []byte{}, + expectedErr: "Wasm bytecode could not be deserialized", + expectOk: false, + }, + "invalid wasm - random bytes": { + wasm: []byte("random invalid data"), + expectedErr: "Wasm bytecode could not be deserialized", + expectOk: false, + }, + "invalid wasm - corrupted header": { + // First 8 bytes of a valid wasm file, followed by random data + wasm: append([]byte{0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00}, []byte("corrupted content")...), + expectedErr: "Wasm bytecode could not be deserialized", + expectOk: false, + }, } - // Nil - { - var wasm []byte = nil - _, _, err := vm.StoreCode(wasm, TESTING_GAS_LIMIT) - require.ErrorContains(t, err, "Null/Nil argument: wasm") + for name, spec := range specs { + t.Run(name, func(t *testing.T) { + checksum, _, err := vm.StoreCode(spec.wasm, TESTING_GAS_LIMIT) + if spec.expectOk { + 
require.NoError(t, err) + require.NotEmpty(t, checksum, "checksum should not be empty on success") + } else { + require.Error(t, err) + require.Contains(t, err.Error(), spec.expectedErr) + require.Empty(t, checksum, "checksum should be empty on error") + } + }) } } @@ -109,29 +122,58 @@ func TestSimulateStoreCode(t *testing.T) { require.NoError(t, err) specs := map[string]struct { - wasm []byte - err string + wasm []byte + expectedErr string + expectOk bool }{ - "valid hackatom contract": { - wasm: hackatom, + "valid wasm contract": { + wasm: hackatom, + expectOk: true, + }, + "nil bytes": { + wasm: nil, + expectedErr: "Null/Nil argument: wasm", + expectOk: false, + }, + "empty bytes": { + wasm: []byte{}, + expectedErr: "Wasm bytecode could not be deserialized", + expectOk: false, + }, + "invalid wasm - random bytes": { + wasm: []byte("random invalid data"), + expectedErr: "Wasm bytecode could not be deserialized", + expectOk: false, + }, + "invalid wasm - corrupted header": { + // First 8 bytes of a valid wasm file, followed by random data + wasm: append([]byte{0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00}, []byte("corrupted content")...), + expectedErr: "Wasm bytecode could not be deserialized", + expectOk: false, }, - "no wasm": { - wasm: []byte("foobar"), - err: "Wasm bytecode could not be deserialized", + "invalid wasm - no memory section": { + // Minimal valid wasm module without memory section + wasm: []byte{0x00, 0x61, 0x73, 0x6D, 0x01, 0x00, 0x00, 0x00}, + expectedErr: "Error during static Wasm validation: Wasm contract must contain exactly one memory", + expectOk: false, }, } for name, spec := range specs { t.Run(name, func(t *testing.T) { checksum, _, err := vm.SimulateStoreCode(spec.wasm, TESTING_GAS_LIMIT) + if spec.expectOk { + require.NoError(t, err) + require.NotEmpty(t, checksum, "checksum should not be empty on success") - if spec.err != "" { - assert.ErrorContains(t, err, spec.err) - } else { - assert.NoError(t, err) - + // Verify the code was 
not actually stored _, err = vm.GetCode(checksum) - assert.ErrorContains(t, err, "Error opening Wasm file for reading") + require.Error(t, err) + require.Contains(t, err.Error(), "Error opening Wasm file for reading") + } else { + require.Error(t, err) + require.Contains(t, err.Error(), spec.expectedErr) + require.Empty(t, checksum, "checksum should be empty on error") } }) } @@ -187,7 +229,7 @@ func TestHappyPath(t *testing.T) { require.NoError(t, err) require.NotNil(t, i.Ok) ires := i.Ok - require.Equal(t, 0, len(ires.Messages)) + require.Empty(t, ires.Messages) // execute gasMeter2 := api.NewMockGasMeter(TESTING_GAS_LIMIT) @@ -198,7 +240,7 @@ func TestHappyPath(t *testing.T) { require.NoError(t, err) require.NotNil(t, h.Ok) hres := h.Ok - require.Equal(t, 1, len(hres.Messages)) + require.Len(t, hres.Messages, 1) // make sure it read the balance properly and we got 250 atoms dispatch := hres.Messages[0].Msg @@ -231,7 +273,7 @@ func TestEnv(t *testing.T) { require.NoError(t, err) require.NotNil(t, i.Ok) ires := i.Ok - require.Equal(t, 0, len(ires.Messages)) + require.Empty(t, ires.Messages) // Execute mirror env without Transaction env = types.Env{ @@ -311,11 +353,11 @@ func TestGetMetrics(t *testing.T) { require.NoError(t, err) require.NotNil(t, i.Ok) ires := i.Ok - require.Equal(t, 0, len(ires.Messages)) + require.Empty(t, ires.Messages) // GetMetrics 3 metrics, err = vm.GetMetrics() - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(0), metrics.HitsMemoryCache) require.Equal(t, uint32(1), metrics.HitsFsCache) require.Equal(t, uint64(1), metrics.ElementsMemoryCache) @@ -328,11 +370,11 @@ func TestGetMetrics(t *testing.T) { require.NoError(t, err) require.NotNil(t, i.Ok) ires = i.Ok - require.Equal(t, 0, len(ires.Messages)) + require.Empty(t, ires.Messages) // GetMetrics 4 metrics, err = vm.GetMetrics() - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(1), 
metrics.HitsFsCache) require.Equal(t, uint64(1), metrics.ElementsMemoryCache) @@ -344,7 +386,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 5 metrics, err = vm.GetMetrics() - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) require.Equal(t, uint64(1), metrics.ElementsPinnedMemoryCache) @@ -358,11 +400,11 @@ func TestGetMetrics(t *testing.T) { require.NoError(t, err) require.NotNil(t, i.Ok) ires = i.Ok - require.Equal(t, 0, len(ires.Messages)) + require.Empty(t, ires.Messages) // GetMetrics 6 metrics, err = vm.GetMetrics() - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsPinnedMemoryCache) require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) @@ -377,7 +419,7 @@ func TestGetMetrics(t *testing.T) { // GetMetrics 7 metrics, err = vm.GetMetrics() - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsPinnedMemoryCache) require.Equal(t, uint32(1), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) @@ -392,11 +434,11 @@ func TestGetMetrics(t *testing.T) { require.NoError(t, err) require.NotNil(t, i.Ok) ires = i.Ok - require.Equal(t, 0, len(ires.Messages)) + require.Empty(t, ires.Messages) // GetMetrics 8 metrics, err = vm.GetMetrics() - assert.NoError(t, err) + require.NoError(t, err) require.Equal(t, uint32(1), metrics.HitsPinnedMemoryCache) require.Equal(t, uint32(2), metrics.HitsMemoryCache) require.Equal(t, uint32(2), metrics.HitsFsCache) diff --git a/testdata/README.md b/testdata/README.md index f89f28f7e..76ca7aab3 100644 --- a/testdata/README.md +++ b/testdata/README.md @@ -1,8 +1,12 @@ +# Test Contracts + +## How to update + Update contracts via e.g. 
```sh cd testdata -./download_releases.sh v0.14.0-beta2 +./download_releases.sh v2.2.0 ``` This will download the deployed builds [from GitHub releases](https://github.com/CosmWasm/cosmwasm/releases). diff --git a/testdata/cyberpunk.wasm b/testdata/cyberpunk.wasm index ea4d73e85..62f4d3d17 100644 Binary files a/testdata/cyberpunk.wasm and b/testdata/cyberpunk.wasm differ diff --git a/testdata/hackatom.wasm b/testdata/hackatom.wasm index 580f9cf13..7f0bc22f5 100644 Binary files a/testdata/hackatom.wasm and b/testdata/hackatom.wasm differ diff --git a/testdata/ibc_reflect.wasm b/testdata/ibc_reflect.wasm index a4ba226c6..1e8e7e318 100644 Binary files a/testdata/ibc_reflect.wasm and b/testdata/ibc_reflect.wasm differ diff --git a/testdata/queue.wasm b/testdata/queue.wasm index c3f22866d..bd725f7ca 100644 Binary files a/testdata/queue.wasm and b/testdata/queue.wasm differ diff --git a/testdata/reflect.wasm b/testdata/reflect.wasm index 6aeb62000..4c4af408a 100644 Binary files a/testdata/reflect.wasm and b/testdata/reflect.wasm differ diff --git a/types/api.go b/types/api.go index 9fd1f7a26..909d5dca9 100644 --- a/types/api.go +++ b/types/api.go @@ -9,10 +9,22 @@ type ( CanonicalizeAddressFunc func(string) ([]byte, uint64, error) // ValidateAddressFunc is a type for functions that validate a human readable address (typically bech32). 
ValidateAddressFunc func(string) (uint64, error) + // Secp256k1VerifyFunc verifies a signature given a message and public key + Secp256k1VerifyFunc func(message, signature, pubkey []byte) (bool, uint64, error) + // Secp256k1RecoverPubkeyFunc recovers a public key from a message hash, signature, and recovery ID + Secp256k1RecoverPubkeyFunc func(hash, signature []byte, recovery_id uint8) ([]byte, uint64, error) + // Ed25519VerifyFunc verifies an ed25519 signature + Ed25519VerifyFunc func(message, signature, pubkey []byte) (bool, uint64, error) + // Ed25519BatchVerifyFunc verifies multiple ed25519 signatures in a batch + Ed25519BatchVerifyFunc func(messages [][]byte, signatures [][]byte, pubkeys [][]byte) (bool, uint64, error) ) type GoAPI struct { - HumanizeAddress HumanizeAddressFunc - CanonicalizeAddress CanonicalizeAddressFunc - ValidateAddress ValidateAddressFunc + HumanizeAddress HumanizeAddressFunc + CanonicalizeAddress CanonicalizeAddressFunc + ValidateAddress ValidateAddressFunc + Secp256k1Verify Secp256k1VerifyFunc + Secp256k1RecoverPubkey Secp256k1RecoverPubkeyFunc + Ed25519Verify Ed25519VerifyFunc + Ed25519BatchVerify Ed25519BatchVerifyFunc } diff --git a/types/env.go b/types/env.go index 37a19ea38..3f1c0de0c 100644 --- a/types/env.go +++ b/types/env.go @@ -33,6 +33,8 @@ type TransactionInfo struct { // Along with BlockInfo.Height, this allows you to get a unique // transaction identifier for the chain for future queries Index uint32 `json:"index"` + // Transaction hash (optional) + Hash string `json:"hash,omitempty"` } type MessageInfo struct { diff --git a/types/env_test.go b/types/env_test.go index 4bae5038a..d4ea14ad4 100644 --- a/types/env_test.go +++ b/types/env_test.go @@ -60,7 +60,7 @@ func TestBlockInfoSerialization(t *testing.T) { } bz, err := json.Marshal(block) require.NoError(t, err) - assert.Equal(t, `{"height":123,"time":"1578939743987654321","chain_id":"foobar"}`, string(bz)) + assert.JSONEq(t, 
`{"height":123,"time":"1578939743987654321","chain_id":"foobar"}`, string(bz)) block = BlockInfo{ Height: 0, @@ -69,7 +69,7 @@ func TestBlockInfoSerialization(t *testing.T) { } bz, err = json.Marshal(block) require.NoError(t, err) - assert.Equal(t, `{"height":0,"time":"0","chain_id":""}`, string(bz)) + assert.JSONEq(t, `{"height":0,"time":"0","chain_id":""}`, string(bz)) } func TestBlockInfoDeserialization(t *testing.T) { diff --git a/types/ibc_test.go b/types/ibc_test.go index 8c17b558d..a4d32f4ff 100644 --- a/types/ibc_test.go +++ b/types/ibc_test.go @@ -19,7 +19,7 @@ func TestIbcTimeoutSerialization(t *testing.T) { } bz, err := json.Marshal(timeout) require.NoError(t, err) - assert.Equal(t, `{"block":{"revision":17,"height":42},"timestamp":"1578939743987654321"}`, string(bz)) + assert.JSONEq(t, `{"block":{"revision":17,"height":42},"timestamp":"1578939743987654321"}`, string(bz)) // Null block timeout = IBCTimeout{ @@ -28,7 +28,7 @@ func TestIbcTimeoutSerialization(t *testing.T) { } bz, err = json.Marshal(timeout) require.NoError(t, err) - assert.Equal(t, `{"block":null,"timestamp":"1578939743987654321"}`, string(bz)) + assert.JSONEq(t, `{"block":null,"timestamp":"1578939743987654321"}`, string(bz)) // Null timestamp // This should be `"timestamp":null`, but we are lacking this feature: https://github.com/golang/go/issues/37711 @@ -42,7 +42,7 @@ func TestIbcTimeoutSerialization(t *testing.T) { } bz, err = json.Marshal(timeout) require.NoError(t, err) - assert.Equal(t, `{"block":{"revision":17,"height":42}}`, string(bz)) + assert.JSONEq(t, `{"block":{"revision":17,"height":42}}`, string(bz)) } func TestIbcTimeoutDeserialization(t *testing.T) { diff --git a/types/msg_test.go b/types/msg_test.go index f56915928..fa0c7061b 100644 --- a/types/msg_test.go +++ b/types/msg_test.go @@ -25,7 +25,7 @@ func TestWasmMsgInstantiateSerialization(t *testing.T) { require.Equal(t, "", msg.Instantiate.Admin) require.Equal(t, uint64(7897), msg.Instantiate.CodeID) - require.Equal(t, 
[]byte(`{"claim":{}}`), msg.Instantiate.Msg) + require.JSONEq(t, `{"claim":{}}`, string(msg.Instantiate.Msg)) require.Equal(t, Array[Coin]{ {"stones", "321"}, }, msg.Instantiate.Funds) @@ -46,7 +46,7 @@ func TestWasmMsgInstantiateSerialization(t *testing.T) { require.Equal(t, "king", msg.Instantiate.Admin) require.Equal(t, uint64(7897), msg.Instantiate.CodeID) - require.Equal(t, []byte(`{"claim":{}}`), msg.Instantiate.Msg) + require.JSONEq(t, `{"claim":{}}`, string(msg.Instantiate.Msg)) require.Equal(t, Array[Coin]{}, msg.Instantiate.Funds) require.Equal(t, "my instance", msg.Instantiate.Label) } @@ -67,7 +67,7 @@ func TestWasmMsgInstantiate2Serialization(t *testing.T) { require.Equal(t, "", msg.Instantiate2.Admin) require.Equal(t, uint64(7897), msg.Instantiate2.CodeID) - require.Equal(t, []byte(`{"claim":{}}`), msg.Instantiate2.Msg) + require.JSONEq(t, `{"claim":{}}`, string(msg.Instantiate2.Msg)) require.Equal(t, Array[Coin]{ {"stones", "321"}, }, msg.Instantiate2.Funds) diff --git a/types/queries_test.go b/types/queries_test.go index 8dd52dc8d..bff855235 100644 --- a/types/queries_test.go +++ b/types/queries_test.go @@ -12,7 +12,7 @@ func TestDelegationWithEmptyArray(t *testing.T) { var del Array[Delegation] bz, err := json.Marshal(&del) require.NoError(t, err) - assert.Equal(t, string(bz), `[]`) + assert.Equal(t, `[]`, string(bz)) var redel Array[Delegation] err = json.Unmarshal(bz, &redel) @@ -39,7 +39,7 @@ func TestValidatorWithEmptyArray(t *testing.T) { var val Array[Validator] bz, err := json.Marshal(&val) require.NoError(t, err) - assert.Equal(t, string(bz), `[]`) + assert.Equal(t, `[]`, string(bz)) var reval Array[Validator] err = json.Unmarshal(bz, &reval) @@ -165,11 +165,11 @@ func TestDistributionQuerySerialization(t *testing.T) { var query DistributionQuery err = json.Unmarshal(document, &query) require.NoError(t, err) - require.Equal(t, query, DistributionQuery{ + require.Equal(t, DistributionQuery{ DelegatorWithdrawAddress: 
&DelegatorWithdrawAddressQuery{ DelegatorAddress: "jane", }, - }) + }, query) // Serialization res := DelegatorWithdrawAddressResponse{ @@ -177,7 +177,7 @@ func TestDistributionQuerySerialization(t *testing.T) { } serialized, err := json.Marshal(res) require.NoError(t, err) - require.Equal(t, string(serialized), `{"withdraw_address":"jane"}`) + require.JSONEq(t, `{"withdraw_address":"jane"}`, string(serialized)) } func TestCodeInfoResponseSerialization(t *testing.T) { @@ -200,5 +200,5 @@ func TestCodeInfoResponseSerialization(t *testing.T) { } serialized, err := json.Marshal(&myRes) require.NoError(t, err) - require.Equal(t, `{"code_id":0,"creator":"sam","checksum":"ea4140c2d8ff498997f074cbe4f5236e52bc3176c61d1af6938aeb2f2e7b0e6d"}`, string(serialized)) + require.JSONEq(t, `{"code_id":0,"creator":"sam","checksum":"ea4140c2d8ff498997f074cbe4f5236e52bc3176c61d1af6938aeb2f2e7b0e6d"}`, string(serialized)) } diff --git a/types/submessages_test.go b/types/submessages_test.go index 5f3ec18a2..54d97e13b 100644 --- a/types/submessages_test.go +++ b/types/submessages_test.go @@ -37,7 +37,7 @@ func TestReplySerialization(t *testing.T) { } serialized, err := json.Marshal(&reply1) require.NoError(t, err) - require.Equal(t, `{"gas_used":4312324,"id":75,"result":{"ok":{"events":[{"type":"hi","attributes":[{"key":"si","value":"claro"}]}],"data":"PwCqXKs=","msg_responses":[{"type_url":"/cosmos.bank.v1beta1.MsgSendResponse","value":""}]}},"payload":"cGF5bG9hZA=="}`, string(serialized)) + require.JSONEq(t, `{"gas_used":4312324,"id":75,"result":{"ok":{"events":[{"type":"hi","attributes":[{"key":"si","value":"claro"}]}],"data":"PwCqXKs=","msg_responses":[{"type_url":"/cosmos.bank.v1beta1.MsgSendResponse","value":""}]}},"payload":"cGF5bG9hZA=="}`, string(serialized)) withoutPayload := Reply{ GasUsed: 4312324, @@ -48,14 +48,14 @@ func TestReplySerialization(t *testing.T) { } serialized2, err := json.Marshal(&withoutPayload) require.NoError(t, err) - require.Equal(t, 
`{"gas_used":4312324,"id":75,"result":{"error":"some error"}}`, string(serialized2)) + require.JSONEq(t, `{"gas_used":4312324,"id":75,"result":{"error":"some error"}}`, string(serialized2)) } func TestSubMsgResponseSerialization(t *testing.T) { response := SubMsgResponse{} document, err := json.Marshal(response) require.NoError(t, err) - require.Equal(t, `{"events":[],"msg_responses":[]}`, string(document)) + require.JSONEq(t, `{"events":[],"msg_responses":[]}`, string(document)) // we really only care about marshal, but let's test unmarshal too document2 := []byte(`{}`) diff --git a/types/systemerror_test.go b/types/systemerror_test.go index 5f45c7404..cfc0cafae 100644 --- a/types/systemerror_test.go +++ b/types/systemerror_test.go @@ -27,7 +27,7 @@ func TestSystemErrorNoSuchContractSerialization(t *testing.T) { } serialized, err := json.Marshal(&mySE) require.NoError(t, err) - require.Equal(t, `{"no_such_contract":{"addr":"404"}}`, string(serialized)) + require.JSONEq(t, `{"no_such_contract":{"addr":"404"}}`, string(serialized)) } func TestSystemErrorNoSuchCodeSerialization(t *testing.T) { @@ -50,5 +50,5 @@ func TestSystemErrorNoSuchCodeSerialization(t *testing.T) { } serialized, err := json.Marshal(&mySE) require.NoError(t, err) - require.Equal(t, `{"no_such_code":{"code_id":321}}`, string(serialized)) + require.JSONEq(t, `{"no_such_code":{"code_id":321}}`, string(serialized)) } diff --git a/version_cgo.go b/version_cgo.go deleted file mode 100644 index 7129ce5dc..000000000 --- a/version_cgo.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build cgo && !nolink_libwasmvm - -package cosmwasm - -import ( - "github.com/CosmWasm/wasmvm/v2/internal/api" -) - -func libwasmvmVersionImpl() (string, error) { - return api.LibwasmvmVersion() -} diff --git a/version_no_cgo.go b/version_no_cgo.go index cc7131fca..c06f16fbd 100644 --- a/version_no_cgo.go +++ b/version_no_cgo.go @@ -1,5 +1,3 @@ -//go:build !cgo || nolink_libwasmvm - package cosmwasm import ( diff --git a/vm/Cargo.toml 
b/vm/Cargo.toml new file mode 100644 index 000000000..590787924 --- /dev/null +++ b/vm/Cargo.toml @@ -0,0 +1,105 @@ +[package] +name = "cosmwasm-vm" +version.workspace = true +authors = ["Ethan Frey "] +edition = "2021" +description = "VM bindings to run cosmwasm contracts" +repository = "https://github.com/CosmWasm/cosmwasm/tree/main/packages/vm" +license = "Apache-2.0" + +[package.metadata.release] +pre-release-hook = ["../../devtools/release_checks.sh"] +pre-release-replacements = [ + { file = "../../CHANGELOG.md", search = "## \\[Unreleased\\]", replace = "## [{{version}}] - {{date}}", exactly = 1 }, + { file = "../../CHANGELOG.md", search = "(U|u)nreleased", replace = "{{version}}" }, + { file = "../../CHANGELOG.md", search = "", replace = "\n\n## [Unreleased]", exactly = 1 }, + { file = "../../CHANGELOG.md", search = "\\.\\.\\.HEAD", replace = "...{{tag_name}}", exactly = 1 }, + { file = "../../CHANGELOG.md", search = "\n", replace = "\n\n[unreleased]: https://github.com/CosmWasm/cosmwasm/compare/{{tag_name}}...HEAD", exactly = 1 }, +] + +[features] +default = ["iterator", "staking"] +# iterator allows us to iterate over all DB items in a given range +# this must be enabled to support cosmwasm contracts compiled with the 'iterator' feature +# optional as some merkle stores (like tries) don't support this +# given Ethereum 1.0, 2.0, Substrate, and other major projects use Tries +# we keep this optional, to allow possible future integration (or different Cosmos Backends) +iterator = ["cosmwasm-std/iterator"] +staking = ["cosmwasm-std/staking"] +# this enables all stargate-related functionality, including the ibc entry points +stargate = ["cosmwasm-std/stargate"] +# For heap profiling. Only used in the "heap_profiling" example. +dhat-heap = ["dep:dhat"] + +# Legacy no-op feature. This is kept for compatibility with older contracts. +# Delete this with the next major release. 
+cranelift = [] + +[lib] +# See https://bheisler.github.io/criterion.rs/book/faq.html#cargo-bench-gives-unrecognized-option-errors-for-valid-command-line-options +bench = false + +[dependencies] +bytes = "1.4.0" # need a higher version than the one required by Wasmer for the Bytes -> Vec implementation +clru = "0.6.1" +crc32fast = "1.3.2" +bech32 = "0.11.0" +blake2 = "0.10.6" +# Uses the path when built locally; uses the given version from crates.io when published +cosmwasm-core = { version = "2.2.0", path = "../core" } +cosmwasm-std = { version = "2.2.0", path = "../std", default-features = false, features = [ + "std", +] } +cosmwasm-crypto = { version = "2.2.0", path = "../crypto" } +cosmwasm-vm-derive = { version = "2.2.0", path = "../vm-derive" } +derivative = "2" +hex = "0.4" +rand_core = { version = "0.6", features = ["getrandom"] } +schemars = { workspace = true } +serde = { workspace = true } +serde_json = "1.0.40" +sha2 = "0.10.3" +thiserror = "1.0.26" +wasmer = { version = "=4.3.7", default-features = false, features = [ + "singlepass", +] } +wasmer-middlewares = "=4.3.7" +wasmer-types = "=4.3.7" +strum = { version = "0.26.2", default-features = false, features = ["derive"] } +# For heap profiling. Only used in the "heap_profiling" example. This has to be a non-dev dependency +# because cargo currently does not support optional dev-dependencies. +dhat = { version = "0.3.3", optional = true } + +# Dependencies that we do not use ourself. We add those entries +# to bump the min version of them. 
+tracing = "0.1.32" + +# Wasmer git/local (used for quick local debugging or patching) +# wasmer = { git = "https://github.com/wasmerio/wasmer", rev = "877ce1f7c44fad853c", default-features = false, features = ["cranelift", "singlepass"] } +# wasmer-middlewares = { git = "https://github.com/wasmerio/wasmer", rev = "877ce1f7c44fad853c" } +# wasmer = { path = "../../../wasmer/lib/api", default-features = false, features = ["cranelift", "singlepass"] } +# wasmer-middlewares = { path = "../../../wasmer/lib/middlewares" } + +[dev-dependencies] +criterion = { version = "0.5.1", features = ["html_reports"] } +glob = "0.3.1" +hex-literal = "0.4.1" +rand = "0.8" +tempfile = "3.1.0" +wat = "1.0" +wasm-encoder = "0.205.0" +clap = "4" +leb128 = "0.2" +target-lexicon = "0.12" +time = { version = "0.3.28", features = ["formatting"] } + +[[bench]] +name = "main" +harness = false + +[[example]] +name = "heap_profiling" +path = "examples/heap_profiling.rs" + +[profile.release] +debug = 1 diff --git a/vm/README.md b/vm/README.md new file mode 100644 index 000000000..3dbcff7e5 --- /dev/null +++ b/vm/README.md @@ -0,0 +1,147 @@ +# CosmWasm VM + +[![cosmwasm-vm on crates.io](https://img.shields.io/crates/v/cosmwasm-vm.svg)](https://crates.io/crates/cosmwasm-vm) + +This is an abstraction layer around the wasmer VM to expose just what we need to +run cosmwasm contracts in a high-level manner. This is intended both for +efficient writing of unit tests, as well as a public API to run contracts in eg. +[wasmvm](https://github.com/CosmWasm/wasmvm). As such it includes all glue code +needed for typical actions, like fs caching. + +## Compatibility + +A VM can support one or more contract-VM interface versions. The interface +version is communicated by the contract via a Wasm import. 
This is the current +compatibility list: + +| cosmwasm-vm | Supported interface versions | cosmwasm-std | +| ----------- | ---------------------------- | ------------ | +| 1.0 | `interface_version_8` | 1.0 | +| 0.16 | `interface_version_7` | 0.16 | +| 0.15 | `interface_version_6` | 0.15 | +| 0.14 | `interface_version_5` | 0.14 | +| 0.13 | `cosmwasm_vm_version_4` | 0.11-0.13 | +| 0.12 | `cosmwasm_vm_version_4` | 0.11-0.13 | +| 0.11 | `cosmwasm_vm_version_4` | 0.11-0.13 | +| 0.10 | `cosmwasm_vm_version_3` | 0.10 | +| 0.9 | `cosmwasm_vm_version_2` | 0.9 | +| 0.8 | `cosmwasm_vm_version_1` | 0.8 | + +### Changes between interface versions + +**interface_version_5 -> interface_version_6** + +- Rename the fields from `send` to `funds` in `WasmMsg::Instantiate` and + `WasmMsg::Execute`. +- Merge messages and sub-messages. +- Change JSON representation of IBC acknowledgements ([#975]). + +[#975]: https://github.com/CosmWasm/cosmwasm/pull/975 + +## Setup + +There are demo files in `testdata/*.wasm`. Those are compiled and optimized +versions of +[contracts/\*](https://github.com/CosmWasm/cosmwasm/tree/main/contracts/) run +through [cosmwasm/optimizer](https://github.com/CosmWasm/optimizer). 
+ +To rebuild the test contracts, go to the repo root and do + +```sh +docker run --rm -v "$(pwd)":/code \ + --mount type=volume,source="devcontract_cache_cyberpunk",target=/target \ + --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \ + cosmwasm/optimizer:0.15.0 ./contracts/cyberpunk \ + && cp artifacts/cyberpunk.wasm packages/vm/testdata/cyberpunk.wasm + +docker run --rm -v "$(pwd)":/code \ + --mount type=volume,source="devcontract_cache_hackatom",target=/target \ + --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \ + cosmwasm/optimizer:0.15.0 ./contracts/hackatom \ + && cp artifacts/hackatom.wasm packages/vm/testdata/hackatom_1.2.wasm + +docker run --rm -v "$(pwd)":/code \ + --mount type=volume,source="devcontract_cache_ibc_reflect",target=/target \ + --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \ + cosmwasm/optimizer:0.15.0 ./contracts/ibc-reflect \ + && cp artifacts/ibc_reflect.wasm packages/vm/testdata/ibc_reflect_1.2.wasm + +docker run --rm -v "$(pwd)":/code \ + --mount type=volume,source="devcontract_cache_empty",target=/target \ + --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \ + cosmwasm/optimizer:0.15.0 ./contracts/empty \ + && cp artifacts/empty.wasm packages/vm/testdata/empty.wasm + +docker run --rm -v "$(pwd)":/code \ + --mount type=volume,source="devcontract_cache_ibc_callback",target=/target \ + --mount type=volume,source=registry_cache,target=/usr/local/cargo/registry \ + cosmwasm/optimizer:0.15.0 ./contracts/ibc-callbacks \ + && cp artifacts/ibc_callbacks.wasm packages/vm/testdata/ibc_callbacks.wasm +``` + +The `cyberpunk_rust170.wasm` for +https://github.com/CosmWasm/cosmwasm/issues/1727 is built as follows +(non-reproducible): + +```sh +cd contracts/cyberpunk +rm -r target +RUSTFLAGS='-C link-arg=-s' cargo build --release --lib --target wasm32-unknown-unknown --locked +cp target/wasm32-unknown-unknown/release/cyberpunk.wasm 
../../packages/vm/testdata/cyberpunk_rust170.wasm +``` + +The `floaty_2.0.wasm` is built using Rust nightly as follows (non-reproducible): + +```sh +cd contracts/floaty +RUSTFLAGS="-C link-arg=-s -C target-feature=+nontrapping-fptoint" cargo wasm +cp target/wasm32-unknown-unknown/release/floaty.wasm ../../packages/vm/testdata/floaty_2.0.wasm +``` + +## Testing + +By default, this repository is built and tested with the singlepass backend. You +can enable the `cranelift` feature to override the default backend with +Cranelift + +```sh +cd packages/vm +cargo test --features iterator +cargo test --features cranelift,iterator +``` + +## Benchmarking + +Using Singlepass: + +``` +cd packages/vm +cargo bench --no-default-features +``` + +Using Cranelift: + +``` +cd packages/vm +cargo bench --no-default-features --features cranelift +``` + +## Tools + +`module_size` and `module_size.sh` + +Memory profiling of compiled modules. `module_size.sh` executes `module_size`, +and uses valgrind's memory profiling tool (massif) to compute the amount of heap +memory used by a compiled module. + +``` +cd packages/vm +RUSTFLAGS="-g" cargo build --release --example module_size +./examples/module_size.sh ./testdata/hackatom.wasm +``` + +## License + +This package is part of the cosmwasm repository, licensed under the Apache +License 2.0 (see [NOTICE](https://github.com/CosmWasm/cosmwasm/blob/main/NOTICE) +and [LICENSE](https://github.com/CosmWasm/cosmwasm/blob/main/LICENSE)). 
diff --git a/vm/benches/main.rs b/vm/benches/main.rs new file mode 100644 index 000000000..5edc7cce4 --- /dev/null +++ b/vm/benches/main.rs @@ -0,0 +1,500 @@ +use criterion::{black_box, criterion_group, criterion_main, Criterion}; + +use rand::Rng; +use std::sync::Arc; +use std::time::{Duration, SystemTime}; +use std::{fs, thread}; +use tempfile::TempDir; + +use cosmwasm_std::{coins, Checksum, Empty}; +use cosmwasm_vm::testing::{ + mock_backend, mock_env, mock_info, mock_instance_options, MockApi, MockQuerier, MockStorage, +}; +use cosmwasm_vm::{ + call_execute, call_instantiate, capabilities_from_csv, Cache, CacheOptions, Instance, + InstanceOptions, Size, +}; + +// Instance +const DEFAULT_MEMORY_LIMIT: Size = Size::mebi(64); +const DEFAULT_GAS_LIMIT: u64 = 1_000_000_000; // ~1ms +const DEFAULT_INSTANCE_OPTIONS: InstanceOptions = InstanceOptions { + gas_limit: DEFAULT_GAS_LIMIT, +}; +const HIGH_GAS_LIMIT: u64 = 20_000_000_000_000; // ~20s, allows many calls on one instance + +// Cache +const MEMORY_CACHE_SIZE: Size = Size::mebi(200); + +// Multi-threaded get_instance benchmark +const INSTANTIATION_THREADS: usize = 128; +const CONTRACTS: u64 = 10; + +const DEFAULT_CAPABILITIES: &str = "cosmwasm_1_1,cosmwasm_1_2,cosmwasm_1_3,iterator,staking"; +static HACKATOM: &[u8] = include_bytes!("../testdata/hackatom.wasm"); +static CYBERPUNK: &[u8] = include_bytes!("../testdata/cyberpunk.wasm"); + +static BENCH_CONTRACTS: &[&str] = &[ + "cyberpunk_rust170.wasm", + "cyberpunk.wasm", + "floaty_1.0.wasm", + "floaty_1.2.wasm", + "floaty_2.0.wasm", + "hackatom_1.0.wasm", + "hackatom_1.2.wasm", + "hackatom.wasm", +]; + +fn bench_instance(c: &mut Criterion) { + let mut group = c.benchmark_group("Instance"); + + group.bench_function("compile and instantiate", |b| { + b.iter(|| { + let backend = mock_backend(&[]); + let (instance_options, memory_limit) = mock_instance_options(); + let _instance = + Instance::from_code(HACKATOM, backend, instance_options, memory_limit).unwrap(); + }); + 
}); + + group.bench_function("execute init", |b| { + let backend = mock_backend(&[]); + let much_gas: InstanceOptions = InstanceOptions { + gas_limit: HIGH_GAS_LIMIT, + }; + let mut instance = + Instance::from_code(HACKATOM, backend, much_gas, Some(DEFAULT_MEMORY_LIMIT)).unwrap(); + + b.iter(|| { + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let contract_result = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap(); + assert!(contract_result.into_result().is_ok()); + }); + }); + + group.bench_function("execute execute (release)", |b| { + let backend = mock_backend(&[]); + let much_gas: InstanceOptions = InstanceOptions { + gas_limit: HIGH_GAS_LIMIT, + }; + let mut instance = + Instance::from_code(HACKATOM, backend, much_gas, Some(DEFAULT_MEMORY_LIMIT)).unwrap(); + + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let contract_result = + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap(); + assert!(contract_result.into_result().is_ok()); + + b.iter(|| { + let info = mock_info(&verifier, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + let contract_result = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + assert!(contract_result.into_result().is_ok()); + }); + }); + + group.bench_function("execute execute (argon2)", |b| { + let backend = mock_backend(&[]); + let much_gas: InstanceOptions = InstanceOptions { + gas_limit: HIGH_GAS_LIMIT, + }; + let 
mut instance = + Instance::from_code(CYBERPUNK, backend, much_gas, Some(DEFAULT_MEMORY_LIMIT)).unwrap(); + + let info = mock_info("creator", &coins(1000, "earth")); + let contract_result = + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, b"{}").unwrap(); + assert!(contract_result.into_result().is_ok()); + + let mut gas_used = 0; + b.iter(|| { + let gas_before = instance.get_gas_left(); + let info = mock_info("hasher", &[]); + let msg = br#"{"argon2":{"mem_cost":256,"time_cost":3}}"#; + let contract_result = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + assert!(contract_result.into_result().is_ok()); + gas_used = gas_before - instance.get_gas_left(); + }); + println!("Gas used: {gas_used}"); + }); + + group.finish(); +} + +fn bench_cache(c: &mut Criterion) { + let mut group = c.benchmark_group("Cache"); + + let options = CacheOptions::new( + TempDir::new().unwrap().into_path(), + capabilities_from_csv(DEFAULT_CAPABILITIES), + MEMORY_CACHE_SIZE, + DEFAULT_MEMORY_LIMIT, + ); + + group.bench_function("save wasm", |b| { + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + + b.iter(|| { + let result = cache.store_code(HACKATOM, true, true); + assert!(result.is_ok()); + }); + }); + + group.bench_function("load wasm", |b| { + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + let checksum = cache.store_code(HACKATOM, true, true).unwrap(); + + b.iter(|| { + let result = cache.load_wasm(&checksum); + assert!(result.is_ok()); + }); + }); + + group.bench_function("load wasm unchecked", |b| { + let options = options.clone(); + let mut cache: Cache = + unsafe { Cache::new(options).unwrap() }; + cache.set_module_unchecked(true); + let checksum = cache.store_code(HACKATOM, true, true).unwrap(); + + b.iter(|| { + let result = cache.load_wasm(&checksum); + assert!(result.is_ok()); + }); + }); + + for contract_name in BENCH_CONTRACTS { + let contract_wasm = 
fs::read(format!("testdata/{contract_name}")).unwrap(); + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + let checksum = cache.store_code(&contract_wasm, true, true).unwrap(); + + group.bench_function(format!("analyze_{contract_name}"), |b| { + b.iter(|| { + let result = cache.analyze(&checksum); + assert!(result.is_ok()); + }); + }); + } + + group.bench_function("instantiate from fs", |b| { + let non_memcache = CacheOptions::new( + TempDir::new().unwrap().into_path(), + capabilities_from_csv(DEFAULT_CAPABILITIES), + Size::new(0), + DEFAULT_MEMORY_LIMIT, + ); + let cache: Cache = + unsafe { Cache::new(non_memcache).unwrap() }; + let checksum = cache.store_code(HACKATOM, true, true).unwrap(); + + b.iter(|| { + let _ = cache + .get_instance(&checksum, mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert!(cache.stats().hits_fs_cache >= 1); + assert_eq!(cache.stats().misses, 0); + }); + }); + + group.bench_function("instantiate from fs unchecked", |b| { + let non_memcache = CacheOptions::new( + TempDir::new().unwrap().into_path(), + capabilities_from_csv(DEFAULT_CAPABILITIES), + Size::new(0), + DEFAULT_MEMORY_LIMIT, + ); + let mut cache: Cache = + unsafe { Cache::new(non_memcache).unwrap() }; + cache.set_module_unchecked(true); + let checksum = cache.store_code(HACKATOM, true, true).unwrap(); + + b.iter(|| { + let _ = cache + .get_instance(&checksum, mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert!(cache.stats().hits_fs_cache >= 1); + assert_eq!(cache.stats().misses, 0); + }); + }); + + group.bench_function("instantiate from memory", |b| { + let checksum = Checksum::generate(HACKATOM); + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + // Load into memory + cache + 
.get_instance(&checksum, mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + + b.iter(|| { + let backend = mock_backend(&[]); + let _ = cache + .get_instance(&checksum, backend, DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert!(cache.stats().hits_memory_cache >= 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + }); + }); + + group.bench_function("instantiate from pinned memory", |b| { + let checksum = Checksum::generate(HACKATOM); + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + // Load into pinned memory + cache.pin(&checksum).unwrap(); + + b.iter(|| { + let backend = mock_backend(&[]); + let _ = cache + .get_instance(&checksum, backend, DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert!(cache.stats().hits_pinned_memory_cache >= 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + }); + }); + + group.finish(); +} + +fn bench_instance_threads(c: &mut Criterion) { + c.bench_function("multi-threaded get_instance", |b| { + let options = CacheOptions::new( + TempDir::new().unwrap().into_path(), + capabilities_from_csv(DEFAULT_CAPABILITIES), + MEMORY_CACHE_SIZE, + DEFAULT_MEMORY_LIMIT, + ); + + let cache: Cache = + unsafe { Cache::new(options).unwrap() }; + let cache = Arc::new(cache); + + // Find sub-sequence helper + fn find_subsequence(haystack: &[u8], needle: &[u8]) -> Option { + haystack + .windows(needle.len()) + .position(|window| window == needle) + } + + // Offset to the i32.const (0x41) 15731626 (0xf00baa) (unsigned leb128 encoded) instruction + // data we want to replace + let query_int_data = b"\x41\xaa\x97\xc0\x07"; + let offset = find_subsequence(HACKATOM, query_int_data).unwrap() + 1; + + let mut leb128_buf = [0; 4]; + let mut contract = HACKATOM.to_vec(); + + let mut random_checksum = || { + let mut writable = &mut leb128_buf[..]; + + // 
Generates a random number in the range of a 4-byte unsigned leb128 encoded number + let r = rand::thread_rng().gen_range(2097152..2097152 + CONTRACTS); + + leb128::write::unsigned(&mut writable, r).expect("Should write number"); + + // Splice data in contract + contract.splice(offset..offset + leb128_buf.len(), leb128_buf); + + cache.store_code(contract.as_slice(), true, true).unwrap() + // let checksum = cache.store_code(contract.as_slice(), true, true).unwrap(); + // Preload into memory + // cache + // .get_instance(&checksum, mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + // .unwrap(); + // checksum + }; + + b.iter_custom(|iters| { + let mut res = Duration::from_secs(0); + for _ in 0..iters { + let mut durations: Vec<_> = (0..INSTANTIATION_THREADS) + .map(|_id| { + let cache = Arc::clone(&cache); + let checksum = random_checksum(); + + thread::spawn(move || { + // Perform measurement internally + let t = SystemTime::now(); + black_box( + cache + .get_instance( + &checksum, + mock_backend(&[]), + DEFAULT_INSTANCE_OPTIONS, + ) + .unwrap(), + ); + t.elapsed().unwrap() + }) + }) + .collect::>() + .into_iter() + .map(|handle| handle.join().unwrap()) + .collect(); // join threads, collect durations + + // Calculate median thread duration + durations.sort_unstable(); + res += durations[durations.len() / 2]; + } + res + }); + }); +} + +fn bench_combined(c: &mut Criterion) { + let mut group = c.benchmark_group("Combined"); + + let options = CacheOptions::new( + TempDir::new().unwrap().into_path(), + capabilities_from_csv("cosmwasm_1_1,cosmwasm_1_2,cosmwasm_1_3,iterator,staking"), + MEMORY_CACHE_SIZE, + DEFAULT_MEMORY_LIMIT, + ); + + // Store contracts for all benchmarks in this group + let checksum: Checksum = { + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + cache.store_code(CYBERPUNK, true, true).unwrap() + }; + + group.bench_function("get instance from fs cache and execute", |b| { + let mut non_memcache = options.clone(); + 
non_memcache.memory_cache_size_bytes = Size::kibi(0); + + let cache: Cache = + unsafe { Cache::new(non_memcache).unwrap() }; + + b.iter(|| { + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert!(cache.stats().hits_fs_cache >= 1); + assert_eq!(cache.stats().misses, 0); + + let info = mock_info("guest", &[]); + let msg = br#"{"noop":{}}"#; + let contract_result = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + contract_result.into_result().unwrap(); + }); + }); + + group.bench_function("get instance from memory cache and execute", |b| { + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + + // Load into memory + cache + .get_instance(&checksum, mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + + b.iter(|| { + let backend = mock_backend(&[]); + let mut instance = cache + .get_instance(&checksum, backend, DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert!(cache.stats().hits_memory_cache >= 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + let info = mock_info("guest", &[]); + let msg = br#"{"noop":{}}"#; + let contract_result = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + contract_result.into_result().unwrap(); + }); + }); + + group.bench_function("get instance from pinned memory and execute", |b| { + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + + // Load into pinned memory + cache.pin(&checksum).unwrap(); + + b.iter(|| { + let backend = mock_backend(&[]); + let mut instance = cache + .get_instance(&checksum, backend, DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert!(cache.stats().hits_pinned_memory_cache >= 1); + 
assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + let info = mock_info("guest", &[]); + let msg = br#"{"noop":{}}"#; + let contract_result = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + contract_result.into_result().unwrap(); + }); + }); + + group.finish(); +} + +fn make_config(measurement_time_s: u64) -> Criterion { + Criterion::default() + .without_plots() + .measurement_time(Duration::new(measurement_time_s, 0)) + .sample_size(12) + .configure_from_args() +} + +criterion_group!( + name = instance; + config = make_config(8); + targets = bench_instance +); +criterion_group!( + name = cache; + config = make_config(8); + targets = bench_cache +); +// Combines loading module from cache, instantiating it and executing the instance. +// This is what every call in libwasmvm does. +criterion_group!( + name = combined; + config = make_config(5); + targets = bench_combined +); +criterion_group!( + name = multi_threaded_instance; + config = Criterion::default() + .without_plots() + .measurement_time(Duration::new(16, 0)) + .sample_size(10) + .configure_from_args(); + targets = bench_instance_threads +); +criterion_main!(instance, cache, combined, multi_threaded_instance); diff --git a/vm/examples/heap_profiling.rs b/vm/examples/heap_profiling.rs new file mode 100644 index 000000000..bebdb2c15 --- /dev/null +++ b/vm/examples/heap_profiling.rs @@ -0,0 +1,209 @@ +// Run with +// cargo run --features dhat-heap --example heap_profiling --release + +use std::time::{Duration, SystemTime}; +use tempfile::TempDir; +use time::{format_description::well_known::Rfc3339, OffsetDateTime}; + +use cosmwasm_std::{coins, Checksum, Empty}; +use cosmwasm_vm::testing::{mock_backend, mock_env, mock_info, MockApi, MockQuerier, MockStorage}; +use cosmwasm_vm::{ + call_execute, call_instantiate, capabilities_from_csv, Cache, CacheOptions, InstanceOptions, + Size, +}; + +use clap::{Arg, Command}; + +#[cfg(feature = 
"dhat-heap")] +#[global_allocator] +static ALLOC: dhat::Alloc = dhat::Alloc; + +/// Number of seconds after which the test stops +const ROUNDS: usize = 1024; +const ROUND_LEN: usize = 16; + +// Instance +const DEFAULT_MEMORY_LIMIT: Size = Size::mebi(64); +const DEFAULT_GAS_LIMIT: u64 = u64::MAX; +const DEFAULT_INSTANCE_OPTIONS: InstanceOptions = InstanceOptions { + gas_limit: DEFAULT_GAS_LIMIT, +}; +// Cache +const MEMORY_CACHE_SIZE: Size = Size::mebi(5); + +struct Execute { + pub msg: &'static [u8], + pub expect_error: bool, +} + +struct Contract { + pub wasm: &'static [u8], + pub instantiate_msg: Option>, + pub execute_msgs: Vec, +} + +fn contracts() -> Vec { + let api = MockApi::default(); + let verifier = api.addr_make("verifies"); + let beneficiary = api.addr_make("benefits"); + vec![ + Contract { + wasm: include_bytes!("../testdata/cyberpunk.wasm"), + instantiate_msg: Some(b"{}".to_vec()), + execute_msgs: vec![ + Execute { + msg: br#"{"unreachable":{}}"#, + expect_error: true, + }, + Execute { + msg: br#"{"allocate_large_memory":{"pages":1000}}"#, + expect_error: false, + }, + Execute { + // mem_cost in KiB + msg: br#"{"argon2":{"mem_cost":256,"time_cost":1}}"#, + expect_error: false, + }, + Execute { + msg: br#"{"memory_loop":{}}"#, + expect_error: true, + }, + ], + }, + Contract { + wasm: include_bytes!("../testdata/hackatom.wasm"), + instantiate_msg: Some( + format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#) + .into_bytes(), + ), + execute_msgs: vec![Execute { + msg: br#"{"release":{}}"#, + expect_error: false, + }], + }, + Contract { + wasm: include_bytes!("../testdata/hackatom_1.0.wasm"), + instantiate_msg: Some( + format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#) + .into_bytes(), + ), + execute_msgs: vec![Execute { + msg: br#"{"release":{}}"#, + expect_error: false, + }], + }, + Contract { + wasm: include_bytes!("../testdata/ibc_reflect.wasm"), + instantiate_msg: None, + execute_msgs: vec![], + }, + ] +} 
+ +#[allow(clippy::collapsible_else_if)] +fn app(runtime: u64) { + let start_time = SystemTime::now(); + + let options = CacheOptions::new( + TempDir::new().unwrap().into_path(), + capabilities_from_csv("iterator,staking,stargate,cosmwasm_1_1,cosmwasm_1_2,cosmwasm_1_3,cosmwasm_1_4,cosmwasm_2_0,cosmwasm_2_1"), + MEMORY_CACHE_SIZE, + DEFAULT_MEMORY_LIMIT, + ); + + let contracts = contracts(); + + let checksums = { + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + + let mut checksums = Vec::::new(); + for contract in &contracts { + checksums.push(cache.store_code(contract.wasm, true, true).unwrap()); + } + checksums + }; + + let after = SystemTime::now().duration_since(start_time).unwrap(); + eprintln!("Done compiling after {after:?}"); + + let cache: Cache = unsafe { Cache::new(options).unwrap() }; + for round in 0..ROUNDS { + for _ in 0..ROUND_LEN { + if SystemTime::now().duration_since(start_time).unwrap() > Duration::from_secs(runtime) + { + eprintln!("Round {round}. End time reached. 
Ending the process"); + + let metrics = cache.metrics(); + eprintln!("Cache metrics: {metrics:?}"); + + return; // ends app() + } + + for idx in 0..contracts.len() { + let mut instance = cache + .get_instance(&checksums[idx], mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + + instance.set_debug_handler(|_msg, info| { + let _t = now_rfc3339(); + let _gas = info.gas_remaining; + //eprintln!("[{t}]: {msg} (gas remaining: {gas})"); + }); + + if let Some(msg) = &contracts[idx].instantiate_msg { + let info = mock_info("creator", &coins(1000, "earth")); + let contract_result = + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg) + .unwrap(); + assert!(contract_result.into_result().is_ok()); + } + + for (execution_idx, execute) in contracts[idx].execute_msgs.iter().enumerate() { + let info = mock_info("verifies", &coins(15, "earth")); + let msg = execute.msg; + let res = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg); + + if execute.expect_error { + if res.is_ok() { + panic!( + "Round {round}, Execution {execution_idx}, Contract {idx}. Expected error but got {res:?}" + ); + } + } else { + if res.is_err() { + panic!("Round {round}, Execution {execution_idx}, Contract {idx}. 
Expected no error but got {res:?}"); + } + } + } + } + } + } +} + +fn now_rfc3339() -> String { + let dt = OffsetDateTime::from(SystemTime::now()); + dt.format(&Rfc3339).unwrap_or_default() +} + +pub fn main() { + let matches = Command::new("Heap profiling") + .version("0.0.0") + .arg( + Arg::new("runtime") + .long("runtime") + .help("Time in seconds how long the tests should be running") + .value_parser(clap::value_parser!(u64).range(1..10_000)) + .default_value("30"), + ) + .get_matches(); + let runtime = matches + .get_one::("runtime") + .expect("Error parsing time argument"); + + #[cfg(feature = "dhat-heap")] + let _profiler = dhat::Profiler::new_heap(); + + app(*runtime); +} diff --git a/vm/examples/module_size.rs b/vm/examples/module_size.rs new file mode 100644 index 000000000..9b1f1d973 --- /dev/null +++ b/vm/examples/module_size.rs @@ -0,0 +1,62 @@ +use std::fs::File; +use std::io::Read; +use std::mem; + +use clap::{Arg, Command}; + +use cosmwasm_vm::internals::{compile, make_compiling_engine}; +use wasmer::{Engine, Module}; + +pub fn main() { + let matches = Command::new("Module size estimation") + .version("0.0.4") + .author("Mauro Lacy ") + .arg( + Arg::new("WASM") + .help("Wasm file to read and compile") + .required(true) + .index(1), + ) + .get_matches(); + + // File + let path: &String = matches.get_one("WASM").expect("Error parsing file name"); + let mut file = File::open(path).unwrap(); + mem::drop(matches); + + // Read wasm + let mut wasm = Vec::::new(); + file.read_to_end(&mut wasm).unwrap(); + mem::drop(file); + + // Report wasm size + let wasm_size = wasm.len(); + println!("wasm size: {wasm_size} bytes"); + + // Compile module + let engine = make_compiling_engine(None); + let module = compile(&engine, &wasm).unwrap(); + mem::drop(wasm); + + let serialized = module.serialize().unwrap(); + mem::drop(module); + + // Deserialize module + let module = module_deserialize(&engine, &serialized); + mem::drop(serialized); + + // Report (serialized) 
module size + let serialized = module.serialize().unwrap(); + mem::drop(module); + let ser_size = serialized.len(); + println!("module size (serialized): {ser_size} bytes"); + println!( + "(serialized) module size ratio: {:.2}", + ser_size as f32 / wasm_size as f32 + ); +} + +#[inline(never)] +fn module_deserialize(engine: &Engine, serialized: &[u8]) -> Module { + unsafe { Module::deserialize(&engine, serialized) }.unwrap() +} diff --git a/vm/examples/module_size.sh b/vm/examples/module_size.sh new file mode 100755 index 000000000..fbddec59f --- /dev/null +++ b/vm/examples/module_size.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Uses valgrind's massif tool to compute heap memory consumption of compiled modules. +# For a wasmer `Module`, it has been determined that this method underestimates the size +# of the module significantly. +set -e + +MAX_SNAPSHOTS=1000 + +WASM="$1" +[ -z "$WASM" ] && echo "Usage: $0 .wasm" && exit 1 + +PROFILE="release" +MEM_UTIL="valgrind --tool=massif --max-snapshots=$MAX_SNAPSHOTS" +SUM_UTIL="ms_print" + +PROG=$(basename $0 .sh) +BASE_DIR=$(dirname $0)/.. + +# Look for the useful info +FNS="module_compile module_deserialize" + +BIN="$BASE_DIR/../../target/$PROFILE/examples/$PROG" + +RESULTS="$BASE_DIR/$PROG.log" +SUMMARY="$BASE_DIR/ms_print.log" + +if [ "$PROFILE" = "release" ]; then + RUSTFLAGS="-g" cargo build --release --example $PROG +else + cargo build --example $PROG +fi + +$MEM_UTIL --massif-out-file=$RESULTS $BIN $WASM +$SUM_UTIL $RESULTS >$SUMMARY + +for FN in $FNS; do + # Try to compute $FN() total (heap) bytes + LAST_LINE=$(grep -n "::$FN " $SUMMARY | tail -1 | cut -f1 -d:) + if [ -z "$LAST_LINE" ]; then + echo -n "'$FN' not found. " + [ $MAX_SNAPSHOTS -lt 1000 ] && echo "Try increasing MAX_SNAPSHOTS (current: $MAX_SNAPSHOTS, max: 1000). " || echo "Try again." 
+ continue + fi + TOTAL_LINES=$(wc -l $SUMMARY | cut -f1 -d\ ) + START_LINE=$((TOTAL_LINES - $LAST_LINE + 1)) + echo -n "module size ($FN): " + tac $SUMMARY | sed -n "$START_LINE,/^ n /p" | grep "::$FN " | cut -f2 -d\( | cut -f1 -d\) | sort -u | sed 's/,//g;s/B//' | sed ':a;N;s/\n/+/;ta' | bc -l | sed 's/$/ bytes/' +done diff --git a/vm/examples/multi_threaded_cache.rs b/vm/examples/multi_threaded_cache.rs new file mode 100644 index 000000000..7291c0e4d --- /dev/null +++ b/vm/examples/multi_threaded_cache.rs @@ -0,0 +1,92 @@ +use std::sync::Arc; +use std::thread; +use tempfile::TempDir; + +use cosmwasm_std::{coins, Empty}; +use cosmwasm_vm::testing::{mock_backend, mock_env, mock_info, MockApi, MockQuerier, MockStorage}; +use cosmwasm_vm::{ + call_execute, call_instantiate, capabilities_from_csv, Cache, CacheOptions, InstanceOptions, + Size, +}; + +// Instance +const DEFAULT_MEMORY_LIMIT: Size = Size::mebi(64); +const DEFAULT_GAS_LIMIT: u64 = 400_000 * 150; +const DEFAULT_INSTANCE_OPTIONS: InstanceOptions = InstanceOptions { + gas_limit: DEFAULT_GAS_LIMIT, +}; +// Cache +const MEMORY_CACHE_SIZE: Size = Size::mebi(200); + +static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm"); + +const STORE_CODE_THREADS: usize = 32; +const INSTANTIATION_THREADS: usize = 2048; +const THREADS: usize = STORE_CODE_THREADS + INSTANTIATION_THREADS; + +pub fn main() { + let options = CacheOptions::new( + TempDir::new().unwrap().into_path(), + capabilities_from_csv("iterator,staking"), + MEMORY_CACHE_SIZE, + DEFAULT_MEMORY_LIMIT, + ); + + let cache: Cache = unsafe { Cache::new(options).unwrap() }; + let cache = Arc::new(cache); + + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + let mut threads = Vec::with_capacity(THREADS); + for _ in 0..STORE_CODE_THREADS { + let cache = Arc::clone(&cache); + + threads.push(thread::spawn(move || { + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + println!("Done saving Wasm {checksum}"); + })); + } + for 
i in 0..INSTANTIATION_THREADS { + let cache = Arc::clone(&cache); + + threads.push(thread::spawn(move || { + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), DEFAULT_INSTANCE_OPTIONS) + .unwrap(); + println!("Done instantiating contract {i}"); + + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let contract_result = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap(); + assert!(contract_result.into_result().is_ok()); + + let info = mock_info(&verifier, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + let contract_result = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + assert!(contract_result.into_result().is_ok()); + })); + } + + threads.into_iter().for_each(|thread| { + thread + .join() + .expect("The threaded instantiation or execution failed !") + }); + + assert_eq!(cache.stats().misses, 0); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!( + cache.stats().hits_memory_cache, + INSTANTIATION_THREADS as u32 - 1 + ); + assert_eq!(cache.stats().hits_fs_cache, 1); +} diff --git a/vm/src/backend.rs b/vm/src/backend.rs new file mode 100644 index 000000000..f3760be06 --- /dev/null +++ b/vm/src/backend.rs @@ -0,0 +1,423 @@ +use std::fmt::Debug; +use std::ops::AddAssign; +use std::string::FromUtf8Error; +use thiserror::Error; + +use cosmwasm_std::{Binary, ContractResult, SystemResult}; +#[cfg(feature = "iterator")] +use cosmwasm_std::{Order, Record}; + +/// A structure that represents gas cost to be deducted from the remaining gas. +/// This is always needed when computations are performed outside of +/// Wasm execution, such as calling crypto APIs or calls into the blockchain. 
+/// +/// All values are measured in [CosmWasm gas]. +/// +/// [CosmWasm gas]: https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub struct GasInfo { + /// The gas cost of a computation that was executed already but not yet charged. + /// + /// This could be renamed to `internally_used` for consistency because it is used inside + /// of the `cosmwasm_vm`. + pub cost: u64, + /// Gas that was used and charged externally. This is needed to + /// adjust the VM's gas limit but does not affect the gas usage. + /// + /// Since this is measured in [CosmWasm gas], the caller may need + /// to convert from Cosmos SDK gas in cases where an SDK gas meter + /// is used. + /// + /// [CosmWasm gas]: https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md + pub externally_used: u64, +} + +impl GasInfo { + pub fn new(cost: u64, externally_used: u64) -> Self { + GasInfo { + cost, + externally_used, + } + } + + pub fn with_cost(amount: u64) -> Self { + GasInfo { + cost: amount, + externally_used: 0, + } + } + + pub fn with_externally_used(amount: u64) -> Self { + GasInfo { + cost: 0, + externally_used: amount, + } + } + + /// Creates a gas information with no cost for the caller and with zero externally used gas. + /// + /// Caution: when using this you need to make sure no gas was metered externally to keep the gas values in sync. + pub fn free() -> Self { + GasInfo { + cost: 0, + externally_used: 0, + } + } +} + +impl AddAssign for GasInfo { + fn add_assign(&mut self, other: Self) { + *self = GasInfo { + cost: self.cost + other.cost, + externally_used: self.externally_used + other.externally_used, + }; + } +} + +/// Holds all external dependencies of the contract. +/// Designed to allow easy dependency injection at runtime. +/// This cannot be copied or cloned since it would behave differently +/// for mock storages and a bridge storage in the VM. 
+pub struct Backend { + pub api: A, + pub storage: S, + pub querier: Q, +} + +/// Access to the VM's backend storage, i.e. the chain +pub trait Storage { + /// Returns Err on error. + /// Returns Ok(None) when key does not exist. + /// Returns Ok(Some(Vec)) when key exists. + /// + /// Note: Support for differentiating between a non-existent key and a key with empty value + /// is not great yet and might not be possible in all backends. But we're trying to get there. + fn get(&self, key: &[u8]) -> BackendResult>>; + + /// Allows iteration over a set of key/value pairs, either forwards or backwards. + /// Returns an iterator ID that is unique within the Storage instance. + /// + /// The bound `start` is inclusive and `end` is exclusive. + /// + /// If `start` is lexicographically greater than or equal to `end`, an empty range is described, mo matter of the order. + /// + /// This call must not change data in the storage, but creating and storing a new iterator can be a mutating operation on + /// the Storage implementation. + /// The implementation must ensure that iterator IDs are assigned in a deterministic manner as this is + /// environment data that is injected into the contract. + #[cfg(feature = "iterator")] + fn scan( + &mut self, + start: Option<&[u8]>, + end: Option<&[u8]>, + order: Order, + ) -> BackendResult; + + /// Returns the next element of the iterator with the given ID. + /// + /// If the ID is not found, a BackendError::IteratorDoesNotExist is returned. + /// + /// This call must not change data in the storage, but incrementing an iterator can be a mutating operation on + /// the Storage implementation. + #[cfg(feature = "iterator")] + fn next(&mut self, iterator_id: u32) -> BackendResult>; + + /// Returns the next value of the iterator with the given ID. + /// Since the iterator is incremented, the corresponding key will never be accessible. + /// + /// If the ID is not found, a BackendError::IteratorDoesNotExist is returned. 
+ /// + /// The default implementation uses [`Storage::next`] and discards the key. + /// More efficient implementations might be possible depending on the storage. + #[cfg(feature = "iterator")] + fn next_value(&mut self, iterator_id: u32) -> BackendResult>> { + let (result, gas_info) = self.next(iterator_id); + let result = result.map(|record| record.map(|(_, v)| v)); + (result, gas_info) + } + + /// Returns the next key of the iterator with the given ID. + /// Since the iterator is incremented, the corresponding value will never be accessible. + /// + /// If the ID is not found, a BackendError::IteratorDoesNotExist is returned. + /// + /// The default implementation uses [`Storage::next`] and discards the value. + /// More efficient implementations might be possible depending on the storage. + #[cfg(feature = "iterator")] + fn next_key(&mut self, iterator_id: u32) -> BackendResult>> { + let (result, gas_info) = self.next(iterator_id); + let result = result.map(|record| record.map(|(k, _)| k)); + (result, gas_info) + } + + fn set(&mut self, key: &[u8], value: &[u8]) -> BackendResult<()>; + + /// Removes a database entry at `key`. + /// + /// The current interface does not allow to differentiate between a key that existed + /// before and one that didn't exist. See https://github.com/CosmWasm/cosmwasm/issues/290 + fn remove(&mut self, key: &[u8]) -> BackendResult<()>; +} + +/// Callbacks to system functions defined outside of the wasm modules. +/// This is a trait to allow Mocks in the test code. +/// +/// Currently it just supports address conversion, we could add eg. crypto functions here. +/// These should all be pure (stateless) functions. If you need state, you probably want +/// to use the Querier. 
+pub trait BackendApi: Clone + Send { + fn addr_validate(&self, input: &str) -> BackendResult<()>; + fn addr_canonicalize(&self, human: &str) -> BackendResult>; + fn addr_humanize(&self, canonical: &[u8]) -> BackendResult; +} + +pub trait Querier { + /// This is all that must be implemented for the Querier. + /// This allows us to pass through binary queries from one level to another without + /// knowing the custom format, or we can decode it, with the knowledge of the allowed + /// types. + /// + /// The gas limit describes how much [CosmWasm gas] this particular query is allowed + /// to consume when measured separately from the rest of the contract. + /// The returned gas info (in BackendResult) can exceed the gas limit in cases + /// where the query could not be aborted exactly at the limit. + /// + /// [CosmWasm gas]: https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md + fn query_raw( + &self, + request: &[u8], + gas_limit: u64, + ) -> BackendResult>>; +} + +/// A result type for calling into the backend. Such a call can cause +/// non-negligible computational cost in both success and failure case and +/// must always have gas information attached. +pub type BackendResult = (core::result::Result, GasInfo); + +/// This aims to be similar to the `?` operator, but for a [`BackendResult`]. +/// +/// The first argument is a result. If it is Ok, return the value. +/// If it is Err, end the current function with a `return BackendResult::Err`. +/// +/// The second argument is the gas value that will be used in the error case. +/// It should be the sum of all gas used in the calling function. +macro_rules! unwrap_or_return_with_gas { + ($result: expr $(,)?, $gas_total: expr $(,)?) 
=> {{ + let result: core::result::Result<_, _> = $result; // just a type check + let gas: GasInfo = $gas_total; // just a type check + match result { + Ok(v) => v, + Err(e) => return (Err(e), gas), + } + }}; +} +pub(crate) use unwrap_or_return_with_gas; + +#[derive(Error, Debug, PartialEq, Eq)] +#[non_exhaustive] +pub enum BackendError { + #[error("Panic in FFI call")] + ForeignPanic {}, + #[error("Bad argument")] + BadArgument {}, + #[error("VM received invalid UTF-8 data from backend")] + InvalidUtf8 {}, + #[error("Iterator with ID {id} does not exist")] + IteratorDoesNotExist { id: u32 }, + #[error("Ran out of gas during call into backend")] + OutOfGas {}, + #[error("Unknown error during call into backend: {msg}")] + Unknown { msg: String }, + // This is the only error case of BackendError that is reported back to the contract. + #[error("User error during call into backend: {msg}")] + UserErr { msg: String }, +} + +impl BackendError { + pub fn foreign_panic() -> Self { + BackendError::ForeignPanic {} + } + + pub fn bad_argument() -> Self { + BackendError::BadArgument {} + } + + pub fn iterator_does_not_exist(iterator_id: u32) -> Self { + BackendError::IteratorDoesNotExist { id: iterator_id } + } + + pub fn out_of_gas() -> Self { + BackendError::OutOfGas {} + } + + pub fn unknown(msg: impl Into) -> Self { + BackendError::Unknown { msg: msg.into() } + } + + pub fn user_err(msg: impl Into) -> Self { + BackendError::UserErr { msg: msg.into() } + } +} + +impl From for BackendError { + fn from(_original: FromUtf8Error) -> Self { + BackendError::InvalidUtf8 {} + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn gas_info_with_cost_works() { + let gas_info = GasInfo::with_cost(21); + assert_eq!(gas_info.cost, 21); + assert_eq!(gas_info.externally_used, 0); + } + + #[test] + fn gas_info_with_externally_used_works() { + let gas_info = GasInfo::with_externally_used(65); + assert_eq!(gas_info.cost, 0); + assert_eq!(gas_info.externally_used, 65); + } + + 
#[test] + fn gas_info_free_works() { + let gas_info = GasInfo::free(); + assert_eq!(gas_info.cost, 0); + assert_eq!(gas_info.externally_used, 0); + } + + #[test] + fn gas_info_implements_add_assign() { + let mut a = GasInfo::new(0, 0); + a += GasInfo::new(0, 0); + assert_eq!( + a, + GasInfo { + cost: 0, + externally_used: 0 + } + ); + + let mut a = GasInfo::new(0, 0); + a += GasInfo::new(12, 0); + assert_eq!( + a, + GasInfo { + cost: 12, + externally_used: 0 + } + ); + + let mut a = GasInfo::new(10, 0); + a += GasInfo::new(3, 0); + assert_eq!( + a, + GasInfo { + cost: 13, + externally_used: 0 + } + ); + + let mut a = GasInfo::new(0, 0); + a += GasInfo::new(0, 7); + assert_eq!( + a, + GasInfo { + cost: 0, + externally_used: 7 + } + ); + + let mut a = GasInfo::new(0, 8); + a += GasInfo::new(0, 9); + assert_eq!( + a, + GasInfo { + cost: 0, + externally_used: 17 + } + ); + + let mut a = GasInfo::new(100, 200); + a += GasInfo::new(1, 2); + assert_eq!( + a, + GasInfo { + cost: 101, + externally_used: 202 + } + ); + } + + // constructors + + #[test] + fn backend_err_foreign_panic() { + let error = BackendError::foreign_panic(); + match error { + BackendError::ForeignPanic { .. } => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn backend_err_bad_argument() { + let error = BackendError::bad_argument(); + match error { + BackendError::BadArgument { .. } => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn iterator_does_not_exist_works() { + let error = BackendError::iterator_does_not_exist(15); + match error { + BackendError::IteratorDoesNotExist { id, .. } => assert_eq!(id, 15), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn backend_err_out_of_gas() { + let error = BackendError::out_of_gas(); + match error { + BackendError::OutOfGas { .. 
} => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn backend_err_unknown() { + let error = BackendError::unknown("broken"); + match error { + BackendError::Unknown { msg, .. } => assert_eq!(msg, "broken"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn backend_err_user_err() { + let error = BackendError::user_err("invalid input"); + match error { + BackendError::UserErr { msg, .. } => assert_eq!(msg, "invalid input"), + e => panic!("Unexpected error: {e:?}"), + } + } + + // conversions + + #[test] + fn convert_from_fromutf8error() { + let error: BackendError = String::from_utf8(vec![0x80]).unwrap_err().into(); + match error { + BackendError::InvalidUtf8 { .. } => {} + e => panic!("Unexpected error: {e:?}"), + } + } +} diff --git a/vm/src/cache.rs b/vm/src/cache.rs new file mode 100644 index 000000000..87c861a50 --- /dev/null +++ b/vm/src/cache.rs @@ -0,0 +1,1692 @@ +use std::collections::{BTreeSet, HashSet}; +use std::fs::{self, File, OpenOptions}; +use std::io::{Read, Write}; +use std::marker::PhantomData; +use std::path::{Path, PathBuf}; +use std::str::FromStr; +use std::sync::Mutex; +use wasmer::{Module, Store}; + +use cosmwasm_std::Checksum; + +use crate::backend::{Backend, BackendApi, Querier, Storage}; +use crate::capabilities::required_capabilities_from_module; +use crate::compatibility::check_wasm; +use crate::config::{CacheOptions, Config, WasmLimits}; +use crate::errors::{VmError, VmResult}; +use crate::filesystem::mkdir_p; +use crate::instance::{Instance, InstanceOptions}; +use crate::modules::{CachedModule, FileSystemCache, InMemoryCache, PinnedMemoryCache}; +use crate::parsed_wasm::ParsedWasm; +use crate::size::Size; +use crate::static_analysis::{Entrypoint, ExportInfo, REQUIRED_IBC_EXPORTS}; +use crate::wasm_backend::{compile, make_compiling_engine}; + +const STATE_DIR: &str = "state"; +// Things related to the state of the blockchain. 
+const WASM_DIR: &str = "wasm"; + +const CACHE_DIR: &str = "cache"; +// Cacheable things. +const MODULES_DIR: &str = "modules"; + +/// Statistics about the usage of a cache instance. Those values are node +/// specific and must not be used in a consensus critical context. +/// When a node is hit by a client for simulations or other queries, hits and misses +/// increase. Also a node restart will reset the values. +/// +/// All values should be increment using saturated addition to ensure the node does not +/// crash in case the stats exceed the integer limit. +#[derive(Debug, Default, Clone, Copy)] +pub struct Stats { + pub hits_pinned_memory_cache: u32, + pub hits_memory_cache: u32, + pub hits_fs_cache: u32, + pub misses: u32, +} + +#[derive(Debug, Clone, Copy)] +pub struct Metrics { + pub stats: Stats, + pub elements_pinned_memory_cache: usize, + pub elements_memory_cache: usize, + pub size_pinned_memory_cache: usize, + pub size_memory_cache: usize, +} + +#[derive(Debug, Clone)] +pub struct PerModuleMetrics { + /// Hits (i.e. loads) of the module from the cache + pub hits: u32, + /// Size the module takes up in memory + pub size: usize, +} + +#[derive(Debug, Clone)] +pub struct PinnedMetrics { + // It is *intentional* that this is only a vector + // We don't need a potentially expensive hashing algorithm here + // The checksums are sourced from a hashmap already, ensuring uniqueness of the checksums + pub per_module: Vec<(Checksum, PerModuleMetrics)>, +} + +pub struct CacheInner { + /// The directory in which the Wasm blobs are stored in the file system. + wasm_path: PathBuf, + pinned_memory_cache: PinnedMemoryCache, + memory_cache: InMemoryCache, + fs_cache: FileSystemCache, + stats: Stats, +} + +pub struct Cache { + /// Available capabilities are immutable for the lifetime of the cache, + /// i.e. any number of read-only references is allowed to access it concurrently. 
+ available_capabilities: HashSet, + inner: Mutex, + instance_memory_limit: Size, + // Those two don't store data but only fix type information + type_api: PhantomData, + type_storage: PhantomData, + type_querier: PhantomData, + /// To prevent concurrent access to `WasmerInstance::new` + instantiation_lock: Mutex<()>, + wasm_limits: WasmLimits, +} + +#[derive(PartialEq, Eq, Debug)] +#[non_exhaustive] +pub struct AnalysisReport { + /// `true` if and only if all [`REQUIRED_IBC_EXPORTS`] exist as exported functions. + /// This does not guarantee they are functional or even have the correct signatures. + pub has_ibc_entry_points: bool, + /// A set of all entrypoints that are exported by the contract. + pub entrypoints: BTreeSet, + /// The set of capabilities the contract requires. + pub required_capabilities: BTreeSet, + /// The contract migrate version exported set by the contract developer + pub contract_migrate_version: Option, +} + +impl Cache +where + A: BackendApi + 'static, // 'static is needed by `impl<…> Instance` + S: Storage + 'static, // 'static is needed by `impl<…> Instance` + Q: Querier + 'static, // 'static is needed by `impl<…> Instance` +{ + /// Creates a new cache that stores data in `base_dir`. + /// + /// # Safety + /// + /// This function is marked unsafe due to `FileSystemCache::new`, which implicitly + /// assumes the disk contents are correct, and there's no way to ensure the artifacts + /// stored in the cache haven't been corrupted or tampered with. + pub unsafe fn new(options: CacheOptions) -> VmResult { + Self::new_with_config(Config { + wasm_limits: WasmLimits::default(), + cache: options, + }) + } + + /// Creates a new cache with the given configuration. + /// This allows configuring lots of limits and sizes. 
+ /// + /// # Safety + /// + /// This function is marked unsafe due to `FileSystemCache::new`, which implicitly + /// assumes the disk contents are correct, and there's no way to ensure the artifacts + /// stored in the cache haven't been corrupted or tampered with. + pub unsafe fn new_with_config(config: Config) -> VmResult { + let Config { + cache: + CacheOptions { + base_dir, + available_capabilities, + memory_cache_size_bytes, + instance_memory_limit_bytes, + }, + wasm_limits, + } = config; + + let state_path = base_dir.join(STATE_DIR); + let cache_path = base_dir.join(CACHE_DIR); + + let wasm_path = state_path.join(WASM_DIR); + + // Ensure all the needed directories exist on disk. + mkdir_p(&state_path).map_err(|_e| VmError::cache_err("Error creating state directory"))?; + mkdir_p(&cache_path).map_err(|_e| VmError::cache_err("Error creating cache directory"))?; + mkdir_p(&wasm_path).map_err(|_e| VmError::cache_err("Error creating wasm directory"))?; + + let fs_cache = FileSystemCache::new(cache_path.join(MODULES_DIR), false) + .map_err(|e| VmError::cache_err(format!("Error file system cache: {e}")))?; + Ok(Cache { + available_capabilities, + inner: Mutex::new(CacheInner { + wasm_path, + pinned_memory_cache: PinnedMemoryCache::new(), + memory_cache: InMemoryCache::new(memory_cache_size_bytes), + fs_cache, + stats: Stats::default(), + }), + instance_memory_limit: instance_memory_limit_bytes, + type_storage: PhantomData::, + type_api: PhantomData::, + type_querier: PhantomData::, + instantiation_lock: Mutex::new(()), + wasm_limits, + }) + } + + /// If `unchecked` is true, the filesystem cache will use the `*_unchecked` wasmer functions for + /// loading modules from disk. 
+ pub fn set_module_unchecked(&mut self, unchecked: bool) { + self.inner + .lock() + .unwrap() + .fs_cache + .set_module_unchecked(unchecked); + } + + pub fn stats(&self) -> Stats { + self.inner.lock().unwrap().stats + } + + pub fn pinned_metrics(&self) -> PinnedMetrics { + let cache = self.inner.lock().unwrap(); + let per_module = cache + .pinned_memory_cache + .iter() + .map(|(checksum, module)| { + let metrics = PerModuleMetrics { + hits: module.hits, + size: module.module.size_estimate, + }; + + (*checksum, metrics) + }) + .collect(); + + PinnedMetrics { per_module } + } + + pub fn metrics(&self) -> Metrics { + let cache = self.inner.lock().unwrap(); + Metrics { + stats: cache.stats, + elements_pinned_memory_cache: cache.pinned_memory_cache.len(), + elements_memory_cache: cache.memory_cache.len(), + size_pinned_memory_cache: cache.pinned_memory_cache.size(), + size_memory_cache: cache.memory_cache.size(), + } + } + + /// Takes a Wasm bytecode and stores it to the cache. + /// + /// This performs static checks, compiles the bytescode to a module and + /// stores the Wasm file on disk. + /// + /// This does the same as [`Cache::save_wasm_unchecked`] plus the static checks. + /// When a Wasm blob is stored the first time, use this function. + #[deprecated = "Use `store_code(wasm, true, true)` instead"] + pub fn save_wasm(&self, wasm: &[u8]) -> VmResult { + self.store_code(wasm, true, true) + } + + /// Takes a Wasm bytecode and stores it to the cache. + /// + /// This performs static checks if `checked` is `true`, + /// compiles the bytescode to a module and + /// stores the Wasm file on disk if `persist` is `true`. + /// + /// Only set `checked = false` when a Wasm blob is stored which was previously checked + /// (e.g. as part of state sync). 
+ pub fn store_code(&self, wasm: &[u8], checked: bool, persist: bool) -> VmResult { + if checked { + check_wasm( + wasm, + &self.available_capabilities, + &self.wasm_limits, + crate::internals::Logger::Off, + )?; + } + + let module = compile_module(wasm)?; + + if persist { + self.save_to_disk(wasm, &module) + } else { + Ok(Checksum::generate(wasm)) + } + } + + /// Takes a Wasm bytecode and stores it to the cache. + /// + /// This compiles the bytescode to a module and + /// stores the Wasm file on disk. + /// + /// This does the same as [`Cache::save_wasm`] but without the static checks. + /// When a Wasm blob is stored which was previously checked (e.g. as part of state sync), + /// use this function. + #[deprecated = "Use `store_code(wasm, false, true)` instead"] + pub fn save_wasm_unchecked(&self, wasm: &[u8]) -> VmResult { + self.store_code(wasm, false, true) + } + + fn save_to_disk(&self, wasm: &[u8], module: &Module) -> VmResult { + let mut cache = self.inner.lock().unwrap(); + let checksum = save_wasm_to_disk(&cache.wasm_path, wasm)?; + cache.fs_cache.store(&checksum, module)?; + Ok(checksum) + } + + /// Removes the Wasm blob for the given checksum from disk and its + /// compiled module from the file system cache. + /// + /// The existence of the original code is required since the caller (wasmd) + /// has to keep track of which entries we have here. + pub fn remove_wasm(&self, checksum: &Checksum) -> VmResult<()> { + let mut cache = self.inner.lock().unwrap(); + + // Remove compiled moduled from disk (if it exists). + // Here we could also delete from memory caches but this is not really + // necessary as they are pushed out from the LRU over time or disappear + // when the node process restarts. + cache.fs_cache.remove(checksum)?; + + let path = &cache.wasm_path; + remove_wasm_from_disk(path, checksum)?; + Ok(()) + } + + /// Retrieves a Wasm blob that was previously stored via [`Cache::store_code`]. 
+ /// When the cache is instantiated with the same base dir, this finds Wasm files on disc across multiple cache instances (i.e. node restarts). + /// This function is public to allow a checksum to Wasm lookup in the blockchain. + /// + /// If the given ID is not found or the content does not match the hash (=ID), an error is returned. + pub fn load_wasm(&self, checksum: &Checksum) -> VmResult> { + self.load_wasm_with_path(&self.inner.lock().unwrap().wasm_path, checksum) + } + + fn load_wasm_with_path(&self, wasm_path: &Path, checksum: &Checksum) -> VmResult> { + let code = load_wasm_from_disk(wasm_path, checksum)?; + // verify hash matches (integrity check) + if Checksum::generate(&code) != *checksum { + Err(VmError::integrity_err()) + } else { + Ok(code) + } + } + + /// Performs static anlyzation on this Wasm without compiling or instantiating it. + /// + /// Once the contract was stored via [`Cache::store_code`], this can be called at any point in time. + /// It does not depend on any caching of the contract. + pub fn analyze(&self, checksum: &Checksum) -> VmResult { + // Here we could use a streaming deserializer to slightly improve performance. However, this way it is DRYer. + let wasm = self.load_wasm(checksum)?; + let module = ParsedWasm::parse(&wasm)?; + let exports = module.exported_function_names(None); + + let entrypoints = exports + .iter() + .filter_map(|export| Entrypoint::from_str(export).ok()) + .collect(); + + Ok(AnalysisReport { + has_ibc_entry_points: REQUIRED_IBC_EXPORTS + .iter() + .all(|required| exports.contains(required.as_ref())), + entrypoints, + required_capabilities: required_capabilities_from_module(&module) + .into_iter() + .collect(), + contract_migrate_version: module.contract_migrate_version, + }) + } + + /// Pins a Module that was previously stored via [`Cache::store_code`]. + /// + /// The module is lookup first in the file system cache. 
If not found, + /// the code is loaded from the file system, compiled, and stored into the + /// pinned cache. + /// + /// If the given contract for the given checksum is not found, or the content + /// does not match the checksum, an error is returned. + pub fn pin(&self, checksum: &Checksum) -> VmResult<()> { + let mut cache = self.inner.lock().unwrap(); + if cache.pinned_memory_cache.has(checksum) { + return Ok(()); + } + + // We don't load from the memory cache because we had to create new store here and + // serialize/deserialize the artifact to get a full clone. Could be done but adds some code + // for a not-so-relevant use case. + + // Try to get module from file system cache + if let Some(cached_module) = cache + .fs_cache + .load(checksum, Some(self.instance_memory_limit))? + { + cache.stats.hits_fs_cache = cache.stats.hits_fs_cache.saturating_add(1); + return cache.pinned_memory_cache.store(checksum, cached_module); + } + + // Re-compile from original Wasm bytecode + let wasm = self.load_wasm_with_path(&cache.wasm_path, checksum)?; + cache.stats.misses = cache.stats.misses.saturating_add(1); + { + // Module will run with a different engine, so we can set memory limit to None + let compiling_engine = make_compiling_engine(None); + // This module cannot be executed directly as it was not created with the runtime engine + let module = compile(&compiling_engine, &wasm)?; + cache.fs_cache.store(checksum, &module)?; + } + + // This time we'll hit the file-system cache. + let Some(cached_module) = cache + .fs_cache + .load(checksum, Some(self.instance_memory_limit))? + else { + return Err(VmError::generic_err( + "Can't load module from file system cache after storing it to file system cache (pin)", + )); + }; + + cache.pinned_memory_cache.store(checksum, cached_module) + } + + /// Unpins a Module, i.e. removes it from the pinned memory cache. 
+ /// + /// Not found IDs are silently ignored, and no integrity check (checksum validation) is done + /// on the removed value. + pub fn unpin(&self, checksum: &Checksum) -> VmResult<()> { + self.inner + .lock() + .unwrap() + .pinned_memory_cache + .remove(checksum) + } + + /// Returns an Instance tied to a previously saved Wasm. + /// + /// It takes a module from cache or Wasm code and instantiates it. + pub fn get_instance( + &self, + checksum: &Checksum, + backend: Backend, + options: InstanceOptions, + ) -> VmResult> { + let (module, store) = self.get_module(checksum)?; + let instance = Instance::from_module( + store, + &module, + backend, + options.gas_limit, + None, + Some(&self.instantiation_lock), + )?; + Ok(instance) + } + + /// Returns a module tied to a previously saved Wasm. + /// Depending on availability, this is either generated from a memory cache, file system cache or Wasm code. + /// This is part of `get_instance` but pulled out to reduce the locking time. + fn get_module(&self, checksum: &Checksum) -> VmResult<(Module, Store)> { + let mut cache = self.inner.lock().unwrap(); + // Try to get module from the pinned memory cache + if let Some(element) = cache.pinned_memory_cache.load(checksum)? { + cache.stats.hits_pinned_memory_cache = + cache.stats.hits_pinned_memory_cache.saturating_add(1); + let CachedModule { + module, + engine, + size_estimate: _, + } = element; + let store = Store::new(engine); + return Ok((module, store)); + } + + // Get module from memory cache + if let Some(element) = cache.memory_cache.load(checksum)? { + cache.stats.hits_memory_cache = cache.stats.hits_memory_cache.saturating_add(1); + let CachedModule { + module, + engine, + size_estimate: _, + } = element; + let store = Store::new(engine); + return Ok((module, store)); + } + + // Get module from file system cache + if let Some(cached_module) = cache + .fs_cache + .load(checksum, Some(self.instance_memory_limit))? 
+ { + cache.stats.hits_fs_cache = cache.stats.hits_fs_cache.saturating_add(1); + + cache.memory_cache.store(checksum, cached_module.clone())?; + + let CachedModule { + module, + engine, + size_estimate: _, + } = cached_module; + let store = Store::new(engine); + return Ok((module, store)); + } + + // Re-compile module from wasm + // + // This is needed for chains that upgrade their node software in a way that changes the module + // serialization format. If you do not replay all transactions, previous calls of `store_code` + // stored the old module format. + let wasm = self.load_wasm_with_path(&cache.wasm_path, checksum)?; + cache.stats.misses = cache.stats.misses.saturating_add(1); + { + // Module will run with a different engine, so we can set memory limit to None + let compiling_engine = make_compiling_engine(None); + // This module cannot be executed directly as it was not created with the runtime engine + let module = compile(&compiling_engine, &wasm)?; + cache.fs_cache.store(checksum, &module)?; + } + + // This time we'll hit the file-system cache. + let Some(cached_module) = cache + .fs_cache + .load(checksum, Some(self.instance_memory_limit))? 
+ else { + return Err(VmError::generic_err( + "Can't load module from file system cache after storing it to file system cache (get_module)", + )); + }; + cache.memory_cache.store(checksum, cached_module.clone())?; + + let CachedModule { + module, + engine, + size_estimate: _, + } = cached_module; + let store = Store::new(engine); + Ok((module, store)) + } +} + +fn compile_module(wasm: &[u8]) -> Result { + let compiling_engine = make_compiling_engine(None); + let module = compile(&compiling_engine, wasm)?; + Ok(module) +} + +unsafe impl Sync for Cache +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ +} + +unsafe impl Send for Cache +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ +} + +/// save stores the wasm code in the given directory and returns an ID for lookup. +/// It will create the directory if it doesn't exist. +/// Saving the same byte code multiple times is allowed. +fn save_wasm_to_disk(dir: impl Into, wasm: &[u8]) -> VmResult { + // calculate filename + let checksum = Checksum::generate(wasm); + let filename = checksum.to_hex(); + let filepath = dir.into().join(filename).with_extension("wasm"); + + // write data to file + // Since the same filename (a collision resistant hash) cannot be generated from two different byte codes + // (even if a malicious actor tried), it is safe to override. + let mut file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(filepath) + .map_err(|e| VmError::cache_err(format!("Error opening Wasm file for writing: {e}")))?; + file.write_all(wasm) + .map_err(|e| VmError::cache_err(format!("Error writing Wasm file: {e}")))?; + + Ok(checksum) +} + +fn load_wasm_from_disk(dir: impl Into, checksum: &Checksum) -> VmResult> { + // this requires the directory and file to exist + // The files previously had no extension, so to allow for a smooth transition, + // we also try to load the file without the wasm extension. 
+ let path = dir.into().join(checksum.to_hex()); + let mut file = File::open(path.with_extension("wasm")) + .or_else(|_| File::open(path)) + .map_err(|_e| VmError::cache_err("Error opening Wasm file for reading"))?; + + let mut wasm = Vec::::new(); + file.read_to_end(&mut wasm) + .map_err(|_e| VmError::cache_err("Error reading Wasm file"))?; + Ok(wasm) +} + +/// Removes the Wasm blob for the given checksum from disk. +/// +/// In contrast to the file system cache, the existence of the original +/// code is required. So a non-existent file leads to an error as it +/// indicates a bug. +fn remove_wasm_from_disk(dir: impl Into, checksum: &Checksum) -> VmResult<()> { + // the files previously had no extension, so to allow for a smooth transition, we delete both + let path = dir.into().join(checksum.to_hex()); + let wasm_path = path.with_extension("wasm"); + + let path_exists = path.exists(); + let wasm_path_exists = wasm_path.exists(); + if !path_exists && !wasm_path_exists { + return Err(VmError::cache_err("Wasm file does not exist")); + } + + if path_exists { + fs::remove_file(path) + .map_err(|_e| VmError::cache_err("Error removing Wasm file from disk"))?; + } + + if wasm_path_exists { + fs::remove_file(wasm_path) + .map_err(|_e| VmError::cache_err("Error removing Wasm file from disk"))?; + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::calls::{call_execute, call_instantiate}; + use crate::capabilities::capabilities_from_csv; + use crate::testing::{mock_backend, mock_env, mock_info, MockApi, MockQuerier, MockStorage}; + use cosmwasm_std::{coins, Empty}; + use std::borrow::Cow; + use std::fs::{create_dir_all, remove_dir_all}; + use tempfile::TempDir; + use wasm_encoder::ComponentSection; + + const TESTING_GAS_LIMIT: u64 = 500_000_000; // ~0.5ms + const TESTING_MEMORY_LIMIT: Size = Size::mebi(16); + const TESTING_OPTIONS: InstanceOptions = InstanceOptions { + gas_limit: TESTING_GAS_LIMIT, + }; + const TESTING_MEMORY_CACHE_SIZE: Size = 
Size::mebi(200); + + static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm"); + static IBC_CONTRACT: &[u8] = include_bytes!("../testdata/ibc_reflect.wasm"); + static EMPTY_CONTRACT: &[u8] = include_bytes!("../testdata/empty.wasm"); + // Invalid because it doesn't contain required memory and exports + static INVALID_CONTRACT_WAT: &str = r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add)) + "#; + + fn default_capabilities() -> HashSet { + capabilities_from_csv("iterator,staking") + } + + fn make_testing_options() -> CacheOptions { + CacheOptions { + base_dir: TempDir::new().unwrap().into_path(), + available_capabilities: default_capabilities(), + memory_cache_size_bytes: TESTING_MEMORY_CACHE_SIZE, + instance_memory_limit_bytes: TESTING_MEMORY_LIMIT, + } + } + + fn make_stargate_testing_options() -> CacheOptions { + let mut capabilities = default_capabilities(); + capabilities.insert("stargate".into()); + CacheOptions { + base_dir: TempDir::new().unwrap().into_path(), + available_capabilities: capabilities, + memory_cache_size_bytes: TESTING_MEMORY_CACHE_SIZE, + instance_memory_limit_bytes: TESTING_MEMORY_LIMIT, + } + } + + /// Takes an instance and executes it + fn test_hackatom_instance_execution(instance: &mut Instance) + where + S: Storage + 'static, + Q: Querier + 'static, + { + // instantiate + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let response = + call_instantiate::<_, _, _, Empty>(instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 0); + + // execute + let info = mock_info(&verifier, &coins(15, "earth")); + let msg = 
br#"{"release":{}}"#; + let response = call_execute::<_, _, _, Empty>(instance, &mock_env(), &info, msg) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 1); + } + + #[test] + fn new_base_dir_will_be_created() { + let my_base_dir = TempDir::new() + .unwrap() + .into_path() + .join("non-existent-sub-dir"); + let options = CacheOptions { + base_dir: my_base_dir.clone(), + ..make_testing_options() + }; + assert!(!my_base_dir.is_dir()); + let _cache = unsafe { Cache::::new(options).unwrap() }; + assert!(my_base_dir.is_dir()); + } + + #[test] + fn store_code_checked_works() { + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + cache.store_code(CONTRACT, true, true).unwrap(); + } + + #[test] + fn store_code_without_persist_works() { + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, false).unwrap(); + + assert!( + cache.load_wasm(&checksum).is_err(), + "wasm file should not be saved to disk" + ); + } + + #[test] + // This property is required when the same bytecode is uploaded multiple times + fn store_code_allows_saving_multiple_times() { + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + cache.store_code(CONTRACT, true, true).unwrap(); + cache.store_code(CONTRACT, true, true).unwrap(); + } + + #[test] + fn store_code_checked_rejects_invalid_contract() { + let wasm = wat::parse_str(INVALID_CONTRACT_WAT).unwrap(); + + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + let save_result = cache.store_code(&wasm, true, true); + match save_result.unwrap_err() { + VmError::StaticValidationErr { msg, .. } => { + assert_eq!(msg, "Wasm contract must contain exactly one memory") + } + e => panic!("Unexpected error {e:?}"), + } + } + + #[test] + fn store_code_fills_file_system_but_not_memory_cache() { + // Who knows if and when the uploaded contract will be executed. 
Don't pollute + // memory cache before the init call. + + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + let backend = mock_backend(&[]); + let _ = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + } + + #[test] + fn store_code_unchecked_works() { + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + cache.store_code(CONTRACT, false, true).unwrap(); + } + + #[test] + fn store_code_unchecked_accepts_invalid_contract() { + let wasm = wat::parse_str(INVALID_CONTRACT_WAT).unwrap(); + + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + cache.store_code(&wasm, false, true).unwrap(); + } + + #[test] + fn load_wasm_works() { + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + let restored = cache.load_wasm(&checksum).unwrap(); + assert_eq!(restored, CONTRACT); + } + + #[test] + fn load_wasm_works_across_multiple_cache_instances() { + let tmp_dir = TempDir::new().unwrap(); + let id: Checksum; + + { + let options1 = CacheOptions { + base_dir: tmp_dir.path().to_path_buf(), + available_capabilities: default_capabilities(), + memory_cache_size_bytes: TESTING_MEMORY_CACHE_SIZE, + instance_memory_limit_bytes: TESTING_MEMORY_LIMIT, + }; + let cache1: Cache = + unsafe { Cache::new(options1).unwrap() }; + id = cache1.store_code(CONTRACT, true, true).unwrap(); + } + + { + let options2 = CacheOptions { + base_dir: tmp_dir.path().to_path_buf(), + available_capabilities: default_capabilities(), + memory_cache_size_bytes: TESTING_MEMORY_CACHE_SIZE, + instance_memory_limit_bytes: TESTING_MEMORY_LIMIT, + }; + let cache2: Cache = + unsafe 
{ Cache::new(options2).unwrap() }; + let restored = cache2.load_wasm(&id).unwrap(); + assert_eq!(restored, CONTRACT); + } + } + + #[test] + fn load_wasm_errors_for_non_existent_id() { + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = Checksum::from([ + 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, + 5, 5, 5, + ]); + + match cache.load_wasm(&checksum).unwrap_err() { + VmError::CacheErr { msg, .. } => { + assert_eq!(msg, "Error opening Wasm file for reading") + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn load_wasm_errors_for_corrupted_wasm() { + let tmp_dir = TempDir::new().unwrap(); + let options = CacheOptions { + base_dir: tmp_dir.path().to_path_buf(), + available_capabilities: default_capabilities(), + memory_cache_size_bytes: TESTING_MEMORY_CACHE_SIZE, + instance_memory_limit_bytes: TESTING_MEMORY_LIMIT, + }; + let cache: Cache = + unsafe { Cache::new(options).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // Corrupt cache file + let filepath = tmp_dir + .path() + .join(STATE_DIR) + .join(WASM_DIR) + .join(checksum.to_hex()) + .with_extension("wasm"); + let mut file = OpenOptions::new().write(true).open(filepath).unwrap(); + file.write_all(b"broken data").unwrap(); + + let res = cache.load_wasm(&checksum); + match res { + Err(VmError::IntegrityErr { .. }) => {} + Err(e) => panic!("Unexpected error: {e:?}"), + Ok(_) => panic!("This must not succeed"), + } + } + + #[test] + fn remove_wasm_works() { + let cache: Cache = + unsafe { Cache::new(make_testing_options()).unwrap() }; + + // Store + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // Exists + cache.load_wasm(&checksum).unwrap(); + + // Remove + cache.remove_wasm(&checksum).unwrap(); + + // Does not exist anymore + match cache.load_wasm(&checksum).unwrap_err() { + VmError::CacheErr { msg, .. 
} => { + assert_eq!(msg, "Error opening Wasm file for reading") + } + e => panic!("Unexpected error: {e:?}"), + } + + // Removing again fails + match cache.remove_wasm(&checksum).unwrap_err() { + VmError::CacheErr { msg, .. } => { + assert_eq!(msg, "Wasm file does not exist") + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn get_instance_finds_cached_module() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + let backend = mock_backend(&[]); + let _instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + } + + #[test] + fn get_instance_finds_cached_modules_and_stores_to_memory() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + let backend1 = mock_backend(&[]); + let backend2 = mock_backend(&[]); + let backend3 = mock_backend(&[]); + let backend4 = mock_backend(&[]); + let backend5 = mock_backend(&[]); + + // from file system + let _instance1 = cache + .get_instance(&checksum, backend1, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // from memory + let _instance2 = cache + .get_instance(&checksum, backend2, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // from memory again + let _instance3 = cache + .get_instance(&checksum, backend3, TESTING_OPTIONS) + .unwrap(); + 
assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 2); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // pinning hits the file system cache + cache.pin(&checksum).unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 2); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + + // from pinned memory cache + let _instance4 = cache + .get_instance(&checksum, backend4, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 1); + assert_eq!(cache.stats().hits_memory_cache, 2); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + + // from pinned memory cache again + let _instance5 = cache + .get_instance(&checksum, backend5, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 2); + assert_eq!(cache.stats().hits_memory_cache, 2); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + } + + #[test] + fn get_instance_recompiles_module() { + let options = make_testing_options(); + let cache = unsafe { Cache::new(options.clone()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // Remove compiled module from disk + remove_dir_all(options.base_dir.join(CACHE_DIR).join(MODULES_DIR)).unwrap(); + + // The first get_instance recompiles the Wasm (miss) + let backend = mock_backend(&[]); + let _instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 0); + assert_eq!(cache.stats().misses, 1); + + // The second get_instance finds the module in cache (hit) + let backend = mock_backend(&[]); + let _instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + 
.unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 0); + assert_eq!(cache.stats().misses, 1); + } + + #[test] + fn call_instantiate_on_cached_contract() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // from file system + { + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // instantiate + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let res = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap(); + let msgs = res.unwrap().messages; + assert_eq!(msgs.len(), 0); + } + + // from memory + { + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // instantiate + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let res = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap(); + let msgs = 
res.unwrap().messages; + assert_eq!(msgs.len(), 0); + } + + // from pinned memory + { + cache.pin(&checksum).unwrap(); + + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 1); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + + // instantiate + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let res = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap(); + let msgs = res.unwrap().messages; + assert_eq!(msgs.len(), 0); + } + } + + #[test] + fn call_execute_on_cached_contract() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // from file system + { + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // instantiate + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let response = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 0); + + // execute + let info = mock_info(&verifier, &coins(15, "earth")); + let msg 
= br#"{"release":{}}"#; + let response = call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 1); + } + + // from memory + { + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // instantiate + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let response = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 0); + + // execute + let info = mock_info(&verifier, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + let response = call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 1); + } + + // from pinned memory + { + cache.pin(&checksum).unwrap(); + + let mut instance = cache + .get_instance(&checksum, mock_backend(&[]), TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 1); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + + // instantiate + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let response = call_instantiate::<_, _, _, Empty>( + &mut instance, + 
&mock_env(), + &info, + msg.as_bytes(), + ) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 0); + + // execute + let info = mock_info(&verifier, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + let response = call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg) + .unwrap() + .unwrap(); + assert_eq!(response.messages.len(), 1); + } + } + + #[test] + fn call_execute_on_recompiled_contract() { + let options = make_testing_options(); + let cache = unsafe { Cache::new(options.clone()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // Remove compiled module from disk + remove_dir_all(options.base_dir.join(CACHE_DIR).join(MODULES_DIR)).unwrap(); + + // Recompiles the Wasm (miss on all caches) + let backend = mock_backend(&[]); + let mut instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 0); + assert_eq!(cache.stats().misses, 1); + test_hackatom_instance_execution(&mut instance); + } + + #[test] + fn use_multiple_cached_instances_of_same_contract() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // these differentiate the two instances of the same contract + let backend1 = mock_backend(&[]); + let backend2 = mock_backend(&[]); + + // instantiate instance 1 + let mut instance = cache + .get_instance(&checksum, backend1, TESTING_OPTIONS) + .unwrap(); + let info = mock_info("owner1", &coins(1000, "earth")); + let sue = instance.api().addr_make("sue"); + let mary = instance.api().addr_make("mary"); + let msg = format!(r#"{{"verifier": "{sue}", "beneficiary": "{mary}"}}"#); + let res = + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap(); + let msgs = res.unwrap().messages; + 
assert_eq!(msgs.len(), 0); + let backend1 = instance.recycle().unwrap(); + + // instantiate instance 2 + let mut instance = cache + .get_instance(&checksum, backend2, TESTING_OPTIONS) + .unwrap(); + let info = mock_info("owner2", &coins(500, "earth")); + let bob = instance.api().addr_make("bob"); + let john = instance.api().addr_make("john"); + let msg = format!(r#"{{"verifier": "{bob}", "beneficiary": "{john}"}}"#); + let res = + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap(); + let msgs = res.unwrap().messages; + assert_eq!(msgs.len(), 0); + let backend2 = instance.recycle().unwrap(); + + // run contract 2 - just sanity check - results validate in contract unit tests + let mut instance = cache + .get_instance(&checksum, backend2, TESTING_OPTIONS) + .unwrap(); + let info = mock_info(&bob, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + let res = call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + let msgs = res.unwrap().messages; + assert_eq!(1, msgs.len()); + + // run contract 1 - just sanity check - results validate in contract unit tests + let mut instance = cache + .get_instance(&checksum, backend1, TESTING_OPTIONS) + .unwrap(); + let info = mock_info(&sue, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + let res = call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap(); + let msgs = res.unwrap().messages; + assert_eq!(1, msgs.len()); + } + + #[test] + fn resets_gas_when_reusing_instance() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + let backend1 = mock_backend(&[]); + let backend2 = mock_backend(&[]); + + // Init from module cache + let mut instance1 = cache + .get_instance(&checksum, backend1, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + 
assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + let original_gas = instance1.get_gas_left(); + + // Consume some gas + let info = mock_info("owner1", &coins(1000, "earth")); + let sue = instance1.api().addr_make("sue"); + let mary = instance1.api().addr_make("mary"); + let msg = format!(r#"{{"verifier": "{sue}", "beneficiary": "{mary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance1, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + assert!(instance1.get_gas_left() < original_gas); + + // Init from memory cache + let mut instance2 = cache + .get_instance(&checksum, backend2, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + assert_eq!(instance2.get_gas_left(), TESTING_GAS_LIMIT); + } + + #[test] + fn recovers_from_out_of_gas() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + let backend1 = mock_backend(&[]); + let backend2 = mock_backend(&[]); + + // Init from module cache + let options = InstanceOptions { gas_limit: 10 }; + let mut instance1 = cache.get_instance(&checksum, backend1, options).unwrap(); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + + // Consume some gas. This fails + let info1 = mock_info("owner1", &coins(1000, "earth")); + let sue = instance1.api().addr_make("sue"); + let mary = instance1.api().addr_make("mary"); + let msg1 = format!(r#"{{"verifier": "{sue}", "beneficiary": "{mary}"}}"#); + + match call_instantiate::<_, _, _, Empty>( + &mut instance1, + &mock_env(), + &info1, + msg1.as_bytes(), + ) + .unwrap_err() + { + VmError::GasDepletion { .. 
} => (), // all good, continue + e => panic!("unexpected error, {e:?}"), + } + assert_eq!(instance1.get_gas_left(), 0); + + // Init from memory cache + let options = InstanceOptions { + gas_limit: TESTING_GAS_LIMIT, + }; + let mut instance2 = cache.get_instance(&checksum, backend2, options).unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + assert_eq!(instance2.get_gas_left(), TESTING_GAS_LIMIT); + + // Now it works + let info2 = mock_info("owner2", &coins(500, "earth")); + let bob = instance2.api().addr_make("bob"); + let john = instance2.api().addr_make("john"); + let msg2 = format!(r#"{{"verifier": "{bob}", "beneficiary": "{john}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance2, &mock_env(), &info2, msg2.as_bytes()) + .unwrap() + .unwrap(); + } + + #[test] + fn save_wasm_to_disk_works_for_same_data_multiple_times() { + let tmp_dir = TempDir::new().unwrap(); + let path = tmp_dir.path(); + let code = vec![12u8; 17]; + + save_wasm_to_disk(path, &code).unwrap(); + save_wasm_to_disk(path, &code).unwrap(); + } + + #[test] + fn save_wasm_to_disk_fails_on_non_existent_dir() { + let tmp_dir = TempDir::new().unwrap(); + let path = tmp_dir.path().join("something"); + let code = vec![12u8; 17]; + let res = save_wasm_to_disk(path.to_str().unwrap(), &code); + assert!(res.is_err()); + } + + #[test] + fn load_wasm_from_disk_works() { + let tmp_dir = TempDir::new().unwrap(); + let path = tmp_dir.path(); + let code = vec![12u8; 17]; + let checksum = save_wasm_to_disk(path, &code).unwrap(); + + let loaded = load_wasm_from_disk(path, &checksum).unwrap(); + assert_eq!(code, loaded); + } + + #[test] + fn load_wasm_from_disk_works_in_subfolder() { + let tmp_dir = TempDir::new().unwrap(); + let path = tmp_dir.path().join("something"); + create_dir_all(&path).unwrap(); + let code = vec![12u8; 17]; + let checksum = 
save_wasm_to_disk(&path, &code).unwrap(); + + let loaded = load_wasm_from_disk(&path, &checksum).unwrap(); + assert_eq!(code, loaded); + } + + #[test] + fn remove_wasm_from_disk_works() { + let tmp_dir = TempDir::new().unwrap(); + let path = tmp_dir.path(); + let code = vec![12u8; 17]; + let checksum = save_wasm_to_disk(path, &code).unwrap(); + + remove_wasm_from_disk(path, &checksum).unwrap(); + + // removing again fails + + match remove_wasm_from_disk(path, &checksum).unwrap_err() { + VmError::CacheErr { msg, .. } => assert_eq!(msg, "Wasm file does not exist"), + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn analyze_works() { + use Entrypoint as E; + let cache: Cache = + unsafe { Cache::new(make_stargate_testing_options()).unwrap() }; + + let checksum1 = cache.store_code(CONTRACT, true, true).unwrap(); + let report1 = cache.analyze(&checksum1).unwrap(); + assert_eq!( + report1, + AnalysisReport { + has_ibc_entry_points: false, + entrypoints: BTreeSet::from([ + E::Instantiate, + E::Migrate, + E::Sudo, + E::Execute, + E::Query + ]), + required_capabilities: BTreeSet::new(), + contract_migrate_version: Some(42), + } + ); + + let checksum2 = cache.store_code(IBC_CONTRACT, true, true).unwrap(); + let report2 = cache.analyze(&checksum2).unwrap(); + let mut ibc_contract_entrypoints = + BTreeSet::from([E::Instantiate, E::Migrate, E::Reply, E::Query]); + ibc_contract_entrypoints.extend(REQUIRED_IBC_EXPORTS); + assert_eq!( + report2, + AnalysisReport { + has_ibc_entry_points: true, + entrypoints: ibc_contract_entrypoints, + required_capabilities: BTreeSet::from_iter([ + "iterator".to_string(), + "stargate".to_string() + ]), + contract_migrate_version: None, + } + ); + + let checksum3 = cache.store_code(EMPTY_CONTRACT, true, true).unwrap(); + let report3 = cache.analyze(&checksum3).unwrap(); + assert_eq!( + report3, + AnalysisReport { + has_ibc_entry_points: false, + entrypoints: BTreeSet::new(), + required_capabilities: 
BTreeSet::from(["iterator".to_string()]), + contract_migrate_version: None, + } + ); + + let mut wasm_with_version = EMPTY_CONTRACT.to_vec(); + let custom_section = wasm_encoder::CustomSection { + name: Cow::Borrowed("cw_migrate_version"), + data: Cow::Borrowed(b"21"), + }; + custom_section.append_to_component(&mut wasm_with_version); + + let checksum4 = cache.store_code(&wasm_with_version, true, true).unwrap(); + let report4 = cache.analyze(&checksum4).unwrap(); + assert_eq!( + report4, + AnalysisReport { + has_ibc_entry_points: false, + entrypoints: BTreeSet::new(), + required_capabilities: BTreeSet::from(["iterator".to_string()]), + contract_migrate_version: Some(21), + } + ); + } + + #[test] + fn pinned_metrics_works() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + cache.pin(&checksum).unwrap(); + + let pinned_metrics = cache.pinned_metrics(); + assert_eq!(pinned_metrics.per_module.len(), 1); + assert_eq!(pinned_metrics.per_module[0].0, checksum); + assert_eq!(pinned_metrics.per_module[0].1.hits, 0); + + let backend = mock_backend(&[]); + let _ = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + + let pinned_metrics = cache.pinned_metrics(); + assert_eq!(pinned_metrics.per_module.len(), 1); + assert_eq!(pinned_metrics.per_module[0].0, checksum); + assert_eq!(pinned_metrics.per_module[0].1.hits, 1); + + let empty_checksum = cache.store_code(EMPTY_CONTRACT, true, true).unwrap(); + cache.pin(&empty_checksum).unwrap(); + + let pinned_metrics = cache.pinned_metrics(); + assert_eq!(pinned_metrics.per_module.len(), 2); + + let get_module_hits = |checksum| { + pinned_metrics + .per_module + .iter() + .find(|(iter_checksum, _module)| *iter_checksum == checksum) + .map(|(_checksum, module)| module) + .cloned() + .unwrap() + }; + + assert_eq!(get_module_hits(checksum).hits, 1); + assert_eq!(get_module_hits(empty_checksum).hits, 0); + } + + #[test] + fn 
pin_unpin_works() { + let cache = unsafe { Cache::new(make_testing_options()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // check not pinned + let backend = mock_backend(&[]); + let mut instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 1); + assert_eq!(cache.stats().misses, 0); + test_hackatom_instance_execution(&mut instance); + + // first pin hits file system cache + cache.pin(&checksum).unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + + // consecutive pins are no-ops + cache.pin(&checksum).unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + + // check pinned + let backend = mock_backend(&[]); + let mut instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 1); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + test_hackatom_instance_execution(&mut instance); + + // unpin + cache.unpin(&checksum).unwrap(); + + // verify unpinned + let backend = mock_backend(&[]); + let mut instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 1); + assert_eq!(cache.stats().hits_memory_cache, 1); + assert_eq!(cache.stats().hits_fs_cache, 2); + assert_eq!(cache.stats().misses, 0); + test_hackatom_instance_execution(&mut instance); + + // unpin again has no effect + cache.unpin(&checksum).unwrap(); + + // unpin non 
existent id has no effect + let non_id = Checksum::generate(b"non_existent"); + cache.unpin(&non_id).unwrap(); + } + + #[test] + fn pin_recompiles_module() { + let options = make_testing_options(); + let cache: Cache = + unsafe { Cache::new(options.clone()).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // Remove compiled module from disk + remove_dir_all(options.base_dir.join(CACHE_DIR).join(MODULES_DIR)).unwrap(); + + // Pin misses, forcing a re-compile of the module + cache.pin(&checksum).unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 0); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 0); + assert_eq!(cache.stats().misses, 1); + + // After the compilation in pin, the module can be used from pinned memory cache + let backend = mock_backend(&[]); + let mut instance = cache + .get_instance(&checksum, backend, TESTING_OPTIONS) + .unwrap(); + assert_eq!(cache.stats().hits_pinned_memory_cache, 1); + assert_eq!(cache.stats().hits_memory_cache, 0); + assert_eq!(cache.stats().hits_fs_cache, 0); + assert_eq!(cache.stats().misses, 1); + test_hackatom_instance_execution(&mut instance); + } + + #[test] + fn loading_without_extension_works() { + let tmp_dir = TempDir::new().unwrap(); + let options = CacheOptions { + base_dir: tmp_dir.path().to_path_buf(), + available_capabilities: default_capabilities(), + memory_cache_size_bytes: TESTING_MEMORY_CACHE_SIZE, + instance_memory_limit_bytes: TESTING_MEMORY_LIMIT, + }; + let cache: Cache = + unsafe { Cache::new(options).unwrap() }; + let checksum = cache.store_code(CONTRACT, true, true).unwrap(); + + // Move the saved wasm to the old path (without extension) + let old_path = tmp_dir + .path() + .join(STATE_DIR) + .join(WASM_DIR) + .join(checksum.to_hex()); + let new_path = old_path.with_extension("wasm"); + fs::rename(new_path, old_path).unwrap(); + + // loading wasm from before the wasm extension was added should still work + let 
restored = cache.load_wasm(&checksum).unwrap(); + assert_eq!(restored, CONTRACT); + } + + #[test] + fn test_wasm_limits_checked() { + let tmp_dir = TempDir::new().unwrap(); + + let config = Config { + wasm_limits: WasmLimits { + max_function_params: Some(0), + ..Default::default() + }, + cache: CacheOptions { + base_dir: tmp_dir.path().to_path_buf(), + available_capabilities: default_capabilities(), + memory_cache_size_bytes: TESTING_MEMORY_CACHE_SIZE, + instance_memory_limit_bytes: TESTING_MEMORY_LIMIT, + }, + }; + + let cache: Cache = + unsafe { Cache::new_with_config(config).unwrap() }; + let err = cache.store_code(CONTRACT, true, true).unwrap_err(); + assert!(matches!(err, VmError::StaticValidationErr { .. })); + } +} diff --git a/vm/src/calls.rs b/vm/src/calls.rs new file mode 100644 index 000000000..5f6af1de1 --- /dev/null +++ b/vm/src/calls.rs @@ -0,0 +1,1229 @@ +use serde::de::DeserializeOwned; +use wasmer::Value; + +use cosmwasm_std::{ + ContractResult, CustomMsg, Env, IbcBasicResponse, IbcDestinationCallbackMsg, + IbcSourceCallbackMsg, MessageInfo, MigrateInfo, QueryResponse, Reply, Response, +}; +#[cfg(feature = "stargate")] +use cosmwasm_std::{ + Ibc3ChannelOpenResponse, IbcChannelCloseMsg, IbcChannelConnectMsg, IbcChannelOpenMsg, + IbcPacketAckMsg, IbcPacketReceiveMsg, IbcPacketTimeoutMsg, IbcReceiveResponse, +}; + +use crate::backend::{BackendApi, Querier, Storage}; +use crate::conversion::ref_to_u32; +use crate::errors::{VmError, VmResult}; +use crate::instance::Instance; +use crate::serde::{from_slice, to_vec}; + +/// The limits in here protect the host from allocating an unreasonable amount of memory +/// and copying an unreasonable amount of data. +/// +/// A JSON deserializer would want to set the limit to a much smaller value because +/// deserializing JSON is more expensive. As a consequence, any sane contract should hit +/// the deserializer limit before the read limit. 
+mod read_limits { + /// A mibi (mega binary) + const MI: usize = 1024 * 1024; + /// Max length (in bytes) of the result data from an instantiate call. + pub const RESULT_INSTANTIATE: usize = 64 * MI; + /// Max length (in bytes) of the result data from an execute call. + pub const RESULT_EXECUTE: usize = 64 * MI; + /// Max length (in bytes) of the result data from a migrate call. + pub const RESULT_MIGRATE: usize = 64 * MI; + /// Max length (in bytes) of the result data from a sudo call. + pub const RESULT_SUDO: usize = 64 * MI; + /// Max length (in bytes) of the result data from a reply call. + pub const RESULT_REPLY: usize = 64 * MI; + /// Max length (in bytes) of the result data from a query call. + pub const RESULT_QUERY: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_channel_open call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_CHANNEL_OPEN: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_channel_connect call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_CHANNEL_CONNECT: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_channel_close call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_CHANNEL_CLOSE: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_packet_receive call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_PACKET_RECEIVE: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_packet_ack call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_PACKET_ACK: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_packet_timeout call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_PACKET_TIMEOUT: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_source_callback call. + pub const RESULT_IBC_SOURCE_CALLBACK: usize = 64 * MI; + /// Max length (in bytes) of the result data from a ibc_destination_callback call. 
+ pub const RESULT_IBC_DESTINATION_CALLBACK: usize = 64 * MI; +} + +/// The limits for the JSON deserialization. +/// +/// Those limits are not used when the Rust JSON deserializer is bypassed by using the +/// public `call_*_raw` functions directly. +mod deserialization_limits { + /// A kibi (kilo binary) + const KI: usize = 1024; + /// Max length (in bytes) of the result data from an instantiate call. + pub const RESULT_INSTANTIATE: usize = 256 * KI; + /// Max length (in bytes) of the result data from an execute call. + pub const RESULT_EXECUTE: usize = 256 * KI; + /// Max length (in bytes) of the result data from a migrate call. + pub const RESULT_MIGRATE: usize = 256 * KI; + /// Max length (in bytes) of the result data from a sudo call. + pub const RESULT_SUDO: usize = 256 * KI; + /// Max length (in bytes) of the result data from a reply call. + pub const RESULT_REPLY: usize = 256 * KI; + /// Max length (in bytes) of the result data from a query call. + pub const RESULT_QUERY: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_channel_open call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_CHANNEL_OPEN: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_channel_connect call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_CHANNEL_CONNECT: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_channel_close call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_CHANNEL_CLOSE: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_packet_receive call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_PACKET_RECEIVE: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_packet_ack call. + #[cfg(feature = "stargate")] + pub const RESULT_IBC_PACKET_ACK: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_packet_timeout call. 
+ #[cfg(feature = "stargate")] + pub const RESULT_IBC_PACKET_TIMEOUT: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_source_callback call. + pub const RESULT_IBC_SOURCE_CALLBACK: usize = 256 * KI; + /// Max length (in bytes) of the result data from a ibc_destination_callback call. + pub const RESULT_IBC_DESTINATION_CALLBACK: usize = 256 * KI; +} + +pub fn call_instantiate( + instance: &mut Instance, + env: &Env, + info: &MessageInfo, + msg: &[u8], +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let info = to_vec(info)?; + let data = call_instantiate_raw(instance, &env, &info, msg)?; + let result: ContractResult> = + from_slice(&data, deserialization_limits::RESULT_INSTANTIATE)?; + Ok(result) +} + +pub fn call_execute( + instance: &mut Instance, + env: &Env, + info: &MessageInfo, + msg: &[u8], +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let info = to_vec(info)?; + let data = call_execute_raw(instance, &env, &info, msg)?; + let result: ContractResult> = + from_slice(&data, deserialization_limits::RESULT_EXECUTE)?; + Ok(result) +} + +pub fn call_migrate( + instance: &mut Instance, + env: &Env, + msg: &[u8], +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let data = call_migrate_raw(instance, &env, msg)?; + let result: ContractResult> = + from_slice(&data, deserialization_limits::RESULT_MIGRATE)?; + Ok(result) +} + +pub fn call_migrate_with_info( + instance: &mut Instance, + env: &Env, + msg: &[u8], + migrate_info: &MigrateInfo, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + 
let migrate_info = to_vec(migrate_info)?; + let data = call_migrate_with_info_raw(instance, &env, msg, &migrate_info)?; + let result: ContractResult> = + from_slice(&data, deserialization_limits::RESULT_MIGRATE)?; + Ok(result) +} + +pub fn call_sudo( + instance: &mut Instance, + env: &Env, + msg: &[u8], +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let data = call_sudo_raw(instance, &env, msg)?; + let result: ContractResult> = + from_slice(&data, deserialization_limits::RESULT_SUDO)?; + Ok(result) +} + +pub fn call_reply( + instance: &mut Instance, + env: &Env, + msg: &Reply, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_reply_raw(instance, &env, &msg)?; + let result: ContractResult> = + from_slice(&data, deserialization_limits::RESULT_REPLY)?; + Ok(result) +} + +pub fn call_query( + instance: &mut Instance, + env: &Env, + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + let env = to_vec(env)?; + let data = call_query_raw(instance, &env, msg)?; + let result: ContractResult = + from_slice(&data, deserialization_limits::RESULT_QUERY)?; + // Ensure query response is valid JSON + if let ContractResult::Ok(binary_response) = &result { + serde_json::from_slice::(binary_response.as_slice()) + .map_err(|e| VmError::generic_err(format!("Query response must be valid JSON. 
{e}")))?; + } + + Ok(result) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_channel_open( + instance: &mut Instance, + env: &Env, + msg: &IbcChannelOpenMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_channel_open_raw(instance, &env, &msg)?; + let result: ContractResult> = + from_slice(&data, deserialization_limits::RESULT_IBC_CHANNEL_OPEN)?; + Ok(result) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_channel_connect( + instance: &mut Instance, + env: &Env, + msg: &IbcChannelConnectMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_channel_connect_raw(instance, &env, &msg)?; + let result = from_slice(&data, deserialization_limits::RESULT_IBC_CHANNEL_CONNECT)?; + Ok(result) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_channel_close( + instance: &mut Instance, + env: &Env, + msg: &IbcChannelCloseMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_channel_close_raw(instance, &env, &msg)?; + let result = from_slice(&data, deserialization_limits::RESULT_IBC_CHANNEL_CLOSE)?; + Ok(result) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_packet_receive( + instance: &mut Instance, + env: &Env, + msg: &IbcPacketReceiveMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_packet_receive_raw(instance, &env, &msg)?; + let result = from_slice(&data, deserialization_limits::RESULT_IBC_PACKET_RECEIVE)?; + Ok(result) +} + +#[cfg(feature = "stargate")] 
+pub fn call_ibc_packet_ack( + instance: &mut Instance, + env: &Env, + msg: &IbcPacketAckMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_packet_ack_raw(instance, &env, &msg)?; + let result = from_slice(&data, deserialization_limits::RESULT_IBC_PACKET_ACK)?; + Ok(result) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_packet_timeout( + instance: &mut Instance, + env: &Env, + msg: &IbcPacketTimeoutMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_packet_timeout_raw(instance, &env, &msg)?; + let result = from_slice(&data, deserialization_limits::RESULT_IBC_PACKET_TIMEOUT)?; + Ok(result) +} + +pub fn call_ibc_source_callback( + instance: &mut Instance, + env: &Env, + msg: &IbcSourceCallbackMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_source_callback_raw(instance, &env, &msg)?; + let result = from_slice(&data, deserialization_limits::RESULT_IBC_SOURCE_CALLBACK)?; + Ok(result) +} + +pub fn call_ibc_destination_callback( + instance: &mut Instance, + env: &Env, + msg: &IbcDestinationCallbackMsg, +) -> VmResult>> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + let env = to_vec(env)?; + let msg = to_vec(msg)?; + let data = call_ibc_destination_callback_raw(instance, &env, &msg)?; + let result = from_slice( + &data, + deserialization_limits::RESULT_IBC_DESTINATION_CALLBACK, + )?; + Ok(result) +} + +/// Calls Wasm export "instantiate" and returns raw data from the contract. 
+/// The result is length limited to prevent abuse but otherwise unchecked. +pub fn call_instantiate_raw( + instance: &mut Instance, + env: &[u8], + info: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "instantiate", + &[env, info, msg], + read_limits::RESULT_INSTANTIATE, + ) +} + +/// Calls Wasm export "execute" and returns raw data from the contract. +/// The result is length limited to prevent abuse but otherwise unchecked. +pub fn call_execute_raw( + instance: &mut Instance, + env: &[u8], + info: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "execute", + &[env, info, msg], + read_limits::RESULT_EXECUTE, + ) +} + +/// Calls Wasm export "migrate" and returns raw data from the contract. +/// The result is length limited to prevent abuse but otherwise unchecked. +pub fn call_migrate_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "migrate", + &[env, msg], + read_limits::RESULT_MIGRATE, + ) +} + +/// Calls Wasm export "migrate" and returns raw data from the contract. +/// The result is length limited to prevent abuse but otherwise unchecked. +/// The difference between this function and [call_migrate_raw] is the +/// additional argument - `migrate_info`. It contains additional data +/// related to the contract's migration procedure. +/// +/// It is safe to call this method instead of [call_migrate_raw] even +/// if a contract contains the migrate entrypoint without `migrate_info`. +/// In such case this structure is omitted. 
+pub fn call_migrate_with_info_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], + migrate_info: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "migrate", + &[env, msg, migrate_info], + read_limits::RESULT_MIGRATE, + ) + .or_else(|err| { + if matches!(err, VmError::FunctionArityMismatch { .. }) { + call_raw( + instance, + "migrate", + &[env, msg], + read_limits::RESULT_MIGRATE, + ) + } else { + Err(err) + } + }) +} + +/// Calls Wasm export "sudo" and returns raw data from the contract. +/// The result is length limited to prevent abuse but otherwise unchecked. +pub fn call_sudo_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw(instance, "sudo", &[env, msg], read_limits::RESULT_SUDO) +} + +/// Calls Wasm export "reply" and returns raw data from the contract. +/// The result is length limited to prevent abuse but otherwise unchecked. +pub fn call_reply_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw(instance, "reply", &[env, msg], read_limits::RESULT_REPLY) +} + +/// Calls Wasm export "query" and returns raw data from the contract. +/// The result is length limited to prevent abuse but otherwise unchecked. 
+pub fn call_query_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(true); + call_raw(instance, "query", &[env, msg], read_limits::RESULT_QUERY) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_channel_open_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_channel_open", + &[env, msg], + read_limits::RESULT_IBC_CHANNEL_OPEN, + ) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_channel_connect_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_channel_connect", + &[env, msg], + read_limits::RESULT_IBC_CHANNEL_CONNECT, + ) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_channel_close_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_channel_close", + &[env, msg], + read_limits::RESULT_IBC_CHANNEL_CLOSE, + ) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_packet_receive_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_packet_receive", + &[env, msg], + read_limits::RESULT_IBC_PACKET_RECEIVE, + ) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_packet_ack_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: 
Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_packet_ack", + &[env, msg], + read_limits::RESULT_IBC_PACKET_ACK, + ) +} + +#[cfg(feature = "stargate")] +pub fn call_ibc_packet_timeout_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_packet_timeout", + &[env, msg], + read_limits::RESULT_IBC_PACKET_TIMEOUT, + ) +} + +pub fn call_ibc_source_callback_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_source_callback", + &[env, msg], + read_limits::RESULT_IBC_SOURCE_CALLBACK, + ) +} + +pub fn call_ibc_destination_callback_raw( + instance: &mut Instance, + env: &[u8], + msg: &[u8], +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + instance.set_storage_readonly(false); + call_raw( + instance, + "ibc_destination_callback", + &[env, msg], + read_limits::RESULT_IBC_DESTINATION_CALLBACK, + ) +} + +/// Calls a function with the given arguments. +/// The exported function must return exactly one result (an offset to the result Region). 
+pub(crate) fn call_raw( + instance: &mut Instance, + name: &str, + args: &[&[u8]], + result_max_length: usize, +) -> VmResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + let mut arg_region_ptrs = Vec::::with_capacity(args.len()); + for arg in args { + let region_ptr = instance.allocate(arg.len())?; + instance.write_memory(region_ptr, arg)?; + arg_region_ptrs.push(region_ptr.into()); + } + let result = instance.call_function1(name, &arg_region_ptrs)?; + let res_region_ptr = ref_to_u32(&result)?; + let data = instance.read_memory(res_region_ptr, result_max_length)?; + // free return value in wasm (arguments were freed in wasm code) + instance.deallocate(res_region_ptr)?; + Ok(data) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::testing::{ + mock_env, mock_info, mock_instance, mock_instance_with_options, MockInstanceOptions, + }; + use cosmwasm_std::{coins, from_json, to_json_string, Addr, Empty}; + use sha2::{Digest, Sha256}; + + static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm"); + static CYBERPUNK: &[u8] = include_bytes!("../testdata/cyberpunk.wasm"); + static FLOATY2: &[u8] = include_bytes!("../testdata/floaty_2.0.wasm"); + static EMPTY: &[u8] = include_bytes!("../testdata/empty.wasm"); + + #[test] + fn call_instantiate_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + // init + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + } + + #[test] + fn call_instantiate_handles_missing_export() { + let mut deps = mock_instance(EMPTY, &[]); + + let msg = Empty {}; + let info = mock_info("creator", &coins(1000, "earth")); + + let 
serialized_msg = to_vec(&msg).unwrap(); + let err = + call_instantiate::<_, _, _, Empty>(&mut deps, &mock_env(), &info, &serialized_msg) + .unwrap_err(); + + assert!(matches!( + err, + VmError::ResolveErr { + msg, + .. + } + if msg == "Could not get export: Missing export instantiate" + )); + } + + #[test] + fn call_execute_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + // init + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + // execute + let info = mock_info(&verifier, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg) + .unwrap() + .unwrap(); + } + + #[test] + fn call_execute_runs_out_of_gas() { + let mut instance = mock_instance(CYBERPUNK, &[]); + + // init + let info = mock_info("creator", &[]); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{}"#) + .unwrap() + .unwrap(); + + // execute + let info = mock_info("looper", &[]); + let msg = br#"{"cpu_loop":{}}"#; + let err = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap_err(); + assert!(matches!(err, VmError::GasDepletion { .. })); + } + + #[test] + fn call_execute_handles_panic() { + let mut instance = mock_instance(CYBERPUNK, &[]); + + let info = mock_info("creator", &[]); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{}"#) + .unwrap() + .unwrap(); + + // execute + let info = mock_info("troll", &[]); + let msg = br#"{"panic":{}}"#; + let err = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap_err(); + match err { + VmError::RuntimeErr { msg, .. 
} => { + assert!( + msg.contains("RuntimeError: Aborted: panicked at src/contract.rs:"), + "Unexpected error msg: {msg}" + ) + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn call_execute_handles_unreachable() { + let mut instance = mock_instance(CYBERPUNK, &[]); + + let info = mock_info("creator", &[]); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{}"#) + .unwrap() + .unwrap(); + + // execute + let info = mock_info("troll", &[]); + let msg = br#"{"unreachable":{}}"#; + let err = + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg).unwrap_err(); + match err { + VmError::RuntimeErr { msg, .. } => { + assert!(msg.contains("RuntimeError: unreachable")) + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn call_migrate_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + // init + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + // change the verifier via migrate + let someone_else = instance.api().addr_make("someone else"); + let msg = format!(r#"{{"verifier": "{someone_else}"}}"#); + let _res = call_migrate::<_, _, _, Empty>(&mut instance, &mock_env(), msg.as_bytes()) + .unwrap() + .unwrap(); + + // query the new_verifier with verifier + let msg = br#"{"verifier":{}}"#; + let contract_result = call_query(&mut instance, &mock_env(), msg).unwrap(); + let query_response = contract_result.unwrap(); + assert_eq!( + query_response, + format!(r#"{{"verifier":"{}"}}"#, someone_else).as_bytes(), + ); + } + + #[test] + fn call_migrate_with_info_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + // init + let 
info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + // change the verifier via migrate + let someone_else = instance.api().addr_make("someone else"); + let msg = format!(r#"{{"verifier": "{someone_else}"}}"#); + let migrate_info = MigrateInfo { + sender: Addr::unchecked(someone_else.clone()), + old_migrate_version: Some(33), + }; + let _res = call_migrate_with_info::<_, _, _, Empty>( + &mut instance, + &mock_env(), + msg.as_bytes(), + &migrate_info, + ) + .unwrap() + .unwrap(); + + // query the new_verifier with verifier + let msg = br#"{"verifier":{}}"#; + let contract_result = call_query(&mut instance, &mock_env(), msg).unwrap(); + let query_response = contract_result.unwrap(); + assert_eq!( + query_response, + format!(r#"{{"verifier":"{}"}}"#, someone_else).as_bytes(), + ); + } + + #[test] + fn call_query_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + // init + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + // query + let msg = br#"{"verifier":{}}"#; + let contract_result = call_query(&mut instance, &mock_env(), msg).unwrap(); + let query_response = contract_result.unwrap(); + assert_eq!( + query_response, + format!("{{\"verifier\":\"{verifier}\"}}").as_bytes() + ); + } + + #[test] + fn float_instrs_are_deterministic() { + #[derive(Debug, serde::Serialize, 
serde::Deserialize)] + #[serde(rename_all = "snake_case")] + pub enum Value { + U32(u32), + U64(u64), + F32(u32), + F64(u64), + } + + let mut instance = mock_instance_with_options( + FLOATY2, + MockInstanceOptions { + gas_limit: u64::MAX, + memory_limit: None, + ..Default::default() + }, + ); + + // init + let info = mock_info("creator", &[]); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{}"#) + .unwrap() + .unwrap(); + + // query instructions + let msg = br#"{"instructions":{}}"#; + let contract_result = call_query(&mut instance, &mock_env(), msg) + .unwrap() + .unwrap(); + let instructions: Vec = from_json(contract_result).unwrap(); + // little sanity check + assert_eq!(instructions.len(), 70); + + const RUNS_PER_INSTRUCTION: u64 = 150; + let mut hasher = Sha256::new(); + for instr in &instructions { + for seed in 0..RUNS_PER_INSTRUCTION { + // query some input values for the instruction + let args: Vec = from_json( + call_query( + &mut instance, + &mock_env(), + format!( + r#"{{"random_args_for":{{ "instruction": "{instr}", "seed": {seed}}}}}"# + ) + .as_bytes(), + ) + .unwrap() + .unwrap(), + ) + .unwrap(); + + // build the run message + let args = to_json_string(&args).unwrap(); + let msg: String = format!( + r#"{{"run":{{ + "instruction": "{instr}", + "args": {args} + }}}}"# + ); + // run the instruction + // this might throw a runtime error (e.g. if the instruction traps) + let result = match call_query(&mut instance, &mock_env(), msg.as_bytes()) { + Ok(ContractResult::Ok(r)) => format!("{:?}", from_json::(&r).unwrap()), + Err(VmError::RuntimeErr { msg, .. 
}) => msg, + e => panic!("unexpected error: {e:?}"), + }; + // add the result to the hash + hasher.update(format!("{instr}{seed}{result}").as_bytes()); + } + } + let hash = Digest::finalize(hasher); + assert_eq!( + hex::encode(hash.as_slice()), + "95f70fa6451176ab04a9594417a047a1e4d8e2ff809609b8f81099496bee2393" + ); + } + + #[cfg(feature = "stargate")] + mod ibc { + use super::*; + use crate::testing::{MockApi, MockQuerier, MockStorage}; + use cosmwasm_std::testing::mock_ibc_packet_timeout; + use cosmwasm_std::testing::{ + mock_ibc_channel_close_init, mock_ibc_channel_connect_ack, mock_ibc_channel_open_init, + mock_ibc_packet_ack, mock_ibc_packet_recv, mock_wasmd_attr, + }; + use cosmwasm_std::{ + Event, IbcAckCallbackMsg, IbcAcknowledgement, IbcOrder, IbcTimeoutCallbackMsg, ReplyOn, + SubMsgResponse, SubMsgResult, + }; + const CONTRACT: &[u8] = include_bytes!("../testdata/ibc_reflect.wasm"); + const IBC_CALLBACKS: &[u8] = include_bytes!("../testdata/ibc_callbacks.wasm"); + const IBC_VERSION: &str = "ibc-reflect-v1"; + + fn setup( + instance: &mut Instance, + channel_id: &str, + account: &str, + ) { + // init + let info = mock_info("creator", &[]); + let msg = br#"{"reflect_code_id":77}"#; + call_instantiate::<_, _, _, Empty>(instance, &mock_env(), &info, msg) + .unwrap() + .unwrap(); + // first we try to open with a valid handshake + let handshake_open = + mock_ibc_channel_open_init(channel_id, IbcOrder::Ordered, IBC_VERSION); + call_ibc_channel_open(instance, &mock_env(), &handshake_open) + .unwrap() + .unwrap(); + // then we connect (with counter-party version set) + let handshake_connect = + mock_ibc_channel_connect_ack(channel_id, IbcOrder::Ordered, IBC_VERSION); + let res: IbcBasicResponse = call_ibc_channel_connect::<_, _, _, Empty>( + instance, + &mock_env(), + &handshake_connect, + ) + .unwrap() + .unwrap(); + assert_eq!(1, res.messages.len()); + assert_eq!( + res.events, + [Event::new("ibc").add_attribute("channel", "connect")] + ); + 
assert_eq!(ReplyOn::Success, res.messages[0].reply_on); + let id = res.messages[0].id; + let payload = res.messages[0].payload.clone(); + let event = Event::new("instantiate").add_attributes(vec![ + // We have to force this one to avoid the debug assertion against _ + mock_wasmd_attr("_contract_address", account), + ]); + // which creates a reflect account. here we get the callback + #[allow(deprecated)] + let response = Reply { + id, + payload, + gas_used: 1234567, + result: SubMsgResult::Ok(SubMsgResponse { + events: vec![event], + msg_responses: vec![], + data: None, + }), + }; + call_reply::<_, _, _, Empty>(instance, &mock_env(), &response).unwrap(); + } + + const CHANNEL_ID: &str = "channel-123"; + const ACCOUNT: &str = "account-456"; + + #[test] + fn call_ibc_channel_open_and_connect_works() { + let mut instance = mock_instance(CONTRACT, &[]); + setup(&mut instance, CHANNEL_ID, ACCOUNT); + } + + #[test] + fn call_ibc_channel_close_works() { + let mut instance = mock_instance(CONTRACT, &[]); + let account = instance.api().addr_make(ACCOUNT); + setup(&mut instance, CHANNEL_ID, &account); + let handshake_close = + mock_ibc_channel_close_init(CHANNEL_ID, IbcOrder::Ordered, IBC_VERSION); + call_ibc_channel_close::<_, _, _, Empty>(&mut instance, &mock_env(), &handshake_close) + .unwrap() + .unwrap(); + } + + #[test] + fn call_ibc_packet_ack_works() { + let mut instance = mock_instance(CONTRACT, &[]); + setup(&mut instance, CHANNEL_ID, ACCOUNT); + let ack = IbcAcknowledgement::new(br#"{}"#); + let msg = mock_ibc_packet_ack(CHANNEL_ID, br#"{}"#, ack).unwrap(); + call_ibc_packet_ack::<_, _, _, Empty>(&mut instance, &mock_env(), &msg) + .unwrap() + .unwrap(); + } + + #[test] + fn call_ibc_packet_timeout_works() { + let mut instance = mock_instance(CONTRACT, &[]); + setup(&mut instance, CHANNEL_ID, ACCOUNT); + let msg = mock_ibc_packet_timeout(CHANNEL_ID, br#"{}"#).unwrap(); + call_ibc_packet_timeout::<_, _, _, Empty>(&mut instance, &mock_env(), &msg) + .unwrap() + 
.unwrap(); + } + + #[test] + fn call_ibc_packet_receive_works() { + let mut instance = mock_instance(CONTRACT, &[]); + setup(&mut instance, CHANNEL_ID, ACCOUNT); + let who_am_i = br#"{"who_am_i":{}}"#; + let msg = mock_ibc_packet_recv(CHANNEL_ID, who_am_i).unwrap(); + call_ibc_packet_receive::<_, _, _, Empty>(&mut instance, &mock_env(), &msg) + .unwrap() + .unwrap(); + } + + #[test] + fn call_ibc_source_callback_works() { + let mut instance = mock_instance(IBC_CALLBACKS, &[]); + + // init + let creator = instance.api().addr_make("creator"); + let info = mock_info(&creator, &[]); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{}"#) + .unwrap() + .unwrap(); + + /// Response type for the `callback_stats` query + #[derive(serde::Serialize, serde::Deserialize)] + struct CallbackStats { + pub ibc_ack_callbacks: Vec, + pub ibc_timeout_callbacks: Vec, + } + + // send ack callback + let ack = mock_ibc_packet_ack(CHANNEL_ID, br#"{}"#, IbcAcknowledgement::new(br#"{}"#)) + .unwrap(); + let msg = IbcSourceCallbackMsg::Acknowledgement(IbcAckCallbackMsg::new( + ack.acknowledgement, + ack.original_packet, + ack.relayer, + )); + call_ibc_source_callback::<_, _, _, Empty>(&mut instance, &mock_env(), &msg) + .unwrap() + .unwrap(); + // query the CallbackStats + let stats: CallbackStats = serde_json::from_slice( + &call_query::<_, _, _>(&mut instance, &mock_env(), br#"{"callback_stats":{}}"#) + .unwrap() + .unwrap(), + ) + .unwrap(); + assert_eq!(1, stats.ibc_ack_callbacks.len()); + assert_eq!(0, stats.ibc_timeout_callbacks.len()); + + // send timeout callback + let timeout = mock_ibc_packet_timeout(CHANNEL_ID, br#"{}"#).unwrap(); + let msg = IbcSourceCallbackMsg::Timeout(IbcTimeoutCallbackMsg::new( + timeout.packet, + timeout.relayer, + )); + call_ibc_source_callback::<_, _, _, Empty>(&mut instance, &mock_env(), &msg) + .unwrap() + .unwrap(); + // query the CallbackStats + let stats: CallbackStats = serde_json::from_slice( + &call_query::<_, _, _>(&mut 
instance, &mock_env(), br#"{"callback_stats":{}}"#) + .unwrap() + .unwrap(), + ) + .unwrap(); + assert_eq!(1, stats.ibc_ack_callbacks.len()); + assert_eq!(1, stats.ibc_timeout_callbacks.len()); + } + } +} diff --git a/vm/src/capabilities.rs b/vm/src/capabilities.rs new file mode 100644 index 000000000..828110501 --- /dev/null +++ b/vm/src/capabilities.rs @@ -0,0 +1,93 @@ +use std::collections::HashSet; + +use crate::static_analysis::ExportInfo; + +const REQUIRES_PREFIX: &str = "requires_"; + +/// Takes a comma-separated string, splits it by commas, removes empty elements and returns a set of capabilities. +/// This can be used e.g. to initialize the cache. +pub fn capabilities_from_csv(csv: &str) -> HashSet { + csv.split(',') + .map(|x| x.trim().to_string()) + .filter(|f| !f.is_empty()) + .collect() +} + +/// Implementation for check_wasm, based on static analysis of the bytecode. +/// This is used for code upload, to perform check before compiling the Wasm. +pub fn required_capabilities_from_module(module: impl ExportInfo) -> HashSet { + module + .exported_function_names(Some(REQUIRES_PREFIX)) + .into_iter() + .filter_map(|name| { + if name.len() > REQUIRES_PREFIX.len() { + let (_, required_capability) = name.split_at(REQUIRES_PREFIX.len()); + Some(required_capability.to_string()) + } else { + None + } + }) + .collect() +} + +#[cfg(test)] +mod tests { + use crate::parsed_wasm::ParsedWasm; + + use super::*; + + #[test] + fn capabilities_from_csv_works() { + let set = capabilities_from_csv("foo, bar,baz "); + assert_eq!(set.len(), 3); + assert!(set.contains("foo")); + assert!(set.contains("bar")); + assert!(set.contains("baz")); + } + + #[test] + fn capabilities_from_csv_skips_empty() { + let set = capabilities_from_csv(""); + assert_eq!(set.len(), 0); + let set = capabilities_from_csv("a,,b"); + assert_eq!(set.len(), 2); + assert!(set.contains("a")); + assert!(set.contains("b")); + let set = capabilities_from_csv("a,b,"); + assert_eq!(set.len(), 2); + 
assert!(set.contains("a")); + assert!(set.contains("b")); + } + + #[test] + fn required_capabilities_from_module_works() { + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "requires_water" (func 0)) + (export "requires_" (func 0)) + (export "requires_nutrients" (func 0)) + (export "require_milk" (func 0)) + (export "REQUIRES_air" (func 0)) + (export "requires_sun" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + + let required_capabilities = required_capabilities_from_module(&module); + assert_eq!(required_capabilities.len(), 3); + assert!(required_capabilities.contains("nutrients")); + assert!(required_capabilities.contains("sun")); + assert!(required_capabilities.contains("water")); + } + + #[test] + fn required_capabilities_from_module_works_without_exports_section() { + let wasm = wat::parse_str(r#"(module)"#).unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + let required_capabilities = required_capabilities_from_module(&module); + assert_eq!(required_capabilities.len(), 0); + } +} diff --git a/vm/src/compatibility.rs b/vm/src/compatibility.rs new file mode 100644 index 000000000..c77e09e58 --- /dev/null +++ b/vm/src/compatibility.rs @@ -0,0 +1,1112 @@ +use std::collections::BTreeSet; +use std::collections::HashSet; + +use wasmer::wasmparser::Import; +use wasmer::wasmparser::TypeRef; + +use crate::capabilities::required_capabilities_from_module; +use crate::config::WasmLimits; +use crate::errors::{VmError, VmResult}; +use crate::limited::LimitedDisplay; +use crate::parsed_wasm::ParsedWasm; +use crate::static_analysis::ExportInfo; + +/// Lists all imports we provide upon instantiating the instance in Instance::from_module() +/// This should be updated when new imports are added +const SUPPORTED_IMPORTS: &[&str] = &[ + "env.abort", + "env.db_read", + "env.db_write", + "env.db_remove", + "env.addr_validate", + "env.addr_canonicalize", + "env.addr_humanize", + 
"env.bls12_381_aggregate_g1", + "env.bls12_381_aggregate_g2", + "env.bls12_381_pairing_equality", + "env.bls12_381_hash_to_g1", + "env.bls12_381_hash_to_g2", + "env.secp256k1_verify", + "env.secp256k1_recover_pubkey", + "env.secp256r1_verify", + "env.secp256r1_recover_pubkey", + "env.ed25519_verify", + "env.ed25519_batch_verify", + "env.debug", + "env.query_chain", + #[cfg(feature = "iterator")] + "env.db_scan", + #[cfg(feature = "iterator")] + "env.db_next", + #[cfg(feature = "iterator")] + "env.db_next_key", + #[cfg(feature = "iterator")] + "env.db_next_value", +]; + +/// Lists all entry points we expect to be present when calling a contract. +/// Other optional exports exist, e.g. "execute", "migrate" and "query". +/// The marker export interface_version_* is checked separately. +/// This is unlikely to change much, must be frozen at 1.0 to avoid breaking existing contracts +const REQUIRED_EXPORTS: &[&str] = &[ + // IO + "allocate", + "deallocate", +]; + +const INTERFACE_VERSION_PREFIX: &str = "interface_version_"; +const SUPPORTED_INTERFACE_VERSIONS: &[&str] = &["interface_version_8"]; + +#[derive(Clone, Copy)] +pub enum LogOutput { + StdOut, + StdErr, +} +#[derive(Clone, Copy, Default)] +pub enum Logger<'a> { + On { + prefix: &'a str, + output: LogOutput, + }, + #[default] + Off, +} + +impl<'a> Logger<'a> { + pub fn with_config(output: LogOutput, prefix: &'a str) -> Self { + On { output, prefix } + } + + /// Adds a message to the logs, if they are enabled. + /// This is a convenience method for adding a single message. + /// + /// Takes a closure that returns the message to add to avoid unnecessary allocations. 
+ pub fn add(&self, msg_fn: impl FnOnce() -> String) { + if let On { prefix, output } = &self { + let msg = msg_fn(); + match output { + LogOutput::StdOut => println!("{prefix}{msg}"), + LogOutput::StdErr => eprintln!("{prefix}{msg}"), + } + } + } +} + +use Logger::*; + +/// Checks if the data is valid wasm and compatibility with the CosmWasm API (imports and exports) +pub fn check_wasm( + wasm_code: &[u8], + available_capabilities: &HashSet, + limits: &WasmLimits, + logs: Logger<'_>, +) -> VmResult<()> { + logs.add(|| format!("Size of Wasm blob: {}", wasm_code.len())); + + let mut module = ParsedWasm::parse(wasm_code)?; + + check_wasm_tables(&module, limits)?; + check_wasm_memories(&module, limits)?; + check_interface_version(&module)?; + check_wasm_exports(&module, logs)?; + check_wasm_imports(&module, SUPPORTED_IMPORTS, limits, logs)?; + check_wasm_capabilities(&module, available_capabilities, logs)?; + check_wasm_functions(&module, limits, logs)?; + + module.validate_funcs() +} + +fn check_wasm_tables(module: &ParsedWasm, wasm_limits: &WasmLimits) -> VmResult<()> { + match module.tables.len() { + 0 => Ok(()), + 1 => { + let limits = &module.tables[0]; + if let Some(maximum) = limits.maximum { + if maximum > wasm_limits.table_size_limit_elements() { + return Err(VmError::static_validation_err( + "Wasm contract's first table section has a too large max limit", + )); + } + Ok(()) + } else { + Err(VmError::static_validation_err( + "Wasm contract must not have unbound table section", + )) + } + } + _ => Err(VmError::static_validation_err( + "Wasm contract must not have more than 1 table section", + )), + } +} + +fn check_wasm_memories(module: &ParsedWasm, limits: &WasmLimits) -> VmResult<()> { + if module.memories.len() != 1 { + return Err(VmError::static_validation_err( + "Wasm contract must contain exactly one memory", + )); + } + let memory = &module.memories[0]; + + if memory.initial > limits.initial_memory_limit_pages() as u64 { + return 
Err(VmError::static_validation_err(format!( + "Wasm contract memory's minimum must not exceed {} pages.", + limits.initial_memory_limit_pages() + ))); + } + + if memory.maximum.is_some() { + return Err(VmError::static_validation_err( + "Wasm contract memory's maximum must be unset. The host will set it for you.", + )); + } + Ok(()) +} + +fn check_interface_version(module: &ParsedWasm) -> VmResult<()> { + let mut interface_version_exports = module + .exported_function_names(Some(INTERFACE_VERSION_PREFIX)) + .into_iter(); + if let Some(first_interface_version_export) = interface_version_exports.next() { + if interface_version_exports.next().is_some() { + Err(VmError::static_validation_err( + "Wasm contract contains more than one marker export: interface_version_*", + )) + } else { + // Exactly one interface version found + let version_str = first_interface_version_export.as_str(); + if SUPPORTED_INTERFACE_VERSIONS + .iter() + .any(|&v| v == version_str) + { + Ok(()) + } else { + Err(VmError::static_validation_err( + "Wasm contract has unknown interface_version_* marker export (see https://github.com/CosmWasm/cosmwasm/blob/main/packages/vm/README.md)", + )) + } + } + } else { + Err(VmError::static_validation_err( + "Wasm contract missing a required marker export: interface_version_*", + )) + } +} + +fn check_wasm_exports(module: &ParsedWasm, logs: Logger) -> VmResult<()> { + let available_exports: HashSet = module.exported_function_names(None); + + logs.add(|| format!("Exports: {}", available_exports.to_string_limited(20_000))); + + for required_export in REQUIRED_EXPORTS { + if !available_exports.contains(*required_export) { + return Err(VmError::static_validation_err(format!( + "Wasm contract doesn't have required export: \"{required_export}\". Exports required by VM: {REQUIRED_EXPORTS:?}." + ))); + } + } + Ok(()) +} + +/// Checks if the import requirements of the contract are satisfied. 
+/// When this is not the case, we either have an incompatibility between contract and VM +/// or a error in the contract. +fn check_wasm_imports( + module: &ParsedWasm, + supported_imports: &[&str], + limits: &WasmLimits, + logs: Logger, +) -> VmResult<()> { + logs.add(|| { + format!( + "Imports ({}): {}", + module.imports.len(), + module + .imports + .iter() + .map(|import| full_import_name(import)) + .collect::>() + .join(", ") + ) + }); + + if module.imports.len() > limits.max_imports() { + return Err(VmError::static_validation_err(format!( + "Import count exceeds limit. Imports: {}. Limit: {}.", + module.imports.len(), + limits.max_imports() + ))); + } + + for required_import in &module.imports { + let full_name = full_import_name(required_import); + if !supported_imports.contains(&full_name.as_str()) { + let required_import_names: BTreeSet<_> = + module.imports.iter().map(full_import_name).collect(); + return Err(VmError::static_validation_err(format!( + "Wasm contract requires unsupported import: \"{}\". Required imports: {}. Available imports: {:?}.", + full_name, required_import_names.to_string_limited(200), supported_imports + ))); + } + + match required_import.ty { + TypeRef::Func(_) => {} // ok + _ => return Err(VmError::static_validation_err(format!( + "Wasm contract requires non-function import: \"{full_name}\". Right now, all supported imports are functions." 
+ ))) + } + } + Ok(()) +} + +fn full_import_name(ie: &Import) -> String { + format!("{}.{}", ie.module, ie.name) +} + +fn check_wasm_capabilities( + module: &ParsedWasm, + available_capabilities: &HashSet, + logs: Logger, +) -> VmResult<()> { + let required_capabilities = required_capabilities_from_module(module); + logs.add(|| { + format!( + "Required capabilities: {}", + required_capabilities.to_string_limited(20_000) + ) + }); + if !required_capabilities.is_subset(available_capabilities) { + // We switch to BTreeSet to get a sorted error message + let unavailable: BTreeSet<_> = required_capabilities + .difference(available_capabilities) + .collect(); + return Err(VmError::static_validation_err(format!( + "Wasm contract requires unavailable capabilities: {}", + unavailable.to_string_limited(200) + ))); + } + Ok(()) +} + +fn check_wasm_functions(module: &ParsedWasm, limits: &WasmLimits, logs: Logger) -> VmResult<()> { + logs.add(|| format!("Function count: {}", module.function_count)); + logs.add(|| format!("Max function parameters: {}", module.max_func_params)); + logs.add(|| format!("Max function results: {}", module.max_func_results)); + logs.add(|| { + format!( + "Total function parameter count: {}", + module.total_func_params + ) + }); + + if module.function_count > limits.max_functions() { + return Err(VmError::static_validation_err(format!( + "Wasm contract contains more than {} functions", + limits.max_functions() + ))); + } + if module.max_func_params > limits.max_function_params() { + return Err(VmError::static_validation_err(format!( + "Wasm contract contains function with more than {} parameters", + limits.max_function_params() + ))); + } + if module.max_func_results > limits.max_function_results() { + return Err(VmError::static_validation_err(format!( + "Wasm contract contains function with more than {} results", + limits.max_function_results() + ))); + } + + if module.total_func_params > limits.max_total_function_params() { + return 
Err(VmError::static_validation_err(format!( + "Wasm contract contains more than {} function parameters in total", + limits.max_total_function_params() + ))); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::capabilities_from_csv; + + static CONTRACT_0_7: &[u8] = include_bytes!("../testdata/hackatom_0.7.wasm"); + static CONTRACT_0_12: &[u8] = include_bytes!("../testdata/hackatom_0.12.wasm"); + static CONTRACT_0_14: &[u8] = include_bytes!("../testdata/hackatom_0.14.wasm"); + static CONTRACT_0_15: &[u8] = include_bytes!("../testdata/hackatom_0.15.wasm"); + static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm"); + static CYBERPUNK: &[u8] = include_bytes!("../testdata/cyberpunk.wasm"); + static CONTRACT_RUST_170: &[u8] = include_bytes!("../testdata/cyberpunk_rust170.wasm"); + + fn default_capabilities() -> HashSet { + capabilities_from_csv("cosmwasm_1_1,cosmwasm_1_2,cosmwasm_1_3,iterator,staking,stargate") + } + + #[test] + fn check_wasm_passes_for_latest_contract() { + // this is our reference check, must pass + check_wasm( + CONTRACT, + &default_capabilities(), + &WasmLimits::default(), + Off, + ) + .unwrap(); + check_wasm( + CYBERPUNK, + &default_capabilities(), + &WasmLimits::default(), + Off, + ) + .unwrap(); + } + + #[test] + fn check_wasm_allows_sign_ext() { + // See https://github.com/CosmWasm/cosmwasm/issues/1727 + check_wasm( + CONTRACT_RUST_170, + &default_capabilities(), + &WasmLimits::default(), + Off, + ) + .unwrap(); + } + + #[test] + fn check_wasm_old_contract() { + match check_wasm(CONTRACT_0_15, &default_capabilities(),&WasmLimits::default(), + Off) { + Err(VmError::StaticValidationErr { msg, .. 
}) => assert_eq!( + msg, + "Wasm contract has unknown interface_version_* marker export (see https://github.com/CosmWasm/cosmwasm/blob/main/packages/vm/README.md)" + ), + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("This must not succeed"), + }; + + match check_wasm(CONTRACT_0_14, &default_capabilities(),&WasmLimits::default(), + Off,) { + Err(VmError::StaticValidationErr { msg, .. }) => assert_eq!( + msg, + "Wasm contract has unknown interface_version_* marker export (see https://github.com/CosmWasm/cosmwasm/blob/main/packages/vm/README.md)" + ), + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("This must not succeed"), + }; + + match check_wasm( + CONTRACT_0_12, + &default_capabilities(), + &WasmLimits::default(), + Off, + ) { + Err(VmError::StaticValidationErr { msg, .. }) => { + assert!(msg.contains( + "Wasm contract missing a required marker export: interface_version_*" + )) + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("This must not succeed"), + }; + + match check_wasm( + CONTRACT_0_7, + &default_capabilities(), + &WasmLimits::default(), + Off, + ) { + Err(VmError::StaticValidationErr { msg, .. 
}) => { + assert!(msg.contains( + "Wasm contract missing a required marker export: interface_version_*" + )) + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("This must not succeed"), + }; + } + + #[test] + fn check_wasm_tables_works() { + let limits = WasmLimits::default(); + // No tables is fine + let wasm = wat::parse_str("(module)").unwrap(); + assert!(ParsedWasm::parse(&wasm).unwrap().tables.is_empty()); + + // One table (bound) + let wasm = wat::parse_str("(module (table $name 123 123 funcref))").unwrap(); + check_wasm_tables(&ParsedWasm::parse(&wasm).unwrap(), &limits).unwrap(); + + // One table (bound, initial > max) + let wasm = wat::parse_str("(module (table $name 124 123 funcref))").unwrap(); + // this should be caught by the validator + let err = &ParsedWasm::parse(&wasm).unwrap_err(); + assert!(err + .to_string() + .contains("size minimum must not be greater than maximum")); + + // One table (bound, max too large) + let wasm = wat::parse_str("(module (table $name 100 9999 funcref))").unwrap(); + let err = check_wasm_tables(&ParsedWasm::parse(&wasm).unwrap(), &limits).unwrap_err(); + assert!(err + .to_string() + .contains("Wasm contract's first table section has a too large max limit")); + + // One table (unbound) + let wasm = wat::parse_str("(module (table $name 100 funcref))").unwrap(); + let err = check_wasm_tables(&ParsedWasm::parse(&wasm).unwrap(), &limits).unwrap_err(); + assert!(err + .to_string() + .contains("Wasm contract must not have unbound table section")); + } + + #[test] + fn check_wasm_memories_ok() { + let wasm = wat::parse_str("(module (memory 1))").unwrap(); + check_wasm_memories(&ParsedWasm::parse(&wasm).unwrap(), &WasmLimits::default()).unwrap() + } + + #[test] + fn check_wasm_memories_no_memory() { + let limits = WasmLimits::default(); + let wasm = wat::parse_str("(module)").unwrap(); + match check_wasm_memories(&ParsedWasm::parse(&wasm).unwrap(), &limits) { + Err(VmError::StaticValidationErr { msg, .. 
}) => { + assert!(msg.starts_with("Wasm contract must contain exactly one memory")); + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("Didn't reject wasm with invalid api"), + } + } + + #[test] + fn check_wasm_memories_two_memories() { + // Generated manually because wat2wasm protects us from creating such Wasm: + // "error: only one memory block allowed" + let wasm = hex::decode(concat!( + "0061736d", // magic bytes + "01000000", // binary version (uint32) + "05", // section type (memory) + "05", // section length + "02", // number of memories + "0009", // element of type "resizable_limits", min=9, max=unset + "0009", // element of type "resizable_limits", min=9, max=unset + )) + .unwrap(); + + // wrong number of memories should be caught by the validator + match ParsedWasm::parse(&wasm) { + Err(VmError::StaticValidationErr { msg, .. }) => { + assert!(msg.contains("multiple memories")); + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("Didn't reject wasm with invalid api"), + } + } + + #[test] + fn check_wasm_memories_zero_memories() { + // Generated manually because wat2wasm would not create an empty memory section + let wasm = hex::decode(concat!( + "0061736d", // magic bytes + "01000000", // binary version (uint32) + "05", // section type (memory) + "01", // section length + "00", // number of memories + )) + .unwrap(); + + match check_wasm_memories(&ParsedWasm::parse(&wasm).unwrap(), &WasmLimits::default()) { + Err(VmError::StaticValidationErr { msg, .. 
}) => { + assert!(msg.starts_with("Wasm contract must contain exactly one memory")); + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("Didn't reject wasm with invalid api"), + } + } + + #[test] + fn check_wasm_memories_initial_size() { + let limits = WasmLimits::default(); + let wasm_ok = wat::parse_str("(module (memory 512))").unwrap(); + check_wasm_memories(&ParsedWasm::parse(&wasm_ok).unwrap(), &limits).unwrap(); + + let wasm_too_big = wat::parse_str("(module (memory 513))").unwrap(); + match check_wasm_memories(&ParsedWasm::parse(&wasm_too_big).unwrap(), &limits) { + Err(VmError::StaticValidationErr { msg, .. }) => { + assert!(msg.starts_with("Wasm contract memory's minimum must not exceed 512 pages")); + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("Didn't reject wasm with invalid api"), + } + } + + #[test] + fn check_wasm_memories_maximum_size() { + let wasm_max = wat::parse_str("(module (memory 1 5))").unwrap(); + match check_wasm_memories( + &ParsedWasm::parse(&wasm_max).unwrap(), + &WasmLimits::default(), + ) { + Err(VmError::StaticValidationErr { msg, .. 
}) => { + assert!(msg.starts_with("Wasm contract memory's maximum must be unset")); + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("Didn't reject wasm with invalid api"), + } + } + + #[test] + fn check_interface_version_works() { + // valid + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + (export "allocate" (func 0)) + (export "interface_version_8" (func 0)) + (export "deallocate" (func 0)) + (export "instantiate" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + check_interface_version(&module).unwrap(); + + // missing + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + (export "allocate" (func 0)) + (export "deallocate" (func 0)) + (export "instantiate" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_interface_version(&module).unwrap_err() { + VmError::StaticValidationErr { msg, .. } => { + assert_eq!( + msg, + "Wasm contract missing a required marker export: interface_version_*" + ); + } + err => panic!("Unexpected error {err:?}"), + } + + // multiple + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + (export "allocate" (func 0)) + (export "interface_version_8" (func 0)) + (export "interface_version_9" (func 0)) + (export "deallocate" (func 0)) + (export "instantiate" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_interface_version(&module).unwrap_err() { + VmError::StaticValidationErr { msg, .. 
} => { + assert_eq!( + msg, + "Wasm contract contains more than one marker export: interface_version_*" + ); + } + err => panic!("Unexpected error {err:?}"), + } + + // CosmWasm 0.15 + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + (export "allocate" (func 0)) + (export "interface_version_6" (func 0)) + (export "deallocate" (func 0)) + (export "instantiate" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_interface_version(&module).unwrap_err() { + VmError::StaticValidationErr { msg, .. } => { + assert_eq!(msg, "Wasm contract has unknown interface_version_* marker export (see https://github.com/CosmWasm/cosmwasm/blob/main/packages/vm/README.md)"); + } + err => panic!("Unexpected error {err:?}"), + } + + // Unknown value + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + (export "allocate" (func 0)) + (export "interface_version_broken" (func 0)) + (export "deallocate" (func 0)) + (export "instantiate" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_interface_version(&module).unwrap_err() { + VmError::StaticValidationErr { msg, .. 
} => { + assert_eq!(msg, "Wasm contract has unknown interface_version_* marker export (see https://github.com/CosmWasm/cosmwasm/blob/main/packages/vm/README.md)"); + } + err => panic!("Unexpected error {err:?}"), + } + } + + #[test] + fn check_wasm_exports_works() { + // valid + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + (export "allocate" (func 0)) + (export "deallocate" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + check_wasm_exports(&module, Off).unwrap(); + + // this is invalid, as it doesn't any required export + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_wasm_exports(&module, Off) { + Err(VmError::StaticValidationErr { msg, .. }) => { + assert!(msg.starts_with("Wasm contract doesn't have required export: \"allocate\"")); + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("Didn't reject wasm with invalid api"), + } + + // this is invalid, as it doesn't contain all required exports + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "add_one" (func 0)) + (export "allocate" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_wasm_exports(&module, Off) { + Err(VmError::StaticValidationErr { msg, .. 
}) => { + assert!( + msg.starts_with("Wasm contract doesn't have required export: \"deallocate\"") + ); + } + Err(e) => panic!("Unexpected error {e:?}"), + Ok(_) => panic!("Didn't reject wasm with invalid api"), + } + } + + #[test] + fn check_wasm_imports_ok() { + let wasm = wat::parse_str( + r#"(module + (import "env" "db_read" (func (param i32 i32) (result i32))) + (import "env" "db_write" (func (param i32 i32) (result i32))) + (import "env" "db_remove" (func (param i32) (result i32))) + (import "env" "addr_validate" (func (param i32) (result i32))) + (import "env" "addr_canonicalize" (func (param i32 i32) (result i32))) + (import "env" "addr_humanize" (func (param i32 i32) (result i32))) + (import "env" "secp256k1_verify" (func (param i32 i32 i32) (result i32))) + (import "env" "secp256k1_recover_pubkey" (func (param i32 i32 i32) (result i64))) + (import "env" "secp256r1_verify" (func (param i32 i32 i32) (result i32))) + (import "env" "secp256r1_recover_pubkey" (func (param i32 i32 i32) (result i64))) + (import "env" "ed25519_verify" (func (param i32 i32 i32) (result i32))) + (import "env" "ed25519_batch_verify" (func (param i32 i32 i32) (result i32))) + )"#, + ) + .unwrap(); + check_wasm_imports( + &ParsedWasm::parse(&wasm).unwrap(), + SUPPORTED_IMPORTS, + &WasmLimits::default(), + Off, + ) + .unwrap(); + } + + #[test] + fn check_wasm_imports_exceeds_limit() { + let wasm = wat::parse_str( + r#"(module + (import "env" "db_write" (func (param i32 i32) (result i32))) + (import "env" "db_remove" (func (param i32) (result i32))) + (import "env" "addr_validate" (func (param i32) (result i32))) + (import "env" "addr_canonicalize" (func (param i32 i32) (result i32))) + (import "env" "addr_humanize" (func (param i32 i32) (result i32))) + (import "env" "secp256k1_verify" (func (param i32 i32 i32) (result i32))) + (import "env" "secp256k1_recover_pubkey" (func (param i32 i32 i32) (result i64))) + (import "env" "secp256r1_verify" (func (param i32 i32 i32) (result i32))) + 
(import "env" "secp256r1_recover_pubkey" (func (param i32 i32 i32) (result i64))) + (import "env" "ed25519_verify" (func (param i32 i32 i32) (result i32))) + (import "env" "ed25519_batch_verify" (func (param i32 i32 i32) (result i32))) + (import "env" "spam01" (func (param i32 i32) (result i32))) + (import "env" "spam02" (func (param i32 i32) (result i32))) + (import "env" "spam03" (func (param i32 i32) (result i32))) + (import "env" "spam04" (func (param i32 i32) (result i32))) + (import "env" "spam05" (func (param i32 i32) (result i32))) + (import "env" "spam06" (func (param i32 i32) (result i32))) + (import "env" "spam07" (func (param i32 i32) (result i32))) + (import "env" "spam08" (func (param i32 i32) (result i32))) + (import "env" "spam09" (func (param i32 i32) (result i32))) + (import "env" "spam10" (func (param i32 i32) (result i32))) + (import "env" "spam11" (func (param i32 i32) (result i32))) + (import "env" "spam12" (func (param i32 i32) (result i32))) + (import "env" "spam13" (func (param i32 i32) (result i32))) + (import "env" "spam14" (func (param i32 i32) (result i32))) + (import "env" "spam15" (func (param i32 i32) (result i32))) + (import "env" "spam16" (func (param i32 i32) (result i32))) + (import "env" "spam17" (func (param i32 i32) (result i32))) + (import "env" "spam18" (func (param i32 i32) (result i32))) + (import "env" "spam19" (func (param i32 i32) (result i32))) + (import "env" "spam20" (func (param i32 i32) (result i32))) + (import "env" "spam21" (func (param i32 i32) (result i32))) + (import "env" "spam22" (func (param i32 i32) (result i32))) + (import "env" "spam23" (func (param i32 i32) (result i32))) + (import "env" "spam24" (func (param i32 i32) (result i32))) + (import "env" "spam25" (func (param i32 i32) (result i32))) + (import "env" "spam26" (func (param i32 i32) (result i32))) + (import "env" "spam27" (func (param i32 i32) (result i32))) + (import "env" "spam28" (func (param i32 i32) (result i32))) + (import "env" "spam29" 
(func (param i32 i32) (result i32))) + (import "env" "spam30" (func (param i32 i32) (result i32))) + (import "env" "spam31" (func (param i32 i32) (result i32))) + (import "env" "spam32" (func (param i32 i32) (result i32))) + (import "env" "spam33" (func (param i32 i32) (result i32))) + (import "env" "spam34" (func (param i32 i32) (result i32))) + (import "env" "spam35" (func (param i32 i32) (result i32))) + (import "env" "spam36" (func (param i32 i32) (result i32))) + (import "env" "spam37" (func (param i32 i32) (result i32))) + (import "env" "spam38" (func (param i32 i32) (result i32))) + (import "env" "spam39" (func (param i32 i32) (result i32))) + (import "env" "spam40" (func (param i32 i32) (result i32))) + (import "env" "spam41" (func (param i32 i32) (result i32))) + (import "env" "spam42" (func (param i32 i32) (result i32))) + (import "env" "spam43" (func (param i32 i32) (result i32))) + (import "env" "spam44" (func (param i32 i32) (result i32))) + (import "env" "spam45" (func (param i32 i32) (result i32))) + (import "env" "spam46" (func (param i32 i32) (result i32))) + (import "env" "spam47" (func (param i32 i32) (result i32))) + (import "env" "spam48" (func (param i32 i32) (result i32))) + (import "env" "spam49" (func (param i32 i32) (result i32))) + (import "env" "spam50" (func (param i32 i32) (result i32))) + (import "env" "spam51" (func (param i32 i32) (result i32))) + (import "env" "spam52" (func (param i32 i32) (result i32))) + (import "env" "spam53" (func (param i32 i32) (result i32))) + (import "env" "spam54" (func (param i32 i32) (result i32))) + (import "env" "spam55" (func (param i32 i32) (result i32))) + (import "env" "spam56" (func (param i32 i32) (result i32))) + (import "env" "spam57" (func (param i32 i32) (result i32))) + (import "env" "spam58" (func (param i32 i32) (result i32))) + (import "env" "spam59" (func (param i32 i32) (result i32))) + (import "env" "spam60" (func (param i32 i32) (result i32))) + (import "env" "spam61" (func (param 
i32 i32) (result i32))) + (import "env" "spam62" (func (param i32 i32) (result i32))) + (import "env" "spam63" (func (param i32 i32) (result i32))) + (import "env" "spam64" (func (param i32 i32) (result i32))) + (import "env" "spam65" (func (param i32 i32) (result i32))) + (import "env" "spam66" (func (param i32 i32) (result i32))) + (import "env" "spam67" (func (param i32 i32) (result i32))) + (import "env" "spam68" (func (param i32 i32) (result i32))) + (import "env" "spam69" (func (param i32 i32) (result i32))) + (import "env" "spam70" (func (param i32 i32) (result i32))) + (import "env" "spam71" (func (param i32 i32) (result i32))) + (import "env" "spam72" (func (param i32 i32) (result i32))) + (import "env" "spam73" (func (param i32 i32) (result i32))) + (import "env" "spam74" (func (param i32 i32) (result i32))) + (import "env" "spam75" (func (param i32 i32) (result i32))) + (import "env" "spam76" (func (param i32 i32) (result i32))) + (import "env" "spam77" (func (param i32 i32) (result i32))) + (import "env" "spam78" (func (param i32 i32) (result i32))) + (import "env" "spam79" (func (param i32 i32) (result i32))) + (import "env" "spam80" (func (param i32 i32) (result i32))) + (import "env" "spam81" (func (param i32 i32) (result i32))) + (import "env" "spam82" (func (param i32 i32) (result i32))) + (import "env" "spam83" (func (param i32 i32) (result i32))) + (import "env" "spam84" (func (param i32 i32) (result i32))) + (import "env" "spam85" (func (param i32 i32) (result i32))) + (import "env" "spam86" (func (param i32 i32) (result i32))) + (import "env" "spam87" (func (param i32 i32) (result i32))) + (import "env" "spam88" (func (param i32 i32) (result i32))) + (import "env" "spam89" (func (param i32 i32) (result i32))) + (import "env" "spam90" (func (param i32 i32) (result i32))) + )"#, + ) + .unwrap(); + let err = check_wasm_imports( + &ParsedWasm::parse(&wasm).unwrap(), + SUPPORTED_IMPORTS, + &WasmLimits::default(), + Off, + ) + .unwrap_err(); + match 
err { + VmError::StaticValidationErr { msg, .. } => { + assert_eq!(msg, "Import count exceeds limit. Imports: 101. Limit: 100."); + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn check_wasm_imports_missing() { + let wasm = wat::parse_str( + r#"(module + (import "env" "foo" (func (param i32 i32) (result i32))) + (import "env" "bar" (func (param i32 i32) (result i32))) + (import "env" "spammyspam01" (func (param i32 i32) (result i32))) + (import "env" "spammyspam02" (func (param i32 i32) (result i32))) + (import "env" "spammyspam03" (func (param i32 i32) (result i32))) + (import "env" "spammyspam04" (func (param i32 i32) (result i32))) + (import "env" "spammyspam05" (func (param i32 i32) (result i32))) + (import "env" "spammyspam06" (func (param i32 i32) (result i32))) + (import "env" "spammyspam07" (func (param i32 i32) (result i32))) + (import "env" "spammyspam08" (func (param i32 i32) (result i32))) + (import "env" "spammyspam09" (func (param i32 i32) (result i32))) + (import "env" "spammyspam10" (func (param i32 i32) (result i32))) + )"#, + ) + .unwrap(); + let supported_imports: &[&str] = &[ + "env.db_read", + "env.db_write", + "env.db_remove", + "env.addr_canonicalize", + "env.addr_humanize", + "env.debug", + "env.query_chain", + ]; + let result = check_wasm_imports( + &ParsedWasm::parse(&wasm).unwrap(), + supported_imports, + &WasmLimits::default(), + Off, + ); + match result.unwrap_err() { + VmError::StaticValidationErr { msg, .. } => { + println!("{msg}"); + assert_eq!( + msg, + r#"Wasm contract requires unsupported import: "env.foo". Required imports: {"env.bar", "env.foo", "env.spammyspam01", "env.spammyspam02", "env.spammyspam03", "env.spammyspam04", "env.spammyspam05", "env.spammyspam06", "env.spammyspam07", "env.spammyspam08", ... 2 more}. 
Available imports: ["env.db_read", "env.db_write", "env.db_remove", "env.addr_canonicalize", "env.addr_humanize", "env.debug", "env.query_chain"]."# + ); + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn check_wasm_imports_of_old_contract() { + let module = &ParsedWasm::parse(CONTRACT_0_7).unwrap(); + let result = check_wasm_imports(module, SUPPORTED_IMPORTS, &WasmLimits::default(), Off); + match result.unwrap_err() { + VmError::StaticValidationErr { msg, .. } => { + assert!( + msg.starts_with("Wasm contract requires unsupported import: \"env.read_db\"") + ); + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn check_wasm_imports_wrong_type() { + let wasm = wat::parse_str(r#"(module (import "env" "db_read" (memory 1 1)))"#).unwrap(); + let result = check_wasm_imports( + &ParsedWasm::parse(&wasm).unwrap(), + SUPPORTED_IMPORTS, + &WasmLimits::default(), + Off, + ); + match result.unwrap_err() { + VmError::StaticValidationErr { msg, .. } => { + assert!( + msg.starts_with("Wasm contract requires non-function import: \"env.db_read\"") + ); + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn check_wasm_capabilities_ok() { + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "requires_water" (func 0)) + (export "requires_" (func 0)) + (export "requires_nutrients" (func 0)) + (export "require_milk" (func 0)) + (export "REQUIRES_air" (func 0)) + (export "requires_sun" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + let available = [ + "water".to_string(), + "nutrients".to_string(), + "sun".to_string(), + "freedom".to_string(), + ] + .into_iter() + .collect(); + check_wasm_capabilities(&module, &available, Off).unwrap(); + } + + #[test] + fn check_wasm_capabilities_fails_for_missing() { + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (export "requires_water" (func 0)) + (export "requires_" (func 0)) 
+ (export "requires_nutrients" (func 0)) + (export "require_milk" (func 0)) + (export "REQUIRES_air" (func 0)) + (export "requires_sun" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + + // Available set 1 + let available = [ + "water".to_string(), + "nutrients".to_string(), + "freedom".to_string(), + ] + .into_iter() + .collect(); + match check_wasm_capabilities(&module, &available, Off).unwrap_err() { + VmError::StaticValidationErr { msg, .. } => assert_eq!( + msg, + "Wasm contract requires unavailable capabilities: {\"sun\"}" + ), + _ => panic!("Got unexpected error"), + } + + // Available set 2 + let available = [ + "nutrients".to_string(), + "freedom".to_string(), + "Water".to_string(), // capabilities are case sensitive (and lowercase by convention) + ] + .into_iter() + .collect(); + match check_wasm_capabilities(&module, &available, Off).unwrap_err() { + VmError::StaticValidationErr { msg, .. } => assert_eq!( + msg, + "Wasm contract requires unavailable capabilities: {\"sun\", \"water\"}" + ), + _ => panic!("Got unexpected error"), + } + + // Available set 3 + let available = ["freedom".to_string()].into_iter().collect(); + match check_wasm_capabilities(&module, &available, Off).unwrap_err() { + VmError::StaticValidationErr { msg, .. } => assert_eq!( + msg, + "Wasm contract requires unavailable capabilities: {\"nutrients\", \"sun\", \"water\"}" + ), + _ => panic!("Got unexpected error"), + } + + // Available set 4 + let available = [].into_iter().collect(); + match check_wasm_capabilities(&module, &available, Off).unwrap_err() { + VmError::StaticValidationErr { msg, .. 
} => assert_eq!( + msg, + "Wasm contract requires unavailable capabilities: {\"nutrients\", \"sun\", \"water\"}" + ), + _ => panic!("Got unexpected error"), + } + } + + #[test] + fn check_wasm_fails_for_big_functions() { + let limits = WasmLimits::default(); + // too many arguments + let args = " i32".repeat(limits.max_function_params() + 1); + let wasm = wat::parse_str(format!( + r#"(module + (type (func (param {args}))) + (func (type 0) nop) + )"# + )) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + + match check_wasm_functions(&module, &limits, Off).unwrap_err() { + VmError::StaticValidationErr { msg, .. } => assert_eq!( + msg, + "Wasm contract contains function with more than 100 parameters" + ), + _ => panic!("Got unexpected error"), + } + + // too many returns + let return_types = " i32".repeat(limits.max_function_results() + 1); + let returns = " i32.const 42".repeat(limits.max_function_results() + 1); + let wasm = wat::parse_str(format!( + r#"(module + (type (func (result {return_types}))) + (func (type 0) {returns}) + )"# + )) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_wasm_functions(&module, &limits, Off).unwrap_err() { + VmError::StaticValidationErr { msg, .. } => assert_eq!( + msg, + "Wasm contract contains function with more than 1 results" + ), + _ => panic!("Got unexpected error"), + } + + // too many functions + let functions = "(func (type 0) nop)".repeat(limits.max_functions() + 1); + let wasm = wat::parse_str(format!( + r#"(module + (type (func)) + {functions} + )"# + )) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + match check_wasm_functions(&module, &limits, Off).unwrap_err() { + VmError::StaticValidationErr { msg, .. 
} => { + assert_eq!(msg, "Wasm contract contains more than 20000 functions") + } + _ => panic!("Got unexpected error"), + } + } +} diff --git a/vm/src/config.rs b/vm/src/config.rs new file mode 100644 index 000000000..088dab234 --- /dev/null +++ b/vm/src/config.rs @@ -0,0 +1,155 @@ +use std::{collections::HashSet, path::PathBuf}; + +use serde::{Deserialize, Serialize}; + +use crate::Size; + +const DEFAULT_MEMORY_LIMIT: u32 = 512; // in pages +/// As of March 2023, on Juno mainnet the largest value for production contracts +/// is 485. Most are between 100 and 300. +const DEFAULT_TABLE_SIZE_LIMIT: u32 = 2500; // entries + +/// We keep this number high since failing early gives less detailed error messages. Especially +/// when a user accidentally includes wasm-bindgen, they get a bunch of unsupported imports. +const DEFAULT_MAX_IMPORTS: usize = 100; + +const DEFAULT_MAX_FUNCTIONS: usize = 20_000; + +const DEFAULT_MAX_FUNCTION_PARAMS: usize = 100; + +const DEFAULT_MAX_TOTAL_FUNCTION_PARAMS: usize = 10_000; + +const DEFAULT_MAX_FUNCTION_RESULTS: usize = 1; + +/// Various configurations for the VM. +#[derive(Clone, Debug, Serialize, Deserialize)] +#[non_exhaustive] +pub struct Config { + /// Configuration for limitations placed on Wasm files. + /// This defines a few limits on the Wasm file that are checked during static validation before + /// storing the Wasm file. + pub wasm_limits: WasmLimits, + + /// Configuration for the cache. + pub cache: CacheOptions, +} + +impl Config { + pub fn new(cache: CacheOptions) -> Self { + Self { + wasm_limits: WasmLimits::default(), + cache, + } + } +} + +/// Limits for static validation of Wasm files. These are checked before storing the Wasm file. +/// All limits are optional because they are coming from the Go-side and have default values. +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +#[non_exhaustive] +pub struct WasmLimits { + /// Maximum number of memory pages that a module can request. 
+ /// + /// Every Wasm memory has an initial size and an optional maximum size, + /// both measured in Wasm pages. This limit applies to the initial size. + pub initial_memory_limit_pages: Option, + /// The upper limit for the `max` value of each table. CosmWasm contracts have + /// initial=max for 1 table. See + /// + /// ```plain + /// $ wasm-objdump --section=table -x packages/vm/testdata/hackatom.wasm + /// Section Details: + /// + /// Table[1]: + /// - table[0] type=funcref initial=161 max=161 + /// ``` + /// + pub table_size_limit_elements: Option, + /// If the contract has more than this amount of imports, it will be rejected + /// during static validation before even looking into the imports. + pub max_imports: Option, + + /// The maximum number of functions a contract can have. + /// Any contract with more functions than this will be rejected during static validation. + pub max_functions: Option, + + /// The maximum number of parameters a Wasm function can have. + pub max_function_params: Option, + /// The maximum total number of parameters of all functions in the Wasm. + /// For each function in the Wasm, take the number of parameters and sum all of these up. + /// If that sum exceeds this limit, the Wasm will be rejected during static validation. + /// + /// Be careful when adjusting this limit, as it prevents an attack where a small Wasm file + /// explodes in size when compiled. + pub max_total_function_params: Option, + + /// The maximum number of results a Wasm function type can have. 
+ pub max_function_results: Option, +} + +impl WasmLimits { + pub fn initial_memory_limit_pages(&self) -> u32 { + self.initial_memory_limit_pages + .unwrap_or(DEFAULT_MEMORY_LIMIT) + } + + pub fn table_size_limit_elements(&self) -> u32 { + self.table_size_limit_elements + .unwrap_or(DEFAULT_TABLE_SIZE_LIMIT) + } + + pub fn max_imports(&self) -> usize { + self.max_imports.unwrap_or(DEFAULT_MAX_IMPORTS) + } + + pub fn max_functions(&self) -> usize { + self.max_functions.unwrap_or(DEFAULT_MAX_FUNCTIONS) + } + + pub fn max_function_params(&self) -> usize { + self.max_function_params + .unwrap_or(DEFAULT_MAX_FUNCTION_PARAMS) + } + + pub fn max_total_function_params(&self) -> usize { + self.max_total_function_params + .unwrap_or(DEFAULT_MAX_TOTAL_FUNCTION_PARAMS) + } + + pub fn max_function_results(&self) -> usize { + self.max_function_results + .unwrap_or(DEFAULT_MAX_FUNCTION_RESULTS) + } +} + +#[derive(Clone, Debug, Serialize, Deserialize)] +#[non_exhaustive] +pub struct CacheOptions { + /// The base directory of this cache. + /// + /// If this does not exist, it will be created. Not sure if this behaviour + /// is desired but wasmd relies on it. + pub base_dir: PathBuf, + pub available_capabilities: HashSet, + /// Memory limit for the cache, in bytes. + pub memory_cache_size_bytes: Size, + /// Memory limit for instances, in bytes. Use a value that is divisible by the Wasm page size 65536, + /// e.g. full MiBs. 
+ pub instance_memory_limit_bytes: Size, +} + +impl CacheOptions { + pub fn new( + base_dir: impl Into, + available_capabilities: impl Into>, + memory_cache_size_bytes: Size, + instance_memory_limit_bytes: Size, + ) -> Self { + Self { + base_dir: base_dir.into(), + available_capabilities: available_capabilities.into(), + memory_cache_size_bytes, + instance_memory_limit_bytes, + } + } +} diff --git a/vm/src/conversion.rs b/vm/src/conversion.rs new file mode 100644 index 000000000..83ef6f867 --- /dev/null +++ b/vm/src/conversion.rs @@ -0,0 +1,197 @@ +use std::any::type_name; + +use crate::errors::{VmError, VmResult}; + +/// Safely converts input of type T to u32. +/// Errors with a cosmwasm_vm::errors::VmError::ConversionErr if conversion cannot be done. +pub fn to_u32 + ToString + Copy>(input: T) -> VmResult { + input.try_into().map_err(|_| { + VmError::conversion_err(type_name::(), type_name::(), input.to_string()) + }) +} + +/// Safely converts input of type &T to u32. +/// Errors with a cosmwasm_vm::errors::VmError::ConversionErr if conversion cannot be done. +pub fn ref_to_u32 + ToString + Clone>(input: &T) -> VmResult { + input.clone().try_into().map_err(|_| { + VmError::conversion_err(type_name::(), type_name::(), input.to_string()) + }) +} + +/// Safely converts input of type T to i32. +/// Errors with a cosmwasm_vm::errors::VmError::ConversionErr if conversion cannot be done. 
+/// +/// Used in tests and in iterator, but not with default build +#[allow(dead_code)] +pub fn to_i32 + ToString + Copy>(input: T) -> VmResult { + input.try_into().map_err(|_| { + VmError::conversion_err(type_name::(), type_name::(), input.to_string()) + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn to_u32_works_for_usize() { + assert_eq!(to_u32(0usize).unwrap(), 0); + assert_eq!(to_u32(1usize).unwrap(), 1); + assert_eq!(to_u32(2147483647usize).unwrap(), 2147483647); + assert_eq!(to_u32(2147483648usize).unwrap(), 2147483648); + assert_eq!(to_u32(4294967295usize).unwrap(), 4294967295); + // Gate required for Rust 1.77.0 in Linux, possibly a Rust/clippy regression bug + #[cfg(target_pointer_width = "64")] + match to_u32(4294967296usize) { + Err(VmError::ConversionErr { + from_type, + to_type, + input, + .. + }) => { + assert_eq!(from_type, "usize"); + assert_eq!(to_type, "u32"); + assert_eq!(input, "4294967296"); + } + Err(err) => panic!("unexpected error: {err:?}"), + Ok(_) => panic!("must not succeed"), + }; + } + + #[test] + fn to_u32_works_for_u64() { + assert_eq!(to_u32(0u64).unwrap(), 0); + assert_eq!(to_u32(1u64).unwrap(), 1); + assert_eq!(to_u32(2147483647u64).unwrap(), 2147483647); + assert_eq!(to_u32(2147483648u64).unwrap(), 2147483648); + assert_eq!(to_u32(4294967295u64).unwrap(), 4294967295); + + match to_u32(4294967296u64) { + Err(VmError::ConversionErr { + from_type, + to_type, + input, + .. + }) => { + assert_eq!(from_type, "u64"); + assert_eq!(to_type, "u32"); + assert_eq!(input, "4294967296"); + } + Err(err) => panic!("unexpected error: {err:?}"), + Ok(_) => panic!("must not succeed"), + }; + } + + #[test] + fn to_u32_works_for_i32() { + assert_eq!(to_u32(0i32).unwrap(), 0); + assert_eq!(to_u32(1i32).unwrap(), 1); + assert_eq!(to_u32(2147483647i32).unwrap(), 2147483647); + + match to_u32(-1i32) { + Err(VmError::ConversionErr { + from_type, + to_type, + input, + .. 
+ }) => { + assert_eq!(from_type, "i32"); + assert_eq!(to_type, "u32"); + assert_eq!(input, "-1"); + } + Err(err) => panic!("unexpected error: {err:?}"), + Ok(_) => panic!("must not succeed"), + }; + } + + #[test] + fn ref_to_u32_works() { + // i32 + assert_eq!(ref_to_u32(&0i32).unwrap(), 0); + assert_eq!(ref_to_u32(&1i32).unwrap(), 1); + assert_eq!(ref_to_u32(&2147483647i32).unwrap(), 2147483647); + match ref_to_u32(&-1i32).unwrap_err() { + VmError::ConversionErr { + from_type, + to_type, + input, + .. + } => { + assert_eq!(from_type, "i32"); + assert_eq!(to_type, "u32"); + assert_eq!(input, "-1"); + } + err => panic!("Unexpected error: {err:?}"), + }; + + // usize + assert_eq!(ref_to_u32(&0usize).unwrap(), 0); + assert_eq!(ref_to_u32(&1usize).unwrap(), 1); + assert_eq!(ref_to_u32(&2147483647usize).unwrap(), 2147483647); + assert_eq!(ref_to_u32(&2147483648usize).unwrap(), 2147483648); + assert_eq!(ref_to_u32(&4294967295usize).unwrap(), 4294967295); + // Gate required for Rust 1.77.0 in Linux, possibly a Rust/clippy regression bug + #[cfg(target_pointer_width = "64")] + match ref_to_u32(&4294967296usize).unwrap_err() { + VmError::ConversionErr { + from_type, + to_type, + input, + .. + } => { + assert_eq!(from_type, "usize"); + assert_eq!(to_type, "u32"); + assert_eq!(input, "4294967296"); + } + err => panic!("Unexpected error: {err:?}"), + }; + } + + #[test] + fn to_i32_works_for_usize() { + assert_eq!(to_i32(0usize).unwrap(), 0); + assert_eq!(to_i32(1usize).unwrap(), 1); + assert_eq!(to_i32(2147483647usize).unwrap(), 2147483647); + + match to_i32(2147483648usize) { + Err(VmError::ConversionErr { + from_type, + to_type, + input, + .. 
+ }) => { + assert_eq!(from_type, "usize"); + assert_eq!(to_type, "i32"); + assert_eq!(input, "2147483648"); + } + Err(err) => panic!("unexpected error: {err:?}"), + Ok(_) => panic!("must not succeed"), + }; + } + + #[test] + fn to_i32_works_for_i64() { + assert_eq!(to_i32(0i64).unwrap(), 0); + assert_eq!(to_i32(1i64).unwrap(), 1); + assert_eq!(to_i32(2147483647i64).unwrap(), 2147483647); + + assert_eq!(to_i32(-1i64).unwrap(), -1); + assert_eq!(to_i32(-2147483647i64).unwrap(), -2147483647); + assert_eq!(to_i32(-2147483648i64).unwrap(), -2147483648); + + match to_i32(-2147483649i64) { + Err(VmError::ConversionErr { + from_type, + to_type, + input, + .. + }) => { + assert_eq!(from_type, "i64"); + assert_eq!(to_type, "i32"); + assert_eq!(input, "-2147483649"); + } + Err(err) => panic!("unexpected error: {err:?}"), + Ok(_) => panic!("must not succeed"), + }; + } +} diff --git a/vm/src/environment.rs b/vm/src/environment.rs new file mode 100644 index 000000000..0da0d5c5c --- /dev/null +++ b/vm/src/environment.rs @@ -0,0 +1,981 @@ +//! Internal details to be used by instance.rs only +use std::borrow::BorrowMut; +use std::cell::RefCell; +use std::marker::PhantomData; +use std::ptr::NonNull; +use std::rc::Rc; +use std::sync::{Arc, RwLock}; + +use derivative::Derivative; +use wasmer::{AsStoreMut, Instance as WasmerInstance, Memory, MemoryView, Value}; +use wasmer_middlewares::metering::{get_remaining_points, set_remaining_points, MeteringPoints}; + +use crate::backend::{BackendApi, GasInfo, Querier, Storage}; +use crate::errors::{VmError, VmResult}; + +/// Keep this as low as necessary to avoid deepy nested errors like this: +/// +/// ```plain +/// RuntimeErr { msg: "Wasmer runtime error: RuntimeError: Error executing Wasm: Wasmer runtime error: RuntimeError: Error executing Wasm: Wasmer runtime error: RuntimeError: Error executing Wasm: Wasmer runtime error: RuntimeError: Error executing Wasm: Wasmer runtime error: RuntimeError: Maximum call depth exceeded." 
} +/// ``` +const MAX_CALL_DEPTH: usize = 2; + +/// Never can never be instantiated. +/// Replace this with the [never primitive type](https://doc.rust-lang.org/std/primitive.never.html) when stable. +#[derive(Debug)] +pub enum Never {} + +/** gas config data */ + +#[derive(Clone, PartialEq, Eq, Debug)] +#[non_exhaustive] +pub struct GasConfig { + /// Gas costs of VM (not Backend) provided functionality + /// secp256k1 signature verification cost + pub secp256k1_verify_cost: u64, + /// secp256k1 public key recovery cost + pub secp256k1_recover_pubkey_cost: u64, + /// secp256r1 signature verification cost + pub secp256r1_verify_cost: u64, + /// secp256r1 public key recovery cost + pub secp256r1_recover_pubkey_cost: u64, + /// ed25519 signature verification cost + pub ed25519_verify_cost: u64, + /// ed25519 batch signature verification cost + pub ed25519_batch_verify_cost: LinearGasCost, + /// ed25519 batch signature verification cost (single public key) + pub ed25519_batch_verify_one_pubkey_cost: LinearGasCost, + /// bls12-381 aggregate cost (g1) + pub bls12_381_aggregate_g1_cost: LinearGasCost, + /// bls12-381 aggregate cost (g2) + pub bls12_381_aggregate_g2_cost: LinearGasCost, + /// bls12-381 hash to g1 cost + pub bls12_381_hash_to_g1_cost: u64, + /// bls12-381 hash to g2 cost + pub bls12_381_hash_to_g2_cost: u64, + /// bls12-381 pairing equality check cost + pub bls12_381_pairing_equality_cost: LinearGasCost, +} + +impl Default for GasConfig { + fn default() -> Self { + // Target is 10^12 per second (see GAS.md), i.e. 10^6 gas per µ second. 
+ const GAS_PER_US: u64 = 1_000_000; + Self { + // ~96 us in crypto benchmarks + secp256k1_verify_cost: 96 * GAS_PER_US, + // ~194 us in crypto benchmarks + secp256k1_recover_pubkey_cost: 194 * GAS_PER_US, + // ~279 us in crypto benchmarks + secp256r1_verify_cost: 279 * GAS_PER_US, + // ~592 us in crypto benchmarks + secp256r1_recover_pubkey_cost: 592 * GAS_PER_US, + // ~35 us in crypto benchmarks + ed25519_verify_cost: 35 * GAS_PER_US, + // Calculated based on the benchmark results for `ed25519_batch_verify_{x}`. + ed25519_batch_verify_cost: LinearGasCost { + base: 24 * GAS_PER_US, + per_item: 21 * GAS_PER_US, + }, + // Calculated based on the benchmark results for `ed25519_batch_verify_one_pubkey_{x}`. + ed25519_batch_verify_one_pubkey_cost: LinearGasCost { + base: 36 * GAS_PER_US, + per_item: 10 * GAS_PER_US, + }, + // just assume the production machines have more than 4 cores, so we can half that + bls12_381_aggregate_g1_cost: LinearGasCost { + base: 136 * GAS_PER_US / 2, + per_item: 24 * GAS_PER_US / 2, + }, + bls12_381_aggregate_g2_cost: LinearGasCost { + base: 207 * GAS_PER_US / 2, + per_item: 49 * GAS_PER_US / 2, + }, + bls12_381_hash_to_g1_cost: 563 * GAS_PER_US, + bls12_381_hash_to_g2_cost: 871 * GAS_PER_US, + bls12_381_pairing_equality_cost: LinearGasCost { + base: 2112 * GAS_PER_US, + per_item: 163 * GAS_PER_US, + }, + } + } +} + +/// Linear gas cost model where the cost is linear in the number of items. +/// +/// To calculate it, you sample the cost for a few different amounts of items and fit a line to it. +/// Let `b` be that line of best fit. Then `base = b(0)` is the y-intercept and +/// `per_item = b(1) - b(0)` the slope. +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct LinearGasCost { + /// This is a flat part of the cost, charged once per batch. + base: u64, + /// This is the cost per item in the batch. 
+ per_item: u64, +} + +impl LinearGasCost { + pub fn total_cost(&self, items: u64) -> u64 { + self.base + self.per_item * items + } +} + +/** context data **/ + +#[derive(Clone, PartialEq, Eq, Debug, Default)] +pub struct GasState { + /// Gas limit for the computation, including internally and externally used gas. + /// This is set when the Environment is created and never mutated. + /// + /// Measured in [CosmWasm gas](https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md). + pub gas_limit: u64, + /// Tracking the gas used in the Cosmos SDK, in CosmWasm gas units. + pub externally_used_gas: u64, +} + +impl GasState { + fn with_limit(gas_limit: u64) -> Self { + Self { + gas_limit, + externally_used_gas: 0, + } + } +} + +/// Additional environmental information in a debug call. +/// +/// The currently unused lifetime parameter 'a allows accessing referenced data in the debug implementation +/// without cloning it. +#[derive(Derivative)] +#[derivative(Debug)] +#[non_exhaustive] +pub struct DebugInfo<'a> { + pub gas_remaining: u64, + // This field is just to allow us to add the unused lifetime parameter. It can be removed + // at any time. + #[doc(hidden)] + #[derivative(Debug = "ignore")] + pub(crate) __lifetime: PhantomData<&'a ()>, +} + +// Unfortunately we cannot create an alias for the trait (https://github.com/rust-lang/rust/issues/41517). +// So we need to copy it in a few places. +// +// /- BEGIN TRAIT END TRAIT \ +// | | +// v v +pub type DebugHandlerFn = dyn for<'a, 'b> FnMut(/* msg */ &'a str, DebugInfo<'b>); + +/// A environment that provides access to the ContextData. +/// The environment is cloneable but clones access the same underlying data. 
+pub struct Environment { + pub memory: Option, + pub api: A, + pub gas_config: GasConfig, + data: Arc>>, +} + +unsafe impl Send for Environment {} + +unsafe impl Sync for Environment {} + +impl Clone for Environment { + fn clone(&self) -> Self { + Environment { + memory: None, + api: self.api.clone(), + gas_config: self.gas_config.clone(), + data: self.data.clone(), + } + } +} + +impl Environment { + pub fn new(api: A, gas_limit: u64) -> Self { + Environment { + memory: None, + api, + gas_config: GasConfig::default(), + data: Arc::new(RwLock::new(ContextData::new(gas_limit))), + } + } + + pub fn set_debug_handler(&self, debug_handler: Option>>) { + self.with_context_data_mut(|context_data| { + context_data.debug_handler = debug_handler; + }) + } + + pub fn debug_handler(&self) -> Option>> { + self.with_context_data(|context_data| { + // This clone here requires us to wrap the function in Rc instead of Box + context_data.debug_handler.clone() + }) + } + + fn with_context_data_mut(&self, callback: C) -> R + where + C: FnOnce(&mut ContextData) -> R, + { + let mut guard = self.data.as_ref().write().unwrap(); + let context_data = guard.borrow_mut(); + callback(context_data) + } + + fn with_context_data(&self, callback: C) -> R + where + C: FnOnce(&ContextData) -> R, + { + let guard = self.data.as_ref().read().unwrap(); + callback(&guard) + } + + pub fn with_gas_state(&self, callback: C) -> R + where + C: FnOnce(&GasState) -> R, + { + self.with_context_data(|context_data| callback(&context_data.gas_state)) + } + + pub fn with_gas_state_mut(&self, callback: C) -> R + where + C: FnOnce(&mut GasState) -> R, + { + self.with_context_data_mut(|context_data| callback(&mut context_data.gas_state)) + } + + pub fn with_wasmer_instance(&self, callback: C) -> VmResult + where + C: FnOnce(&WasmerInstance) -> VmResult, + { + self.with_context_data(|context_data| match context_data.wasmer_instance { + Some(instance_ptr) => { + let instance_ref = unsafe { instance_ptr.as_ref() }; + 
callback(instance_ref) + } + None => Err(VmError::uninitialized_context_data("wasmer_instance")), + }) + } + + /// Calls a function with the given name and arguments. + /// The number of return values is variable and controlled by the guest. + /// Usually we expect 0 or 1 return values. Use [`Self::call_function0`] + /// or [`Self::call_function1`] to ensure the number of return values is checked. + fn call_function( + &self, + store: &mut impl AsStoreMut, + name: &str, + args: &[Value], + ) -> VmResult> { + // Clone function before calling it to avoid dead locks + let func = self.with_wasmer_instance(|instance| { + let func = instance.exports.get_function(name)?; + Ok(func.clone()) + })?; + let function_arity = func.param_arity(store); + if args.len() != function_arity { + return Err(VmError::function_arity_mismatch(function_arity)); + }; + self.increment_call_depth()?; + let res = func.call(store, args).map_err(|runtime_err| -> VmError { + self.with_wasmer_instance::<_, Never>(|instance| { + let err: VmError = match get_remaining_points(store, instance) { + MeteringPoints::Remaining(_) => VmError::from(runtime_err), + MeteringPoints::Exhausted => VmError::gas_depletion(), + }; + Err(err) + }) + .unwrap_err() // with_wasmer_instance can only succeed if the callback succeeds + }); + self.decrement_call_depth(); + res + } + + pub fn call_function0( + &self, + store: &mut impl AsStoreMut, + name: &str, + args: &[Value], + ) -> VmResult<()> { + let result = self.call_function(store, name, args)?; + let expected = 0; + let actual = result.len(); + if actual != expected { + return Err(VmError::result_mismatch(name, expected, actual)); + } + Ok(()) + } + + pub fn call_function1( + &self, + store: &mut impl AsStoreMut, + name: &str, + args: &[Value], + ) -> VmResult { + let result = self.call_function(store, name, args)?; + let expected = 1; + let actual = result.len(); + if actual != expected { + return Err(VmError::result_mismatch(name, expected, actual)); + } + 
Ok(result[0].clone()) + } + + pub fn with_storage_from_context(&self, callback: C) -> VmResult + where + C: FnOnce(&mut S) -> VmResult, + { + self.with_context_data_mut(|context_data| match context_data.storage.as_mut() { + Some(data) => callback(data), + None => Err(VmError::uninitialized_context_data("storage")), + }) + } + + pub fn with_querier_from_context(&self, callback: C) -> VmResult + where + C: FnOnce(&mut Q) -> VmResult, + { + self.with_context_data_mut(|context_data| match context_data.querier.as_mut() { + Some(querier) => callback(querier), + None => Err(VmError::uninitialized_context_data("querier")), + }) + } + + /// Creates a back reference from a contact to its partent instance + pub fn set_wasmer_instance(&self, wasmer_instance: Option>) { + self.with_context_data_mut(|context_data| { + context_data.wasmer_instance = wasmer_instance; + }); + } + + /// Returns true iff the storage is set to readonly mode + pub fn is_storage_readonly(&self) -> bool { + self.with_context_data(|context_data| context_data.storage_readonly) + } + + pub fn set_storage_readonly(&self, new_value: bool) { + self.with_context_data_mut(|context_data| { + context_data.storage_readonly = new_value; + }) + } + + /// Increments the call depth by 1 and returns the new value + pub fn increment_call_depth(&self) -> VmResult { + let new = self.with_context_data_mut(|context_data| { + let new = context_data.call_depth + 1; + context_data.call_depth = new; + new + }); + if new > MAX_CALL_DEPTH { + return Err(VmError::max_call_depth_exceeded()); + } + Ok(new) + } + + /// Decrements the call depth by 1 and returns the new value + pub fn decrement_call_depth(&self) -> usize { + self.with_context_data_mut(|context_data| { + let new = context_data + .call_depth + .checked_sub(1) + .expect("Call depth < 0. This is a bug."); + context_data.call_depth = new; + new + }) + } + + /// Returns the remaining gas measured in [CosmWasm gas]. 
+ /// + /// [CosmWasm gas]: https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md + pub fn get_gas_left(&self, store: &mut impl AsStoreMut) -> u64 { + self.with_wasmer_instance(|instance| { + Ok(match get_remaining_points(store, instance) { + MeteringPoints::Remaining(count) => count, + MeteringPoints::Exhausted => 0, + }) + }) + .expect("Wasmer instance is not set. This is a bug in the lifecycle.") + } + + /// Sets the remaining gas measured in [CosmWasm gas]. + /// + /// [CosmWasm gas]: https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md + pub fn set_gas_left(&self, store: &mut impl AsStoreMut, new_value: u64) { + self.with_wasmer_instance(|instance| { + set_remaining_points(store, instance, new_value); + Ok(()) + }) + .expect("Wasmer instance is not set. This is a bug in the lifecycle.") + } + + /// Decreases gas left by the given amount. + /// If the amount exceeds the available gas, the remaining gas is set to 0 and + /// an VmError::GasDepletion error is returned. + #[allow(unused)] // used in tests + pub fn decrease_gas_left(&self, store: &mut impl AsStoreMut, amount: u64) -> VmResult<()> { + self.with_wasmer_instance(|instance| { + let remaining = match get_remaining_points(store, instance) { + MeteringPoints::Remaining(count) => count, + MeteringPoints::Exhausted => 0, + }; + if amount > remaining { + set_remaining_points(store, instance, 0); + Err(VmError::gas_depletion()) + } else { + set_remaining_points(store, instance, remaining - amount); + Ok(()) + } + }) + } + + /// Creates a MemoryView. + /// This must be short living and not be used after the memory was grown. + pub fn memory<'a>(&self, store: &'a impl AsStoreMut) -> MemoryView<'a> { + self.memory + .as_ref() + .expect("Memory is not set. This is a bug in the lifecycle.") + .view(store) + } + + /// Moves owned instances of storage and querier into the env. + /// Should be followed by exactly one call to move_out when the instance is finished. 
+ pub fn move_in(&self, storage: S, querier: Q) { + self.with_context_data_mut(|context_data| { + context_data.storage = Some(storage); + context_data.querier = Some(querier); + }); + } + + /// Returns the original storage and querier as owned instances, and closes any remaining + /// iterators. This is meant to be called when recycling the instance. + pub fn move_out(&self) -> (Option, Option) { + self.with_context_data_mut(|context_data| { + (context_data.storage.take(), context_data.querier.take()) + }) + } +} + +pub struct ContextData { + gas_state: GasState, + storage: Option, + storage_readonly: bool, + call_depth: usize, + querier: Option, + debug_handler: Option>>, + /// A non-owning link to the wasmer instance + wasmer_instance: Option>, +} + +impl ContextData { + pub fn new(gas_limit: u64) -> Self { + ContextData:: { + gas_state: GasState::with_limit(gas_limit), + storage: None, + storage_readonly: true, + call_depth: 0, + querier: None, + debug_handler: None, + wasmer_instance: None, + } + } +} + +pub fn process_gas_info( + env: &Environment, + store: &mut impl AsStoreMut, + info: GasInfo, +) -> VmResult<()> { + let gas_left = env.get_gas_left(store); + + let new_limit = env.with_gas_state_mut(|gas_state| { + gas_state.externally_used_gas += info.externally_used; + // These lines reduce the amount of gas available to wasmer + // so it can not consume gas that was consumed externally. + gas_left + .saturating_sub(info.externally_used) + .saturating_sub(info.cost) + }); + + // This tells wasmer how much more gas it can consume from this point in time. 
+ env.set_gas_left(store, new_limit); + + if info.externally_used + info.cost > gas_left { + Err(VmError::gas_depletion()) + } else { + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::conversion::ref_to_u32; + use crate::size::Size; + use crate::testing::{MockApi, MockQuerier, MockStorage}; + use crate::wasm_backend::{compile, make_compiling_engine}; + use cosmwasm_std::{ + coins, from_json, to_json_vec, AllBalanceResponse, BankQuery, Empty, QueryRequest, + }; + use wasmer::{imports, Function, Instance as WasmerInstance, Store}; + + static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm"); + + // prepared data + const INIT_KEY: &[u8] = b"foo"; + const INIT_VALUE: &[u8] = b"bar"; + // this account has some coins + const INIT_ADDR: &str = "someone"; + const INIT_AMOUNT: u128 = 500; + const INIT_DENOM: &str = "TOKEN"; + + const TESTING_GAS_LIMIT: u64 = 500_000_000; // ~0.5ms + const DEFAULT_QUERY_GAS_LIMIT: u64 = 300_000; + const TESTING_MEMORY_LIMIT: Option = Some(Size::mebi(16)); + + fn make_instance( + gas_limit: u64, + ) -> ( + Environment, + Store, + Box, + ) { + let env = Environment::new(MockApi::default(), gas_limit); + + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = compile(&engine, CONTRACT).unwrap(); + let mut store = Store::new(engine); + + // we need stubs for all required imports + let import_obj = imports! 
{ + "env" => { + "db_read" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "db_write" => Function::new_typed(&mut store, |_a: u32, _b: u32| {}), + "db_remove" => Function::new_typed(&mut store, |_a: u32| {}), + "db_scan" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: i32| -> u32 { 0 }), + "db_next" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "db_next_key" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "db_next_value" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "query_chain" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "addr_validate" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "addr_canonicalize" => Function::new_typed(&mut store, |_a: u32, _b: u32| -> u32 { 0 }), + "addr_humanize" => Function::new_typed(&mut store, |_a: u32, _b: u32| -> u32 { 0 }), + "secp256k1_verify" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "secp256k1_recover_pubkey" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u64 { 0 }), + "secp256r1_verify" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "secp256r1_recover_pubkey" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u64 { 0 }), + "ed25519_verify" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "ed25519_batch_verify" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "debug" => Function::new_typed(&mut store, |_a: u32| {}), + "abort" => Function::new_typed(&mut store, |_a: u32| {}), + }, + }; + let instance = Box::from(WasmerInstance::new(&mut store, &module, &import_obj).unwrap()); + + let instance_ptr = NonNull::from(instance.as_ref()); + env.set_wasmer_instance(Some(instance_ptr)); + env.set_gas_left(&mut store, gas_limit); + + (env, store, instance) + } + + fn leave_default_data(env: &Environment) { + // create some mock data + let mut storage = MockStorage::new(); + 
storage + .set(INIT_KEY, INIT_VALUE) + .0 + .expect("error setting value"); + let querier: MockQuerier = + MockQuerier::new(&[(INIT_ADDR, &coins(INIT_AMOUNT, INIT_DENOM))]); + env.move_in(storage, querier); + } + + #[test] + fn move_out_works() { + let (env, _store, _instance) = make_instance(TESTING_GAS_LIMIT); + + // empty data on start + let (inits, initq) = env.move_out(); + assert!(inits.is_none()); + assert!(initq.is_none()); + + // store it on the instance + leave_default_data(&env); + let (s, q) = env.move_out(); + assert!(s.is_some()); + assert!(q.is_some()); + assert_eq!( + s.unwrap().get(INIT_KEY).0.unwrap(), + Some(INIT_VALUE.to_vec()) + ); + + // now is empty again + let (ends, endq) = env.move_out(); + assert!(ends.is_none()); + assert!(endq.is_none()); + } + + #[test] + fn process_gas_info_works_for_cost() { + let (env, mut store, _instance) = make_instance(100); + assert_eq!(env.get_gas_left(&mut store), 100); + + // Consume all the Gas that we allocated + process_gas_info(&env, &mut store, GasInfo::with_cost(70)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 30); + process_gas_info(&env, &mut store, GasInfo::with_cost(4)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 26); + process_gas_info(&env, &mut store, GasInfo::with_cost(6)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 20); + process_gas_info(&env, &mut store, GasInfo::with_cost(20)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 0); + + // Using one more unit of gas triggers a failure + match process_gas_info(&env, &mut store, GasInfo::with_cost(1)).unwrap_err() { + VmError::GasDepletion { .. 
} => {} + err => panic!("unexpected error: {err:?}"), + } + } + + #[test] + fn process_gas_info_works_for_externally_used() { + let (env, mut store, _instance) = make_instance(100); + assert_eq!(env.get_gas_left(&mut store), 100); + + // Consume all the Gas that we allocated + process_gas_info(&env, &mut store, GasInfo::with_externally_used(70)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 30); + process_gas_info(&env, &mut store, GasInfo::with_externally_used(4)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 26); + process_gas_info(&env, &mut store, GasInfo::with_externally_used(6)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 20); + process_gas_info(&env, &mut store, GasInfo::with_externally_used(20)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 0); + + // Using one more unit of gas triggers a failure + match process_gas_info(&env, &mut store, GasInfo::with_externally_used(1)).unwrap_err() { + VmError::GasDepletion { .. } => {} + err => panic!("unexpected error: {err:?}"), + } + } + + #[test] + fn process_gas_info_works_for_cost_and_externally_used() { + let (env, mut store, _instance) = make_instance(100); + assert_eq!(env.get_gas_left(&mut store), 100); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 0); + + process_gas_info(&env, &mut store, GasInfo::new(17, 4)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 79); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 4); + + process_gas_info(&env, &mut store, GasInfo::new(9, 0)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 70); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 4); + + process_gas_info(&env, &mut store, GasInfo::new(0, 70)).unwrap(); + 
assert_eq!(env.get_gas_left(&mut store), 0); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 74); + + // More cost fail but do not change stats + match process_gas_info(&env, &mut store, GasInfo::new(1, 0)).unwrap_err() { + VmError::GasDepletion { .. } => {} + err => panic!("unexpected error: {err:?}"), + } + assert_eq!(env.get_gas_left(&mut store), 0); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 74); + + // More externally used fails and changes stats + match process_gas_info(&env, &mut store, GasInfo::new(0, 1)).unwrap_err() { + VmError::GasDepletion { .. } => {} + err => panic!("unexpected error: {err:?}"), + } + assert_eq!(env.get_gas_left(&mut store), 0); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 75); + } + + #[test] + fn process_gas_info_zeros_gas_left_when_exceeded() { + // with_externally_used + { + let (env, mut store, _instance) = make_instance(100); + let result = process_gas_info(&env, &mut store, GasInfo::with_externally_used(120)); + match result.unwrap_err() { + VmError::GasDepletion { .. } => {} + err => panic!("unexpected error: {err:?}"), + } + assert_eq!(env.get_gas_left(&mut store), 0); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 120); + } + + // with_cost + { + let (env, mut store, _instance) = make_instance(100); + let result = process_gas_info(&env, &mut store, GasInfo::with_cost(120)); + match result.unwrap_err() { + VmError::GasDepletion { .. 
} => {} + err => panic!("unexpected error: {err:?}"), + } + assert_eq!(env.get_gas_left(&mut store), 0); + let gas_state = env.with_gas_state(|gas_state| gas_state.clone()); + assert_eq!(gas_state.gas_limit, 100); + assert_eq!(gas_state.externally_used_gas, 0); + } + } + + #[test] + fn process_gas_info_works_correctly_with_gas_consumption_in_wasmer() { + let (env, mut store, _instance) = make_instance(100); + assert_eq!(env.get_gas_left(&mut store), 100); + + // Some gas was consumed externally + process_gas_info(&env, &mut store, GasInfo::with_externally_used(50)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 50); + process_gas_info(&env, &mut store, GasInfo::with_externally_used(4)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 46); + + // Consume 20 gas directly in wasmer + env.decrease_gas_left(&mut store, 20).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 26); + + process_gas_info(&env, &mut store, GasInfo::with_externally_used(6)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 20); + process_gas_info(&env, &mut store, GasInfo::with_externally_used(20)).unwrap(); + assert_eq!(env.get_gas_left(&mut store), 0); + + // Using one more unit of gas triggers a failure + match process_gas_info(&env, &mut store, GasInfo::with_externally_used(1)).unwrap_err() { + VmError::GasDepletion { .. 
} => {} + err => panic!("unexpected error: {err:?}"), + } + } + + #[test] + fn is_storage_readonly_defaults_to_true() { + let (env, _store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + assert!(env.is_storage_readonly()); + } + + #[test] + fn set_storage_readonly_can_change_flag() { + let (env, _store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + // change + env.set_storage_readonly(false); + assert!(!env.is_storage_readonly()); + + // still false + env.set_storage_readonly(false); + assert!(!env.is_storage_readonly()); + + // change back + env.set_storage_readonly(true); + assert!(env.is_storage_readonly()); + } + + #[test] + fn call_function_works() { + let (env, mut store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + let result = env + .call_function(&mut store, "allocate", &[10u32.into()]) + .unwrap(); + let ptr = ref_to_u32(&result[0]).unwrap(); + assert!(ptr > 0); + } + + #[test] + fn call_function_fails_for_missing_instance() { + let (env, mut store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + // Clear context's wasmer_instance + env.set_wasmer_instance(None); + + let res = env.call_function(&mut store, "allocate", &[]); + match res.unwrap_err() { + VmError::UninitializedContextData { kind, .. } => assert_eq!(kind, "wasmer_instance"), + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn call_function_fails_for_missing_function() { + let (env, mut store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + let res = env.call_function(&mut store, "doesnt_exist", &[]); + match res.unwrap_err() { + VmError::ResolveErr { msg, .. 
} => { + assert_eq!(msg, "Could not get export: Missing export doesnt_exist"); + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn call_function0_works() { + let (env, mut store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + env.call_function0(&mut store, "interface_version_8", &[]) + .unwrap(); + } + + #[test] + fn call_function0_errors_for_wrong_result_count() { + let (env, mut store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + let result = env.call_function0(&mut store, "allocate", &[10u32.into()]); + match result.unwrap_err() { + VmError::ResultMismatch { + function_name, + expected, + actual, + .. + } => { + assert_eq!(function_name, "allocate"); + assert_eq!(expected, 0); + assert_eq!(actual, 1); + } + err => panic!("unexpected error: {err:?}"), + } + } + + #[test] + fn call_function1_works() { + let (env, mut store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + let result = env + .call_function1(&mut store, "allocate", &[10u32.into()]) + .unwrap(); + let ptr = ref_to_u32(&result).unwrap(); + assert!(ptr > 0); + } + + #[test] + fn call_function1_errors_for_wrong_result_count() { + let (env, mut store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + let result = env + .call_function1(&mut store, "allocate", &[10u32.into()]) + .unwrap(); + let ptr = ref_to_u32(&result).unwrap(); + assert!(ptr > 0); + + let result = env.call_function1(&mut store, "deallocate", &[ptr.into()]); + match result.unwrap_err() { + VmError::ResultMismatch { + function_name, + expected, + actual, + .. 
+ } => { + assert_eq!(function_name, "deallocate"); + assert_eq!(expected, 1); + assert_eq!(actual, 0); + } + err => panic!("unexpected error: {err:?}"), + } + } + + #[test] + fn with_storage_from_context_set_get() { + let (env, _store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + let val = env + .with_storage_from_context::<_, _>(|store| { + Ok(store.get(INIT_KEY).0.expect("error getting value")) + }) + .unwrap(); + assert_eq!(val, Some(INIT_VALUE.to_vec())); + + let set_key: &[u8] = b"more"; + let set_value: &[u8] = b"data"; + + env.with_storage_from_context::<_, _>(|store| { + store + .set(set_key, set_value) + .0 + .expect("error setting value"); + Ok(()) + }) + .unwrap(); + + env.with_storage_from_context::<_, _>(|store| { + assert_eq!(store.get(INIT_KEY).0.unwrap(), Some(INIT_VALUE.to_vec())); + assert_eq!(store.get(set_key).0.unwrap(), Some(set_value.to_vec())); + Ok(()) + }) + .unwrap(); + } + + #[test] + #[should_panic(expected = "A panic occurred in the callback.")] + fn with_storage_from_context_handles_panics() { + let (env, _store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + env.with_storage_from_context::<_, ()>(|_store| { + panic!("A panic occurred in the callback.") + }) + .unwrap(); + } + + #[test] + #[allow(deprecated)] + fn with_querier_from_context_works() { + let (env, _store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + let res = env + .with_querier_from_context::<_, _>(|querier| { + let req: QueryRequest = QueryRequest::Bank(BankQuery::AllBalances { + address: INIT_ADDR.to_string(), + }); + let (result, _gas_info) = + querier.query_raw(&to_json_vec(&req).unwrap(), DEFAULT_QUERY_GAS_LIMIT); + Ok(result.unwrap()) + }) + .unwrap() + .unwrap() + .unwrap(); + let balance: AllBalanceResponse = from_json(res).unwrap(); + + assert_eq!(balance.amount, coins(INIT_AMOUNT, INIT_DENOM)); + } + + #[test] + #[should_panic(expected = "A panic occurred in 
the callback.")] + fn with_querier_from_context_handles_panics() { + let (env, _store, _instance) = make_instance(TESTING_GAS_LIMIT); + leave_default_data(&env); + + env.with_querier_from_context::<_, ()>(|_querier| { + panic!("A panic occurred in the callback.") + }) + .unwrap(); + } +} diff --git a/vm/src/errors/backtrace.rs b/vm/src/errors/backtrace.rs new file mode 100644 index 000000000..5697225c9 --- /dev/null +++ b/vm/src/errors/backtrace.rs @@ -0,0 +1,42 @@ +use core::fmt::{Debug, Display, Formatter, Result}; +use std::backtrace::Backtrace; + +/// This wraps an actual backtrace to allow us to use this in conjunction with [`thiserror::Error`] +pub struct BT(Box); + +impl BT { + #[track_caller] + pub fn capture() -> Self { + BT(Box::new(Backtrace::capture())) + } +} + +impl Debug for BT { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + Debug::fmt(&self.0, f) + } +} + +impl Display for BT { + fn fmt(&self, f: &mut Formatter<'_>) -> Result { + Display::fmt(&self.0, f) + } +} + +/// This macro implements `From` for a given error type to a given error type where +/// the target error has a `backtrace` field. +/// This is meant as a replacement for `thiserror`'s `#[from]` attribute, which does not +/// work with our custom backtrace wrapper. +macro_rules! impl_from_err { + ($from:ty, $to:ty, $map:path) => { + impl From<$from> for $to { + fn from(err: $from) -> Self { + $map { + source: err, + backtrace: $crate::errors::backtrace::BT::capture(), + } + } + } + }; +} +pub(crate) use impl_from_err; diff --git a/vm/src/errors/communication_error.rs b/vm/src/errors/communication_error.rs new file mode 100644 index 000000000..b9c4e60e3 --- /dev/null +++ b/vm/src/errors/communication_error.rs @@ -0,0 +1,163 @@ +use std::fmt::Debug; +use thiserror::Error; + +use super::region_validation_error::RegionValidationError; +use crate::memory::Region; + +/// An error in the communication between contract and host. Those happen around imports and exports. 
+#[derive(Error, Debug)] +#[non_exhaustive] +pub enum CommunicationError { + #[error( + "The Wasm memory address {} provided by the contract could not be dereferenced: {}", + offset, + msg + )] + DerefErr { + /// the position in a Wasm linear memory + offset: u32, + msg: String, + }, + #[error("Got an invalid value for iteration order: {}", value)] + InvalidOrder { value: i32 }, + #[error("Got an invalid region: {}", source)] + InvalidRegion { + #[from] + source: RegionValidationError, + }, + /// When the contract supplies invalid section data to the host. See also `decode_sections` [crate::sections::decode_sections]. + #[error("Got an invalid section: {}", msg)] + InvalidSection { msg: String }, + /// Whenever UTF-8 bytes cannot be decoded into a unicode string, e.g. in String::from_utf8 or str::from_utf8. + #[error("Cannot decode UTF8 bytes into string: {}", msg)] + InvalidUtf8 { msg: String }, + #[error("Region length too big. Got {}, limit {}", length, max_length)] + // Note: this only checks length, not capacity + RegionLengthTooBig { length: usize, max_length: usize }, + #[error("Region too small. Got {}, required {}", size, required)] + RegionTooSmall { size: usize, required: usize }, + #[error("Tried to access memory of region {:?} in Wasm memory of size {} bytes. 
This typically happens when the given Region pointer does not point to a proper Region struct.", region, memory_size)] + RegionAccessErr { + region: Region, + /// Current size of the linear memory in bytes + memory_size: usize, + }, + #[error("Got a zero Wasm address")] + ZeroAddress {}, +} + +impl CommunicationError { + pub(crate) fn deref_err(offset: u32, msg: impl Into) -> Self { + CommunicationError::DerefErr { + offset, + msg: msg.into(), + } + } + + #[allow(dead_code)] + pub(crate) fn invalid_order(value: i32) -> Self { + CommunicationError::InvalidOrder { value } + } + + pub(crate) fn invalid_section(msg: impl Into) -> Self { + CommunicationError::InvalidSection { msg: msg.into() } + } + + #[allow(dead_code)] + pub(crate) fn invalid_utf8(msg: impl ToString) -> Self { + CommunicationError::InvalidUtf8 { + msg: msg.to_string(), + } + } + + pub(crate) fn region_length_too_big(length: usize, max_length: usize) -> Self { + CommunicationError::RegionLengthTooBig { length, max_length } + } + + pub(crate) fn region_too_small(size: usize, required: usize) -> Self { + CommunicationError::RegionTooSmall { size, required } + } + + pub(crate) fn region_access_err(region: Region, memory_size: usize) -> Self { + CommunicationError::RegionAccessErr { + region, + memory_size, + } + } + + pub(crate) fn zero_address() -> Self { + CommunicationError::ZeroAddress {} + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // constructors + + #[test] + fn deref_err() { + let error = CommunicationError::deref_err(345, "broken stuff"); + match error { + CommunicationError::DerefErr { offset, msg, .. } => { + assert_eq!(offset, 345); + assert_eq!(msg, "broken stuff"); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn invalid_order() { + let error = CommunicationError::invalid_order(-745); + match error { + CommunicationError::InvalidOrder { value, .. 
} => assert_eq!(value, -745), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn invalid_utf8() { + let error = CommunicationError::invalid_utf8("broken"); + match error { + CommunicationError::InvalidUtf8 { msg, .. } => assert_eq!(msg, "broken"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn region_length_too_big_works() { + let error = CommunicationError::region_length_too_big(50, 20); + match error { + CommunicationError::RegionLengthTooBig { + length, max_length, .. + } => { + assert_eq!(length, 50); + assert_eq!(max_length, 20); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn region_too_small_works() { + let error = CommunicationError::region_too_small(12, 33); + match error { + CommunicationError::RegionTooSmall { size, required, .. } => { + assert_eq!(size, 12); + assert_eq!(required, 33); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn zero_address() { + let error = CommunicationError::zero_address(); + match error { + CommunicationError::ZeroAddress { .. 
} => {} + e => panic!("Unexpected error: {e:?}"), + } + } +} diff --git a/vm/src/errors/mod.rs b/vm/src/errors/mod.rs new file mode 100644 index 000000000..ba536389a --- /dev/null +++ b/vm/src/errors/mod.rs @@ -0,0 +1,13 @@ +mod backtrace; +mod communication_error; +mod region_validation_error; +mod vm_error; + +pub(crate) use backtrace::{impl_from_err, BT}; +pub use communication_error::CommunicationError; +pub use region_validation_error::RegionValidationError; +pub use vm_error::VmError; + +pub type CommunicationResult = core::result::Result; +pub type RegionValidationResult = core::result::Result; +pub type VmResult = core::result::Result; diff --git a/vm/src/errors/region_validation_error.rs b/vm/src/errors/region_validation_error.rs new file mode 100644 index 000000000..bf9dd5e36 --- /dev/null +++ b/vm/src/errors/region_validation_error.rs @@ -0,0 +1,80 @@ +use std::fmt::Debug; +use thiserror::Error; + +/// An error validating a Region +#[derive(Error, Debug)] +#[non_exhaustive] +pub enum RegionValidationError { + #[error( + "Region length exceeds capacity. Length {}, capacity {}", + length, + capacity + )] + LengthExceedsCapacity { length: u32, capacity: u32 }, + #[error( + "Region exceeds address space. 
Offset {}, capacity {}", + offset, + capacity + )] + OutOfRange { offset: u32, capacity: u32 }, + #[error("Got a zero Wasm address in the offset")] + ZeroOffset {}, +} + +impl RegionValidationError { + pub(crate) fn length_exceeds_capacity(length: u32, capacity: u32) -> Self { + RegionValidationError::LengthExceedsCapacity { length, capacity } + } + + pub(crate) fn out_of_range(offset: u32, capacity: u32) -> Self { + RegionValidationError::OutOfRange { offset, capacity } + } + + pub(crate) fn zero_offset() -> Self { + RegionValidationError::ZeroOffset {} + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // constructors + + #[test] + fn length_exceeds_capacity_works() { + let error = RegionValidationError::length_exceeds_capacity(50, 20); + match error { + RegionValidationError::LengthExceedsCapacity { + length, capacity, .. + } => { + assert_eq!(length, 50); + assert_eq!(capacity, 20); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn out_of_range_works() { + let error = RegionValidationError::out_of_range(u32::MAX, 1); + match error { + RegionValidationError::OutOfRange { + offset, capacity, .. + } => { + assert_eq!(offset, u32::MAX); + assert_eq!(capacity, 1); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn zero_offset() { + let error = RegionValidationError::zero_offset(); + match error { + RegionValidationError::ZeroOffset { .. 
} => {} + e => panic!("Unexpected error: {e:?}"), + } + } +} diff --git a/vm/src/errors/vm_error.rs b/vm/src/errors/vm_error.rs new file mode 100644 index 000000000..5a6260418 --- /dev/null +++ b/vm/src/errors/vm_error.rs @@ -0,0 +1,537 @@ +use super::{impl_from_err, BT}; +use std::fmt::{Debug, Display}; +use thiserror::Error; + +use cosmwasm_crypto::CryptoError; + +use super::communication_error::CommunicationError; +use crate::backend::BackendError; + +#[derive(Error, Debug)] +#[non_exhaustive] +pub enum VmError { + #[error("Aborted: {}", msg)] + Aborted { msg: String, backtrace: BT }, + #[error("Error calling into the VM's backend: {}", source)] + BackendErr { source: BackendError, backtrace: BT }, + #[error("Cache error: {msg}")] + CacheErr { msg: String, backtrace: BT }, + #[error("Error in guest/host communication: {source}")] + CommunicationErr { + source: CommunicationError, + backtrace: BT, + }, + #[error("Error compiling Wasm: {msg}")] + CompileErr { msg: String, backtrace: BT }, + #[error("Couldn't convert from {} to {}. Input: {}", from_type, to_type, input)] + ConversionErr { + from_type: String, + to_type: String, + input: String, + backtrace: BT, + }, + #[error("Crypto error: {}", source)] + CryptoErr { source: CryptoError, backtrace: BT }, + #[error("Ran out of gas during contract execution")] + GasDepletion { backtrace: BT }, + /// Whenever there is no specific error type available + #[error("Generic error: {msg}")] + GenericErr { msg: String, backtrace: BT }, + #[error("Error instantiating a Wasm module: {msg}")] + InstantiationErr { msg: String, backtrace: BT }, + #[error("Hash doesn't match stored data")] + IntegrityErr { backtrace: BT }, + #[error("Error parsing into type {target_type}: {msg}")] + ParseErr { + /// the target type that was attempted + target_type: String, + msg: String, + backtrace: BT, + }, + #[error("Data too long for deserialization. 
Got: {length} bytes; limit: {max_length} bytes")] + DeserializationLimitExceeded { + /// the target type that was attempted + length: usize, + max_length: usize, + backtrace: BT, + }, + #[error("Error serializing type {source_type}: {msg}")] + SerializeErr { + /// the source type that was attempted + source_type: String, + msg: String, + backtrace: BT, + }, + #[error("Error resolving Wasm function: {}", msg)] + ResolveErr { msg: String, backtrace: BT }, + #[error( + "Unexpected number of result values when calling '{}'. Expected: {}, actual: {}.", + function_name, + expected, + actual + )] + ResultMismatch { + function_name: String, + expected: usize, + actual: usize, + backtrace: BT, + }, + #[error("Error executing Wasm: {}", msg)] + RuntimeErr { msg: String, backtrace: BT }, + #[error("Error during static Wasm validation: {}", msg)] + StaticValidationErr { msg: String, backtrace: BT }, + #[error("Uninitialized Context Data: {}", kind)] + UninitializedContextData { kind: String, backtrace: BT }, + #[error("Must not call a writing storage function in this context.")] + WriteAccessDenied { backtrace: BT }, + #[error("Maximum call depth exceeded.")] + MaxCallDepthExceeded { backtrace: BT }, + #[error( + "The called function args arity does not match. 
The contract's method arity: {}", + contract_method_arity + )] + FunctionArityMismatch { + contract_method_arity: usize, + backtrace: BT, + }, +} + +impl VmError { + pub(crate) fn aborted(msg: impl Into) -> Self { + VmError::Aborted { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn backend_err(original: BackendError) -> Self { + VmError::BackendErr { + source: original, + backtrace: BT::capture(), + } + } + + pub(crate) fn cache_err(msg: impl Into) -> Self { + VmError::CacheErr { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn compile_err(msg: impl Into) -> Self { + VmError::CompileErr { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn conversion_err( + from_type: impl Into, + to_type: impl Into, + input: impl Into, + ) -> Self { + VmError::ConversionErr { + from_type: from_type.into(), + to_type: to_type.into(), + input: input.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn crypto_err(original: CryptoError) -> Self { + VmError::CryptoErr { + source: original, + backtrace: BT::capture(), + } + } + + pub(crate) fn gas_depletion() -> Self { + VmError::GasDepletion { + backtrace: BT::capture(), + } + } + + pub(crate) fn generic_err(msg: impl Into) -> Self { + VmError::GenericErr { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn instantiation_err(msg: impl Into) -> Self { + VmError::InstantiationErr { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn integrity_err() -> Self { + VmError::IntegrityErr { + backtrace: BT::capture(), + } + } + + pub(crate) fn parse_err(target: impl Into, msg: impl Display) -> Self { + VmError::ParseErr { + target_type: target.into(), + msg: msg.to_string(), + backtrace: BT::capture(), + } + } + + pub(crate) fn deserialization_limit_exceeded(length: usize, max_length: usize) -> Self { + VmError::DeserializationLimitExceeded { + length, + max_length, + backtrace: BT::capture(), + } + } + + pub(crate) fn 
serialize_err(source: impl Into, msg: impl Display) -> Self { + VmError::SerializeErr { + source_type: source.into(), + msg: msg.to_string(), + backtrace: BT::capture(), + } + } + + pub(crate) fn resolve_err(msg: impl Into) -> Self { + VmError::ResolveErr { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn result_mismatch( + function_name: impl Into, + expected: usize, + actual: usize, + ) -> Self { + VmError::ResultMismatch { + function_name: function_name.into(), + expected, + actual, + backtrace: BT::capture(), + } + } + + // Creates a runtime error with the given message. + // This is private since it is only needed when converting wasmer::RuntimeError + // to VmError. + fn runtime_err(msg: impl Into) -> Self { + VmError::RuntimeErr { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn static_validation_err(msg: impl Into) -> Self { + VmError::StaticValidationErr { + msg: msg.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn uninitialized_context_data(kind: impl Into) -> Self { + VmError::UninitializedContextData { + kind: kind.into(), + backtrace: BT::capture(), + } + } + + pub(crate) fn write_access_denied() -> Self { + VmError::WriteAccessDenied { + backtrace: BT::capture(), + } + } + + pub(crate) fn max_call_depth_exceeded() -> Self { + VmError::MaxCallDepthExceeded { + backtrace: BT::capture(), + } + } + + pub(crate) fn function_arity_mismatch(contract_method_arity: usize) -> Self { + VmError::FunctionArityMismatch { + contract_method_arity, + backtrace: BT::capture(), + } + } +} + +impl_from_err!(CommunicationError, VmError, VmError::CommunicationErr); + +impl From for VmError { + fn from(original: BackendError) -> Self { + match original { + BackendError::OutOfGas {} => VmError::gas_depletion(), + _ => VmError::backend_err(original), + } + } +} + +impl From for VmError { + fn from(original: CryptoError) -> Self { + VmError::crypto_err(original) + } +} + +impl From for VmError { + fn from(original: 
wasmer::wasmparser::BinaryReaderError) -> Self { + VmError::static_validation_err(format!( + "Wasm bytecode could not be deserialized. Deserialization error: \"{original}\"" + )) + } +} + +impl From for VmError { + fn from(original: wasmer::ExportError) -> Self { + VmError::resolve_err(format!("Could not get export: {original}")) + } +} + +impl From for VmError { + fn from(original: wasmer::SerializeError) -> Self { + VmError::cache_err(format!("Could not serialize module: {original}")) + } +} + +impl From for VmError { + fn from(original: wasmer::DeserializeError) -> Self { + VmError::cache_err(format!("Could not deserialize module: {original}")) + } +} + +impl From for VmError { + fn from(original: wasmer::RuntimeError) -> Self { + // Do not use the Display implementation or to_string() of `RuntimeError` + // because it can contain a system specific stack trace, which can + // lead to non-deterministic execution. + // + // Implementation follows https://github.com/wasmerio/wasmer/blob/2.0.0/lib/engine/src/trap/error.rs#L215 + let message = format!("RuntimeError: {}", original.message()); + debug_assert!( + original.to_string().starts_with(&message), + "The error message we created is not a prefix of the error message from Wasmer. Our message: '{}'. 
Wasmer message: '{}'", + &message, + original + ); + VmError::runtime_err(format!("Wasmer runtime error: {}", &message)) + } +} + +impl From for VmError { + fn from(original: wasmer::CompileError) -> Self { + VmError::compile_err(format!("Could not compile: {original}")) + } +} + +impl From for VmError { + fn from(_original: std::convert::Infallible) -> Self { + unreachable!(); + } +} + +impl From for wasmer::RuntimeError { + fn from(original: VmError) -> wasmer::RuntimeError { + let msg: String = original.to_string(); + wasmer::RuntimeError::new(msg) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // constructors + + #[test] + fn backend_err_works() { + let error = VmError::backend_err(BackendError::unknown("something went wrong")); + match error { + VmError::BackendErr { + source: BackendError::Unknown { msg }, + .. + } => assert_eq!(msg, "something went wrong"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn cache_err_works() { + let error = VmError::cache_err("something went wrong"); + match error { + VmError::CacheErr { msg, .. } => assert_eq!(msg, "something went wrong"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn compile_err_works() { + let error = VmError::compile_err("something went wrong"); + match error { + VmError::CompileErr { msg, .. } => assert_eq!(msg, "something went wrong"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn conversion_err_works() { + let error = VmError::conversion_err("i32", "u32", "-9"); + match error { + VmError::ConversionErr { + from_type, + to_type, + input, + .. + } => { + assert_eq!(from_type, "i32"); + assert_eq!(to_type, "u32"); + assert_eq!(input, "-9"); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn crypto_err_works() { + let error = VmError::crypto_err(CryptoError::generic_err("something went wrong")); + match error { + VmError::CryptoErr { + source: CryptoError::GenericErr { msg, .. }, + .. 
+ } => assert_eq!(msg, "something went wrong"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn gas_depletion_works() { + let error = VmError::gas_depletion(); + match error { + VmError::GasDepletion { .. } => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn generic_err_works() { + let guess = 7; + let error = VmError::generic_err(format!("{guess} is too low")); + match error { + VmError::GenericErr { msg, .. } => { + assert_eq!(msg, String::from("7 is too low")); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn instantiation_err_works() { + let error = VmError::instantiation_err("something went wrong"); + match error { + VmError::InstantiationErr { msg, .. } => assert_eq!(msg, "something went wrong"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn integrity_err_works() { + let error = VmError::integrity_err(); + match error { + VmError::IntegrityErr { .. } => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn parse_err_works() { + let error = VmError::parse_err("Book", "Missing field: title"); + match error { + VmError::ParseErr { + target_type, msg, .. + } => { + assert_eq!(target_type, "Book"); + assert_eq!(msg, "Missing field: title"); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn serialize_err_works() { + let error = VmError::serialize_err("Book", "Content too long"); + match error { + VmError::SerializeErr { + source_type, msg, .. + } => { + assert_eq!(source_type, "Book"); + assert_eq!(msg, "Content too long"); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn resolve_err_works() { + let error = VmError::resolve_err("function has different signature"); + match error { + VmError::ResolveErr { msg, .. 
} => assert_eq!(msg, "function has different signature"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn result_mismatch_works() { + let error = VmError::result_mismatch("action", 0, 1); + match error { + VmError::ResultMismatch { + function_name, + expected, + actual, + .. + } => { + assert_eq!(function_name, "action"); + assert_eq!(expected, 0); + assert_eq!(actual, 1); + } + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn runtime_err_works() { + let error = VmError::runtime_err("something went wrong"); + match error { + VmError::RuntimeErr { msg, .. } => assert_eq!(msg, "something went wrong"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn static_validation_err_works() { + let error = VmError::static_validation_err("export xy missing"); + match error { + VmError::StaticValidationErr { msg, .. } => assert_eq!(msg, "export xy missing"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn uninitialized_context_data_works() { + let error = VmError::uninitialized_context_data("foo"); + match error { + VmError::UninitializedContextData { kind, .. } => assert_eq!(kind, "foo"), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn write_access_denied() { + let error = VmError::write_access_denied(); + match error { + VmError::WriteAccessDenied { .. } => {} + e => panic!("Unexpected error: {e:?}"), + } + } +} diff --git a/vm/src/filesystem.rs b/vm/src/filesystem.rs new file mode 100644 index 000000000..45a60f435 --- /dev/null +++ b/vm/src/filesystem.rs @@ -0,0 +1,43 @@ +use std::{fs::create_dir_all, path::Path}; + +#[derive(Debug)] +pub struct MkdirPFailure; + +/// An implementation for `mkdir -p`. +/// +/// This is a thin wrapper around fs::create_dir_all that +/// hides all OS specific error messages to ensure they don't end up +/// breaking consensus. 
+pub fn mkdir_p(path: &Path) -> Result<(), MkdirPFailure> { + create_dir_all(path).map_err(|_e| MkdirPFailure) +} + +#[cfg(test)] +mod tests { + use tempfile::TempDir; + + use super::*; + + #[test] + fn mkdir_p_works() { + let tmp_root = TempDir::new().unwrap(); + + // Can create + let path = tmp_root.path().join("something"); + assert!(!path.is_dir()); + mkdir_p(&path).unwrap(); + assert!(path.is_dir()); + + // Can be called on existing dir + let path = tmp_root.path().join("something else"); + assert!(!path.is_dir()); + mkdir_p(&path).unwrap(); + assert!(path.is_dir()); + mkdir_p(&path).unwrap(); // no-op + assert!(path.is_dir()); + + // Fails for dir with null + let path = tmp_root.path().join("something\0with NULL"); + mkdir_p(&path).unwrap_err(); + } +} diff --git a/vm/src/imports.rs b/vm/src/imports.rs new file mode 100644 index 000000000..dc8ee955f --- /dev/null +++ b/vm/src/imports.rs @@ -0,0 +1,2892 @@ +//! Import implementations + +use std::marker::PhantomData; + +use cosmwasm_core::{BLS12_381_G1_POINT_LEN, BLS12_381_G2_POINT_LEN}; +use cosmwasm_crypto::{ + bls12_381_aggregate_g1, bls12_381_aggregate_g2, bls12_381_hash_to_g1, bls12_381_hash_to_g2, + bls12_381_pairing_equality, ed25519_batch_verify, ed25519_verify, secp256k1_recover_pubkey, + secp256k1_verify, secp256r1_recover_pubkey, secp256r1_verify, CryptoError, HashFunction, +}; +use cosmwasm_crypto::{ + ECDSA_PUBKEY_MAX_LEN, ECDSA_SIGNATURE_LEN, EDDSA_PUBKEY_LEN, MESSAGE_HASH_MAX_LEN, +}; +use rand_core::OsRng; + +#[cfg(feature = "iterator")] +use cosmwasm_std::Order; +use wasmer::{AsStoreMut, FunctionEnvMut}; + +use crate::backend::{BackendApi, BackendError, Querier, Storage}; +use crate::conversion::{ref_to_u32, to_u32}; +use crate::environment::{process_gas_info, DebugInfo, Environment}; +use crate::errors::{CommunicationError, VmError, VmResult}; +#[cfg(feature = "iterator")] +use crate::memory::maybe_read_region; +use crate::memory::{read_region, write_region}; +use 
crate::sections::decode_sections; +#[allow(unused_imports)] +use crate::sections::encode_sections; +use crate::serde::to_vec; +use crate::GasInfo; + +/// A kibi (kilo binary) +const KI: usize = 1024; +/// A mibi (mega binary) +const MI: usize = 1024 * 1024; +/// Max key length for db_write/db_read/db_remove/db_scan (when VM reads the key argument from Wasm memory) +const MAX_LENGTH_DB_KEY: usize = 64 * KI; +/// Max value length for db_write (when VM reads the value argument from Wasm memory) +const MAX_LENGTH_DB_VALUE: usize = 128 * KI; +/// Typically 20 (Cosmos SDK, Ethereum), 32 (Nano, Substrate) or 54 (MockApi) +const MAX_LENGTH_CANONICAL_ADDRESS: usize = 64; +/// The max length of human address inputs (in bytes). +/// The maximum allowed size for [bech32](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki#bech32) +/// is 90 characters and we're adding some safety margin around that for other formats. +const MAX_LENGTH_HUMAN_ADDRESS: usize = 256; +const MAX_LENGTH_QUERY_CHAIN_REQUEST: usize = 64 * KI; +/// Length of a serialized Ed25519 signature +const MAX_LENGTH_ED25519_SIGNATURE: usize = 64; +/// Max length of a Ed25519 message in bytes. +/// This is an arbitrary value, for performance / memory constraints. If you need to verify larger +/// messages, let us know. +const MAX_LENGTH_ED25519_MESSAGE: usize = 128 * 1024; +/// Max number of batch Ed25519 messages / signatures / public_keys. +/// This is an arbitrary value, for performance / memory constraints. If you need to batch-verify a +/// larger number of signatures, let us know. +const MAX_COUNT_ED25519_BATCH: usize = 256; + +/// Max length for a debug message +const MAX_LENGTH_DEBUG: usize = 2 * MI; + +/// Max length for an abort message +const MAX_LENGTH_ABORT: usize = 2 * MI; + +// Import implementations +// +// This block of do_* prefixed functions is tailored for Wasmer's +// Function::new_native_with_env interface. 
Those require an env in the first +// argument and cannot capture other variables. Thus everything is accessed +// through the env. + +/// Reads a storage entry from the VM's storage into Wasm memory +pub fn do_db_read( + mut env: FunctionEnvMut>, + key_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let key = read_region(&data.memory(&store), key_ptr, MAX_LENGTH_DB_KEY)?; + + let (result, gas_info) = data.with_storage_from_context::<_, _>(|store| Ok(store.get(&key)))?; + process_gas_info(data, &mut store, gas_info)?; + let value = result?; + + let out_data = match value { + Some(data) => data, + None => return Ok(0), + }; + write_to_contract(data, &mut store, &out_data) +} + +/// Writes a storage entry from Wasm memory into the VM's storage +pub fn do_db_write( + mut env: FunctionEnvMut>, + key_ptr: u32, + value_ptr: u32, +) -> VmResult<()> { + let (data, mut store) = env.data_and_store_mut(); + + if data.is_storage_readonly() { + return Err(VmError::write_access_denied()); + } + + /// Converts a region length error to a different variant for better understandability + fn convert_error(e: VmError, kind: &'static str) -> VmError { + if let VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, max_length }, + .. + } = e + { + VmError::generic_err(format!( + "{kind} too big. Tried to write {length} bytes to storage, limit is {max_length}." 
+ )) + } else { + e + } + } + + let key = read_region(&data.memory(&store), key_ptr, MAX_LENGTH_DB_KEY) + .map_err(|e| convert_error(e, "Key"))?; + let value = read_region(&data.memory(&store), value_ptr, MAX_LENGTH_DB_VALUE) + .map_err(|e| convert_error(e, "Value"))?; + + let (result, gas_info) = + data.with_storage_from_context::<_, _>(|store| Ok(store.set(&key, &value)))?; + process_gas_info(data, &mut store, gas_info)?; + result?; + + Ok(()) +} + +pub fn do_db_remove( + mut env: FunctionEnvMut>, + key_ptr: u32, +) -> VmResult<()> { + let (data, mut store) = env.data_and_store_mut(); + + if data.is_storage_readonly() { + return Err(VmError::write_access_denied()); + } + + let key = read_region(&data.memory(&store), key_ptr, MAX_LENGTH_DB_KEY)?; + + let (result, gas_info) = + data.with_storage_from_context::<_, _>(|store| Ok(store.remove(&key)))?; + process_gas_info(data, &mut store, gas_info)?; + result?; + + Ok(()) +} + +pub fn do_addr_validate( + mut env: FunctionEnvMut>, + source_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let source_data = read_region(&data.memory(&store), source_ptr, MAX_LENGTH_HUMAN_ADDRESS)?; + if source_data.is_empty() { + return write_to_contract(data, &mut store, b"Input is empty"); + } + + let source_string = match String::from_utf8(source_data) { + Ok(s) => s, + Err(_) => return write_to_contract(data, &mut store, b"Input is not valid UTF-8"), + }; + + let (result, gas_info) = data.api.addr_validate(&source_string); + process_gas_info(data, &mut store, gas_info)?; + match result { + Ok(()) => Ok(0), + Err(BackendError::UserErr { msg, .. 
}) => { + write_to_contract(data, &mut store, msg.as_bytes()) + } + Err(err) => Err(VmError::from(err)), + } +} + +pub fn do_addr_canonicalize( + mut env: FunctionEnvMut>, + source_ptr: u32, + destination_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let source_data = read_region(&data.memory(&store), source_ptr, MAX_LENGTH_HUMAN_ADDRESS)?; + if source_data.is_empty() { + return write_to_contract(data, &mut store, b"Input is empty"); + } + + let source_string = match String::from_utf8(source_data) { + Ok(s) => s, + Err(_) => return write_to_contract(data, &mut store, b"Input is not valid UTF-8"), + }; + + let (result, gas_info) = data.api.addr_canonicalize(&source_string); + process_gas_info(data, &mut store, gas_info)?; + match result { + Ok(canonical) => { + write_region(&data.memory(&store), destination_ptr, canonical.as_slice())?; + Ok(0) + } + Err(BackendError::UserErr { msg, .. }) => { + Ok(write_to_contract(data, &mut store, msg.as_bytes())?) + } + Err(err) => Err(VmError::from(err)), + } +} + +pub fn do_addr_humanize( + mut env: FunctionEnvMut>, + source_ptr: u32, + destination_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let canonical = read_region( + &data.memory(&store), + source_ptr, + MAX_LENGTH_CANONICAL_ADDRESS, + )?; + + let (result, gas_info) = data.api.addr_humanize(&canonical); + process_gas_info(data, &mut store, gas_info)?; + match result { + Ok(human) => { + write_region(&data.memory(&store), destination_ptr, human.as_bytes())?; + Ok(0) + } + Err(BackendError::UserErr { msg, .. }) => { + Ok(write_to_contract(data, &mut store, msg.as_bytes())?) 
+ } + Err(err) => Err(VmError::from(err)), + } +} + +/// Return code (error code) for a valid signature +const SECP256K1_VERIFY_CODE_VALID: u32 = 0; + +/// Return code (error code) for an invalid signature +const SECP256K1_VERIFY_CODE_INVALID: u32 = 1; + +/// Return code (error code) for a valid pairing +const BLS12_381_VALID_PAIRING: u32 = 0; + +/// Return code (error code) for an invalid pairing +const BLS12_381_INVALID_PAIRING: u32 = 1; + +/// Return code (error code) if the aggregating the points on curve was successful +const BLS12_381_AGGREGATE_SUCCESS: u32 = 0; + +/// Return code (error code) for success when hashing to the curve +const BLS12_381_HASH_TO_CURVE_SUCCESS: u32 = 0; + +/// Maximum size of continuous points passed to aggregate functions +const BLS12_381_MAX_AGGREGATE_SIZE: usize = 2 * MI; + +/// Maximum size of the message passed to the hash-to-curve functions +const BLS12_381_MAX_MESSAGE_SIZE: usize = 5 * MI; + +/// Maximum size of the destination passed to the hash-to-curve functions +const BLS12_381_MAX_DST_SIZE: usize = 5 * KI; + +pub fn do_bls12_381_aggregate_g1< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + g1s_ptr: u32, + out_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + let memory = data.memory(&store); + + let g1s = read_region(&memory, g1s_ptr, BLS12_381_MAX_AGGREGATE_SIZE)?; + + let estimated_point_count = (g1s.len() / BLS12_381_G1_POINT_LEN) as u64; + let gas_info = GasInfo::with_cost( + data.gas_config + .bls12_381_aggregate_g1_cost + .total_cost(estimated_point_count), + ); + process_gas_info(data, &mut store, gas_info)?; + + let code = match bls12_381_aggregate_g1(&g1s) { + Ok(point) => { + let memory = data.memory(&store); + write_region(&memory, out_ptr, &point)?; + BLS12_381_AGGREGATE_SUCCESS + } + Err(err) => match err { + CryptoError::InvalidPoint { .. } | CryptoError::Aggregation { .. 
} => err.code(), + CryptoError::PairingEquality { .. } + | CryptoError::BatchErr { .. } + | CryptoError::GenericErr { .. } + | CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::UnknownHashFunction { .. } => { + panic!("Error must not happen for this call") + } + }, + }; + + Ok(code) +} + +pub fn do_bls12_381_aggregate_g2< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + g2s_ptr: u32, + out_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + let memory = data.memory(&store); + + let g2s = read_region(&memory, g2s_ptr, BLS12_381_MAX_AGGREGATE_SIZE)?; + + let estimated_point_count = (g2s.len() / BLS12_381_G2_POINT_LEN) as u64; + let gas_info = GasInfo::with_cost( + data.gas_config + .bls12_381_aggregate_g2_cost + .total_cost(estimated_point_count), + ); + process_gas_info(data, &mut store, gas_info)?; + + let code = match bls12_381_aggregate_g2(&g2s) { + Ok(point) => { + let memory = data.memory(&store); + write_region(&memory, out_ptr, &point)?; + BLS12_381_AGGREGATE_SUCCESS + } + Err(err) => match err { + CryptoError::InvalidPoint { .. } | CryptoError::Aggregation { .. } => err.code(), + CryptoError::PairingEquality { .. } + | CryptoError::BatchErr { .. } + | CryptoError::GenericErr { .. } + | CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + }; + + Ok(code) +} + +pub fn do_bls12_381_pairing_equality< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + ps_ptr: u32, + qs_ptr: u32, + r_ptr: u32, + s_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + let memory = data.memory(&store); + + let ps = read_region(&memory, ps_ptr, BLS12_381_MAX_AGGREGATE_SIZE)?; + let qs = read_region(&memory, qs_ptr, BLS12_381_MAX_AGGREGATE_SIZE)?; + let r = read_region(&memory, r_ptr, BLS12_381_G1_POINT_LEN)?; + let s = read_region(&memory, s_ptr, BLS12_381_G2_POINT_LEN)?; + + // The values here are only correct if ps and qs can be divided by the point size. + // They are good enough for gas since we error in `bls12_381_pairing_equality` if the inputs are + // not properly formatted. + let estimated_n = (ps.len() / BLS12_381_G1_POINT_LEN) as u64; + // The number of parings to compute (`n` on the left hand side and `k = n + 1` in total) + let estimated_k = estimated_n + 1; + + let gas_info = GasInfo::with_cost( + data.gas_config + .bls12_381_pairing_equality_cost + .total_cost(estimated_k), + ); + process_gas_info(data, &mut store, gas_info)?; + + let code = match bls12_381_pairing_equality(&ps, &qs, &r, &s) { + Ok(true) => BLS12_381_VALID_PAIRING, + Ok(false) => BLS12_381_INVALID_PAIRING, + Err(err) => match err { + CryptoError::PairingEquality { .. } | CryptoError::InvalidPoint { .. } => err.code(), + CryptoError::Aggregation { .. } + | CryptoError::BatchErr { .. } + | CryptoError::GenericErr { .. } + | CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + }; + + Ok(code) +} + +pub fn do_bls12_381_hash_to_g1< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + hash_function: u32, + msg_ptr: u32, + dst_ptr: u32, + out_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + let memory = data.memory(&store); + + let msg = read_region(&memory, msg_ptr, BLS12_381_MAX_MESSAGE_SIZE)?; + let dst = read_region(&memory, dst_ptr, BLS12_381_MAX_DST_SIZE)?; + + let gas_info = GasInfo::with_cost(data.gas_config.bls12_381_hash_to_g1_cost); + process_gas_info(data, &mut store, gas_info)?; + + let hash_function = match HashFunction::from_u32(hash_function) { + Ok(func) => func, + Err(error) => return Ok(error.code()), + }; + let point = bls12_381_hash_to_g1(hash_function, &msg, &dst); + + let memory = data.memory(&store); + write_region(&memory, out_ptr, &point)?; + + Ok(BLS12_381_HASH_TO_CURVE_SUCCESS) +} + +pub fn do_bls12_381_hash_to_g2< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + hash_function: u32, + msg_ptr: u32, + dst_ptr: u32, + out_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + let memory = data.memory(&store); + + let msg = read_region(&memory, msg_ptr, BLS12_381_MAX_MESSAGE_SIZE)?; + let dst = read_region(&memory, dst_ptr, BLS12_381_MAX_DST_SIZE)?; + + let gas_info = GasInfo::with_cost(data.gas_config.bls12_381_hash_to_g2_cost); + process_gas_info(data, &mut store, gas_info)?; + + let hash_function = match HashFunction::from_u32(hash_function) { + Ok(func) => func, + Err(error) => return Ok(error.code()), + }; + let point = bls12_381_hash_to_g2(hash_function, &msg, &dst); + + let memory = data.memory(&store); + write_region(&memory, out_ptr, &point)?; + + Ok(BLS12_381_HASH_TO_CURVE_SUCCESS) +} + +pub fn do_secp256k1_verify( + mut env: FunctionEnvMut>, + hash_ptr: u32, + 
signature_ptr: u32, + pubkey_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let hash = read_region(&data.memory(&store), hash_ptr, MESSAGE_HASH_MAX_LEN)?; + let signature = read_region(&data.memory(&store), signature_ptr, ECDSA_SIGNATURE_LEN)?; + let pubkey = read_region(&data.memory(&store), pubkey_ptr, ECDSA_PUBKEY_MAX_LEN)?; + + let gas_info = GasInfo::with_cost(data.gas_config.secp256k1_verify_cost); + process_gas_info(data, &mut store, gas_info)?; + let result = secp256k1_verify(&hash, &signature, &pubkey); + let code = match result { + Ok(valid) => { + if valid { + SECP256K1_VERIFY_CODE_VALID + } else { + SECP256K1_VERIFY_CODE_INVALID + } + } + Err(err) => match err { + CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::GenericErr { .. } => err.code(), + CryptoError::Aggregation { .. } + | CryptoError::PairingEquality { .. } + | CryptoError::BatchErr { .. } + | CryptoError::InvalidPoint { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + }; + Ok(code) +} + +pub fn do_secp256k1_recover_pubkey< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + hash_ptr: u32, + signature_ptr: u32, + recover_param: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let hash = read_region(&data.memory(&store), hash_ptr, MESSAGE_HASH_MAX_LEN)?; + let signature = read_region(&data.memory(&store), signature_ptr, ECDSA_SIGNATURE_LEN)?; + let recover_param: u8 = match recover_param.try_into() { + Ok(rp) => rp, + Err(_) => return Ok((CryptoError::invalid_recovery_param().code() as u64) << 32), + }; + + let gas_info = GasInfo::with_cost(data.gas_config.secp256k1_recover_pubkey_cost); + process_gas_info(data, &mut store, gas_info)?; + let result = secp256k1_recover_pubkey(&hash, &signature, recover_param); + match result { + Ok(pubkey) => { + let pubkey_ptr = write_to_contract(data, &mut store, pubkey.as_ref())?; + Ok(to_low_half(pubkey_ptr)) + } + Err(err) => match err { + CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::GenericErr { .. } => Ok(to_high_half(err.code())), + CryptoError::Aggregation { .. } + | CryptoError::PairingEquality { .. } + | CryptoError::BatchErr { .. } + | CryptoError::InvalidPoint { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + } +} + +/// Return code (error code) for a valid signature +const SECP256R1_VERIFY_CODE_VALID: u32 = 0; + +/// Return code (error code) for an invalid signature +const SECP256R1_VERIFY_CODE_INVALID: u32 = 1; + +pub fn do_secp256r1_verify( + mut env: FunctionEnvMut>, + hash_ptr: u32, + signature_ptr: u32, + pubkey_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let hash = read_region(&data.memory(&store), hash_ptr, MESSAGE_HASH_MAX_LEN)?; + let signature = read_region(&data.memory(&store), signature_ptr, ECDSA_SIGNATURE_LEN)?; + let pubkey = read_region(&data.memory(&store), pubkey_ptr, ECDSA_PUBKEY_MAX_LEN)?; + + let gas_info = GasInfo::with_cost(data.gas_config.secp256r1_verify_cost); + process_gas_info(data, &mut store, gas_info)?; + let result = secp256r1_verify(&hash, &signature, &pubkey); + let code = match result { + Ok(valid) => { + if valid { + SECP256R1_VERIFY_CODE_VALID + } else { + SECP256R1_VERIFY_CODE_INVALID + } + } + Err(err) => match err { + CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::GenericErr { .. } => err.code(), + CryptoError::Aggregation { .. } + | CryptoError::PairingEquality { .. } + | CryptoError::BatchErr { .. } + | CryptoError::InvalidPoint { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + }; + Ok(code) +} + +pub fn do_secp256r1_recover_pubkey< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + hash_ptr: u32, + signature_ptr: u32, + recover_param: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let hash = read_region(&data.memory(&store), hash_ptr, MESSAGE_HASH_MAX_LEN)?; + let signature = read_region(&data.memory(&store), signature_ptr, ECDSA_SIGNATURE_LEN)?; + let recover_param: u8 = match recover_param.try_into() { + Ok(rp) => rp, + Err(_) => return Ok((CryptoError::invalid_recovery_param().code() as u64) << 32), + }; + + let gas_info = GasInfo::with_cost(data.gas_config.secp256r1_recover_pubkey_cost); + process_gas_info(data, &mut store, gas_info)?; + let result = secp256r1_recover_pubkey(&hash, &signature, recover_param); + match result { + Ok(pubkey) => { + let pubkey_ptr = write_to_contract(data, &mut store, pubkey.as_ref())?; + Ok(to_low_half(pubkey_ptr)) + } + Err(err) => match err { + CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::GenericErr { .. } => Ok(to_high_half(err.code())), + CryptoError::Aggregation { .. } + | CryptoError::PairingEquality { .. } + | CryptoError::BatchErr { .. } + | CryptoError::InvalidPoint { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + } +} + +/// Return code (error code) for a valid signature +const ED25519_VERIFY_CODE_VALID: u32 = 0; + +/// Return code (error code) for an invalid signature +const ED25519_VERIFY_CODE_INVALID: u32 = 1; + +pub fn do_ed25519_verify( + mut env: FunctionEnvMut>, + message_ptr: u32, + signature_ptr: u32, + pubkey_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let message = read_region( + &data.memory(&store), + message_ptr, + MAX_LENGTH_ED25519_MESSAGE, + )?; + let signature = read_region( + &data.memory(&store), + signature_ptr, + MAX_LENGTH_ED25519_SIGNATURE, + )?; + let pubkey = read_region(&data.memory(&store), pubkey_ptr, EDDSA_PUBKEY_LEN)?; + + let gas_info = GasInfo::with_cost(data.gas_config.ed25519_verify_cost); + process_gas_info(data, &mut store, gas_info)?; + let result = ed25519_verify(&message, &signature, &pubkey); + let code = match result { + Ok(valid) => { + if valid { + ED25519_VERIFY_CODE_VALID + } else { + ED25519_VERIFY_CODE_INVALID + } + } + Err(err) => match err { + CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::GenericErr { .. } => err.code(), + CryptoError::Aggregation { .. } + | CryptoError::PairingEquality { .. } + | CryptoError::BatchErr { .. } + | CryptoError::InvalidPoint { .. } + | CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + }; + Ok(code) +} + +pub fn do_ed25519_batch_verify< + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +>( + mut env: FunctionEnvMut>, + messages_ptr: u32, + signatures_ptr: u32, + public_keys_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let messages = read_region( + &data.memory(&store), + messages_ptr, + (MAX_LENGTH_ED25519_MESSAGE + 4) * MAX_COUNT_ED25519_BATCH, + )?; + let signatures = read_region( + &data.memory(&store), + signatures_ptr, + (MAX_LENGTH_ED25519_SIGNATURE + 4) * MAX_COUNT_ED25519_BATCH, + )?; + let public_keys = read_region( + &data.memory(&store), + public_keys_ptr, + (EDDSA_PUBKEY_LEN + 4) * MAX_COUNT_ED25519_BATCH, + )?; + + let messages = decode_sections(&messages)?; + let signatures = decode_sections(&signatures)?; + let public_keys = decode_sections(&public_keys)?; + + let gas_cost = if public_keys.len() == 1 { + &data.gas_config.ed25519_batch_verify_one_pubkey_cost + } else { + &data.gas_config.ed25519_batch_verify_cost + }; + let gas_info = GasInfo::with_cost(gas_cost.total_cost(signatures.len() as u64)); + process_gas_info(data, &mut store, gas_info)?; + let result = ed25519_batch_verify(&mut OsRng, &messages, &signatures, &public_keys); + let code = match result { + Ok(valid) => { + if valid { + ED25519_VERIFY_CODE_VALID + } else { + ED25519_VERIFY_CODE_INVALID + } + } + Err(err) => match err { + CryptoError::BatchErr { .. } + | CryptoError::InvalidPubkeyFormat { .. } + | CryptoError::InvalidSignatureFormat { .. } + | CryptoError::GenericErr { .. } => err.code(), + CryptoError::Aggregation { .. } + | CryptoError::PairingEquality { .. } + | CryptoError::InvalidHashFormat { .. } + | CryptoError::InvalidPoint { .. } + | CryptoError::InvalidRecoveryParam { .. } + | CryptoError::UnknownHashFunction { .. 
} => { + panic!("Error must not happen for this call") + } + }, + }; + Ok(code) +} + +/// Prints a debug message to console. +/// This does not charge gas, so debug printing should be disabled when used in a blockchain module. +pub fn do_debug( + mut env: FunctionEnvMut>, + message_ptr: u32, +) -> VmResult<()> { + let (data, mut store) = env.data_and_store_mut(); + + if let Some(debug_handler) = data.debug_handler() { + let message_data = read_region(&data.memory(&store), message_ptr, MAX_LENGTH_DEBUG)?; + let msg = String::from_utf8_lossy(&message_data); + let gas_remaining = data.get_gas_left(&mut store); + debug_handler.borrow_mut()( + &msg, + DebugInfo { + gas_remaining, + __lifetime: PhantomData, + }, + ); + } + Ok(()) +} + +/// Aborts the contract and shows the given error message +pub fn do_abort( + mut env: FunctionEnvMut>, + message_ptr: u32, +) -> VmResult<()> { + let (data, store) = env.data_and_store_mut(); + + let message_data = read_region(&data.memory(&store), message_ptr, MAX_LENGTH_ABORT)?; + let msg = String::from_utf8_lossy(&message_data); + Err(VmError::aborted(msg)) +} + +pub fn do_query_chain( + mut env: FunctionEnvMut>, + request_ptr: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let request = read_region( + &data.memory(&store), + request_ptr, + MAX_LENGTH_QUERY_CHAIN_REQUEST, + )?; + + let gas_remaining = data.get_gas_left(&mut store); + let (result, gas_info) = data.with_querier_from_context::<_, _>(|querier| { + Ok(querier.query_raw(&request, gas_remaining)) + })?; + process_gas_info(data, &mut store, gas_info)?; + let serialized = to_vec(&result?)?; + write_to_contract(data, &mut store, &serialized) +} + +#[cfg(feature = "iterator")] +pub fn do_db_scan( + mut env: FunctionEnvMut>, + start_ptr: u32, + end_ptr: u32, + order: i32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let start = maybe_read_region(&data.memory(&store), start_ptr, MAX_LENGTH_DB_KEY)?; + let end = 
maybe_read_region(&data.memory(&store), end_ptr, MAX_LENGTH_DB_KEY)?; + let order: Order = order + .try_into() + .map_err(|_| CommunicationError::invalid_order(order))?; + + let (result, gas_info) = data.with_storage_from_context::<_, _>(|store| { + Ok(store.scan(start.as_deref(), end.as_deref(), order)) + })?; + process_gas_info(data, &mut store, gas_info)?; + let iterator_id = result?; + Ok(iterator_id) +} + +#[cfg(feature = "iterator")] +pub fn do_db_next( + mut env: FunctionEnvMut>, + iterator_id: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let (result, gas_info) = + data.with_storage_from_context::<_, _>(|store| Ok(store.next(iterator_id)))?; + + process_gas_info(data, &mut store, gas_info)?; + + // Empty key will later be treated as _no more element_. + let (key, value) = result?.unwrap_or_else(|| (Vec::::new(), Vec::::new())); + + let out_data = encode_sections(&[key, value])?; + write_to_contract(data, &mut store, &out_data) +} + +#[cfg(feature = "iterator")] +pub fn do_db_next_key( + mut env: FunctionEnvMut>, + iterator_id: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let (result, gas_info) = + data.with_storage_from_context::<_, _>(|store| Ok(store.next_key(iterator_id)))?; + + process_gas_info(data, &mut store, gas_info)?; + + let key = match result? { + Some(key) => key, + None => return Ok(0), + }; + + write_to_contract(data, &mut store, &key) +} + +#[cfg(feature = "iterator")] +pub fn do_db_next_value( + mut env: FunctionEnvMut>, + iterator_id: u32, +) -> VmResult { + let (data, mut store) = env.data_and_store_mut(); + + let (result, gas_info) = + data.with_storage_from_context::<_, _>(|store| Ok(store.next_value(iterator_id)))?; + + process_gas_info(data, &mut store, gas_info)?; + + let value = match result? 
{ + Some(value) => value, + None => return Ok(0), + }; + + write_to_contract(data, &mut store, &value) +} + +/// Creates a Region in the contract, writes the given data to it and returns the memory location +fn write_to_contract( + data: &Environment, + store: &mut impl AsStoreMut, + input: &[u8], +) -> VmResult { + let out_size = to_u32(input.len())?; + let result = data.call_function1(store, "allocate", &[out_size.into()])?; + let target_ptr = ref_to_u32(&result)?; + if target_ptr == 0 { + return Err(CommunicationError::zero_address().into()); + } + write_region(&data.memory(store), target_ptr, input)?; + Ok(target_ptr) +} + +/// Returns the data shifted by 32 bits towards the most significant bit. +/// +/// This is independent of endianness. But to get the idea, it would be +/// `data || 0x00000000` in big endian representation. +#[inline] +fn to_high_half(data: u32) -> u64 { + // See https://stackoverflow.com/a/58956419/2013738 to understand + // why this is endianness agnostic. + (data as u64) << 32 +} + +/// Returns the data copied to the 4 least significant bytes. +/// +/// This is independent of endianness. But to get the idea, it would be +/// `0x00000000 || data` in big endian representation. 
+#[inline] +fn to_low_half(data: u32) -> u64 { + data.into() +} + +#[cfg(test)] +mod tests { + use super::*; + use cosmwasm_std::{ + coins, from_json, AllBalanceResponse, BankQuery, Binary, Empty, QueryRequest, SystemError, + SystemResult, WasmQuery, + }; + use hex_literal::hex; + use std::ptr::NonNull; + use wasmer::{imports, Function, FunctionEnv, Instance as WasmerInstance, Store}; + + use crate::size::Size; + use crate::testing::{MockApi, MockQuerier, MockStorage}; + use crate::wasm_backend::{compile, make_compiling_engine}; + + static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm"); + + // prepared data + const KEY1: &[u8] = b"ant"; + const VALUE1: &[u8] = b"insect"; + const KEY2: &[u8] = b"tree"; + const VALUE2: &[u8] = b"plant"; + + // this account has some coins + const INIT_ADDR: &str = "someone"; + const INIT_AMOUNT: u128 = 500; + const INIT_DENOM: &str = "TOKEN"; + + const TESTING_GAS_LIMIT: u64 = 1_000_000_000; // ~1ms + const TESTING_MEMORY_LIMIT: Option = Some(Size::mebi(16)); + + const ECDSA_P256K1_HASH_HEX: &str = + "5ae8317d34d1e595e3fa7247db80c0af4320cce1116de187f8f7e2e099c0d8d0"; + const ECDSA_P256K1_SIG_HEX: &str = "207082eb2c3dfa0b454e0906051270ba4074ac93760ba9e7110cd9471475111151eb0dbbc9920e72146fb564f99d039802bf6ef2561446eb126ef364d21ee9c4"; + const ECDSA_P256K1_PUBKEY_HEX: &str = "04051c1ee2190ecfb174bfe4f90763f2b4ff7517b70a2aec1876ebcfd644c4633fb03f3cfbd94b1f376e34592d9d41ccaf640bb751b00a1fadeb0c01157769eb73"; + const ECDSA_P256R1_HASH_HEX: &str = + "b804cf88af0c2eff8bbbfb3660ebb3294138e9d3ebd458884e19818061dacff0"; + const ECDSA_P256R1_SIG_HEX: &str = "35fb60f5ca0f3ca08542fb3cc641c8263a2cab7a90ee6a5e1583fac2bb6f6bd1ee59d81bc9db1055cc0ed97b159d8784af04e98511d0a9a407b99bb292572e96"; + const ECDSA_P256R1_PUBKEY_HEX: &str = "0474ccd8a62fba0e667c50929a53f78c21b8ff0c3c737b0b40b1750b2302b0bde829074e21f3a0ef88b9efdf10d06aa4c295cc1671f758ca0e4cd108803d0f2614"; + + const EDDSA_MSG_HEX: &str = ""; + const EDDSA_SIG_HEX: &str = 
"e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"; + const EDDSA_PUBKEY_HEX: &str = + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a"; + + fn make_instance( + api: MockApi, + ) -> ( + FunctionEnv>, + Store, + Box, + ) { + let gas_limit = TESTING_GAS_LIMIT; + let env = Environment::new(api, gas_limit); + + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = compile(&engine, CONTRACT).unwrap(); + let mut store = Store::new(engine); + + let fe = FunctionEnv::new(&mut store, env); + + // we need stubs for all required imports + let import_obj = imports! { + "env" => { + "db_read" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "db_write" => Function::new_typed(&mut store, |_a: u32, _b: u32| {}), + "db_remove" => Function::new_typed(&mut store, |_a: u32| {}), + "db_scan" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: i32| -> u32 { 0 }), + "db_next" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "db_next_key" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "db_next_value" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "query_chain" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "addr_validate" => Function::new_typed(&mut store, |_a: u32| -> u32 { 0 }), + "addr_canonicalize" => Function::new_typed(&mut store, |_a: u32, _b: u32| -> u32 { 0 }), + "addr_humanize" => Function::new_typed(&mut store, |_a: u32, _b: u32| -> u32 { 0 }), + "secp256k1_verify" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "secp256k1_recover_pubkey" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u64 { 0 }), + "secp256r1_verify" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "secp256r1_recover_pubkey" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u64 { 0 }), + "ed25519_verify" => 
Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "ed25519_batch_verify" => Function::new_typed(&mut store, |_a: u32, _b: u32, _c: u32| -> u32 { 0 }), + "debug" => Function::new_typed(&mut store, |_a: u32| {}), + "abort" => Function::new_typed(&mut store, |_a: u32| {}), + }, + }; + let wasmer_instance = + Box::from(WasmerInstance::new(&mut store, &module, &import_obj).unwrap()); + let memory = wasmer_instance + .exports + .get_memory("memory") + .unwrap() + .clone(); + + fe.as_mut(&mut store).memory = Some(memory); + + let instance_ptr = NonNull::from(wasmer_instance.as_ref()); + + { + let mut fe_mut = fe.clone().into_mut(&mut store); + let (env, mut store) = fe_mut.data_and_store_mut(); + + env.set_wasmer_instance(Some(instance_ptr)); + env.set_gas_left(&mut store, gas_limit); + env.set_storage_readonly(false); + } + + (fe, store, wasmer_instance) + } + + fn leave_default_data( + fe_mut: &mut FunctionEnvMut>, + ) { + let (env, _store) = fe_mut.data_and_store_mut(); + + // create some mock data + let mut storage = MockStorage::new(); + storage.set(KEY1, VALUE1).0.expect("error setting"); + storage.set(KEY2, VALUE2).0.expect("error setting"); + let querier: MockQuerier = + MockQuerier::new(&[(INIT_ADDR, &coins(INIT_AMOUNT, INIT_DENOM))]); + env.move_in(storage, querier); + } + + fn write_data( + fe_mut: &mut FunctionEnvMut>, + data: &[u8], + ) -> u32 { + let (env, mut store) = fe_mut.data_and_store_mut(); + + let result = env + .call_function1(&mut store, "allocate", &[(data.len() as u32).into()]) + .unwrap(); + let region_ptr = ref_to_u32(&result).unwrap(); + write_region(&env.memory(&store), region_ptr, data).expect("error writing"); + region_ptr + } + + fn create_empty( + wasmer_instance: &WasmerInstance, + fe_mut: &mut FunctionEnvMut>, + capacity: u32, + ) -> u32 { + let (_, mut store) = fe_mut.data_and_store_mut(); + let allocate = wasmer_instance + .exports + .get_function("allocate") + .expect("error getting function"); + let result 
= allocate + .call(&mut store, &[capacity.into()]) + .expect("error calling allocate"); + ref_to_u32(&result[0]).expect("error converting result") + } + + /// A Region reader that is just good enough for the tests in this file + fn force_read( + fe_mut: &mut FunctionEnvMut>, + region_ptr: u32, + ) -> Vec { + let (env, store) = fe_mut.data_and_store_mut(); + + read_region(&env.memory(&store), region_ptr, 5000).unwrap() + } + + #[test] + fn do_db_read_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + leave_default_data(&mut fe_mut); + + let key_ptr = write_data(&mut fe_mut, KEY1); + let result = do_db_read(fe_mut.as_mut(), key_ptr); + let value_ptr = result.unwrap(); + assert!(value_ptr > 0); + leave_default_data(&mut fe_mut); + assert_eq!(force_read(&mut fe_mut, value_ptr), VALUE1); + } + + #[test] + fn do_db_read_works_for_non_existent_key() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + leave_default_data(&mut fe_mut); + + let key_ptr = write_data(&mut fe_mut, b"I do not exist in storage"); + let result = do_db_read(fe_mut, key_ptr); + assert_eq!(result.unwrap(), 0); + } + + #[test] + fn do_db_read_fails_for_large_key() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + leave_default_data(&mut fe_mut); + + let key_ptr = write_data(&mut fe_mut, &vec![7u8; 300 * 1024]); + let result = do_db_read(fe_mut, key_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. 
+ } => assert_eq!(length, 300 * 1024), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_db_write_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let key_ptr = write_data(&mut fe_mut, b"new storage key"); + let value_ptr = write_data(&mut fe_mut, b"new value"); + + leave_default_data(&mut fe_mut); + + do_db_write(fe_mut.as_mut(), key_ptr, value_ptr).unwrap(); + + let val = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| { + Ok(store + .get(b"new storage key") + .0 + .expect("error getting value")) + }) + .unwrap(); + assert_eq!(val, Some(b"new value".to_vec())); + } + + #[test] + fn do_db_write_can_override() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let key_ptr = write_data(&mut fe_mut, KEY1); + let value_ptr = write_data(&mut fe_mut, VALUE2); + + leave_default_data(&mut fe_mut); + + do_db_write(fe_mut.as_mut(), key_ptr, value_ptr).unwrap(); + + let val = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| { + Ok(store.get(KEY1).0.expect("error getting value")) + }) + .unwrap(); + assert_eq!(val, Some(VALUE2.to_vec())); + } + + #[test] + fn do_db_write_works_for_empty_value() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let key_ptr = write_data(&mut fe_mut, b"new storage key"); + let value_ptr = write_data(&mut fe_mut, b""); + + leave_default_data(&mut fe_mut); + + do_db_write(fe_mut.as_mut(), key_ptr, value_ptr).unwrap(); + + let val = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| { + Ok(store + .get(b"new storage key") + .0 + .expect("error getting value")) + }) + .unwrap(); + assert_eq!(val, Some(b"".to_vec())); + } + + #[test] + fn do_db_write_fails_for_large_key() { + let api = MockApi::default(); + let (fe, mut store, 
_instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + const KEY_SIZE: usize = 300 * 1024; + let key_ptr = write_data(&mut fe_mut, &vec![4u8; KEY_SIZE]); + let value_ptr = write_data(&mut fe_mut, b"new value"); + + leave_default_data(&mut fe_mut); + + let result = do_db_write(fe_mut, key_ptr, value_ptr); + assert_eq!(result.unwrap_err().to_string(), format!("Generic error: Key too big. Tried to write {KEY_SIZE} bytes to storage, limit is {MAX_LENGTH_DB_KEY}.")); + } + + #[test] + fn do_db_write_fails_for_large_value() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + const VAL_SIZE: usize = 300 * 1024; + let key_ptr = write_data(&mut fe_mut, b"new storage key"); + let value_ptr = write_data(&mut fe_mut, &vec![5u8; VAL_SIZE]); + + leave_default_data(&mut fe_mut); + + let result = do_db_write(fe_mut, key_ptr, value_ptr); + assert_eq!(result.unwrap_err().to_string(), format!("Generic error: Value too big. Tried to write {VAL_SIZE} bytes to storage, limit is {MAX_LENGTH_DB_VALUE}.")); + } + + #[test] + fn do_db_write_is_prohibited_in_readonly_contexts() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let key_ptr = write_data(&mut fe_mut, b"new storage key"); + let value_ptr = write_data(&mut fe_mut, b"new value"); + + leave_default_data(&mut fe_mut); + fe_mut.data().set_storage_readonly(true); + + let result = do_db_write(fe_mut, key_ptr, value_ptr); + match result.unwrap_err() { + VmError::WriteAccessDenied { .. 
} => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_db_remove_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let existing_key = KEY1; + let key_ptr = write_data(&mut fe_mut, existing_key); + + leave_default_data(&mut fe_mut); + + fe_mut + .data() + .with_storage_from_context::<_, _>(|store| { + println!("{store:?}"); + Ok(()) + }) + .unwrap(); + + do_db_remove(fe_mut.as_mut(), key_ptr).unwrap(); + + fe_mut + .data() + .with_storage_from_context::<_, _>(|store| { + println!("{store:?}"); + Ok(()) + }) + .unwrap(); + + let value = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| { + Ok(store.get(existing_key).0.expect("error getting value")) + }) + .unwrap(); + assert_eq!(value, None); + } + + #[test] + fn do_db_remove_works_for_non_existent_key() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let non_existent_key = b"I do not exist"; + let key_ptr = write_data(&mut fe_mut, non_existent_key); + + leave_default_data(&mut fe_mut); + + // Note: right now we cannot differentiate between an existent and a non-existent key + do_db_remove(fe_mut.as_mut(), key_ptr).unwrap(); + + let value = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| { + Ok(store.get(non_existent_key).0.expect("error getting value")) + }) + .unwrap(); + assert_eq!(value, None); + } + + #[test] + fn do_db_remove_fails_for_large_key() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let key_ptr = write_data(&mut fe_mut, &vec![26u8; 300 * 1024]); + + leave_default_data(&mut fe_mut); + + let result = do_db_remove(fe_mut, key_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: + CommunicationError::RegionLengthTooBig { + length, max_length, .. + }, + .. 
+ } => { + assert_eq!(length, 300 * 1024); + assert_eq!(max_length, MAX_LENGTH_DB_KEY); + } + err => panic!("unexpected error: {err:?}"), + }; + } + + #[test] + fn do_db_remove_is_prohibited_in_readonly_contexts() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let key_ptr = write_data(&mut fe_mut, b"a storage key"); + + leave_default_data(&mut fe_mut); + fe_mut.data().set_storage_readonly(true); + + let result = do_db_remove(fe_mut, key_ptr); + match result.unwrap_err() { + VmError::WriteAccessDenied { .. } => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_addr_validate_works() { + let api = MockApi::default().with_prefix("osmo"); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr1 = write_data(&mut fe_mut, b"osmo186kh7c0k0gh4ww0wh4jqc4yhzu7n7dhswe845d"); + let source_ptr2 = write_data(&mut fe_mut, b"osmo18enxpg25jc4zkwe7w00yneva0vztwuex3rtv8t"); + + let res = do_addr_validate(fe_mut.as_mut(), source_ptr1).unwrap(); + assert_eq!(res, 0); + let res = do_addr_validate(fe_mut.as_mut(), source_ptr2).unwrap(); + assert_eq!(res, 0); + } + + #[test] + fn do_addr_validate_reports_invalid_input_back_to_contract() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr1 = write_data(&mut fe_mut, b"cosmwasm\x80o"); // invalid UTF-8 (cosmwasm�o) + let source_ptr2 = write_data(&mut fe_mut, b""); // empty + let source_ptr3 = write_data( + &mut fe_mut, + b"cosmwasm1h34LMPYwh4upnjdg90cjf4j70aee6z8qqfspugamjp42e4q28kqs8s7vcp", + ); // Not normalized. The definition of normalized is chain-dependent but the MockApi disallows mixed case. 
+ + let res = do_addr_validate(fe_mut.as_mut(), source_ptr1).unwrap(); + assert_ne!(res, 0); + let err = String::from_utf8(force_read(&mut fe_mut, res)).unwrap(); + assert_eq!(err, "Input is not valid UTF-8"); + + let res = do_addr_validate(fe_mut.as_mut(), source_ptr2).unwrap(); + assert_ne!(res, 0); + let err = String::from_utf8(force_read(&mut fe_mut, res)).unwrap(); + assert_eq!(err, "Input is empty"); + + let res = do_addr_validate(fe_mut.as_mut(), source_ptr3).unwrap(); + assert_ne!(res, 0); + let err = String::from_utf8(force_read(&mut fe_mut, res)).unwrap(); + assert_eq!(err, "Error decoding bech32"); + } + + #[test] + fn do_addr_validate_fails_for_broken_backend() { + let api = MockApi::new_failing("Temporarily unavailable"); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, b"foo"); + + leave_default_data(&mut fe_mut); + + let result = do_addr_validate(fe_mut, source_ptr); + match result.unwrap_err() { + VmError::BackendErr { + source: BackendError::Unknown { msg, .. }, + .. + } => assert_eq!(msg, "Temporarily unavailable"), + err => panic!("Incorrect error returned: {err:?}"), + } + } + + #[test] + fn do_addr_validate_fails_for_large_inputs() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, &[61; 333]); + + leave_default_data(&mut fe_mut); + + let result = do_addr_validate(fe_mut, source_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: + CommunicationError::RegionLengthTooBig { + length, max_length, .. + }, + .. 
+ } => { + assert_eq!(length, 333); + assert_eq!(max_length, 256); + } + err => panic!("Incorrect error returned: {err:?}"), + } + } + + const CANONICAL_ADDRESS_BUFFER_LENGTH: u32 = 64; + + #[test] + fn do_addr_canonicalize_works() { + let api = MockApi::default(); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data( + &mut fe_mut, + b"cosmwasm1h34lmpywh4upnjdg90cjf4j70aee6z8qqfspugamjp42e4q28kqs8s7vcp", + ); + let dest_ptr = create_empty(&instance, &mut fe_mut, CANONICAL_ADDRESS_BUFFER_LENGTH); + + leave_default_data(&mut fe_mut); + + let res = do_addr_canonicalize(fe_mut.as_mut(), source_ptr, dest_ptr).unwrap(); + assert_eq!(res, 0); + let data = force_read(&mut fe_mut, dest_ptr); + assert_eq!(data.len(), 32); + } + + #[test] + fn do_addr_canonicalize_reports_invalid_input_back_to_contract() { + let api = MockApi::default(); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr1 = write_data(&mut fe_mut, b"cosmwasm\x80o"); // invalid UTF-8 (cosmwasm�o) + let source_ptr2 = write_data(&mut fe_mut, b""); // empty + let dest_ptr = create_empty(&instance, &mut fe_mut, 70); + + leave_default_data(&mut fe_mut); + + let res = do_addr_canonicalize(fe_mut.as_mut(), source_ptr1, dest_ptr).unwrap(); + assert_ne!(res, 0); + let err = String::from_utf8(force_read(&mut fe_mut, res)).unwrap(); + assert_eq!(err, "Input is not valid UTF-8"); + + let res = do_addr_canonicalize(fe_mut.as_mut(), source_ptr2, dest_ptr).unwrap(); + assert_ne!(res, 0); + let err = String::from_utf8(force_read(&mut fe_mut, res)).unwrap(); + assert_eq!(err, "Input is empty"); + } + + #[test] + fn do_addr_canonicalize_fails_for_broken_backend() { + let api = MockApi::new_failing("Temporarily unavailable"); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, b"foo"); + let 
dest_ptr = create_empty(&instance, &mut fe_mut, 7); + + leave_default_data(&mut fe_mut); + + let result = do_addr_canonicalize(fe_mut.as_mut(), source_ptr, dest_ptr); + match result.unwrap_err() { + VmError::BackendErr { + source: BackendError::Unknown { msg, .. }, + .. + } => assert_eq!(msg, "Temporarily unavailable"), + err => panic!("Incorrect error returned: {err:?}"), + } + } + + #[test] + fn do_addr_canonicalize_fails_for_large_inputs() { + let api = MockApi::default(); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, &[61; 333]); + let dest_ptr = create_empty(&instance, &mut fe_mut, 8); + + leave_default_data(&mut fe_mut); + + let result = do_addr_canonicalize(fe_mut.as_mut(), source_ptr, dest_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: + CommunicationError::RegionLengthTooBig { + length, max_length, .. + }, + .. + } => { + assert_eq!(length, 333); + assert_eq!(max_length, 256); + } + err => panic!("Incorrect error returned: {err:?}"), + } + } + + #[test] + fn do_addr_canonicalize_fails_for_small_destination_region() { + let api = MockApi::default().with_prefix("osmo"); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, b"osmo18enxpg25jc4zkwe7w00yneva0vztwuex3rtv8t"); + let dest_ptr = create_empty(&instance, &mut fe_mut, 7); + + leave_default_data(&mut fe_mut); + + let result = do_addr_canonicalize(fe_mut, source_ptr, dest_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionTooSmall { size, required, .. }, + .. 
+ } => { + assert_eq!(size, 7); + assert_eq!(required, 20); + } + err => panic!("Incorrect error returned: {err:?}"), + } + } + + #[test] + fn do_addr_humanize_works() { + let api = MockApi::default(); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_data = vec![0x22; CANONICAL_ADDRESS_BUFFER_LENGTH as usize]; + let source_ptr = write_data(&mut fe_mut, &source_data); + let dest_ptr = create_empty(&instance, &mut fe_mut, 118); + + leave_default_data(&mut fe_mut); + + let error_ptr = do_addr_humanize(fe_mut.as_mut(), source_ptr, dest_ptr).unwrap(); + assert_eq!(error_ptr, 0); + assert_eq!(force_read(&mut fe_mut, dest_ptr), b"cosmwasm1yg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zyg3zygsegeksq"); + } + + #[test] + fn do_addr_humanize_reports_invalid_input_back_to_contract() { + let api = MockApi::default(); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, b""); // too short + let dest_ptr = create_empty(&instance, &mut fe_mut, 70); + + leave_default_data(&mut fe_mut); + + let res = do_addr_humanize(fe_mut.as_mut(), source_ptr, dest_ptr).unwrap(); + assert_ne!(res, 0); + let err = String::from_utf8(force_read(&mut fe_mut, res)).unwrap(); + assert_eq!(err, "Invalid canonical address length"); + } + + #[test] + fn do_addr_humanize_fails_for_broken_backend() { + let api = MockApi::new_failing("Temporarily unavailable"); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, b"foo\0\0\0\0\0"); + let dest_ptr = create_empty(&instance, &mut fe_mut, 70); + + leave_default_data(&mut fe_mut); + + let result = do_addr_humanize(fe_mut, source_ptr, dest_ptr); + match result.unwrap_err() { + VmError::BackendErr { + source: BackendError::Unknown { msg, .. }, + .. 
+ } => assert_eq!(msg, "Temporarily unavailable"), + err => panic!("Incorrect error returned: {err:?}"), + }; + } + + #[test] + fn do_addr_humanize_fails_for_input_too_long() { + let api = MockApi::default(); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_ptr = write_data(&mut fe_mut, &[61; 65]); + let dest_ptr = create_empty(&instance, &mut fe_mut, 70); + + leave_default_data(&mut fe_mut); + + let result = do_addr_humanize(fe_mut, source_ptr, dest_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: + CommunicationError::RegionLengthTooBig { + length, max_length, .. + }, + .. + } => { + assert_eq!(length, 65); + assert_eq!(max_length, 64); + } + err => panic!("Incorrect error returned: {err:?}"), + } + } + + #[test] + fn do_addr_humanize_fails_for_destination_region_too_small() { + let api = MockApi::default(); + let (fe, mut store, instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let source_data = vec![0x22; CANONICAL_ADDRESS_BUFFER_LENGTH as usize]; + let source_ptr = write_data(&mut fe_mut, &source_data); + let dest_ptr = create_empty(&instance, &mut fe_mut, 2); + + leave_default_data(&mut fe_mut); + + let result = do_addr_humanize(fe_mut, source_ptr, dest_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionTooSmall { size, required, .. }, + .. 
+ } => { + assert_eq!(size, 2); + assert_eq!(required, 118); + } + err => panic!("Incorrect error returned: {err:?}"), + } + } + + #[test] + fn do_secp256k1_verify_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 0 + ); + } + + #[test] + fn do_secp256k1_verify_wrong_hash_verify_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + // alter hash + hash[0] ^= 0x01; + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 + ); + } + + #[test] + fn do_secp256k1_verify_larger_hash_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + // extend / break hash + hash.push(0x00); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = 
do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. + } => assert_eq!(length, MESSAGE_HASH_MAX_LEN + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_secp256k1_verify_shorter_hash_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + // reduce / break hash + hash.pop(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 3 // mapped InvalidHashFormat + ); + } + + #[test] + fn do_secp256k1_verify_wrong_sig_verify_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let mut sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + // alter sig + sig[0] ^= 0x01; + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 + ); + } + + #[test] + fn do_secp256k1_verify_larger_sig_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let mut sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); 
+ // extend / break sig + sig.push(0x00); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. + } => assert_eq!(length, ECDSA_SIGNATURE_LEN + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_secp256k1_verify_shorter_sig_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let mut sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + // reduce / break sig + sig.pop(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 4 // mapped InvalidSignatureFormat + ) + } + + #[test] + fn do_secp256k1_verify_wrong_pubkey_format_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + // alter pubkey format + pubkey[0] ^= 0x01; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_secp256k1_verify_wrong_pubkey_fails() { + let api = MockApi::default(); 
+ let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + // alter pubkey + pubkey[1] ^= 0x01; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 10 // mapped GenericErr + ) + } + + #[test] + fn do_secp256k1_verify_larger_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + // extend / break pubkey + pubkey.push(0x00); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. 
+ } => assert_eq!(length, ECDSA_PUBKEY_MAX_LEN + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_secp256k1_verify_shorter_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256K1_PUBKEY_HEX).unwrap(); + // reduce / break pubkey + pubkey.pop(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_secp256k1_verify_empty_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256K1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256K1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = vec![]; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_secp256k1_verify_wrong_data_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = vec![0x22; MESSAGE_HASH_MAX_LEN]; + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = vec![0x22; ECDSA_SIGNATURE_LEN]; + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = vec![0x04; ECDSA_PUBKEY_MAX_LEN]; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256k1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 10 // mapped 
GenericErr + ) + } + + #[test] + fn do_secp256k1_recover_pubkey_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + // https://gist.github.com/webmaster128/130b628d83621a33579751846699ed15 + let hash = hex!("5ae8317d34d1e595e3fa7247db80c0af4320cce1116de187f8f7e2e099c0d8d0"); + let sig = hex!("45c0b7f8c09a9e1f1cea0c25785594427b6bf8f9f878a8af0b1abbb48e16d0920d8becd0c220f67c51217eecfd7184ef0732481c843857e6bc7fc095c4f6b788"); + let recovery_param = 1; + let expected = hex!("044a071e8a6e10aada2b8cf39fa3b5fb3400b04e99ea8ae64ceea1a977dbeaf5d5f8c8fbd10b71ab14cd561f7df8eb6da50f8a8d81ba564342244d26d1d4211595"); + + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig_ptr = write_data(&mut fe_mut, &sig); + let result = + do_secp256k1_recover_pubkey(fe_mut.as_mut(), hash_ptr, sig_ptr, recovery_param) + .unwrap(); + let error = result >> 32; + let pubkey_ptr: u32 = (result & 0xFFFFFFFF).try_into().unwrap(); + assert_eq!(error, 0); + assert_eq!(force_read(&mut fe_mut, pubkey_ptr), expected); + } + + #[test] + fn do_secp256r1_verify_works() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 0 + ); + } + + #[test] + fn do_secp256r1_verify_wrong_hash_verify_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + // alter hash + hash[0] ^= 
0x01; + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 + ); + } + + #[test] + fn do_secp256r1_verify_larger_hash_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + // extend / break hash + hash.push(0x00); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. 
+ } => assert_eq!(length, MESSAGE_HASH_MAX_LEN + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_secp256r1_verify_shorter_hash_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + // reduce / break hash + hash.pop(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 3 // mapped InvalidHashFormat + ); + } + + #[test] + fn do_secp256r1_verify_wrong_sig_verify_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let mut sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + // alter sig + sig[0] ^= 0x01; + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 + ); + } + + #[test] + fn do_secp256r1_verify_larger_sig_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let mut sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + // extend / break sig + sig.push(0x00); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = 
write_data(&mut fe_mut, &pubkey); + + let result = do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. + } => assert_eq!(length, ECDSA_SIGNATURE_LEN + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_secp256r1_verify_shorter_sig_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let mut sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + // reduce / break sig + sig.pop(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 4 // mapped InvalidSignatureFormat + ) + } + + #[test] + fn do_secp256r1_verify_wrong_pubkey_format_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + // alter pubkey format + pubkey[0] ^= 0x01; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_secp256r1_verify_wrong_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); 
+ let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + // alter pubkey + pubkey[1] ^= 0x01; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 10 // mapped GenericErr + ) + } + + #[test] + fn do_secp256r1_verify_larger_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + // extend / break pubkey + pubkey.push(0x00); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. 
+ } => assert_eq!(length, ECDSA_PUBKEY_MAX_LEN + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_secp256r1_verify_shorter_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(ECDSA_P256R1_PUBKEY_HEX).unwrap(); + // reduce / break pubkey + pubkey.pop(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_secp256r1_verify_empty_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex::decode(ECDSA_P256R1_HASH_HEX).unwrap(); + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = hex::decode(ECDSA_P256R1_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = vec![]; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_secp256r1_verify_wrong_data_fails() { + let api = MockApi::default(); + let (fe, mut store, mut _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = vec![0x22; MESSAGE_HASH_MAX_LEN]; + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig = vec![0x22; ECDSA_SIGNATURE_LEN]; + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = vec![0x04; ECDSA_PUBKEY_MAX_LEN]; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_secp256r1_verify(fe_mut, hash_ptr, sig_ptr, pubkey_ptr).unwrap(), + 10 // 
mapped GenericErr + ) + } + + #[test] + fn do_secp256r1_recover_pubkey_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let hash = hex!("12135386c09e0bf6fd5c454a95bcfe9b3edb25c71e455c73a212405694b29002"); + let sig = hex!("b53ce4da1aa7c0dc77a1896ab716b921499aed78df725b1504aba1597ba0c64bd7c246dc7ad0e67700c373edcfdd1c0a0495fc954549ad579df6ed1438840851"); + let recovery_param = 0; + let expected = hex!("040a7dbb8bf50cb605eb2268b081f26d6b08e012f952c4b70a5a1e6e7d46af98bbf26dd7d799930062480849962ccf5004edcfd307c044f4e8f667c9baa834eeae"); + + let hash_ptr = write_data(&mut fe_mut, &hash); + let sig_ptr = write_data(&mut fe_mut, &sig); + let result = + do_secp256r1_recover_pubkey(fe_mut.as_mut(), hash_ptr, sig_ptr, recovery_param) + .unwrap(); + let error = result >> 32; + let pubkey_ptr: u32 = (result & 0xFFFFFFFF).try_into().unwrap(); + assert_eq!(error, 0); + assert_eq!(force_read(&mut fe_mut, pubkey_ptr), expected); + } + + #[test] + fn do_ed25519_verify_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 0 + ); + } + + #[test] + fn do_ed25519_verify_wrong_msg_verify_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + // alter msg + msg.push(0x01); + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + let sig_ptr = 
write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 + ); + } + + #[test] + fn do_ed25519_verify_larger_msg_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let mut msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + // extend / break msg + msg.extend_from_slice(&[0x00; MAX_LENGTH_ED25519_MESSAGE + 1]); + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. 
+ } => assert_eq!(length, msg.len()), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_ed25519_verify_wrong_sig_verify_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let mut sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + // alter sig + sig[0] ^= 0x01; + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 + ); + } + + #[test] + fn do_ed25519_verify_larger_sig_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let mut sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + // extend / break sig + sig.push(0x00); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. 
+ } => assert_eq!(length, MAX_LENGTH_ED25519_SIGNATURE + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_ed25519_verify_shorter_sig_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let mut sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + // reduce / break sig + sig.pop(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 4 // mapped InvalidSignatureFormat + ) + } + + #[test] + fn do_ed25519_verify_wrong_pubkey_verify_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + // alter pubkey + pubkey[1] ^= 0x01; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 + ); + } + + #[test] + fn do_ed25519_verify_larger_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + // extend / break pubkey + pubkey.push(0x00); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + let result = do_ed25519_verify(fe_mut, 
msg_ptr, sig_ptr, pubkey_ptr); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::RegionLengthTooBig { length, .. }, + .. + } => assert_eq!(length, EDDSA_PUBKEY_LEN + 1), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + fn do_ed25519_verify_shorter_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let mut pubkey = hex::decode(EDDSA_PUBKEY_HEX).unwrap(); + // reduce / break pubkey + pubkey.pop(); + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_ed25519_verify_empty_pubkey_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = hex::decode(EDDSA_MSG_HEX).unwrap(); + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = hex::decode(EDDSA_SIG_HEX).unwrap(); + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = vec![]; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 5 // mapped InvalidPubkeyFormat + ) + } + + #[test] + fn do_ed25519_verify_wrong_data_fails() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let msg = vec![0x22; MESSAGE_HASH_MAX_LEN]; + let msg_ptr = write_data(&mut fe_mut, &msg); + let sig = vec![0x22; MAX_LENGTH_ED25519_SIGNATURE]; + let sig_ptr = write_data(&mut fe_mut, &sig); + let pubkey = vec![0x04; EDDSA_PUBKEY_LEN]; + let pubkey_ptr = write_data(&mut fe_mut, &pubkey); + + 
assert_eq!( + do_ed25519_verify(fe_mut, msg_ptr, sig_ptr, pubkey_ptr).unwrap(), + 1 // verification failure + ) + } + + #[test] + #[allow(deprecated)] + fn do_query_chain_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let request: QueryRequest = QueryRequest::Bank(BankQuery::AllBalances { + address: INIT_ADDR.to_string(), + }); + let request_data = cosmwasm_std::to_json_vec(&request).unwrap(); + let request_ptr = write_data(&mut fe_mut, &request_data); + + leave_default_data(&mut fe_mut); + + let response_ptr = do_query_chain(fe_mut.as_mut(), request_ptr).unwrap(); + let response = force_read(&mut fe_mut, response_ptr); + + let query_result: cosmwasm_std::QuerierResult = cosmwasm_std::from_json(response).unwrap(); + let query_result_inner = query_result.unwrap(); + let query_result_inner_inner = query_result_inner.unwrap(); + let parsed_again: AllBalanceResponse = from_json(query_result_inner_inner).unwrap(); + assert_eq!(parsed_again.amount, coins(INIT_AMOUNT, INIT_DENOM)); + } + + #[test] + fn do_query_chain_fails_for_broken_request() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let request = b"Not valid JSON for sure"; + let request_ptr = write_data(&mut fe_mut, request); + + leave_default_data(&mut fe_mut); + + let response_ptr = do_query_chain(fe_mut.as_mut(), request_ptr).unwrap(); + let response = force_read(&mut fe_mut, response_ptr); + + let query_result: cosmwasm_std::QuerierResult = cosmwasm_std::from_json(response).unwrap(); + match query_result { + SystemResult::Ok(_) => panic!("This must not succeed"), + SystemResult::Err(SystemError::InvalidRequest { request: err, .. 
}) => { + assert_eq!(err.as_slice(), request) + } + SystemResult::Err(err) => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn do_query_chain_fails_for_missing_contract() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let request: QueryRequest = QueryRequest::Wasm(WasmQuery::Smart { + contract_addr: String::from("non-existent"), + msg: Binary::from(b"{}" as &[u8]), + }); + let request_data = cosmwasm_std::to_json_vec(&request).unwrap(); + let request_ptr = write_data(&mut fe_mut, &request_data); + + leave_default_data(&mut fe_mut); + + let response_ptr = do_query_chain(fe_mut.as_mut(), request_ptr).unwrap(); + let response = force_read(&mut fe_mut, response_ptr); + + let query_result: cosmwasm_std::QuerierResult = cosmwasm_std::from_json(response).unwrap(); + match query_result { + SystemResult::Ok(_) => panic!("This must not succeed"), + SystemResult::Err(SystemError::NoSuchContract { addr }) => { + assert_eq!(addr, "non-existent") + } + SystemResult::Err(err) => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_scan_unbound_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + leave_default_data(&mut fe_mut); + + // set up iterator over all space + let id = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Ascending.into()).unwrap(); + assert_eq!(1, id); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY1.to_vec(), VALUE1.to_vec())); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY2.to_vec(), VALUE2.to_vec())); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + 
assert!(item.0.unwrap().is_none()); + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_scan_unbound_descending_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + leave_default_data(&mut fe_mut); + + // set up iterator over all space + let id = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Descending.into()).unwrap(); + assert_eq!(1, id); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY2.to_vec(), VALUE2.to_vec())); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY1.to_vec(), VALUE1.to_vec())); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + assert!(item.0.unwrap().is_none()); + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_scan_bound_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + let start = write_data(&mut fe_mut, b"anna"); + let end = write_data(&mut fe_mut, b"bert"); + + leave_default_data(&mut fe_mut); + + let id = do_db_scan(fe_mut.as_mut(), start, end, Order::Ascending.into()).unwrap(); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY1.to_vec(), VALUE1.to_vec())); + + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id))) + .unwrap(); + assert!(item.0.unwrap().is_none()); + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_scan_multiple_iterators() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + leave_default_data(&mut fe_mut); + + // unbounded, ascending and 
descending + let id1 = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Ascending.into()).unwrap(); + let id2 = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Descending.into()).unwrap(); + assert_eq!(id1, 1); + assert_eq!(id2, 2); + + // first item, first iterator + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id1))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY1.to_vec(), VALUE1.to_vec())); + + // second item, first iterator + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id1))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY2.to_vec(), VALUE2.to_vec())); + + // first item, second iterator + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id2))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY2.to_vec(), VALUE2.to_vec())); + + // end, first iterator + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id1))) + .unwrap(); + assert!(item.0.unwrap().is_none()); + + // second item, second iterator + let item = fe_mut + .data() + .with_storage_from_context::<_, _>(|store| Ok(store.next(id2))) + .unwrap(); + assert_eq!(item.0.unwrap().unwrap(), (KEY1.to_vec(), VALUE1.to_vec())); + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_scan_errors_for_invalid_order_value() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + leave_default_data(&mut fe_mut); + + // set up iterator over all space + let result = do_db_scan(fe_mut, 0, 0, 42); + match result.unwrap_err() { + VmError::CommunicationErr { + source: CommunicationError::InvalidOrder { .. }, + .. 
+ } => {} + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_next_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + leave_default_data(&mut fe_mut); + + let id = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Ascending.into()).unwrap(); + + // Entry 1 + let kv_region_ptr = do_db_next(fe_mut.as_mut(), id).unwrap(); + assert_eq!( + force_read(&mut fe_mut, kv_region_ptr), + [KEY1, b"\0\0\0\x03", VALUE1, b"\0\0\0\x06"].concat() + ); + + // Entry 2 + let kv_region_ptr = do_db_next(fe_mut.as_mut(), id).unwrap(); + assert_eq!( + force_read(&mut fe_mut, kv_region_ptr), + [KEY2, b"\0\0\0\x04", VALUE2, b"\0\0\0\x05"].concat() + ); + + // End + let kv_region_ptr = do_db_next(fe_mut.as_mut(), id).unwrap(); + assert_eq!(force_read(&mut fe_mut, kv_region_ptr), b"\0\0\0\0\0\0\0\0"); + // API makes no guarantees for value_ptr in this case + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_next_fails_for_non_existent_id() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + leave_default_data(&mut fe_mut); + + let non_existent_id = 42u32; + let result = do_db_next(fe_mut.as_mut(), non_existent_id); + match result.unwrap_err() { + VmError::BackendErr { + source: BackendError::IteratorDoesNotExist { id, .. }, + .. 
+ } => assert_eq!(id, non_existent_id), + e => panic!("Unexpected error: {e:?}"), + } + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_next_key_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + leave_default_data(&mut fe_mut); + + let id = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Ascending.into()).unwrap(); + + // Entry 1 + let key_region_ptr = do_db_next_key(fe_mut.as_mut(), id).unwrap(); + assert_eq!(force_read(&mut fe_mut, key_region_ptr), KEY1); + + // Entry 2 + let key_region_ptr = do_db_next_key(fe_mut.as_mut(), id).unwrap(); + assert_eq!(force_read(&mut fe_mut, key_region_ptr), KEY2); + + // End + let key_region_ptr: u32 = do_db_next_key(fe_mut.as_mut(), id).unwrap(); + assert_eq!(key_region_ptr, 0); + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_next_value_works() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + leave_default_data(&mut fe_mut); + + let id = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Ascending.into()).unwrap(); + + // Entry 1 + let value_region_ptr = do_db_next_value(fe_mut.as_mut(), id).unwrap(); + assert_eq!(force_read(&mut fe_mut, value_region_ptr), VALUE1); + + // Entry 2 + let value_region_ptr = do_db_next_value(fe_mut.as_mut(), id).unwrap(); + assert_eq!(force_read(&mut fe_mut, value_region_ptr), VALUE2); + + // End + let value_region_ptr = do_db_next_value(fe_mut.as_mut(), id).unwrap(); + assert_eq!(value_region_ptr, 0); + } + + #[test] + #[cfg(feature = "iterator")] + fn do_db_next_works_mixed() { + let api = MockApi::default(); + let (fe, mut store, _instance) = make_instance(api); + let mut fe_mut = fe.into_mut(&mut store); + + leave_default_data(&mut fe_mut); + + let id = do_db_scan(fe_mut.as_mut(), 0, 0, Order::Ascending.into()).unwrap(); + + // Key 1 + let key_region_ptr = do_db_next_key(fe_mut.as_mut(), id).unwrap(); + 
assert_eq!(force_read(&mut fe_mut, key_region_ptr), KEY1); + + // Value 2 + let value_region_ptr = do_db_next_value(fe_mut.as_mut(), id).unwrap(); + assert_eq!(force_read(&mut fe_mut, value_region_ptr), VALUE2); + + // End + let kv_region_ptr = do_db_next(fe_mut.as_mut(), id).unwrap(); + assert_eq!(force_read(&mut fe_mut, kv_region_ptr), b"\0\0\0\0\0\0\0\0"); + } +} diff --git a/vm/src/instance.rs b/vm/src/instance.rs new file mode 100644 index 000000000..caa732290 --- /dev/null +++ b/vm/src/instance.rs @@ -0,0 +1,1179 @@ +use std::cell::RefCell; +use std::collections::{HashMap, HashSet}; +use std::ptr::NonNull; +use std::rc::Rc; +use std::sync::Mutex; + +use wasmer::{ + Exports, Function, FunctionEnv, Imports, Instance as WasmerInstance, Module, Store, Value, +}; + +use crate::backend::{Backend, BackendApi, Querier, Storage}; +use crate::capabilities::required_capabilities_from_module; +use crate::conversion::{ref_to_u32, to_u32}; +use crate::environment::Environment; +use crate::errors::{CommunicationError, VmError, VmResult}; +use crate::imports::{ + do_abort, do_addr_canonicalize, do_addr_humanize, do_addr_validate, do_bls12_381_aggregate_g1, + do_bls12_381_aggregate_g2, do_bls12_381_hash_to_g1, do_bls12_381_hash_to_g2, + do_bls12_381_pairing_equality, do_db_read, do_db_remove, do_db_write, do_debug, + do_ed25519_batch_verify, do_ed25519_verify, do_query_chain, do_secp256k1_recover_pubkey, + do_secp256k1_verify, do_secp256r1_recover_pubkey, do_secp256r1_verify, +}; +#[cfg(feature = "iterator")] +use crate::imports::{do_db_next, do_db_next_key, do_db_next_value, do_db_scan}; +use crate::memory::{read_region, write_region}; +use crate::size::Size; +use crate::wasm_backend::{compile, make_compiling_engine}; + +pub use crate::environment::DebugInfo; // Re-exported as public via to be usable for set_debug_handler + +#[derive(Copy, Clone, Debug)] +pub struct GasReport { + /// The original limit the instance was created with + pub limit: u64, + /// The remaining gas 
that can be spend + pub remaining: u64, + /// The amount of gas that was spend and metered externally in operations triggered by this instance + pub used_externally: u64, + /// The amount of gas that was spend and metered internally (i.e. by executing Wasm and calling + /// API methods which are not metered externally) + pub used_internally: u64, +} + +#[derive(Copy, Clone, Debug)] +pub struct InstanceOptions { + /// Gas limit measured in [CosmWasm gas](https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md). + pub gas_limit: u64, +} + +pub struct Instance { + /// We put this instance in a box to maintain a constant memory address for the entire + /// lifetime of the instance in the cache. This is needed e.g. when linking the wasmer + /// instance to a context. See also https://github.com/CosmWasm/cosmwasm/pull/245. + /// + /// This instance should only be accessed via the Environment, which provides safe access. + _inner: Box, + fe: FunctionEnv>, + store: Store, +} + +impl Instance +where + A: BackendApi + 'static, // 'static is needed here to allow copying API instances into closures + S: Storage + 'static, // 'static is needed here to allow using this in an Environment that is cloned into closures + Q: Querier + 'static, // 'static is needed here to allow using this in an Environment that is cloned into closures +{ + /// This is the only Instance constructor that can be called from outside of cosmwasm-vm, + /// e.g. in test code that needs a customized variant of cosmwasm_vm::testing::mock_instance*. 
+ pub fn from_code( + code: &[u8], + backend: Backend, + options: InstanceOptions, + memory_limit: Option, + ) -> VmResult { + let engine = make_compiling_engine(memory_limit); + let module = compile(&engine, code)?; + let store = Store::new(engine); + Instance::from_module(store, &module, backend, options.gas_limit, None, None) + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn from_module( + mut store: Store, + module: &Module, + backend: Backend, + gas_limit: u64, + extra_imports: Option>, + instantiation_lock: Option<&Mutex<()>>, + ) -> VmResult { + let fe = FunctionEnv::new(&mut store, Environment::new(backend.api, gas_limit)); + + let mut import_obj = Imports::new(); + let mut env_imports = Exports::new(); + + // Reads the database entry at the given key into the value. + // Returns 0 if key does not exist and pointer to result region otherwise. + // Ownership of the key pointer is not transferred to the host. + // Ownership of the value pointer is transferred to the contract. + env_imports.insert( + "db_read", + Function::new_typed_with_env(&mut store, &fe, do_db_read), + ); + + // Writes the given value into the database entry at the given key. + // Ownership of both input and output pointer is not transferred to the host. + env_imports.insert( + "db_write", + Function::new_typed_with_env(&mut store, &fe, do_db_write), + ); + + // Removes the value at the given key. Different than writing &[] as future + // scans will not find this key. + // At the moment it is not possible to differentiate between a key that existed before and one that did not exist (https://github.com/CosmWasm/cosmwasm/issues/290). + // Ownership of both key pointer is not transferred to the host. + env_imports.insert( + "db_remove", + Function::new_typed_with_env(&mut store, &fe, do_db_remove), + ); + + // Reads human address from source_ptr and checks if it is valid. + // Returns 0 on if the input is valid. 
Returns a non-zero memory location to a Region containing an UTF-8 encoded error string for invalid inputs. + // Ownership of the input pointer is not transferred to the host. + env_imports.insert( + "addr_validate", + Function::new_typed_with_env(&mut store, &fe, do_addr_validate), + ); + + // Reads human address from source_ptr and writes canonicalized representation to destination_ptr. + // A prepared and sufficiently large memory Region is expected at destination_ptr that points to pre-allocated memory. + // Returns 0 on success. Returns a non-zero memory location to a Region containing an UTF-8 encoded error string for invalid inputs. + // Ownership of both input and output pointer is not transferred to the host. + env_imports.insert( + "addr_canonicalize", + Function::new_typed_with_env(&mut store, &fe, do_addr_canonicalize), + ); + + // Reads canonical address from source_ptr and writes humanized representation to destination_ptr. + // A prepared and sufficiently large memory Region is expected at destination_ptr that points to pre-allocated memory. + // Returns 0 on success. Returns a non-zero memory location to a Region containing an UTF-8 encoded error string for invalid inputs. + // Ownership of both input and output pointer is not transferred to the host. + env_imports.insert( + "addr_humanize", + Function::new_typed_with_env(&mut store, &fe, do_addr_humanize), + ); + + // Reads a list of points on of the subgroup G1 on the BLS12-381 curve and aggregates them down to a single element. + // The "out_ptr" parameter has to be a pointer to a region with the sufficient size to fit an element of G1 (48 bytes). + // Returns a u32 as a result. 0 signifies success, anything else may be converted into a `CryptoError`. 
+ env_imports.insert( + "bls12_381_aggregate_g1", + Function::new_typed_with_env(&mut store, &fe, do_bls12_381_aggregate_g1), + ); + + // Reads a list of points on of the subgroup G2 on the BLS12-381 curve and aggregates them down to a single element. + // The "out_ptr" parameter has to be a pointer to a region with the sufficient size to fit an element of G2 (96 bytes). + // Returns a u32 as a result. 0 signifies success, anything else may be converted into a `CryptoError`. + env_imports.insert( + "bls12_381_aggregate_g2", + Function::new_typed_with_env(&mut store, &fe, do_bls12_381_aggregate_g2), + ); + + // Four parameters, "ps", "qs", "r", "s", which all represent elements on the BLS12-381 curve (where "ps" and "r" are elements of the G1 subgroup, and "qs" and "s" elements of G2). + // The "ps" and "qs" are interpreted as a continuous list of points in the subgroups G1 and G2 respectively. + // Returns a single u32 which signifies the validity of the pairing equality. + // Returns 0 if the pairing equality exists, 1 if it doesnt, and any other code may be interpreted as a `CryptoError`. + env_imports.insert( + "bls12_381_pairing_equality", + Function::new_typed_with_env(&mut store, &fe, do_bls12_381_pairing_equality), + ); + + // Three parameters, "hash_function" and "msg" and "dst", are passed down which are both arbitrary octet strings. + // The "hash_function" parameter is interpreted as a case of the "HashFunction" enum. + // The "out_ptr" parameter has to be a pointer to a region with the sufficient size to fit an element of G1 (48 bytes). + // Returns a u32 as a result. 0 signifies success, anything else may be converted into a `CryptoError`. + env_imports.insert( + "bls12_381_hash_to_g1", + Function::new_typed_with_env(&mut store, &fe, do_bls12_381_hash_to_g1), + ); + + // Three parameters, "hash_function" and "msg" and "dst", are passed down which are both arbitrary octet strings. 
+ // The "hash_function" parameter is interpreted as a case of the "HashFunction" enum. + // The "out_ptr" parameter has to be a pointer to a region with the sufficient size to fit an element of G2 (96 bytes). + // Returns a u32 as a result. 0 signifies success, anything else may be converted into a `CryptoError`. + env_imports.insert( + "bls12_381_hash_to_g2", + Function::new_typed_with_env(&mut store, &fe, do_bls12_381_hash_to_g2), + ); + + // Verifies message hashes against a signature with a public key, using the secp256k1 ECDSA parametrization. + // Returns 0 on verification success, 1 on verification failure, and values greater than 1 in case of error. + // Ownership of input pointers is not transferred to the host. + env_imports.insert( + "secp256k1_verify", + Function::new_typed_with_env(&mut store, &fe, do_secp256k1_verify), + ); + + env_imports.insert( + "secp256k1_recover_pubkey", + Function::new_typed_with_env(&mut store, &fe, do_secp256k1_recover_pubkey), + ); + + // Verifies message hashes against a signature with a public key, using the secp256r1 ECDSA parametrization. + // Returns 0 on verification success, 1 on verification failure, and values greater than 1 in case of error. + // Ownership of input pointers is not transferred to the host. + env_imports.insert( + "secp256r1_verify", + Function::new_typed_with_env(&mut store, &fe, do_secp256r1_verify), + ); + + env_imports.insert( + "secp256r1_recover_pubkey", + Function::new_typed_with_env(&mut store, &fe, do_secp256r1_recover_pubkey), + ); + + // Verifies a message against a signature with a public key, using the ed25519 EdDSA scheme. + // Returns 0 on verification success, 1 on verification failure, and values greater than 1 in case of error. + // Ownership of input pointers is not transferred to the host. 
+ env_imports.insert( + "ed25519_verify", + Function::new_typed_with_env(&mut store, &fe, do_ed25519_verify), + ); + + // Verifies a batch of messages against a batch of signatures with a batch of public keys, + // using the ed25519 EdDSA scheme. + // Returns 0 on verification success (all batches verify correctly), 1 on verification failure, and values + // greater than 1 in case of error. + // Ownership of input pointers is not transferred to the host. + env_imports.insert( + "ed25519_batch_verify", + Function::new_typed_with_env(&mut store, &fe, do_ed25519_batch_verify), + ); + + // Allows the contract to emit debug logs that the host can either process or ignore. + // This is never written to chain. + // Takes a pointer argument of a memory region that must contain an UTF-8 encoded string. + // Ownership of both input and output pointer is not transferred to the host. + env_imports.insert( + "debug", + Function::new_typed_with_env(&mut store, &fe, do_debug), + ); + + // Aborts the contract execution with an error message provided by the contract. + // Takes a pointer argument of a memory region that must contain an UTF-8 encoded string. + // Ownership of both input and output pointer is not transferred to the host. + env_imports.insert( + "abort", + Function::new_typed_with_env(&mut store, &fe, do_abort), + ); + + env_imports.insert( + "query_chain", + Function::new_typed_with_env(&mut store, &fe, do_query_chain), + ); + + // Creates an iterator that will go from start to end. + // If start_ptr == 0, the start is unbounded. + // If end_ptr == 0, the end is unbounded. + // Order is defined in cosmwasm_std::Order and may be 1 (ascending) or 2 (descending). All other values result in an error. + // Ownership of both start and end pointer is not transferred to the host. + // Returns an iterator ID. 
+ #[cfg(feature = "iterator")] + env_imports.insert( + "db_scan", + Function::new_typed_with_env(&mut store, &fe, do_db_scan), + ); + + // Get next element of iterator with ID `iterator_id`. + // Creates a region containing both key and value and returns its address. + // Ownership of the result region is transferred to the contract. + // The KV region uses the format value || key || keylen, where keylen is a fixed size big endian u32 value. + // An empty key (i.e. KV region ends with \0\0\0\0) means no more element, no matter what the value is. + #[cfg(feature = "iterator")] + env_imports.insert( + "db_next", + Function::new_typed_with_env(&mut store, &fe, do_db_next), + ); + + // Get next key of iterator with ID `iterator_id`. + // Returns 0 if there are no more entries and pointer to result region otherwise. + // Ownership of the result region is transferred to the contract. + #[cfg(feature = "iterator")] + env_imports.insert( + "db_next_key", + Function::new_typed_with_env(&mut store, &fe, do_db_next_key), + ); + + // Get next value of iterator with ID `iterator_id`. + // Returns 0 if there are no more entries and pointer to result region otherwise. + // Ownership of the result region is transferred to the contract. 
+ #[cfg(feature = "iterator")] + env_imports.insert( + "db_next_value", + Function::new_typed_with_env(&mut store, &fe, do_db_next_value), + ); + + import_obj.register_namespace("env", env_imports); + + if let Some(extra_imports) = extra_imports { + for (namespace, exports_obj) in extra_imports { + import_obj.register_namespace(namespace, exports_obj); + } + } + + let wasmer_instance = Box::from( + { + let _lock = instantiation_lock.map(|l| l.lock().unwrap()); + WasmerInstance::new(&mut store, module, &import_obj) + } + .map_err(|original| { + VmError::instantiation_err(format!("Error instantiating module: {original}")) + })?, + ); + + let memory = wasmer_instance + .exports + .get_memory("memory") + .map_err(|original| { + VmError::instantiation_err(format!("Could not get memory 'memory': {original}")) + })? + .clone(); + + let instance_ptr = NonNull::from(wasmer_instance.as_ref()); + + { + let mut fe_mut = fe.clone().into_mut(&mut store); + let (env, mut store) = fe_mut.data_and_store_mut(); + + env.memory = Some(memory); + env.set_wasmer_instance(Some(instance_ptr)); + env.set_gas_left(&mut store, gas_limit); + env.move_in(backend.storage, backend.querier); + } + + Ok(Instance { + _inner: wasmer_instance, + fe, + store, + }) + } + + pub fn api(&self) -> &A { + &self.fe.as_ref(&self.store).api + } + + /// Decomposes this instance into its components. + /// External dependencies are returned for reuse, the rest is dropped. + #[must_use = "Calling ::recycle() without reusing the returned backend just drops the instance"] + pub fn recycle(self) -> Option> { + let Instance { + _inner, fe, store, .. 
+ } = self; + + let env = fe.as_ref(&store); + if let (Some(storage), Some(querier)) = env.move_out() { + let api = env.api.clone(); + Some(Backend { + api, + storage, + querier, + }) + } else { + None + } + } + + pub fn set_debug_handler(&mut self, debug_handler: H) + where + H: for<'a, 'b> FnMut(/* msg */ &'a str, DebugInfo<'b>) + 'static, + { + self.fe + .as_ref(&self.store) + .set_debug_handler(Some(Rc::new(RefCell::new(debug_handler)))); + } + + pub fn unset_debug_handler(&mut self) { + self.fe.as_ref(&self.store).set_debug_handler(None); + } + + /// Returns the features required by this contract. + /// + /// This is not needed for production because we can do static analysis + /// on the Wasm file before instantiation to obtain this information. It's + /// only kept because it can be handy for integration testing. + pub fn required_capabilities(&self) -> HashSet { + required_capabilities_from_module(self._inner.module()) + } + + /// Returns the size of the default memory in pages. + /// This provides a rough idea of the peak memory consumption. Note that + /// Wasm memory always grows in 64 KiB steps (pages) and can never shrink + /// (https://github.com/WebAssembly/design/issues/1300#issuecomment-573867836). + pub fn memory_pages(&mut self) -> usize { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, store) = fe_mut.data_and_store_mut(); + + env.memory(&store).size().0 as _ + } + + /// Returns the currently remaining gas. + pub fn get_gas_left(&mut self) -> u64 { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, mut store) = fe_mut.data_and_store_mut(); + + env.get_gas_left(&mut store) + } + + /// Creates and returns a gas report. + /// This is a snapshot and multiple reports can be created during the lifetime of + /// an instance. 
+ pub fn create_gas_report(&mut self) -> GasReport { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, mut store) = fe_mut.data_and_store_mut(); + + let state = env.with_gas_state(|gas_state| gas_state.clone()); + let gas_left = env.get_gas_left(&mut store); + GasReport { + limit: state.gas_limit, + remaining: gas_left, + used_externally: state.externally_used_gas, + // If externally_used_gas exceeds the gas limit, this will return 0. + // no matter how much gas was used internally. But then we error with out of gas + // anyways, and it does not matter much anymore where gas was spend. + used_internally: state + .gas_limit + .saturating_sub(state.externally_used_gas) + .saturating_sub(gas_left), + } + } + + pub fn is_storage_readonly(&mut self) -> bool { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, _) = fe_mut.data_and_store_mut(); + + env.is_storage_readonly() + } + + /// Sets the readonly storage flag on this instance. Since one instance can be used + /// for multiple calls in integration tests, this should be set to the desired value + /// right before every call. + pub fn set_storage_readonly(&mut self, new_value: bool) { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, _) = fe_mut.data_and_store_mut(); + + env.set_storage_readonly(new_value); + } + + pub fn with_storage VmResult, T>(&mut self, func: F) -> VmResult { + self.fe + .as_ref(&self.store) + .with_storage_from_context::(func) + } + + pub fn with_querier VmResult, T>(&mut self, func: F) -> VmResult { + self.fe + .as_ref(&self.store) + .with_querier_from_context::(func) + } + + /// Requests memory allocation by the instance and returns a pointer + /// in the Wasm address space to the created Region object. 
+ pub(crate) fn allocate(&mut self, size: usize) -> VmResult { + let ret = self.call_function1("allocate", &[to_u32(size)?.into()])?; + let ptr = ref_to_u32(&ret)?; + if ptr == 0 { + return Err(CommunicationError::zero_address().into()); + } + Ok(ptr) + } + + // deallocate frees memory in the instance and that was either previously + // allocated by us, or a pointer from a return value after we copy it into rust. + // we need to clean up the wasm-side buffers to avoid memory leaks + pub(crate) fn deallocate(&mut self, ptr: u32) -> VmResult<()> { + self.call_function0("deallocate", &[ptr.into()])?; + Ok(()) + } + + /// Copies all data described by the Region at the given pointer from Wasm to the caller. + pub(crate) fn read_memory(&mut self, region_ptr: u32, max_length: usize) -> VmResult> { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, store) = fe_mut.data_and_store_mut(); + + read_region(&env.memory(&store), region_ptr, max_length) + } + + /// Copies data to the memory region that was created before using allocate. + pub(crate) fn write_memory(&mut self, region_ptr: u32, data: &[u8]) -> VmResult<()> { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, store) = fe_mut.data_and_store_mut(); + + write_region(&env.memory(&store), region_ptr, data)?; + Ok(()) + } + + /// Calls a function exported by the instance. + /// The function is expected to return no value. Otherwise this calls errors. + pub(crate) fn call_function0(&mut self, name: &str, args: &[Value]) -> VmResult<()> { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, mut store) = fe_mut.data_and_store_mut(); + + env.call_function0(&mut store, name, args) + } + + /// Calls a function exported by the instance. + /// The function is expected to return one value. Otherwise this calls errors. 
+ pub(crate) fn call_function1(&mut self, name: &str, args: &[Value]) -> VmResult { + let mut fe_mut = self.fe.clone().into_mut(&mut self.store); + let (env, mut store) = fe_mut.data_and_store_mut(); + + env.call_function1(&mut store, name, args) + } +} + +/// This exists only to be exported through `internals` for use by crates that are +/// part of Cosmwasm. +pub fn instance_from_module( + store: Store, + module: &Module, + backend: Backend, + gas_limit: u64, + extra_imports: Option>, +) -> VmResult> +where + A: BackendApi + 'static, // 'static is needed here to allow copying API instances into closures + S: Storage + 'static, // 'static is needed here to allow using this in an Environment that is cloned into closures + Q: Querier + 'static, +{ + Instance::from_module(store, module, backend, gas_limit, extra_imports, None) +} + +#[cfg(test)] +mod tests { + use std::sync::atomic::{AtomicBool, Ordering}; + use std::sync::Arc; + use std::time::SystemTime; + + use super::*; + use crate::calls::{call_execute, call_instantiate, call_query}; + use crate::testing::{ + mock_backend, mock_env, mock_info, mock_instance, mock_instance_options, + mock_instance_with_balances, mock_instance_with_failing_api, mock_instance_with_gas_limit, + mock_instance_with_options, MockInstanceOptions, + }; + use cosmwasm_std::{ + coin, coins, from_json, AllBalanceResponse, BalanceResponse, BankQuery, Empty, QueryRequest, + }; + use wasmer::FunctionEnvMut; + + const KIB: usize = 1024; + const MIB: usize = 1024 * 1024; + const DEFAULT_QUERY_GAS_LIMIT: u64 = 300_000; + static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm"); + static CYBERPUNK: &[u8] = include_bytes!("../testdata/cyberpunk.wasm"); + + #[test] + fn from_code_works() { + let backend = mock_backend(&[]); + let (instance_options, memory_limit) = mock_instance_options(); + let _instance = + Instance::from_code(CONTRACT, backend, instance_options, memory_limit).unwrap(); + } + + #[test] + fn 
set_debug_handler_and_unset_debug_handler_work() { + const LIMIT: u64 = 70_000_000_000; + let mut instance = mock_instance_with_gas_limit(CYBERPUNK, LIMIT); + + // init contract + let info = mock_info("creator", &coins(1000, "earth")); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{}"#) + .unwrap() + .unwrap(); + + let info = mock_info("caller", &[]); + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{"debug":{}}"#) + .unwrap() + .unwrap(); + + let start = SystemTime::now(); + instance.set_debug_handler(move |msg, info| { + let gas = info.gas_remaining; + let runtime = SystemTime::now().duration_since(start).unwrap().as_micros(); + eprintln!("{msg} (gas: {gas}, runtime: {runtime}µs)"); + }); + + let info = mock_info("caller", &[]); + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{"debug":{}}"#) + .unwrap() + .unwrap(); + + eprintln!("Unsetting debug handler. From here nothing is printed anymore."); + instance.unset_debug_handler(); + + let info = mock_info("caller", &[]); + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, br#"{"debug":{}}"#) + .unwrap() + .unwrap(); + } + + #[test] + fn required_capabilities_works() { + let backend = mock_backend(&[]); + let (instance_options, memory_limit) = mock_instance_options(); + let instance = + Instance::from_code(CONTRACT, backend, instance_options, memory_limit).unwrap(); + assert_eq!(instance.required_capabilities().len(), 0); + } + + #[test] + fn required_capabilities_works_for_many_exports() { + let wasm = wat::parse_str( + r#"(module + (memory 3) + (export "memory" (memory 0)) + + (type (func)) + (func (type 0) nop) + (export "requires_water" (func 0)) + (export "requires_" (func 0)) + (export "requires_nutrients" (func 0)) + (export "require_milk" (func 0)) + (export "REQUIRES_air" (func 0)) + (export "requires_sun" (func 0)) + )"#, + ) + .unwrap(); + + let backend = mock_backend(&[]); + let (instance_options, memory_limit) = 
mock_instance_options(); + let instance = Instance::from_code(&wasm, backend, instance_options, memory_limit).unwrap(); + assert_eq!(instance.required_capabilities().len(), 3); + assert!(instance.required_capabilities().contains("nutrients")); + assert!(instance.required_capabilities().contains("sun")); + assert!(instance.required_capabilities().contains("water")); + } + + #[test] + fn extra_imports_get_added() { + let (instance_options, memory_limit) = mock_instance_options(); + + let wasm = wat::parse_str( + r#"(module + (import "foo" "bar" (func $bar)) + (memory 3) + (export "memory" (memory 0)) + (func (export "main") (call $bar)) + )"#, + ) + .unwrap(); + + let backend = mock_backend(&[]); + let engine = make_compiling_engine(memory_limit); + let module = compile(&engine, &wasm).unwrap(); + let mut store = Store::new(engine); + + let called = Arc::new(AtomicBool::new(false)); + + #[derive(Clone)] + struct MyEnv { + // This can be mutated across threads safely. We initialize it as `false` + // and let our imported fn switch it to `true` to confirm it works. 
+ called: Arc, + } + + let fe = FunctionEnv::new( + &mut store, + MyEnv { + called: called.clone(), + }, + ); + + let fun = + Function::new_typed_with_env(&mut store, &fe, move |fe_mut: FunctionEnvMut| { + fe_mut.data().called.store(true, Ordering::Relaxed); + }); + let mut exports = Exports::new(); + exports.insert("bar", fun); + let mut extra_imports = HashMap::new(); + extra_imports.insert("foo", exports); + let mut instance = Instance::from_module( + store, + &module, + backend, + instance_options.gas_limit, + Some(extra_imports), + None, + ) + .unwrap(); + + instance.call_function0("main", &[]).unwrap(); + + assert!(called.load(Ordering::Relaxed)); + } + + #[test] + fn call_function0_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + instance + .call_function0("interface_version_8", &[]) + .expect("error calling function"); + } + + #[test] + fn call_function1_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + // can call function few times + let result = instance + .call_function1("allocate", &[0u32.into()]) + .expect("error calling allocate"); + assert_ne!(result.unwrap_i32(), 0); + + let result = instance + .call_function1("allocate", &[1u32.into()]) + .expect("error calling allocate"); + assert_ne!(result.unwrap_i32(), 0); + + let result = instance + .call_function1("allocate", &[33u32.into()]) + .expect("error calling allocate"); + assert_ne!(result.unwrap_i32(), 0); + } + + #[test] + fn allocate_deallocate_works() { + let mut instance = mock_instance_with_options( + CONTRACT, + MockInstanceOptions { + memory_limit: Some(Size::mebi(500)), + ..Default::default() + }, + ); + + let sizes: Vec = vec![ + 0, + 4, + 40, + 400, + 4 * KIB, + 40 * KIB, + 400 * KIB, + 4 * MIB, + 40 * MIB, + 400 * MIB, + ]; + for size in sizes.into_iter() { + let region_ptr = instance.allocate(size).expect("error allocating"); + instance.deallocate(region_ptr).expect("error deallocating"); + } + } + + #[test] + fn write_and_read_memory_works() { + let mut 
instance = mock_instance(CONTRACT, &[]); + + let sizes: Vec = vec![ + 0, + 4, + 40, + 400, + 4 * KIB, + 40 * KIB, + 400 * KIB, + 4 * MIB, + // disabled for performance reasons, but pass as well + // 40 * MIB, + // 400 * MIB, + ]; + for size in sizes.into_iter() { + let region_ptr = instance.allocate(size).expect("error allocating"); + let original = vec![170u8; size]; + instance + .write_memory(region_ptr, &original) + .expect("error writing"); + let data = instance + .read_memory(region_ptr, size) + .expect("error reading"); + assert_eq!(data, original); + instance.deallocate(region_ptr).expect("error deallocating"); + } + } + + #[test] + fn errors_in_imports() { + // set up an instance that will experience an error in an import + let error_message = "Api failed intentionally"; + let mut instance = mock_instance_with_failing_api(CONTRACT, &[], error_message); + let init_result = call_instantiate::<_, _, _, Empty>( + &mut instance, + &mock_env(), + &mock_info("someone", &[]), + b"{\"verifier\": \"some1\", \"beneficiary\": \"some2\"}", + ); + + match init_result.unwrap_err() { + VmError::RuntimeErr { msg, .. } => assert!(msg.contains(error_message)), + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn read_memory_errors_when_when_length_is_too_long() { + let length = 6; + let max_length = 5; + let mut instance = mock_instance(CONTRACT, &[]); + + // Allocate sets length to 0. Write some data to increase length. + let region_ptr = instance.allocate(length).expect("error allocating"); + let data = vec![170u8; length]; + instance + .write_memory(region_ptr, &data) + .expect("error writing"); + + let result = instance.read_memory(region_ptr, max_length); + match result.unwrap_err() { + VmError::CommunicationErr { + source: + CommunicationError::RegionLengthTooBig { + length, max_length, .. + }, + .. 
+ } => { + assert_eq!(length, 6); + assert_eq!(max_length, 5); + } + err => panic!("unexpected error: {err:?}"), + }; + + instance.deallocate(region_ptr).expect("error deallocating"); + } + + #[test] + fn memory_pages_returns_min_memory_size_by_default() { + // min: 0 pages, max: none + let wasm = wat::parse_str( + r#"(module + (memory 0) + (export "memory" (memory 0)) + + (type (func)) + (func (type 0) nop) + (export "interface_version_8" (func 0)) + (export "instantiate" (func 0)) + (export "allocate" (func 0)) + (export "deallocate" (func 0)) + )"#, + ) + .unwrap(); + let mut instance = mock_instance(&wasm, &[]); + assert_eq!(instance.memory_pages(), 0); + + // min: 3 pages, max: none + let wasm = wat::parse_str( + r#"(module + (memory 3) + (export "memory" (memory 0)) + + (type (func)) + (func (type 0) nop) + (export "interface_version_8" (func 0)) + (export "instantiate" (func 0)) + (export "allocate" (func 0)) + (export "deallocate" (func 0)) + )"#, + ) + .unwrap(); + let mut instance = mock_instance(&wasm, &[]); + assert_eq!(instance.memory_pages(), 3); + } + + #[test] + fn memory_pages_grows_with_usage() { + let mut instance = mock_instance(CONTRACT, &[]); + + assert_eq!(instance.memory_pages(), 17); + + // 100 KiB require two more pages + let region_ptr = instance.allocate(100 * 1024).expect("error allocating"); + + assert_eq!(instance.memory_pages(), 19); + + // Deallocating does not shrink memory + instance.deallocate(region_ptr).expect("error deallocating"); + assert_eq!(instance.memory_pages(), 19); + } + + #[test] + fn get_gas_left_works() { + let mut instance = mock_instance_with_gas_limit(CONTRACT, 123321); + let orig_gas = instance.get_gas_left(); + assert_eq!(orig_gas, 123321); + } + + #[test] + fn create_gas_report_works() { + const LIMIT: u64 = 700_000_000; + let mut instance = mock_instance_with_gas_limit(CONTRACT, LIMIT); + + let report1 = instance.create_gas_report(); + assert_eq!(report1.used_externally, 0); + 
assert_eq!(report1.used_internally, 0); + assert_eq!(report1.limit, LIMIT); + assert_eq!(report1.remaining, LIMIT); + + // init contract + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + let report2 = instance.create_gas_report(); + assert_eq!(report2.used_externally, 251); + assert_eq!(report2.used_internally, 21589990); + assert_eq!(report2.limit, LIMIT); + assert_eq!( + report2.remaining, + LIMIT - report2.used_externally - report2.used_internally + ); + } + + #[test] + fn set_storage_readonly_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + assert!(instance.is_storage_readonly()); + + instance.set_storage_readonly(false); + assert!(!instance.is_storage_readonly()); + + instance.set_storage_readonly(false); + assert!(!instance.is_storage_readonly()); + + instance.set_storage_readonly(true); + assert!(instance.is_storage_readonly()); + } + + #[test] + fn with_storage_works() { + let mut instance = mock_instance(CONTRACT, &[]); + + // initial check + instance + .with_storage(|store| { + assert!(store.get(b"foo").0.unwrap().is_none()); + Ok(()) + }) + .unwrap(); + + // write some data + instance + .with_storage(|store| { + store.set(b"foo", b"bar").0.unwrap(); + Ok(()) + }) + .unwrap(); + + // read some data + instance + .with_storage(|store| { + assert_eq!(store.get(b"foo").0.unwrap(), Some(b"bar".to_vec())); + Ok(()) + }) + .unwrap(); + } + + #[test] + #[should_panic] + fn with_storage_safe_for_panic() { + // this should fail with the assertion, but not cause a double-free crash (issue #59) + let mut instance = mock_instance(CONTRACT, &[]); + instance + .with_storage::<_, ()>(|_store| panic!("trigger 
failure")) + .unwrap(); + } + + #[test] + #[allow(deprecated)] + fn with_querier_works_readonly() { + let rich_addr = String::from("foobar"); + let rich_balance = vec![coin(10000, "gold"), coin(8000, "silver")]; + let mut instance = mock_instance_with_balances(CONTRACT, &[(&rich_addr, &rich_balance)]); + + // query one + instance + .with_querier(|querier| { + let response = querier + .query::( + &QueryRequest::Bank(BankQuery::Balance { + address: rich_addr.clone(), + denom: "silver".to_string(), + }), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let BalanceResponse { amount, .. } = from_json(response).unwrap(); + assert_eq!(amount.amount.u128(), 8000); + assert_eq!(amount.denom, "silver"); + Ok(()) + }) + .unwrap(); + + // query all + instance + .with_querier(|querier| { + let response = querier + .query::( + &QueryRequest::Bank(BankQuery::AllBalances { + address: rich_addr.clone(), + }), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let AllBalanceResponse { amount, .. } = from_json(response).unwrap(); + assert_eq!(amount.len(), 2); + assert_eq!(amount[0].amount.u128(), 10000); + assert_eq!(amount[0].denom, "gold"); + assert_eq!(amount[1].amount.u128(), 8000); + assert_eq!(amount[1].denom, "silver"); + + Ok(()) + }) + .unwrap(); + } + + /// This is needed for writing integration tests in which the balance of a contract changes over time. 
+ #[test] + fn with_querier_allows_updating_balances() { + let rich_addr = String::from("foobar"); + let rich_balance1 = vec![coin(10000, "gold"), coin(500, "silver")]; + let rich_balance2 = vec![coin(10000, "gold"), coin(8000, "silver")]; + let mut instance = mock_instance_with_balances(CONTRACT, &[(&rich_addr, &rich_balance1)]); + + // Get initial state + instance + .with_querier(|querier| { + let response = querier + .query::( + &QueryRequest::Bank(BankQuery::Balance { + address: rich_addr.clone(), + denom: "silver".to_string(), + }), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let BalanceResponse { amount, .. } = from_json(response).unwrap(); + assert_eq!(amount.amount.u128(), 500); + Ok(()) + }) + .unwrap(); + + // Update balance + instance + .with_querier(|querier| { + querier.update_balance(&rich_addr, rich_balance2); + Ok(()) + }) + .unwrap(); + + // Get updated state + instance + .with_querier(|querier| { + let response = querier + .query::( + &QueryRequest::Bank(BankQuery::Balance { + address: rich_addr.clone(), + denom: "silver".to_string(), + }), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let BalanceResponse { amount, .. 
} = from_json(response).unwrap(); + assert_eq!(amount.amount.u128(), 8000); + Ok(()) + }) + .unwrap(); + } + + #[test] + fn contract_deducts_gas_init() { + let mut instance = mock_instance(CONTRACT, &[]); + let orig_gas = instance.get_gas_left(); + + // init contract + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + let init_used = orig_gas - instance.get_gas_left(); + assert_eq!(init_used, 21590241); + } + + #[test] + fn contract_deducts_gas_execute() { + let mut instance = mock_instance(CONTRACT, &[]); + + // init contract + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + // run contract - just sanity check - results validate in contract unit tests + let gas_before_execute = instance.get_gas_left(); + let info = mock_info(&verifier, &coins(15, "earth")); + let msg = br#"{"release":{}}"#; + call_execute::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg) + .unwrap() + .unwrap(); + + let execute_used = gas_before_execute - instance.get_gas_left(); + assert_eq!(execute_used, 26961511); + } + + #[test] + fn contract_enforces_gas_limit() { + let mut instance = mock_instance_with_gas_limit(CONTRACT, 20_000); + + // init contract + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + 
let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let res = + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()); + assert!(res.is_err()); + } + + #[test] + fn query_works_with_gas_metering() { + let mut instance = mock_instance(CONTRACT, &[]); + + // init contract + let info = mock_info(&instance.api().addr_make("creator"), &coins(1000, "earth")); + let verifier = instance.api().addr_make("verifies"); + let beneficiary = instance.api().addr_make("benefits"); + let msg = format!(r#"{{"verifier": "{verifier}", "beneficiary": "{beneficiary}"}}"#); + let _res = + call_instantiate::<_, _, _, Empty>(&mut instance, &mock_env(), &info, msg.as_bytes()) + .unwrap() + .unwrap(); + + // run contract - just sanity check - results validate in contract unit tests + let gas_before_query = instance.get_gas_left(); + // we need to encode the key in base64 + let msg = br#"{"verifier":{}}"#; + let res = call_query(&mut instance, &mock_env(), msg).unwrap(); + let answer = res.unwrap(); + assert_eq!( + answer.as_slice(), + format!("{{\"verifier\":\"{verifier}\"}}").as_bytes() + ); + + let query_used = gas_before_query - instance.get_gas_left(); + assert_eq!(query_used, 15938086); + } +} diff --git a/vm/src/lib.rs b/vm/src/lib.rs new file mode 100644 index 000000000..e67ea78c8 --- /dev/null +++ b/vm/src/lib.rs @@ -0,0 +1,62 @@ +mod backend; +mod cache; +mod calls; +mod capabilities; +mod compatibility; +mod config; +mod conversion; +mod environment; +mod errors; +mod filesystem; +mod imports; +mod instance; +mod limited; +mod memory; +mod modules; +mod parsed_wasm; +mod sections; +mod serde; +mod size; +mod static_analysis; +pub mod testing; +mod wasm_backend; + +pub use crate::backend::{ + Backend, BackendApi, BackendError, BackendResult, GasInfo, Querier, Storage, +}; +pub use crate::cache::{AnalysisReport, Cache, Metrics, PerModuleMetrics, PinnedMetrics, 
Stats}; +pub use crate::calls::{ + call_execute, call_execute_raw, call_ibc_destination_callback, + call_ibc_destination_callback_raw, call_ibc_source_callback, call_ibc_source_callback_raw, + call_instantiate, call_instantiate_raw, call_migrate, call_migrate_raw, call_migrate_with_info, + call_migrate_with_info_raw, call_query, call_query_raw, call_reply, call_reply_raw, call_sudo, + call_sudo_raw, +}; +#[cfg(feature = "stargate")] +pub use crate::calls::{ + call_ibc_channel_close, call_ibc_channel_close_raw, call_ibc_channel_connect, + call_ibc_channel_connect_raw, call_ibc_channel_open, call_ibc_channel_open_raw, + call_ibc_packet_ack, call_ibc_packet_ack_raw, call_ibc_packet_receive, + call_ibc_packet_receive_raw, call_ibc_packet_timeout, call_ibc_packet_timeout_raw, +}; +pub use crate::capabilities::capabilities_from_csv; +pub use crate::config::{CacheOptions, Config, WasmLimits}; +pub use crate::errors::{ + CommunicationError, CommunicationResult, RegionValidationError, RegionValidationResult, + VmError, VmResult, +}; +pub use crate::instance::{DebugInfo, GasReport, Instance, InstanceOptions}; +pub use crate::serde::{from_slice, to_vec}; +pub use crate::size::Size; + +pub mod internals { + #![doc(hidden)] + //! We use the internals module for exporting types that are only + //! intended to be used in internal crates / utils. + //! Please don't use any of these types directly, as + //! they might change frequently or be removed in the future. + + pub use crate::compatibility::{check_wasm, LogOutput, Logger}; + pub use crate::instance::instance_from_module; + pub use crate::wasm_backend::{compile, make_compiling_engine, make_runtime_engine}; +} diff --git a/vm/src/limited.rs b/vm/src/limited.rs new file mode 100644 index 000000000..e959d3db2 --- /dev/null +++ b/vm/src/limited.rs @@ -0,0 +1,219 @@ +//! A set of tools designed for processing user defined contract data, +//! which can potentially have abusive size. 
+ +use std::collections::{BTreeSet, HashSet}; + +pub trait LimitedDisplay { + /// Returns a string representationof the object, which is shorter than or equal to `max_length`. + /// Implementations must panic if `max_length` is not reasonably large. + fn to_string_limited(&self, max_length: usize) -> String; +} + +impl> LimitedDisplay for BTreeSet { + fn to_string_limited(&self, max_length: usize) -> String { + collection_to_string_limited(self.iter(), max_length, "{", "}") + } +} + +impl> LimitedDisplay for HashSet { + fn to_string_limited(&self, max_length: usize) -> String { + // Iteration order in HashSet is undeterminstic. We sort + // here to be on the safe side and to simplify testing. + let sorted = BTreeSet::from_iter(self); + sorted.to_string_limited(max_length) + } +} + +impl> LimitedDisplay for Vec { + fn to_string_limited(&self, max_length: usize) -> String { + collection_to_string_limited(self.iter(), max_length, "[", "]") + } +} + +/// Iterates over a collection and returns a length limited +/// string representation of it, using `opening` and `closing` +/// to surround the collection's content. 
+fn collection_to_string_limited, I: ExactSizeIterator>( + iter: I, + max_length: usize, + opening: &str, + closing: &str, +) -> String { + let elements_count = iter.len(); + let mut out = String::with_capacity(max_length * 130 / 100); + + let mut first = true; + out.push_str(opening); + let mut lengths_stack = Vec::::new(); + for element in iter { + lengths_stack.push(out.len()); + + if first { + out.push('"'); + first = false; + } else { + out.push_str(", \""); + } + out.push_str(element.as_ref()); + out.push('"'); + + if out.len() > max_length { + break; + }; + } + + if out.len() + closing.len() <= max_length { + out.push_str(closing); + out + } else { + loop { + let previous_length = lengths_stack + .pop() + .expect("Cannot remove hide enough elements to fit in length limit."); + let skipped = elements_count - lengths_stack.len(); + let remaining = elements_count - skipped; + let skipped_text = if remaining == 0 { + format!("... {skipped} elements") + } else { + format!(", ... {skipped} more") + }; + if previous_length + skipped_text.len() + closing.len() <= max_length { + out.truncate(previous_length); + out.push_str(&skipped_text); + out.push_str(closing); + return out; + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn works_for_btreeset() { + let set = BTreeSet::::new(); + assert_eq!(set.to_string_limited(100), "{}"); + assert_eq!(set.to_string_limited(20), "{}"); + assert_eq!(set.to_string_limited(2), "{}"); + + let fruits: BTreeSet = [ + "watermelon".to_string(), + "apple".to_string(), + "banana".to_string(), + ] + .into_iter() + .collect(); + assert_eq!( + fruits.to_string_limited(100), + "{\"apple\", \"banana\", \"watermelon\"}" + ); + assert_eq!( + fruits.to_string_limited(33), + "{\"apple\", \"banana\", \"watermelon\"}" + ); + assert_eq!( + fruits.to_string_limited(32), + "{\"apple\", \"banana\", ... 1 more}" + ); + assert_eq!( + fruits.to_string_limited(31), + "{\"apple\", \"banana\", ... 
1 more}" + ); + assert_eq!(fruits.to_string_limited(30), "{\"apple\", ... 2 more}"); + assert_eq!(fruits.to_string_limited(21), "{\"apple\", ... 2 more}"); + assert_eq!(fruits.to_string_limited(20), "{... 3 elements}"); + assert_eq!(fruits.to_string_limited(16), "{... 3 elements}"); + } + + #[test] + fn works_for_hashset() { + let set = HashSet::::new(); + assert_eq!(set.to_string_limited(100), "{}"); + assert_eq!(set.to_string_limited(20), "{}"); + assert_eq!(set.to_string_limited(2), "{}"); + + let fruits: HashSet = [ + "watermelon".to_string(), + "apple".to_string(), + "banana".to_string(), + ] + .into_iter() + .collect(); + assert_eq!( + fruits.to_string_limited(100), + "{\"apple\", \"banana\", \"watermelon\"}" + ); + assert_eq!( + fruits.to_string_limited(33), + "{\"apple\", \"banana\", \"watermelon\"}" + ); + assert_eq!( + fruits.to_string_limited(32), + "{\"apple\", \"banana\", ... 1 more}" + ); + assert_eq!( + fruits.to_string_limited(31), + "{\"apple\", \"banana\", ... 1 more}" + ); + assert_eq!(fruits.to_string_limited(30), "{\"apple\", ... 2 more}"); + assert_eq!(fruits.to_string_limited(21), "{\"apple\", ... 2 more}"); + assert_eq!(fruits.to_string_limited(20), "{... 3 elements}"); + assert_eq!(fruits.to_string_limited(16), "{... 3 elements}"); + } + + #[test] + #[should_panic(expected = "Cannot remove hide enough elements to fit in length limit.")] + fn panics_if_limit_is_too_small_empty() { + let set = HashSet::::new(); + assert_eq!(set.to_string_limited(1), "{}"); + } + + #[test] + #[should_panic(expected = "Cannot remove hide enough elements to fit in length limit.")] + fn panics_if_limit_is_too_small_nonempty() { + let fruits: HashSet = [ + "watermelon".to_string(), + "apple".to_string(), + "banana".to_string(), + ] + .into_iter() + .collect(); + assert_eq!(fruits.to_string_limited(15), "{... 
3 elements}"); + } + + #[test] + fn works_for_vectors() { + let list = Vec::::new(); + assert_eq!(list.to_string_limited(100), "[]"); + assert_eq!(list.to_string_limited(20), "[]"); + assert_eq!(list.to_string_limited(2), "[]"); + + let fruits = vec![ + "banana".to_string(), + "apple".to_string(), + "watermelon".to_string(), + ]; + assert_eq!( + fruits.to_string_limited(100), + "[\"banana\", \"apple\", \"watermelon\"]" + ); + assert_eq!( + fruits.to_string_limited(33), + "[\"banana\", \"apple\", \"watermelon\"]" + ); + assert_eq!( + fruits.to_string_limited(32), + "[\"banana\", \"apple\", ... 1 more]" + ); + assert_eq!( + fruits.to_string_limited(31), + "[\"banana\", \"apple\", ... 1 more]" + ); + assert_eq!(fruits.to_string_limited(30), "[\"banana\", ... 2 more]"); + assert_eq!(fruits.to_string_limited(22), "[\"banana\", ... 2 more]"); + assert_eq!(fruits.to_string_limited(21), "[... 3 elements]"); + assert_eq!(fruits.to_string_limited(16), "[... 3 elements]"); + } +} diff --git a/vm/src/memory.rs b/vm/src/memory.rs new file mode 100644 index 000000000..5870b2371 --- /dev/null +++ b/vm/src/memory.rs @@ -0,0 +1,295 @@ +use std::mem::{size_of, MaybeUninit}; + +use wasmer::{ValueType, WasmPtr}; + +use crate::conversion::to_u32; +use crate::errors::{ + CommunicationError, CommunicationResult, RegionValidationError, RegionValidationResult, + VmResult, +}; + +/****** read/write to wasm memory buffer ****/ + +/// Describes some data allocated in Wasm's linear memory. +/// A pointer to an instance of this can be returned over FFI boundaries. +/// +/// This is the same as `cosmwasm_std::memory::Region` +/// but defined here to allow Wasmer specific implementation. 
+#[repr(C)] +#[derive(Default, Clone, Copy, Debug)] +pub struct Region { + /// The beginning of the region expressed as bytes from the beginning of the linear memory + pub offset: u32, + /// The number of bytes available in this region + pub capacity: u32, + /// The number of bytes used in this region + pub length: u32, +} + +/// Byte representation of a [Region] struct in Wasm memory. +type RegionBytes = [u8; size_of::()]; + +impl Region { + fn from_wasm_bytes(bytes: RegionBytes) -> Self { + let offset = u32::from_le_bytes([bytes[0], bytes[1], bytes[2], bytes[3]]); + let capacity = u32::from_le_bytes([bytes[4], bytes[5], bytes[6], bytes[7]]); + let length = u32::from_le_bytes([bytes[8], bytes[9], bytes[10], bytes[11]]); + Region { + offset, + capacity, + length, + } + } + + fn into_wasm_bytes(self) -> RegionBytes { + let Region { + offset, + capacity, + length, + } = self; + + let mut bytes = [0u8; 12]; + // wasm is little endian + bytes[0..4].copy_from_slice(&offset.to_le_bytes()); + bytes[4..8].copy_from_slice(&capacity.to_le_bytes()); + bytes[8..12].copy_from_slice(&length.to_le_bytes()); + bytes + } +} + +unsafe impl ValueType for Region { + fn zero_padding_bytes(&self, _bytes: &mut [MaybeUninit]) { + // The size of Region is exactly 3x4=12 bytes with no padding. + // The `size_of::()` test below ensures that. + // So we do not need to zero any bytes here. + } +} + +// Wasm is little endian, and we want to be able to just reinterpret slices of +// wasm memory as a Region struct, so we only support little endian systems. +// If we ever need to support big endian systems, we can use more fine-grained checks +// in the places where we read/write the Region struct +// (and possibly other interactions between Wasm and host). +#[cfg(target_endian = "big")] +compile_error!("big endian systems are not supported"); + +/// Expects a (fixed size) Region struct at ptr, which is read. This links to the +/// memory region, which is copied in the second step. 
+/// Errors if the length of the region exceeds `max_length`. +pub fn read_region(memory: &wasmer::MemoryView, ptr: u32, max_length: usize) -> VmResult> { + let region = get_region(memory, ptr)?; + + if region.length > to_u32(max_length)? { + return Err( + CommunicationError::region_length_too_big(region.length as usize, max_length).into(), + ); + } + + let mut result = vec![0u8; region.length as usize]; + memory + .read(region.offset as u64, &mut result) + .map_err(|_err| CommunicationError::region_access_err(region, memory.size().bytes().0))?; + Ok(result) +} + +/// maybe_read_region is like read_region, but gracefully handles null pointer (0) by returning None +/// meant to be used where the argument is optional (like scan) +#[cfg(feature = "iterator")] +pub fn maybe_read_region( + memory: &wasmer::MemoryView, + ptr: u32, + max_length: usize, +) -> VmResult>> { + if ptr == 0 { + Ok(None) + } else { + read_region(memory, ptr, max_length).map(Some) + } +} + +/// A prepared and sufficiently large memory Region is expected at ptr that points to pre-allocated memory. +/// +/// Returns number of bytes written on success. 
+pub fn write_region(memory: &wasmer::MemoryView, ptr: u32, data: &[u8]) -> VmResult<()> { + let mut region = get_region(memory, ptr)?; + + let region_capacity = region.capacity as usize; + if data.len() > region_capacity { + return Err(CommunicationError::region_too_small(region_capacity, data.len()).into()); + } + + memory + .write(region.offset as u64, data) + .map_err(|_err| CommunicationError::region_access_err(region, memory.size().bytes().0))?; + + region.length = data.len() as u32; + set_region(memory, ptr, region)?; + + Ok(()) +} + +/// Reads in a Region at offset in Wasm memory and returns a copy of it +fn get_region(memory: &wasmer::MemoryView, offset: u32) -> CommunicationResult { + let wptr = WasmPtr::::new(offset); + let region = Region::from_wasm_bytes(wptr.deref(memory).read().map_err(|_err| { + CommunicationError::deref_err(offset, "Could not dereference this pointer to a Region") + })?); + validate_region(®ion)?; + Ok(region) +} + +/// Performs plausibility checks in the given Region. Regions are always created by the +/// contract and this can be used to detect problems in the standard library of the contract. 
+fn validate_region(region: &Region) -> RegionValidationResult<()> { + if region.offset == 0 { + return Err(RegionValidationError::zero_offset()); + } + if region.length > region.capacity { + return Err(RegionValidationError::length_exceeds_capacity( + region.length, + region.capacity, + )); + } + if region.capacity > (u32::MAX - region.offset) { + return Err(RegionValidationError::out_of_range( + region.offset, + region.capacity, + )); + } + Ok(()) +} + +/// Overrides a Region at offset in Wasm memory +fn set_region(memory: &wasmer::MemoryView, offset: u32, data: Region) -> CommunicationResult<()> { + let wptr = WasmPtr::::new(offset); + wptr.deref(memory) + .write(data.into_wasm_bytes()) + .map_err(|_err| { + CommunicationError::deref_err(offset, "Could not dereference this pointer to a Region") + })?; + Ok(()) +} + +#[cfg(test)] +mod tests { + use std::mem; + + use super::*; + + #[test] + fn region_has_known_size() { + // 3x4 bytes with no padding + assert_eq!(mem::size_of::(), 12); + } + + #[test] + fn validate_region_passes_for_valid_region() { + // empty + let region = Region { + offset: 23, + capacity: 500, + length: 0, + }; + validate_region(®ion).unwrap(); + + // half full + let region = Region { + offset: 23, + capacity: 500, + length: 250, + }; + validate_region(®ion).unwrap(); + + // full + let region = Region { + offset: 23, + capacity: 500, + length: 500, + }; + validate_region(®ion).unwrap(); + + // at end of linear memory (1) + let region = Region { + offset: u32::MAX, + capacity: 0, + length: 0, + }; + validate_region(®ion).unwrap(); + + // at end of linear memory (2) + let region = Region { + offset: 1, + capacity: u32::MAX - 1, + length: 0, + }; + validate_region(®ion).unwrap(); + } + + #[test] + fn validate_region_fails_for_zero_offset() { + let region = Region { + offset: 0, + capacity: 500, + length: 250, + }; + let result = validate_region(®ion); + match result.unwrap_err() { + RegionValidationError::ZeroOffset { .. 
} => {} + e => panic!("Got unexpected error: {e:?}"), + } + } + + #[test] + fn validate_region_fails_for_length_exceeding_capacity() { + let region = Region { + offset: 23, + capacity: 500, + length: 501, + }; + let result = validate_region(®ion); + match result.unwrap_err() { + RegionValidationError::LengthExceedsCapacity { + length, capacity, .. + } => { + assert_eq!(length, 501); + assert_eq!(capacity, 500); + } + e => panic!("Got unexpected error: {e:?}"), + } + } + + #[test] + fn validate_region_fails_when_exceeding_address_space() { + let region = Region { + offset: 23, + capacity: u32::MAX, + length: 501, + }; + let result = validate_region(®ion); + match result.unwrap_err() { + RegionValidationError::OutOfRange { + offset, capacity, .. + } => { + assert_eq!(offset, 23); + assert_eq!(capacity, u32::MAX); + } + e => panic!("Got unexpected error: {e:?}"), + } + + let region = Region { + offset: u32::MAX, + capacity: 1, + length: 0, + }; + let result = validate_region(®ion); + match result.unwrap_err() { + RegionValidationError::OutOfRange { + offset, capacity, .. + } => { + assert_eq!(offset, u32::MAX); + assert_eq!(capacity, 1); + } + e => panic!("Got unexpected error: {e:?}"), + } + } +} diff --git a/vm/src/modules/cached_module.rs b/vm/src/modules/cached_module.rs new file mode 100644 index 000000000..73c8d1fa5 --- /dev/null +++ b/vm/src/modules/cached_module.rs @@ -0,0 +1,30 @@ +use wasmer::{Engine, Module}; + +/// Some manual tests on Simon's machine showed that Engine is roughly 3-5 KB big, +/// so give it a constant 10 KiB estimate. +#[inline] +pub fn engine_size_estimate() -> usize { + 10 * 1024 +} + +#[derive(Debug, Clone)] +pub struct CachedModule { + pub module: Module, + /// The runtime engine to run this module. Ideally we could use a single engine + /// for all modules but the memory issue described in + /// requires using one engine per module as a workaround. + pub engine: Engine, + /// The estimated size of this element in memory. 
+ /// Since the cached modules are just [rkyv](https://rkyv.org/) dumps of the Module + /// instances, we use the file size of the module on disk (not the Wasm!) + /// as an estimate for this. + /// + /// Between CosmWasm 1.4 (Wasmer 4) and 1.5.2, Store/Engine were not cached. This lead to a + /// memory consumption problem. From 1.5.2 on, Module and Engine are cached and Store is created + /// from Engine on demand. + /// + /// The majority of the Module size is the Artifact which is why we use the module filesize as the estimate. + /// Some manual tests on Simon's machine showed that Engine is roughly 3-5 KB big, so give it a constant + /// estimate: [`engine_size_estimate`]. + pub size_estimate: usize, +} diff --git a/vm/src/modules/file_system_cache.rs b/vm/src/modules/file_system_cache.rs new file mode 100644 index 000000000..004e48e09 --- /dev/null +++ b/vm/src/modules/file_system_cache.rs @@ -0,0 +1,485 @@ +use blake2::{digest::consts::U5, Blake2b, Digest}; +use std::fs; +use std::hash::Hash; +use std::io; +use std::panic::catch_unwind; +use std::path::{Path, PathBuf}; +use std::sync::OnceLock; +use thiserror::Error; + +use wasmer::{DeserializeError, Module, Target}; + +use cosmwasm_std::Checksum; + +use crate::errors::{VmError, VmResult}; +use crate::filesystem::mkdir_p; +use crate::modules::current_wasmer_module_version; +use crate::wasm_backend::make_runtime_engine; +use crate::wasm_backend::COST_FUNCTION_HASH; +use crate::Size; + +use super::cached_module::engine_size_estimate; +use super::CachedModule; + +/// This is a value you can manually modify to the cache. +/// You normally _do not_ need to change this value yourself. +/// +/// Cases where you might need to update it yourself, is things like when the memory layout of some types in Rust [std] changes. +/// +/// --- +/// +/// Now follows the legacy documentation of this value: +/// +/// ## Version history: +/// - **v1**:
+/// cosmwasm_vm < 1.0.0-beta5. This is working well up to Wasmer 2.0.0 as +/// [in wasmvm 1.0.0-beta2](https://github.com/CosmWasm/wasmvm/blob/v1.0.0-beta2/libwasmvm/Cargo.lock#L1412-L1413) +/// and [wasmvm 0.16.3](https://github.com/CosmWasm/wasmvm/blob/v0.16.3/libwasmvm/Cargo.lock#L1408-L1409). +/// Versions that ship with Wasmer 2.1.x such [as wasmvm 1.0.0-beta3](https://github.com/CosmWasm/wasmvm/blob/v1.0.0-beta3/libwasmvm/Cargo.lock#L1534-L1535) +/// to [wasmvm 1.0.0-beta5](https://github.com/CosmWasm/wasmvm/blob/v1.0.0-beta5/libwasmvm/Cargo.lock#L1530-L1531) +/// are broken, i.e. they will crash when reading older v1 modules. +/// - **v2**:
+/// Version for cosmwasm_vm 1.0.0-beta5 / wasmvm 1.0.0-beta6 that ships with Wasmer 2.1.1. +/// - **v3**:
+/// Version for Wasmer 2.2.0 which contains a [module breaking change to 2.1.x](https://github.com/wasmerio/wasmer/pull/2747). +/// - **v4**:
+/// Version for Wasmer 2.3.0 which contains a module breaking change to 2.2.0 that was not reflected in +/// the module header version (). In cosmwasm-vm 1.1.0-1.1.1 +/// the old value "v3" is still used along with Wasmer 2.3.0 (bug). From cosmwasm 1.1.2 onwards, this is +/// fixed by bumping to "v4". +/// - **v5**:
+/// A change in memory layout of some types in Rust [std] caused +/// [issues with module deserialization](https://github.com/CosmWasm/wasmvm/issues/426). +/// To work around this, the version was bumped to "v5" here to invalidate these corrupt caches. +/// - **v6**:
+/// Version for cosmwasm_vm 1.3+ which adds a sub-folder with the target identier for the modules. +/// - **v7**:
+/// New version because of Wasmer 2.3.0 -> 4 upgrade. +/// This internally changes how rkyv is used for module serialization, making compatibility unlikely. +/// - **v8**:
+/// New version because of Wasmer 4.1.2 -> 4.2.2 upgrade. +/// Module compatibility between Wasmer versions is not guaranteed. +/// - **v9**:
+/// New version because of Wasmer 4.2.2 -> 4.2.6 upgrade. +/// Module compatibility between Wasmer versions is not guaranteed. +/// - **v10**:
+/// New version because of Metering middleware change. +/// - **v20**:
+/// New version because of Wasmer 4.3.3 -> 4.3.7 upgrade. +/// Module compatibility between Wasmer versions is not guaranteed. +const MODULE_SERIALIZATION_VERSION: &str = "v20"; + +/// Function that actually does the heavy lifting of creating the module version discriminator. +/// +/// Separated for sanity tests because otherwise the `OnceLock` would cache the result. +#[inline] +fn raw_module_version_discriminator() -> String { + let hashes = [COST_FUNCTION_HASH]; + + let mut hasher = Blake2b::::new(); + + hasher.update(MODULE_SERIALIZATION_VERSION.as_bytes()); + hasher.update(wasmer::VERSION.as_bytes()); + + for hash in hashes { + hasher.update(hash); + } + + hex::encode(hasher.finalize()) +} + +/// This version __MUST__ change whenever the module system changes in a way +/// that old stored modules would be corrupt when loaded in the new system. +/// This needs to be done e.g. when switching between the jit/native engine. +/// +/// By default, this derived by performing the following operation: +/// +/// ```ignore +/// BLAKE2( +/// manual module version, +/// wasmer version requirement, +/// BLAKE2_512(cost_fn) +/// ) +/// ``` +/// +/// If anything else changes, you must change the manual module version. +/// +/// See https://github.com/wasmerio/wasmer/issues/2781 for more information +/// on Wasmer's module stability concept. +#[inline] +fn module_version_discriminator() -> &'static str { + static DISCRIMINATOR: OnceLock = OnceLock::new(); + + DISCRIMINATOR.get_or_init(raw_module_version_discriminator) +} + +/// Representation of a directory that contains compiled Wasm artifacts. +pub struct FileSystemCache { + modules_path: PathBuf, + /// If true, the cache uses the `*_unchecked` wasmer functions for loading modules from disk. + unchecked_modules: bool, +} + +/// An error type that hides system specific error information +/// to ensure deterministic errors across operating systems. 
+#[derive(Error, Debug)] +pub enum NewFileSystemCacheError { + #[error("Could not get metadata of cache path")] + CouldntGetMetadata, + #[error("The supplied path is readonly")] + ReadonlyPath, + #[error("The supplied path already exists but is no directory")] + ExistsButNoDirectory, + #[error("Could not create cache path")] + CouldntCreatePath, +} + +impl FileSystemCache { + /// Construct a new `FileSystemCache` around the specified directory. + /// The contents of the cache are stored in sub-versioned directories. + /// If `unchecked_modules` is set to true, it uses the `*_unchecked` + /// wasmer functions for loading modules from disk (no validity checks). + /// + /// # Safety + /// + /// This method is unsafe because there's no way to ensure the artifacts + /// stored in this cache haven't been corrupted or tampered with. + pub unsafe fn new( + base_path: impl Into, + unchecked_modules: bool, + ) -> Result { + let base_path: PathBuf = base_path.into(); + if base_path.exists() { + let metadata = base_path + .metadata() + .map_err(|_e| NewFileSystemCacheError::CouldntGetMetadata)?; + if !metadata.is_dir() { + return Err(NewFileSystemCacheError::ExistsButNoDirectory); + } + if metadata.permissions().readonly() { + return Err(NewFileSystemCacheError::ReadonlyPath); + } + } else { + // Create the directory and any parent directories if they don't yet exist. + mkdir_p(&base_path).map_err(|_e| NewFileSystemCacheError::CouldntCreatePath)?; + } + + Ok(Self { + modules_path: modules_path( + &base_path, + current_wasmer_module_version(), + &Target::default(), + ), + unchecked_modules, + }) + } + + /// If `unchecked` is true, the cache will use the `*_unchecked` wasmer functions for + /// loading modules from disk. + pub fn set_module_unchecked(&mut self, unchecked: bool) { + self.unchecked_modules = unchecked; + } + + /// Returns the path to the serialized module with the given checksum. 
+ fn module_file(&self, checksum: &Checksum) -> PathBuf { + let mut path = self.modules_path.clone(); + path.push(checksum.to_hex()); + path.set_extension("module"); + path + } + + /// Loads a serialized module from the file system and returns a Module + Engine, + /// along with a size estimation for the pair. + pub fn load( + &self, + checksum: &Checksum, + memory_limit: Option, + ) -> VmResult> { + let file_path = self.module_file(checksum); + + let engine = make_runtime_engine(memory_limit); + let result = if self.unchecked_modules { + unsafe { Module::deserialize_from_file_unchecked(&engine, &file_path) } + } else { + unsafe { Module::deserialize_from_file(&engine, &file_path) } + }; + match result { + Ok(module) => { + let module_size = module_size(&file_path)?; + Ok(Some(CachedModule { + module, + engine, + size_estimate: module_size + engine_size_estimate(), + })) + } + Err(DeserializeError::Io(err)) => match err.kind() { + io::ErrorKind::NotFound => Ok(None), + _ => Err(VmError::cache_err(format!( + "Error opening module file: {err}" + ))), + }, + Err(err) => Err(VmError::cache_err(format!( + "Error deserializing module: {err}" + ))), + } + } + + /// Stores a serialized module to the file system. Returns the size of the serialized module. + pub fn store(&mut self, checksum: &Checksum, module: &Module) -> VmResult { + mkdir_p(&self.modules_path) + .map_err(|_e| VmError::cache_err("Error creating modules directory"))?; + + let path = self.module_file(checksum); + catch_unwind(|| { + module + .serialize_to_file(&path) + .map_err(|e| VmError::cache_err(format!("Error writing module to disk: {e}"))) + }) + .map_err(|_| VmError::cache_err("Could not write module to disk"))??; + let module_size = module_size(&path)?; + Ok(module_size) + } + + /// Removes a serialized module from the file system. + /// + /// Returns true if the file existed and false if the file did not exist. 
+ pub fn remove(&mut self, checksum: &Checksum) -> VmResult { + let file_path = self.module_file(checksum); + + if file_path.exists() { + fs::remove_file(file_path) + .map_err(|_e| VmError::cache_err("Error deleting module from disk"))?; + Ok(true) + } else { + Ok(false) + } + } +} + +/// Returns the size of the module stored on disk +fn module_size(module_path: &Path) -> VmResult { + let module_size: usize = module_path + .metadata() + .map_err(|_e| VmError::cache_err("Error getting file metadata"))? // ensure error message is not system specific + .len() + .try_into() + .expect("Could not convert file size to usize"); + Ok(module_size) +} + +/// Creates an identifier for the Wasmer `Target` that is used for +/// cache invalidation. The output is reasonable human friendly to be useable +/// in file path component. +fn target_id(target: &Target) -> String { + // Use a custom Hasher implementation to avoid randomization. + let mut deterministic_hasher = crc32fast::Hasher::new(); + target.hash(&mut deterministic_hasher); + let hash = deterministic_hasher.finalize(); + format!("{}-{:08X}", target.triple(), hash) // print 4 byte hash as 8 hex characters +} + +/// The path to the latest version of the modules. 
+fn modules_path(base_path: &Path, wasmer_module_version: u32, target: &Target) -> PathBuf { + let version_dir = format!( + "{}-wasmer{wasmer_module_version}", + module_version_discriminator() + ); + let target_dir = target_id(target); + base_path.join(version_dir).join(target_dir) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::wasm_backend::{compile, make_compiling_engine}; + use tempfile::TempDir; + use wasmer::{imports, Instance as WasmerInstance, Store}; + use wasmer_middlewares::metering::set_remaining_points; + + const TESTING_MEMORY_LIMIT: Option = Some(Size::mebi(16)); + const TESTING_GAS_LIMIT: u64 = 500_000; + + const SOME_WAT: &str = r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add)) + "#; + + #[test] + fn file_system_cache_run() { + let tmp_dir = TempDir::new().unwrap(); + let mut cache = unsafe { FileSystemCache::new(tmp_dir.path(), false).unwrap() }; + + // Create module + let wasm = wat::parse_str(SOME_WAT).unwrap(); + let checksum = Checksum::generate(&wasm); + + // Module does not exist + let cached = cache.load(&checksum, TESTING_MEMORY_LIMIT).unwrap(); + assert!(cached.is_none()); + + // Store module + let compiling_engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = compile(&compiling_engine, &wasm).unwrap(); + cache.store(&checksum, &module).unwrap(); + + // Load module + let cached = cache.load(&checksum, TESTING_MEMORY_LIMIT).unwrap(); + assert!(cached.is_some()); + + // Check the returned module is functional. + // This is not really testing the cache API but better safe than sorry. + { + let CachedModule { + module: cached_module, + engine: runtime_engine, + size_estimate, + } = cached.unwrap(); + assert_eq!( + size_estimate, + module.serialize().unwrap().len() + 10240 /* engine size estimate */ + ); + let import_object = imports! 
{}; + let mut store = Store::new(runtime_engine); + let instance = WasmerInstance::new(&mut store, &cached_module, &import_object).unwrap(); + set_remaining_points(&mut store, &instance, TESTING_GAS_LIMIT); + let add_one = instance.exports.get_function("add_one").unwrap(); + let result = add_one.call(&mut store, &[42.into()]).unwrap(); + assert_eq!(result[0].unwrap_i32(), 43); + } + } + + #[test] + fn file_system_cache_store_uses_expected_path() { + let tmp_dir = TempDir::new().unwrap(); + let mut cache = unsafe { FileSystemCache::new(tmp_dir.path(), false).unwrap() }; + + // Create module + let wasm = wat::parse_str(SOME_WAT).unwrap(); + let checksum = Checksum::generate(&wasm); + + // Store module + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = compile(&engine, &wasm).unwrap(); + cache.store(&checksum, &module).unwrap(); + + let discriminator = raw_module_version_discriminator(); + let mut globber = glob::glob(&format!( + "{}/{}-wasmer7/**/{}.module", + tmp_dir.path().to_string_lossy(), + discriminator, + checksum + )) + .expect("Failed to read glob pattern"); + let file_path = globber.next().unwrap().unwrap(); + let _serialized_module = fs::read(file_path).unwrap(); + } + + #[test] + fn file_system_cache_remove_works() { + let tmp_dir = TempDir::new().unwrap(); + let mut cache = unsafe { FileSystemCache::new(tmp_dir.path(), false).unwrap() }; + + // Create module + let wasm = wat::parse_str(SOME_WAT).unwrap(); + let checksum = Checksum::generate(&wasm); + + // Store module + let compiling_engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = compile(&compiling_engine, &wasm).unwrap(); + cache.store(&checksum, &module).unwrap(); + + // It's there + assert!(cache + .load(&checksum, TESTING_MEMORY_LIMIT) + .unwrap() + .is_some()); + + // Remove module + let existed = cache.remove(&checksum).unwrap(); + assert!(existed); + + // it's gone now + assert!(cache + .load(&checksum, TESTING_MEMORY_LIMIT) + .unwrap() + .is_none()); 
+ + // Remove again + let existed = cache.remove(&checksum).unwrap(); + assert!(!existed); + } + + #[test] + fn target_id_works() { + let triple = wasmer::Triple { + architecture: wasmer::Architecture::X86_64, + vendor: target_lexicon::Vendor::Nintendo, + operating_system: target_lexicon::OperatingSystem::Fuchsia, + environment: target_lexicon::Environment::Gnu, + binary_format: target_lexicon::BinaryFormat::Coff, + }; + let target = Target::new(triple.clone(), wasmer::CpuFeature::POPCNT.into()); + let id = target_id(&target); + assert_eq!(id, "x86_64-nintendo-fuchsia-gnu-coff-01E9F9FE"); + // Changing CPU features changes the hash part + let target = Target::new(triple, wasmer::CpuFeature::AVX512DQ.into()); + let id = target_id(&target); + assert_eq!(id, "x86_64-nintendo-fuchsia-gnu-coff-93001945"); + + // Works for durrect target (hashing is deterministic); + let target = Target::default(); + let id1 = target_id(&target); + let id2 = target_id(&target); + assert_eq!(id1, id2); + } + + #[test] + fn modules_path_works() { + let base = PathBuf::from("modules"); + let triple = wasmer::Triple { + architecture: wasmer::Architecture::X86_64, + vendor: target_lexicon::Vendor::Nintendo, + operating_system: target_lexicon::OperatingSystem::Fuchsia, + environment: target_lexicon::Environment::Gnu, + binary_format: target_lexicon::BinaryFormat::Coff, + }; + let target = Target::new(triple, wasmer::CpuFeature::POPCNT.into()); + let p = modules_path(&base, 17, &target); + let discriminator = raw_module_version_discriminator(); + + assert_eq!( + p.as_os_str(), + if cfg!(windows) { + format!( + "modules\\{discriminator}-wasmer17\\x86_64-nintendo-fuchsia-gnu-coff-01E9F9FE" + ) + } else { + format!( + "modules/{discriminator}-wasmer17/x86_64-nintendo-fuchsia-gnu-coff-01E9F9FE" + ) + } + .as_str() + ); + } + + #[test] + fn module_version_discriminator_stays_the_same() { + let v1 = raw_module_version_discriminator(); + let v2 = raw_module_version_discriminator(); + let v3 = 
raw_module_version_discriminator(); + let v4 = raw_module_version_discriminator(); + + assert_eq!(v1, v2); + assert_eq!(v2, v3); + assert_eq!(v3, v4); + } + + #[test] + fn module_version_static() { + let version = raw_module_version_discriminator(); + assert_eq!(version, "5b35f8ce52"); + } +} diff --git a/vm/src/modules/in_memory_cache.rs b/vm/src/modules/in_memory_cache.rs new file mode 100644 index 000000000..be5257ec7 --- /dev/null +++ b/vm/src/modules/in_memory_cache.rs @@ -0,0 +1,318 @@ +use clru::{CLruCache, CLruCacheConfig, WeightScale}; +use std::collections::hash_map::RandomState; +use std::num::NonZeroUsize; + +use cosmwasm_std::Checksum; + +use super::cached_module::CachedModule; +use crate::{Size, VmError, VmResult}; + +// Minimum module size. +// Based on `examples/module_size.sh`, and the cosmwasm-plus contracts. +// We use an estimated *minimum* module size in order to compute a number of pre-allocated entries +// that are enough to handle a size-limited cache without requiring re-allocation / resizing. +// This will incurr an extra memory cost for the unused entries, but it's negligible: +// Assuming the cost per entry is 48 bytes, 10000 entries will have an extra cost of just ~500 kB. +// Which is a very small percentage (~0.03%) of our typical cache memory budget (2 GB). +const MINIMUM_MODULE_SIZE: Size = Size::kibi(250); + +#[derive(Debug)] +struct SizeScale; + +impl WeightScale for SizeScale { + #[inline] + fn weight(&self, key: &Checksum, value: &CachedModule) -> usize { + std::mem::size_of_val(key) + value.size_estimate + } +} + +/// An in-memory module cache +pub struct InMemoryCache { + modules: Option>, +} + +impl InMemoryCache { + /// Creates a new cache with the given size (in bytes) + /// and pre-allocated entries. 
+ pub fn new(size: Size) -> Self { + let preallocated_entries = size.0 / MINIMUM_MODULE_SIZE.0; + + InMemoryCache { + modules: if size.0 > 0 { + Some(CLruCache::with_config( + CLruCacheConfig::new(NonZeroUsize::new(size.0).unwrap()) + .with_memory(preallocated_entries) + .with_scale(SizeScale), + )) + } else { + None + }, + } + } + + pub fn store(&mut self, checksum: &Checksum, cached_module: CachedModule) -> VmResult<()> { + if let Some(modules) = &mut self.modules { + modules + .put_with_weight(*checksum, cached_module) + .map_err(|e| VmError::cache_err(format!("{e:?}")))?; + } + Ok(()) + } + + /// Looks up a module in the cache and creates a new module + pub fn load(&mut self, checksum: &Checksum) -> VmResult> { + if let Some(modules) = &mut self.modules { + match modules.get(checksum) { + Some(cached) => Ok(Some(cached.clone())), + None => Ok(None), + } + } else { + Ok(None) + } + } + + /// Returns the number of elements in the cache. + pub fn len(&self) -> usize { + self.modules + .as_ref() + .map(|modules| modules.len()) + .unwrap_or_default() + } + + /// Returns cumulative size of all elements in the cache. + /// + /// This is based on the values provided with `store`. No actual + /// memory size is measured here. 
+ pub fn size(&self) -> usize { + self.modules + .as_ref() + .map(|modules| modules.weight()) + .unwrap_or_default() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::wasm_backend::{compile, make_compiling_engine, make_runtime_engine}; + use std::mem; + use wasmer::{imports, Instance as WasmerInstance, Module, Store}; + use wasmer_middlewares::metering::set_remaining_points; + + const TESTING_MEMORY_LIMIT: Option = Some(Size::mebi(16)); + const TESTING_GAS_LIMIT: u64 = 500_000; + // Based on `examples/module_size.sh` + const TESTING_WASM_SIZE_FACTOR: usize = 18; + + const WAT1: &str = r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add) + )"#; + const WAT2: &str = r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_two") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 2 + i32.add) + )"#; + const WAT3: &str = r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_three") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 3 + i32.add) + )"#; + + #[test] + fn check_element_sizes() { + let key_size = mem::size_of::(); + assert_eq!(key_size, 32); + + let value_size = mem::size_of::(); + assert_eq!(value_size, 8); + + // Just in case we want to go that route + let boxed_value_size = mem::size_of::>(); + assert_eq!(boxed_value_size, 8); + } + + #[test] + fn in_memory_cache_run() { + let mut cache = InMemoryCache::new(Size::mebi(200)); + + // Create module + let wasm = wat::parse_str(WAT1).unwrap(); + let checksum = Checksum::generate(&wasm); + + // Module does not exist + let cache_entry = cache.load(&checksum).unwrap(); + assert!(cache_entry.is_none()); + + // Compile module + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let original = compile(&engine, &wasm).unwrap(); + + // Ensure original module can be executed + { + let 
mut store = Store::new(engine.clone()); + let instance = WasmerInstance::new(&mut store, &original, &imports! {}).unwrap(); + set_remaining_points(&mut store, &instance, TESTING_GAS_LIMIT); + let add_one = instance.exports.get_function("add_one").unwrap(); + let result = add_one.call(&mut store, &[42.into()]).unwrap(); + assert_eq!(result[0].unwrap_i32(), 43); + } + + // Store module + let module = CachedModule { + module: original, + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: wasm.len() * TESTING_WASM_SIZE_FACTOR, + }; + cache.store(&checksum, module).unwrap(); + + // Load module + let cached = cache.load(&checksum).unwrap().unwrap(); + + // Ensure cached module can be executed + { + let mut store = Store::new(engine); + let instance = WasmerInstance::new(&mut store, &cached.module, &imports! {}).unwrap(); + set_remaining_points(&mut store, &instance, TESTING_GAS_LIMIT); + let add_one = instance.exports.get_function("add_one").unwrap(); + let result = add_one.call(&mut store, &[42.into()]).unwrap(); + assert_eq!(result[0].unwrap_i32(), 43); + } + } + + #[test] + fn len_works() { + let mut cache = InMemoryCache::new(Size::mebi(2)); + + // Create module + let wasm1 = wat::parse_str(WAT1).unwrap(); + let checksum1 = Checksum::generate(&wasm1); + let wasm2 = wat::parse_str(WAT2).unwrap(); + let checksum2 = Checksum::generate(&wasm2); + let wasm3 = wat::parse_str(WAT3).unwrap(); + let checksum3 = Checksum::generate(&wasm3); + + assert_eq!(cache.len(), 0); + + // Add 1 + let engine1 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine1, &wasm1).unwrap(), + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 900_000, + }; + cache.store(&checksum1, module).unwrap(); + assert_eq!(cache.len(), 1); + + // Add 2 + let engine2 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine2, &wasm2).unwrap(), + engine: 
make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 900_000, + }; + cache.store(&checksum2, module).unwrap(); + assert_eq!(cache.len(), 2); + + // Add 3 (pushes out the previous two) + let engine3 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine3, &wasm3).unwrap(), + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 1_500_000, + }; + cache.store(&checksum3, module).unwrap(); + assert_eq!(cache.len(), 1); + } + + #[test] + fn size_works() { + let mut cache = InMemoryCache::new(Size::mebi(2)); + + // Create module + let wasm1 = wat::parse_str(WAT1).unwrap(); + let checksum1 = Checksum::generate(&wasm1); + let wasm2 = wat::parse_str(WAT2).unwrap(); + let checksum2 = Checksum::generate(&wasm2); + let wasm3 = wat::parse_str(WAT3).unwrap(); + let checksum3 = Checksum::generate(&wasm3); + + assert_eq!(cache.size(), 0); + + // Add 1 + let engine1 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine1, &wasm1).unwrap(), + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 900_000, + }; + cache.store(&checksum1, module).unwrap(); + assert_eq!(cache.size(), 900_032); + + // Add 2 + let engine2 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine2, &wasm2).unwrap(), + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 800_000, + }; + cache.store(&checksum2, module).unwrap(); + assert_eq!(cache.size(), 900_032 + 800_032); + + // Add 3 (pushes out the previous two) + let engine3 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine3, &wasm3).unwrap(), + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 1_500_000, + }; + cache.store(&checksum3, module).unwrap(); + assert_eq!(cache.size(), 1_500_032); + } + + #[test] + fn in_memory_cache_works_for_zero_size() { + // A cache size of 0 practically disabled 
the cache. It must work + // like any cache with insufficient space. + // We test all common methods here. + + let mut cache = InMemoryCache::new(Size::mebi(0)); + + // Create module + let wasm = wat::parse_str(WAT1).unwrap(); + let checksum = Checksum::generate(&wasm); + + // Module does not exist + let cache_entry = cache.load(&checksum).unwrap(); + assert!(cache_entry.is_none()); + assert_eq!(cache.len(), 0); + assert_eq!(cache.size(), 0); + + // Compile module + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let original = compile(&engine, &wasm).unwrap(); + + // Store module + let module = CachedModule { + module: original, + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: wasm.len() * TESTING_WASM_SIZE_FACTOR, + }; + cache.store(&checksum, module).unwrap(); + assert_eq!(cache.len(), 0); + assert_eq!(cache.size(), 0); + + // Load module + let cached = cache.load(&checksum).unwrap(); + assert!(cached.is_none()); + assert_eq!(cache.len(), 0); + assert_eq!(cache.size(), 0); + } +} diff --git a/vm/src/modules/mod.rs b/vm/src/modules/mod.rs new file mode 100644 index 000000000..ec3804c91 --- /dev/null +++ b/vm/src/modules/mod.rs @@ -0,0 +1,11 @@ +mod cached_module; +mod file_system_cache; +mod in_memory_cache; +mod pinned_memory_cache; +mod versioning; + +pub use cached_module::CachedModule; +pub use file_system_cache::FileSystemCache; +pub use in_memory_cache::InMemoryCache; +pub use pinned_memory_cache::PinnedMemoryCache; +pub use versioning::current_wasmer_module_version; diff --git a/vm/src/modules/pinned_memory_cache.rs b/vm/src/modules/pinned_memory_cache.rs new file mode 100644 index 000000000..662f5c1dd --- /dev/null +++ b/vm/src/modules/pinned_memory_cache.rs @@ -0,0 +1,332 @@ +use cosmwasm_std::Checksum; +use std::collections::HashMap; + +use super::cached_module::CachedModule; +use crate::VmResult; + +/// Struct storing some additional metadata, which is only of interest for the pinned cache, +/// alongside the cached 
module. +pub struct InstrumentedModule { + /// Number of loads from memory this module received + pub hits: u32, + /// The actual cached module + pub module: CachedModule, +} + +/// An pinned in memory module cache +pub struct PinnedMemoryCache { + modules: HashMap, +} + +impl PinnedMemoryCache { + /// Creates a new cache + pub fn new() -> Self { + PinnedMemoryCache { + modules: HashMap::new(), + } + } + + pub fn iter(&self) -> impl Iterator { + self.modules.iter() + } + + pub fn store(&mut self, checksum: &Checksum, cached_module: CachedModule) -> VmResult<()> { + self.modules.insert( + *checksum, + InstrumentedModule { + hits: 0, + module: cached_module, + }, + ); + + Ok(()) + } + + /// Removes a module from the cache + /// Not found modules are silently ignored. Potential integrity errors (wrong checksum) are not checked / enforced + pub fn remove(&mut self, checksum: &Checksum) -> VmResult<()> { + self.modules.remove(checksum); + Ok(()) + } + + /// Looks up a module in the cache and creates a new module + pub fn load(&mut self, checksum: &Checksum) -> VmResult> { + match self.modules.get_mut(checksum) { + Some(cached) => { + cached.hits = cached.hits.saturating_add(1); + Ok(Some(cached.module.clone())) + } + None => Ok(None), + } + } + + /// Returns true if and only if this cache has an entry identified by the given checksum + pub fn has(&self, checksum: &Checksum) -> bool { + self.modules.contains_key(checksum) + } + + /// Returns the number of elements in the cache. + pub fn len(&self) -> usize { + self.modules.len() + } + + /// Returns cumulative size of all elements in the cache. + /// + /// This is based on the values provided with `store`. No actual + /// memory size is measured here. 
+ pub fn size(&self) -> usize { + self.modules + .iter() + .map(|(key, module)| std::mem::size_of_val(key) + module.module.size_estimate) + .sum() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + wasm_backend::{compile, make_compiling_engine, make_runtime_engine}, + Size, + }; + use wasmer::{imports, Instance as WasmerInstance, Store}; + use wasmer_middlewares::metering::set_remaining_points; + + const TESTING_MEMORY_LIMIT: Option = Some(Size::mebi(16)); + const TESTING_GAS_LIMIT: u64 = 500_000; + + #[test] + fn pinned_memory_cache_run() { + let mut cache = PinnedMemoryCache::new(); + + // Create module + let wasm = wat::parse_str( + r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add) + )"#, + ) + .unwrap(); + let checksum = Checksum::generate(&wasm); + + // Module does not exist + let cache_entry = cache.load(&checksum).unwrap(); + assert!(cache_entry.is_none()); + + // Compile module + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let original = compile(&engine, &wasm).unwrap(); + + // Ensure original module can be executed + { + let mut store = Store::new(engine.clone()); + let instance = WasmerInstance::new(&mut store, &original, &imports! 
{}).unwrap(); + set_remaining_points(&mut store, &instance, TESTING_GAS_LIMIT); + let add_one = instance.exports.get_function("add_one").unwrap(); + let result = add_one.call(&mut store, &[42.into()]).unwrap(); + assert_eq!(result[0].unwrap_i32(), 43); + } + + // Store module + let module = CachedModule { + module: original, + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 0, + }; + cache.store(&checksum, module).unwrap(); + + // Load module + let cached = cache.load(&checksum).unwrap().unwrap(); + + // Ensure cached module can be executed + { + let mut store = Store::new(engine); + let instance = WasmerInstance::new(&mut store, &cached.module, &imports! {}).unwrap(); + set_remaining_points(&mut store, &instance, TESTING_GAS_LIMIT); + let add_one = instance.exports.get_function("add_one").unwrap(); + let result = add_one.call(&mut store, &[42.into()]).unwrap(); + assert_eq!(result[0].unwrap_i32(), 43); + } + } + + #[test] + fn has_works() { + let mut cache = PinnedMemoryCache::new(); + + // Create module + let wasm = wat::parse_str( + r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add) + )"#, + ) + .unwrap(); + let checksum = Checksum::generate(&wasm); + + assert!(!cache.has(&checksum)); + + // Add + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let original = compile(&engine, &wasm).unwrap(); + let module = CachedModule { + module: original, + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 0, + }; + cache.store(&checksum, module).unwrap(); + + assert!(cache.has(&checksum)); + + // Remove + cache.remove(&checksum).unwrap(); + + assert!(!cache.has(&checksum)); + } + + #[test] + fn hit_metric_works() { + let mut cache = PinnedMemoryCache::new(); + + // Create module + let wasm = wat::parse_str( + r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type 
$t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add) + )"#, + ) + .unwrap(); + let checksum = Checksum::generate(&wasm); + + assert!(!cache.has(&checksum)); + + // Add + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let original = compile(&engine, &wasm).unwrap(); + let module = CachedModule { + module: original, + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 0, + }; + cache.store(&checksum, module).unwrap(); + + let (_checksum, module) = cache + .iter() + .find(|(iter_checksum, _module)| **iter_checksum == checksum) + .unwrap(); + + assert_eq!(module.hits, 0); + + let _ = cache.load(&checksum).unwrap(); + let (_checksum, module) = cache + .iter() + .find(|(iter_checksum, _module)| **iter_checksum == checksum) + .unwrap(); + + assert_eq!(module.hits, 1); + } + + #[test] + fn len_works() { + let mut cache = PinnedMemoryCache::new(); + + // Create module + let wasm = wat::parse_str( + r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add) + )"#, + ) + .unwrap(); + let checksum = Checksum::generate(&wasm); + + assert_eq!(cache.len(), 0); + + // Add + let engine = make_compiling_engine(TESTING_MEMORY_LIMIT); + let original = compile(&engine, &wasm).unwrap(); + let module = CachedModule { + module: original, + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 0, + }; + cache.store(&checksum, module).unwrap(); + + assert_eq!(cache.len(), 1); + + // Remove + cache.remove(&checksum).unwrap(); + + assert_eq!(cache.len(), 0); + } + + #[test] + fn size_works() { + let mut cache = PinnedMemoryCache::new(); + + // Create module + let wasm1 = wat::parse_str( + r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_one") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 1 + i32.add) + )"#, + ) + .unwrap(); + let checksum1 = 
Checksum::generate(&wasm1); + let wasm2 = wat::parse_str( + r#"(module + (type $t0 (func (param i32) (result i32))) + (func $add_one (export "add_two") (type $t0) (param $p0 i32) (result i32) + local.get $p0 + i32.const 2 + i32.add) + )"#, + ) + .unwrap(); + let checksum2 = Checksum::generate(&wasm2); + + assert_eq!(cache.size(), 0); + + // Add 1 + let engine1 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine1, &wasm1).unwrap(), + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 500, + }; + cache.store(&checksum1, module).unwrap(); + assert_eq!(cache.size(), 532); + + // Add 2 + let engine2 = make_compiling_engine(TESTING_MEMORY_LIMIT); + let module = CachedModule { + module: compile(&engine2, &wasm2).unwrap(), + engine: make_runtime_engine(TESTING_MEMORY_LIMIT), + size_estimate: 300, + }; + cache.store(&checksum2, module).unwrap(); + assert_eq!(cache.size(), 532 + 332); + + // Remove 1 + cache.remove(&checksum1).unwrap(); + assert_eq!(cache.size(), 332); + + // Remove 2 + cache.remove(&checksum2).unwrap(); + assert_eq!(cache.size(), 0); + } +} diff --git a/vm/src/modules/versioning.rs b/vm/src/modules/versioning.rs new file mode 100644 index 000000000..459ae6abb --- /dev/null +++ b/vm/src/modules/versioning.rs @@ -0,0 +1,56 @@ +use crate::wasm_backend::{compile, make_compiling_engine}; + +/// This header prefix contains the module type (wasmer-universal) and +/// the magic value WASMER\0\0. +/// The full header also contains a little endian encoded uint32 version number +/// and a length that we do not check. 
+const EXPECTED_MODULE_HEADER_PREFIX: &[u8] = b"wasmer-universalWASMER\0\0"; + +const ENGINE_TYPE_LEN: usize = 16; // https://github.com/wasmerio/wasmer/blob/2.2.0-rc1/lib/engine-universal/src/artifact.rs#L48 +const METADATA_HEADER_LEN: usize = 16; // https://github.com/wasmerio/wasmer/blob/2.2.0-rc1/lib/engine/src/artifact.rs#L251-L252 + +fn current_wasmer_module_header() -> Vec { + // echo "(module)" > my.wat && wat2wasm my.wat && hexdump -C my.wasm + const WASM: &[u8] = b"\x00\x61\x73\x6d\x01\x00\x00\x00"; + let engine = make_compiling_engine(None); + let module = compile(&engine, WASM).unwrap(); + + let mut bytes = module.serialize().unwrap_or_default(); + + bytes.truncate(ENGINE_TYPE_LEN + METADATA_HEADER_LEN); + bytes.into() +} + +/// Obtains the module version from Wasmer that is currently used. +/// As long as the overall format does not change, this returns a +/// counter (1 for Wasmer 2.2.0). When the format changes in an +/// unexpected way (e.g. a different engine is used or the meta +/// format changes), this panics. That way we can ensure an +/// incompatible module format can be found early in the development +/// cycle. +pub fn current_wasmer_module_version() -> u32 { + let header = current_wasmer_module_header(); + if !header.starts_with(EXPECTED_MODULE_HEADER_PREFIX) { + panic!("Wasmer module format changed. 
Please update the expected version accordingly and bump MODULE_SERIALIZATION_VERSION."); + } + + let metadata = &header[header.len() - METADATA_HEADER_LEN..]; + u32::from_le_bytes((metadata[8..12]).try_into().unwrap()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn current_wasmer_module_header_works() { + let header = current_wasmer_module_header(); + assert!(header.starts_with(EXPECTED_MODULE_HEADER_PREFIX)); + } + + #[test] + fn current_wasmer_module_version_works() { + let version = current_wasmer_module_version(); + assert_eq!(version, 7); + } +} diff --git a/vm/src/parsed_wasm.rs b/vm/src/parsed_wasm.rs new file mode 100644 index 000000000..11c67210d --- /dev/null +++ b/vm/src/parsed_wasm.rs @@ -0,0 +1,293 @@ +use std::{fmt, mem, str}; + +use wasmer::wasmparser::{ + BinaryReaderError, CompositeType, Export, FuncToValidate, FunctionBody, Import, MemoryType, + Parser, Payload, TableType, ValidPayload, Validator, ValidatorResources, WasmFeatures, +}; + +use crate::{VmError, VmResult}; + +/// Opaque wrapper type implementing `Debug` +/// +/// The purpose of this type is to wrap types that do not implement `Debug` themselves. +/// For example, you have a large struct and derive `Debug` on it but one member does not implement the trait, that's where this type comes in. +/// +/// Instead of printing a full debug representation of the underlying data, it simply prints something akin to this: +/// +/// ```ignore +/// WrappedType { ... 
} +/// ``` +#[derive(Default)] +pub struct OpaqueDebug(pub T); + +impl fmt::Debug for OpaqueDebug { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct(std::any::type_name::()) + .finish_non_exhaustive() + } +} + +#[derive(Debug)] +pub enum FunctionValidator<'a> { + Pending(OpaqueDebug, FunctionBody<'a>)>>), + Success, + Error(BinaryReaderError), +} + +impl<'a> FunctionValidator<'a> { + fn push(&mut self, item: (FuncToValidate, FunctionBody<'a>)) { + let Self::Pending(OpaqueDebug(ref mut funcs)) = self else { + panic!("attempted to push function into non-pending validator"); + }; + + funcs.push(item); + } +} + +/// A parsed and validated wasm module. +/// It keeps track of the parts that are important for our static analysis and compatibility checks. +#[derive(Debug)] +pub struct ParsedWasm<'a> { + pub version: u16, + pub exports: Vec>, + pub imports: Vec>, + pub tables: Vec, + pub memories: Vec, + pub function_count: usize, + pub type_count: u32, + /// How many parameters a type has. 
+ /// The index is the type id + pub type_params: Vec, + /// How many parameters the function with the most parameters has + pub max_func_params: usize, + /// How many results the function with the most results has + pub max_func_results: usize, + /// How many function parameters are used in the module + pub total_func_params: usize, + /// Collections of functions that are potentially pending validation + pub func_validator: FunctionValidator<'a>, + /// Contract migrate version as defined in a custom section + pub contract_migrate_version: Option, +} + +impl<'a> ParsedWasm<'a> { + pub fn parse(wasm: &'a [u8]) -> VmResult { + let mut validator = Validator::new_with_features(WasmFeatures { + mutable_global: true, + saturating_float_to_int: true, + sign_extension: true, + multi_value: true, + floats: true, + + reference_types: false, + bulk_memory: false, + simd: false, + relaxed_simd: false, + threads: false, + tail_call: false, + multi_memory: false, + exceptions: false, + memory64: false, + extended_const: false, + component_model: false, + function_references: false, + memory_control: false, + gc: false, + component_model_values: false, + component_model_nested_names: false, + }); + + let mut this = Self { + version: 0, + exports: vec![], + imports: vec![], + tables: vec![], + memories: vec![], + function_count: 0, + type_count: 0, + type_params: Vec::new(), + max_func_params: 0, + max_func_results: 0, + total_func_params: 0, + func_validator: FunctionValidator::Pending(OpaqueDebug::default()), + contract_migrate_version: None, + }; + + for p in Parser::new(0).parse_all(wasm) { + let p = p?; + // validate the payload + if let ValidPayload::Func(fv, body) = validator.payload(&p)? 
{ + // also validate function bodies + this.func_validator.push((fv, body)); + this.function_count += 1; + } + + match p { + Payload::TypeSection(t) => { + this.type_count = 0; + // t.count() is a lower bound + this.type_params = Vec::with_capacity(t.count() as usize); + for group in t.into_iter() { + let types = group?.into_types(); + // update count + this.type_count += types.len() as u32; + + for ty in types { + match ty.composite_type { + CompositeType::Func(ft) => { + this.type_params.push(ft.params().len()); + + this.max_func_params = + core::cmp::max(ft.params().len(), this.max_func_params); + this.max_func_results = + core::cmp::max(ft.results().len(), this.max_func_results); + } + CompositeType::Array(_) | CompositeType::Struct(_) => { + // ignoring these for now, as they are only available with the GC + // proposal and we explicitly disabled that above + } + } + } + } + } + Payload::FunctionSection(section) => { + // In valid Wasm, the function section always has to come after the type section + // (see https://www.w3.org/TR/2019/REC-wasm-core-1-20191205/#modules%E2%91%A0%E2%93%AA), + // so we can assume that the type_params map is already filled at this point + + for a in section { + let type_index = a? as usize; + this.total_func_params += + this.type_params.get(type_index).ok_or_else(|| { + // this will also be thrown if the wasm section order is invalid + VmError::static_validation_err( + "Wasm bytecode error: function uses unknown type index", + ) + })? + } + } + Payload::Version { num, .. 
} => this.version = num,
+                Payload::ImportSection(i) => {
+                    this.imports = i.into_iter().collect::<Result<Vec<_>, _>>()?;
+                }
+                Payload::TableSection(t) => {
+                    this.tables = t
+                        .into_iter()
+                        .map(|r| r.map(|t| t.ty))
+                        .collect::<Result<Vec<_>, _>>()?;
+                }
+                Payload::MemorySection(m) => {
+                    this.memories = m.into_iter().collect::<Result<Vec<_>, _>>()?;
+                }
+                Payload::ExportSection(e) => {
+                    this.exports = e.into_iter().collect::<Result<Vec<_>, _>>()?;
+                }
+                Payload::CustomSection(reader) if reader.name() == "cw_migrate_version" => {
+                    // This is supposed to be valid UTF-8
+                    let raw_version = str::from_utf8(reader.data())
+                        .map_err(|err| VmError::static_validation_err(err.to_string()))?;
+
+                    this.contract_migrate_version = Some(
+                        raw_version
+                            .parse::<u64>()
+                            .map_err(|err| VmError::static_validation_err(err.to_string()))?,
+                    );
+                }
+                _ => {} // ignore everything else
+            }
+        }
+
+        Ok(this)
+    }
+
+    /// Perform the expensive operation of validating each function body
+    ///
+    /// Note: This function caches the output of this function into the field `func_validator` so repeated invocations are cheap.
+ pub fn validate_funcs(&mut self) -> VmResult<()> { + match self.func_validator { + FunctionValidator::Pending(OpaqueDebug(ref mut funcs)) => { + let result = (|| { + let mut allocations = <_>::default(); + for (func, body) in mem::take(funcs) { + let mut validator = func.into_validator(allocations); + validator.validate(&body)?; + allocations = validator.into_allocations(); + } + Ok(()) + })(); + + self.func_validator = match result { + Ok(()) => FunctionValidator::Success, + Err(err) => FunctionValidator::Error(err), + }; + + self.validate_funcs() + } + FunctionValidator::Success => Ok(()), + FunctionValidator::Error(ref err) => Err(err.clone().into()), + } + } +} + +#[cfg(test)] +mod test { + use super::ParsedWasm; + + #[test] + fn read_migrate_version() { + let wasm_data = + wat::parse_str(r#"( module ( @custom "cw_migrate_version" "42" ) )"#).unwrap(); + let parsed = ParsedWasm::parse(&wasm_data).unwrap(); + + assert_eq!(parsed.contract_migrate_version, Some(42)); + } + + #[test] + fn read_migrate_version_fails() { + let wasm_data = + wat::parse_str(r#"( module ( @custom "cw_migrate_version" "not a number" ) )"#) + .unwrap(); + assert!(ParsedWasm::parse(&wasm_data).is_err()); + } + + #[test] + fn parsed_wasm_counts_functions_correctly() { + let wasm = wat::parse_str(r#"(module)"#).unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + assert_eq!(module.function_count, 0); + + let wasm = wat::parse_str( + r#"(module + (type (func)) + (func (type 0) nop) + (func (type 0) nop) + (export "foo" (func 0)) + (export "bar" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + assert_eq!(module.function_count, 2); + } + + #[test] + fn parsed_wasm_counts_func_io_correctly() { + let wasm = wat::parse_str(r#"(module)"#).unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + assert_eq!(module.max_func_params, 0); + assert_eq!(module.max_func_results, 0); + + let wasm = wat::parse_str( + r#"(module + (type (func (param i32 i32 
i32) (result i32)))
+            (type (func (param i32) (result i32 i32)))
+            (func (type 1) i32.const 42 i32.const 42)
+            (func (type 0) i32.const 42)
+            )"#,
+        )
+        .unwrap();
+        let module = ParsedWasm::parse(&wasm).unwrap();
+        assert_eq!(module.max_func_params, 3);
+        assert_eq!(module.max_func_results, 2);
+    }
+}
diff --git a/vm/src/sections.rs b/vm/src/sections.rs
new file mode 100644
index 000000000..b1a95cbe8
--- /dev/null
+++ b/vm/src/sections.rs
@@ -0,0 +1,161 @@
+use crate::conversion::to_u32;
+use crate::{CommunicationError, VmResult};
+
+/// Decodes sections of data into multiple slices.
+///
+/// Each encoded section is suffixed by a section length, encoded as big endian uint32.
+///
+/// See also: [`encode_sections`].
+pub fn decode_sections(data: &[u8]) -> Result<Vec<&[u8]>, CommunicationError> {
+    let mut result: Vec<&[u8]> = vec![];
+    let mut remaining_len = data.len();
+    while remaining_len >= 4 {
+        let tail_len = u32::from_be_bytes([
+            data[remaining_len - 4],
+            data[remaining_len - 3],
+            data[remaining_len - 2],
+            data[remaining_len - 1],
+        ]) as usize;
+        let tail_len_idx = remaining_len - 4; // index of the first byte of the tail length
+        let section_start = tail_len_idx
+            .checked_sub(tail_len)
+            .ok_or_else(|| CommunicationError::invalid_section("section length overflow"))?;
+        result.push(&data[section_start..tail_len_idx]);
+        remaining_len = section_start;
+    }
+    if remaining_len > 0 {
+        return Err(CommunicationError::invalid_section(
+            "extra data outside of any section",
+        ));
+    }
+    result.reverse();
+    Ok(result)
+}
+
+/// Encodes multiple sections of data into one vector.
+///
+/// Each section is suffixed by a section length encoded as big endian uint32.
+/// Using suffixes instead of prefixes allows reading sections in reverse order,
+/// such that the first element does not need to be re-allocated if the contract's
+/// data structure supports truncation (such as a Rust vector).
+///
+/// The resulting data looks like this:
+///
+/// ```ignore
+/// section1 || section1_len || section2 || section2_len || section3 || section3_len || …
+/// ```
+#[allow(dead_code)]
+pub fn encode_sections(sections: &[Vec<u8>]) -> VmResult<Vec<u8>> {
+    let mut out_len: usize = sections.iter().map(|section| section.len()).sum();
+    out_len += 4 * sections.len();
+    let mut out_data = Vec::with_capacity(out_len);
+    for section in sections {
+        let section_len = to_u32(section.len())?.to_be_bytes();
+        out_data.extend(section);
+        out_data.extend_from_slice(&section_len);
+    }
+    debug_assert_eq!(out_data.len(), out_len);
+    debug_assert_eq!(out_data.capacity(), out_len);
+    Ok(out_data)
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn decode_sections_works_for_empty_sections() {
+        let dec = decode_sections(&[]).unwrap();
+        assert_eq!(dec.len(), 0);
+        let dec = decode_sections(b"\0\0\0\0").unwrap();
+        assert_eq!(dec, &[&[0u8; 0]]);
+        let dec = decode_sections(b"\0\0\0\0\0\0\0\0").unwrap();
+        assert_eq!(dec, &[&[0u8; 0]; 2]);
+        let dec = decode_sections(b"\0\0\0\0\0\0\0\0\0\0\0\0").unwrap();
+        assert_eq!(dec, &[&[0u8; 0]; 3]);
+    }
+
+    #[test]
+    fn decode_sections_works_for_one_element() {
+        let dec = decode_sections(b"\xAA\0\0\0\x01").unwrap();
+        assert_eq!(dec, &[vec![0xAA]]);
+        let dec = decode_sections(b"\xAA\xBB\0\0\0\x02").unwrap();
+        assert_eq!(dec, &[vec![0xAA, 0xBB]]);
+        let dec =
decode_sections(b"\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\0\0\x01\x15").unwrap(); + assert_eq!(dec, &[vec![0x9D; 277]]); + } + + #[test] + fn decode_sections_works_for_two_elements() { + let data = b"\xAA\0\0\0\x01\xBB\xCC\0\0\0\x02".to_vec(); + assert_eq!( + decode_sections(&data).unwrap(), + &[vec![0xAA], vec![0xBB, 0xCC]] + ); + let data = b"\xDE\xEF\x62\0\0\0\x03\0\0\0\0".to_vec(); + assert_eq!( + decode_sections(&data).unwrap(), + &[vec![0xDE, 0xEF, 0x62], vec![]] + ); + let data = b"\0\0\0\0\xDE\xEF\x62\0\0\0\x03".to_vec(); + assert_eq!( + decode_sections(&data).unwrap(), + &[vec![], vec![0xDE, 0xEF, 0x62]] + ); + let data = b"\0\0\0\0\0\0\0\0".to_vec(); + assert_eq!(decode_sections(&data).unwrap(), &[vec![0u8; 0], vec![]]); + let data = 
b"\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\0\0\0\x13\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\0\0\x01\x15".to_vec(); + assert_eq!( + decode_sections(&data).unwrap(), + &[vec![0xFF; 19], vec![0x9D; 277]] + ); + } + + #[test] + fn decode_sections_works_for_multiple_elements() { + let dec = decode_sections(b"\xAA\0\0\0\x01").unwrap(); + assert_eq!(dec, &[vec![0xAA]]); + let dec = decode_sections(b"\xAA\0\0\0\x01\xDE\xDE\0\0\0\x02").unwrap(); + assert_eq!(dec, &[vec![0xAA], vec![0xDE, 0xDE]]); + let dec = decode_sections(b"\xAA\0\0\0\x01\xDE\xDE\0\0\0\x02\0\0\0\0").unwrap(); + assert_eq!(dec, &[vec![0xAA], vec![0xDE, 0xDE], vec![]]); + let dec = decode_sections(b"\xAA\0\0\0\x01\xDE\xDE\0\0\0\x02\0\0\0\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\0\0\0\x13").unwrap(); + assert_eq!(dec, &[vec![0xAA], vec![0xDE, 0xDE], vec![], vec![0xFF; 19]]); + } + + 
#[test] + fn encode_sections_works_for_empty_sections() { + let enc = encode_sections(&[]).unwrap(); + assert_eq!(enc, b"" as &[u8]); + let enc = encode_sections(&[vec![]]).unwrap(); + assert_eq!(enc, b"\0\0\0\0" as &[u8]); + let enc = encode_sections(&[vec![], vec![]]).unwrap(); + assert_eq!(enc, b"\0\0\0\0\0\0\0\0" as &[u8]); + let enc = encode_sections(&[vec![], vec![], vec![]]).unwrap(); + assert_eq!(enc, b"\0\0\0\0\0\0\0\0\0\0\0\0" as &[u8]); + } + + #[test] + fn encode_sections_works_for_one_element() { + let enc = encode_sections(&[]).unwrap(); + assert_eq!(enc, b"" as &[u8]); + let enc = encode_sections(&[vec![0xAA]]).unwrap(); + assert_eq!(enc, b"\xAA\0\0\0\x01" as &[u8]); + let enc = encode_sections(&[vec![0xAA, 0xBB]]).unwrap(); + assert_eq!(enc, b"\xAA\xBB\0\0\0\x02" as &[u8]); + let enc = encode_sections(&[vec![0x9D; 277]]).unwrap(); + assert_eq!(enc, b"\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\x9D\0\0\x01\x15"
 as &[u8]); + } + + #[test] + fn encode_sections_works_for_multiple_elements() { + let enc = encode_sections(&[vec![0xAA]]).unwrap(); + assert_eq!(enc, b"\xAA\0\0\0\x01" as &[u8]); + let enc = encode_sections(&[vec![0xAA], vec![0xDE, 0xDE]]).unwrap(); + assert_eq!(enc, b"\xAA\0\0\0\x01\xDE\xDE\0\0\0\x02" as &[u8]); + let enc = encode_sections(&[vec![0xAA], vec![0xDE, 0xDE], vec![]]).unwrap(); + assert_eq!(enc, b"\xAA\0\0\0\x01\xDE\xDE\0\0\0\x02\0\0\0\0" as &[u8]); + let enc = encode_sections(&[vec![0xAA], vec![0xDE, 0xDE], vec![], vec![0xFF; 19]]).unwrap(); + assert_eq!(enc, b"\xAA\0\0\0\x01\xDE\xDE\0\0\0\x02\0\0\0\0\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\0\0\0\x13" as &[u8]); + } +} diff --git a/vm/src/serde.rs b/vm/src/serde.rs new file mode 100644 index 000000000..ce6c9e0f8 --- /dev/null +++ b/vm/src/serde.rs @@ -0,0 +1,129 @@ +//! This file simply re-exports some methods from serde_json +//! The reason is two fold: +//! 1. To easily ensure that all calling libraries use the same version (minimize code size) +//! 2. To allow us to switch out to eg. serde-json-core more easily +use serde::{Deserialize, Serialize}; +use std::any::type_name; + +use crate::errors::{VmError, VmResult}; + +/// Deserializes JSON data into a document of type `T`. +/// +/// The deserialization limit ensure it is not possible to slow down the execution by +/// providing overly large JSON documents. 
+pub fn from_slice<'a, T>(value: &'a [u8], deserialization_limit: usize) -> VmResult<T>
+where
+    T: Deserialize<'a>,
+{
+    if value.len() > deserialization_limit {
+        return Err(VmError::deserialization_limit_exceeded(
+            value.len(),
+            deserialization_limit,
+        ));
+    }
+    serde_json::from_slice(value).map_err(|e| VmError::parse_err(type_name::<T>(), e))
+}
+
+pub fn to_vec<T>(data: &T) -> VmResult<Vec<u8>>
+where
+    T: Serialize + ?Sized,
+{
+    serde_json::to_vec(data).map_err(|e| VmError::serialize_err(type_name::<T>(), e))
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use serde::Deserialize;
+
+    const LIMIT: usize = 20_000;
+
+    #[derive(Serialize, Deserialize, Debug, PartialEq)]
+    #[serde(rename_all = "snake_case")]
+    enum SomeMsg {
+        Refund {},
+        ReleaseAll {
+            image: String,
+            amount: u32,
+            time: u64,
+            karma: i32,
+        },
+        Cowsay {
+            text: String,
+        },
+    }
+
+    #[test]
+    fn from_slice_works() {
+        let deserialized: SomeMsg = from_slice(br#"{"refund":{}}"#, LIMIT).unwrap();
+        assert_eq!(deserialized, SomeMsg::Refund {});
+
+        let deserialized: SomeMsg = from_slice(
+            br#"{"release_all":{"image":"foo","amount":42,"time":18446744073709551615,"karma":-17}}"#, LIMIT
+        )
+        .unwrap();
+        assert_eq!(
+            deserialized,
+            SomeMsg::ReleaseAll {
+                image: "foo".to_string(),
+                amount: 42,
+                time: 18446744073709551615,
+                karma: -17
+            }
+        );
+    }
+
+    #[test]
+    fn from_slice_works_for_special_chars() {
+        let deserialized: SomeMsg =
+            from_slice(br#"{"cowsay":{"text":"foo\"bar\\\"bla"}}"#, LIMIT).unwrap();
+        assert_eq!(
+            deserialized,
+            SomeMsg::Cowsay {
+                text: "foo\"bar\\\"bla".to_string(),
+            }
+        );
+    }
+
+    #[test]
+    fn from_slice_errors_when_exceeding_deserialization_limit() {
+        let result = from_slice::<SomeMsg>(br#"{"refund":{}}"#, 5);
+        match result.unwrap_err() {
+            VmError::DeserializationLimitExceeded {
+                length, max_length, ..
+ } => { + assert_eq!(length, 13); + assert_eq!(max_length, 5); + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn to_vec_works() { + let msg = SomeMsg::Refund {}; + let serialized = to_vec(&msg).unwrap(); + assert_eq!(serialized, br#"{"refund":{}}"#); + + let msg = SomeMsg::ReleaseAll { + image: "foo".to_string(), + amount: 42, + time: 9007199254740999, // Number.MAX_SAFE_INTEGER + 7 + karma: -17, + }; + let serialized = String::from_utf8(to_vec(&msg).unwrap()).unwrap(); + assert_eq!( + serialized, + r#"{"release_all":{"image":"foo","amount":42,"time":9007199254740999,"karma":-17}}"# + ); + } + + #[test] + fn to_vec_works_for_special_chars() { + let msg = SomeMsg::Cowsay { + text: "foo\"bar\\\"bla".to_string(), + }; + let serialized = String::from_utf8(to_vec(&msg).unwrap()).unwrap(); + assert_eq!(serialized, r#"{"cowsay":{"text":"foo\"bar\\\"bla"}}"#); + } +} diff --git a/vm/src/size.rs b/vm/src/size.rs new file mode 100644 index 000000000..b4d690ccd --- /dev/null +++ b/vm/src/size.rs @@ -0,0 +1,78 @@ +use serde::{Deserialize, Serialize}; + +#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +pub struct Size(pub(crate) usize); + +impl Size { + /// Creates a size of `n` + pub const fn new(n: usize) -> Self { + Size(n) + } + + /// Creates a size of `n` kilo + pub const fn kilo(n: usize) -> Self { + Size(n * 1000) + } + + /// Creates a size of `n` kibi + pub const fn kibi(n: usize) -> Self { + Size(n * 1024) + } + + /// Creates a size of `n` mega + pub const fn mega(n: usize) -> Self { + Size(n * 1000 * 1000) + } + + /// Creates a size of `n` mebi + pub const fn mebi(n: usize) -> Self { + Size(n * 1024 * 1024) + } + + /// Creates a size of `n` giga + pub const fn giga(n: usize) -> Self { + Size(n * 1000 * 1000 * 1000) + } + + /// Creates a size of `n` gibi + pub const fn gibi(n: usize) -> Self { + Size(n * 1024 * 1024 * 1024) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn 
constructors_work() { + assert_eq!(Size::new(0).0, Size(0).0); + assert_eq!(Size::new(3).0, Size(3).0); + + assert_eq!(Size::kilo(0).0, 0); + assert_eq!(Size::kilo(3).0, 3000); + + assert_eq!(Size::kibi(0).0, 0); + assert_eq!(Size::kibi(3).0, 3072); + + assert_eq!(Size::mega(0).0, 0); + assert_eq!(Size::mega(3).0, 3000000); + + assert_eq!(Size::mebi(0).0, 0); + assert_eq!(Size::mebi(3).0, 3145728); + + assert_eq!(Size::giga(0).0, 0); + assert_eq!(Size::giga(3).0, 3000000000); + + assert_eq!(Size::gibi(0).0, 0); + assert_eq!(Size::gibi(3).0, 3221225472); + } + + #[test] + fn implements_debug() { + assert_eq!(format!("{:?}", Size::new(0)), "Size(0)"); + assert_eq!(format!("{:?}", Size::new(123)), "Size(123)"); + assert_eq!(format!("{:?}", Size::kibi(2)), "Size(2048)"); + assert_eq!(format!("{:?}", Size::mebi(1)), "Size(1048576)"); + } +} diff --git a/vm/src/static_analysis.rs b/vm/src/static_analysis.rs new file mode 100644 index 000000000..f56cbc52b --- /dev/null +++ b/vm/src/static_analysis.rs @@ -0,0 +1,288 @@ +use std::collections::HashSet; + +use strum::{AsRefStr, Display, EnumString}; +use wasmer::wasmparser::ExternalKind; + +use crate::parsed_wasm::ParsedWasm; + +/// An enum containing all available contract entrypoints. +/// This also provides conversions to and from strings. 
+#[derive(PartialEq, Eq, Debug, Clone, Copy, Hash, EnumString, Display, AsRefStr)]
+pub enum Entrypoint {
+    #[strum(serialize = "instantiate")]
+    Instantiate,
+    #[strum(serialize = "execute")]
+    Execute,
+    #[strum(serialize = "migrate")]
+    Migrate,
+    #[strum(serialize = "sudo")]
+    Sudo,
+    #[strum(serialize = "reply")]
+    Reply,
+    #[strum(serialize = "query")]
+    Query,
+    #[strum(serialize = "ibc_channel_open")]
+    IbcChannelOpen,
+    #[strum(serialize = "ibc_channel_connect")]
+    IbcChannelConnect,
+    #[strum(serialize = "ibc_channel_close")]
+    IbcChannelClose,
+    #[strum(serialize = "ibc_packet_receive")]
+    IbcPacketReceive,
+    #[strum(serialize = "ibc_packet_ack")]
+    IbcPacketAck,
+    #[strum(serialize = "ibc_packet_timeout")]
+    IbcPacketTimeout,
+}
+
+// sort entrypoints by their &str representation
+impl PartialOrd for Entrypoint {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+impl Ord for Entrypoint {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        self.as_ref().cmp(other.as_ref())
+    }
+}
+
+pub const REQUIRED_IBC_EXPORTS: &[Entrypoint] = &[
+    Entrypoint::IbcChannelOpen,
+    Entrypoint::IbcChannelConnect,
+    Entrypoint::IbcChannelClose,
+    Entrypoint::IbcPacketReceive,
+    Entrypoint::IbcPacketAck,
+    Entrypoint::IbcPacketTimeout,
+];
+
+/// A trait that allows accessing shared functionality of `parity_wasm::elements::Module`
+/// and `wasmer::Module` in a shared fashion.
+pub trait ExportInfo {
+    /// Returns all exported function names with the given prefix
+    fn exported_function_names(self, prefix: Option<&str>) -> HashSet<String>;
+}
+
+impl ExportInfo for &ParsedWasm<'_> {
+    fn exported_function_names(self, prefix: Option<&str>) -> HashSet<String> {
+        self.exports
+            .iter()
+            .filter_map(|export| match export.kind {
+                ExternalKind::Func => Some(export.name),
+                _ => None,
+            })
+            .filter(|name| {
+                if let Some(required_prefix) = prefix {
+                    name.starts_with(required_prefix)
+                } else {
+                    true
+                }
+            })
+            .map(|name| name.to_string())
+            .collect()
+    }
+}
+
+impl ExportInfo for &wasmer::Module {
+    fn exported_function_names(self, prefix: Option<&str>) -> HashSet<String> {
+        self.exports()
+            .functions()
+            .filter_map(|function_export| {
+                let name = function_export.name();
+                if let Some(required_prefix) = prefix {
+                    if name.starts_with(required_prefix) {
+                        Some(name.to_string())
+                    } else {
+                        None
+                    }
+                } else {
+                    Some(name.to_string())
+                }
+            })
+            .collect()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::str::FromStr;
+
+    use crate::wasm_backend::make_compiler_config;
+    use crate::VmError;
+
+    use super::*;
+    use wasmer::Store;
+
+    static CONTRACT: &[u8] = include_bytes!("../testdata/hackatom.wasm");
+    static CORRUPTED: &[u8] = include_bytes!("../testdata/corrupted.wasm");
+
+    #[test]
+    fn deserialize_exports_works() {
+        let module = ParsedWasm::parse(CONTRACT).unwrap();
+        assert_eq!(module.version, 1);
+
+        let exported_functions = module
+            .exports
+            .iter()
+            .filter(|entry| matches!(entry.kind, ExternalKind::Func));
+        assert_eq!(exported_functions.count(), 8); // 4 required exports plus "execute", "migrate", "query" and "sudo"
+
+        let exported_memories = module
+            .exports
+            .iter()
+            .filter(|entry| matches!(entry.kind, ExternalKind::Memory));
+        assert_eq!(exported_memories.count(), 1);
+    }
+
+    #[test]
+    fn deserialize_wasm_corrupted_data() {
+        match ParsedWasm::parse(CORRUPTED)
+            .and_then(|mut parsed| parsed.validate_funcs())
+            .unwrap_err()
+        {
VmError::StaticValidationErr { msg, .. } => { + assert!(msg.starts_with("Wasm bytecode could not be deserialized.")) + } + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn exported_function_names_works_for_parity_with_no_prefix() { + let wasm = wat::parse_str(r#"(module)"#).unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + let exports = module.exported_function_names(None); + assert_eq!(exports, HashSet::new()); + + let wasm = wat::parse_str( + r#"(module + (memory 3) + (export "memory" (memory 0)) + + (type (func)) + (func (type 0) nop) + (export "foo" (func 0)) + (export "bar" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + let exports = module.exported_function_names(None); + assert_eq!( + exports, + HashSet::from_iter(vec!["foo".to_string(), "bar".to_string()]) + ); + } + + #[test] + fn exported_function_names_works_for_parity_with_prefix() { + let wasm = wat::parse_str(r#"(module)"#).unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + let exports = module.exported_function_names(Some("b")); + assert_eq!(exports, HashSet::new()); + + let wasm = wat::parse_str( + r#"(module + (memory 3) + (export "memory" (memory 0)) + + (type (func)) + (func (type 0) nop) + (export "foo" (func 0)) + (export "bar" (func 0)) + (export "baz" (func 0)) + )"#, + ) + .unwrap(); + let module = ParsedWasm::parse(&wasm).unwrap(); + let exports = module.exported_function_names(Some("b")); + assert_eq!( + exports, + HashSet::from_iter(vec!["bar".to_string(), "baz".to_string()]) + ); + } + + #[test] + fn exported_function_names_works_for_wasmer_with_no_prefix() { + let wasm = wat::parse_str(r#"(module)"#).unwrap(); + let compiler = make_compiler_config(); + let store = Store::new(compiler); + let module = wasmer::Module::new(&store, wasm).unwrap(); + let exports = module.exported_function_names(None); + assert_eq!(exports, HashSet::new()); + + let wasm = wat::parse_str( + r#"(module + (memory 3) + (export 
"memory" (memory 0)) + + (type (func)) + (func (type 0) nop) + (export "foo" (func 0)) + (export "bar" (func 0)) + )"#, + ) + .unwrap(); + let compiler = make_compiler_config(); + let store = Store::new(compiler); + let module = wasmer::Module::new(&store, wasm).unwrap(); + let exports = module.exported_function_names(None); + assert_eq!( + exports, + HashSet::from_iter(vec!["foo".to_string(), "bar".to_string()]) + ); + } + + #[test] + fn exported_function_names_works_for_wasmer_with_prefix() { + let wasm = wat::parse_str(r#"(module)"#).unwrap(); + let compiler = make_compiler_config(); + let store = Store::new(compiler); + let module = wasmer::Module::new(&store, wasm).unwrap(); + let exports = module.exported_function_names(Some("b")); + assert_eq!(exports, HashSet::new()); + + let wasm = wat::parse_str( + r#"(module + (memory 3) + (export "memory" (memory 0)) + + (type (func)) + (func (type 0) nop) + (export "foo" (func 0)) + (export "bar" (func 0)) + (export "baz" (func 0)) + )"#, + ) + .unwrap(); + let compiler = make_compiler_config(); + let store = Store::new(compiler); + let module = wasmer::Module::new(&store, wasm).unwrap(); + let exports = module.exported_function_names(Some("b")); + assert_eq!( + exports, + HashSet::from_iter(vec!["bar".to_string(), "baz".to_string()]) + ); + } + + #[test] + fn entrypoint_from_string_works() { + assert_eq!( + Entrypoint::from_str("ibc_channel_open").unwrap(), + Entrypoint::IbcChannelOpen + ); + + assert!(Entrypoint::from_str("IbcChannelConnect").is_err()); + } + + #[test] + fn entrypoint_to_string_works() { + assert_eq!( + Entrypoint::IbcPacketTimeout.to_string(), + "ibc_packet_timeout" + ); + + let static_str: &'static str = Entrypoint::IbcPacketReceive.as_ref(); + assert_eq!(static_str, "ibc_packet_receive"); + } +} diff --git a/vm/src/testing/calls.rs b/vm/src/testing/calls.rs new file mode 100644 index 000000000..fd09e6e15 --- /dev/null +++ b/vm/src/testing/calls.rs @@ -0,0 +1,268 @@ +//! 
This file has some helpers for integration tests.
+//! They should be imported via full path to ensure there is no confusion
+//! use cosmwasm_vm::testing::X
+use schemars::JsonSchema;
+use serde::{de::DeserializeOwned, Serialize};
+
+use cosmwasm_std::{
+    ContractResult, CustomMsg, Env, MessageInfo, MigrateInfo, QueryResponse, Reply, Response,
+};
+#[cfg(feature = "stargate")]
+use cosmwasm_std::{
+    Ibc3ChannelOpenResponse, IbcBasicResponse, IbcChannelCloseMsg, IbcChannelConnectMsg,
+    IbcChannelOpenMsg, IbcPacketAckMsg, IbcPacketReceiveMsg, IbcPacketTimeoutMsg,
+    IbcReceiveResponse,
+};
+
+use crate::calls::{
+    call_execute, call_instantiate, call_migrate, call_migrate_with_info, call_query, call_reply,
+    call_sudo,
+};
+#[cfg(feature = "stargate")]
+use crate::calls::{
+    call_ibc_channel_close, call_ibc_channel_connect, call_ibc_channel_open, call_ibc_packet_ack,
+    call_ibc_packet_receive, call_ibc_packet_timeout,
+};
+use crate::instance::Instance;
+use crate::serde::to_vec;
+use crate::{BackendApi, Querier, Storage};
+
+/// Mimics the call signature of the smart contracts.
+/// Thus it moves env and msg rather than take them as reference.
+/// This is inefficient here, but only used in test code.
+pub fn instantiate<A, S, Q, M, U>(
+    instance: &mut Instance<A, S, Q>,
+    env: Env,
+    info: MessageInfo,
+    msg: M,
+) -> ContractResult<Response<U>>
+where
+    A: BackendApi + 'static,
+    S: Storage + 'static,
+    Q: Querier + 'static,
+    M: Serialize + JsonSchema,
+    U: DeserializeOwned + CustomMsg,
+{
+    let serialized_msg = to_vec(&msg).expect("Testing error: Could not serialize request message");
+    call_instantiate(instance, &env, &info, &serialized_msg).expect("VM error")
+}
+
+// execute mimics the call signature of the smart contracts.
+// thus it moves env and msg rather than take them as reference.
+// this is inefficient here, but only used in test code +pub fn execute( + instance: &mut Instance, + env: Env, + info: MessageInfo, + msg: M, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + M: Serialize + JsonSchema, + U: DeserializeOwned + CustomMsg, +{ + let serialized_msg = to_vec(&msg).expect("Testing error: Could not serialize request message"); + call_execute(instance, &env, &info, &serialized_msg).expect("VM error") +} + +// migrate mimics the call signature of the smart contracts. +// thus it moves env and msg rather than take them as reference. +// this is inefficient here, but only used in test code +pub fn migrate( + instance: &mut Instance, + env: Env, + msg: M, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + M: Serialize + JsonSchema, + U: DeserializeOwned + CustomMsg, +{ + let serialized_msg = to_vec(&msg).expect("Testing error: Could not serialize request message"); + call_migrate(instance, &env, &serialized_msg).expect("VM error") +} + +// migrate mimics the call signature of the smart contracts. +// thus it moves env and msg rather than take them as reference. +// this is inefficient here, but only used in test code +pub fn migrate_with_info( + instance: &mut Instance, + env: Env, + msg: M, + migrate_info: MigrateInfo, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + M: Serialize + JsonSchema, + U: DeserializeOwned + CustomMsg, +{ + let serialized_msg = to_vec(&msg).expect("Testing error: Could not serialize request message"); + call_migrate_with_info(instance, &env, &serialized_msg, &migrate_info).expect("VM error") +} + +// sudo mimics the call signature of the smart contracts. +// thus it moves env and msg rather than take them as reference. 
+// this is inefficient here, but only used in test code +pub fn sudo( + instance: &mut Instance, + env: Env, + msg: M, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + M: Serialize + JsonSchema, + U: DeserializeOwned + CustomMsg, +{ + let serialized_msg = to_vec(&msg).expect("Testing error: Could not serialize request message"); + call_sudo(instance, &env, &serialized_msg).expect("VM error") +} + +// reply mimics the call signature of the smart contracts. +// thus it moves env and msg rather than take them as reference. +// this is inefficient here, but only used in test code +pub fn reply( + instance: &mut Instance, + env: Env, + msg: Reply, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + call_reply(instance, &env, &msg).expect("VM error") +} + +// query mimics the call signature of the smart contracts. +// thus it moves env and msg rather than take them as reference. +// this is inefficient here, but only used in test code +pub fn query( + instance: &mut Instance, + env: Env, + msg: M, +) -> ContractResult +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + M: Serialize + JsonSchema, +{ + let serialized_msg = to_vec(&msg).expect("Testing error: Could not serialize request message"); + call_query(instance, &env, &serialized_msg).expect("VM error") +} + +// ibc_channel_open mimics the call signature of the smart contracts. +// thus it moves env and channel rather than take them as reference. 
+// this is inefficient here, but only used in test code +#[cfg(feature = "stargate")] +pub fn ibc_channel_open( + instance: &mut Instance, + env: Env, + msg: IbcChannelOpenMsg, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + call_ibc_channel_open(instance, &env, &msg).expect("VM error") +} + +// ibc_channel_connect mimics the call signature of the smart contracts. +// thus it moves env and channel rather than take them as reference. +// this is inefficient here, but only used in test code +#[cfg(feature = "stargate")] +pub fn ibc_channel_connect( + instance: &mut Instance, + env: Env, + msg: IbcChannelConnectMsg, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + call_ibc_channel_connect(instance, &env, &msg).expect("VM error") +} + +// ibc_channel_close mimics the call signature of the smart contracts. +// thus it moves env and channel rather than take them as reference. +// this is inefficient here, but only used in test code +#[cfg(feature = "stargate")] +pub fn ibc_channel_close( + instance: &mut Instance, + env: Env, + msg: IbcChannelCloseMsg, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + call_ibc_channel_close(instance, &env, &msg).expect("VM error") +} + +// ibc_packet_receive mimics the call signature of the smart contracts. +// thus it moves env and packet rather than take them as reference. 
+// this is inefficient here, but only used in test code +#[cfg(feature = "stargate")] +pub fn ibc_packet_receive( + instance: &mut Instance, + env: Env, + msg: IbcPacketReceiveMsg, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + call_ibc_packet_receive(instance, &env, &msg).expect("VM error") +} + +// ibc_packet_ack mimics the call signature of the smart contracts. +// thus it moves env and acknowledgement rather than take them as reference. +// this is inefficient here, but only used in test code +#[cfg(feature = "stargate")] +pub fn ibc_packet_ack( + instance: &mut Instance, + env: Env, + msg: IbcPacketAckMsg, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + call_ibc_packet_ack(instance, &env, &msg).expect("VM error") +} + +// ibc_packet_timeout mimics the call signature of the smart contracts. +// thus it moves env and packet rather than take them as reference. +// this is inefficient here, but only used in test code +#[cfg(feature = "stargate")] +pub fn ibc_packet_timeout( + instance: &mut Instance, + env: Env, + msg: IbcPacketTimeoutMsg, +) -> ContractResult> +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, + U: DeserializeOwned + CustomMsg, +{ + call_ibc_packet_timeout(instance, &env, &msg).expect("VM error") +} diff --git a/vm/src/testing/instance.rs b/vm/src/testing/instance.rs new file mode 100644 index 000000000..6701cab25 --- /dev/null +++ b/vm/src/testing/instance.rs @@ -0,0 +1,206 @@ +//! This file has some helpers for integration tests. +//! They should be imported via full path to ensure there is no confusion +//! 
use cosmwasm_vm::testing::X +use cosmwasm_std::Coin; +use std::collections::HashSet; + +use crate::capabilities::capabilities_from_csv; +use crate::compatibility::check_wasm; +use crate::instance::{Instance, InstanceOptions}; +use crate::internals::Logger; +use crate::size::Size; +use crate::{Backend, BackendApi, Querier, Storage, WasmLimits}; + +use super::mock::{MockApi, MOCK_CONTRACT_ADDR}; +use super::querier::MockQuerier; +use super::storage::MockStorage; + +/// This gas limit is used in integration tests and should be high enough to allow a reasonable +/// number of contract executions and queries on one instance. For this reason it is significantly +/// higher than the limit for a single execution that we have in the production setup. +const DEFAULT_GAS_LIMIT: u64 = 2_000_000_000; // ~2.0ms +const DEFAULT_MEMORY_LIMIT: Option = Some(Size::mebi(16)); + +pub fn mock_instance( + wasm: &[u8], + contract_balance: &[Coin], +) -> Instance { + mock_instance_with_options( + wasm, + MockInstanceOptions { + contract_balance: Some(contract_balance), + ..Default::default() + }, + ) +} + +pub fn mock_instance_with_failing_api( + wasm: &[u8], + contract_balance: &[Coin], + backend_error: &'static str, +) -> Instance { + mock_instance_with_options( + wasm, + MockInstanceOptions { + contract_balance: Some(contract_balance), + backend_error: Some(backend_error), + ..Default::default() + }, + ) +} + +pub fn mock_instance_with_balances( + wasm: &[u8], + balances: &[(&str, &[Coin])], +) -> Instance { + mock_instance_with_options( + wasm, + MockInstanceOptions { + balances, + ..Default::default() + }, + ) +} + +/// Creates an instance from the given Wasm bytecode. +/// The gas limit is measured in [CosmWasm gas](https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md). 
+pub fn mock_instance_with_gas_limit( + wasm: &[u8], + gas_limit: u64, +) -> Instance { + mock_instance_with_options( + wasm, + MockInstanceOptions { + gas_limit, + ..Default::default() + }, + ) +} + +#[derive(Debug)] +pub struct MockInstanceOptions<'a> { + // dependencies + pub balances: &'a [(&'a str, &'a [Coin])], + /// This option is merged into balances and might override an existing value + pub contract_balance: Option<&'a [Coin]>, + /// When set, all calls to the API fail with BackendError::Unknown containing this message + pub backend_error: Option<&'static str>, + + // instance + pub available_capabilities: HashSet, + /// Gas limit measured in [CosmWasm gas](https://github.com/CosmWasm/cosmwasm/blob/main/docs/GAS.md). + pub gas_limit: u64, + /// Memory limit in bytes. Use a value that is divisible by the Wasm page size 65536, e.g. full MiBs. + pub memory_limit: Option, +} + +impl MockInstanceOptions<'_> { + fn default_capabilities() -> HashSet { + #[allow(unused_mut)] + let mut out = capabilities_from_csv( + "iterator,staking,cosmwasm_1_1,cosmwasm_1_2,cosmwasm_1_3,cosmwasm_1_4,cosmwasm_2_0,cosmwasm_2_1,cosmwasm_2_2", + ); + #[cfg(feature = "stargate")] + out.insert("stargate".to_string()); + out + } +} + +impl Default for MockInstanceOptions<'_> { + fn default() -> Self { + Self { + // dependencies + balances: Default::default(), + contract_balance: Default::default(), + backend_error: None, + + // instance + available_capabilities: Self::default_capabilities(), + gas_limit: DEFAULT_GAS_LIMIT, + memory_limit: DEFAULT_MEMORY_LIMIT, + } + } +} + +pub fn mock_instance_with_options( + wasm: &[u8], + options: MockInstanceOptions, +) -> Instance { + check_wasm( + wasm, + &options.available_capabilities, + &WasmLimits::default(), + Logger::Off, + ) + .unwrap(); + let contract_address = MOCK_CONTRACT_ADDR; + + // merge balances + let mut balances = options.balances.to_vec(); + if let Some(contract_balance) = options.contract_balance { + // Remove old entry if 
exists + if let Some(pos) = balances.iter().position(|item| item.0 == contract_address) { + balances.remove(pos); + } + balances.push((contract_address, contract_balance)); + } + + let api = if let Some(backend_error) = options.backend_error { + MockApi::new_failing(backend_error) + } else { + MockApi::default() + }; + + let backend = Backend { + api, + storage: MockStorage::default(), + querier: MockQuerier::new(&balances), + }; + let memory_limit = options.memory_limit; + let options = InstanceOptions { + gas_limit: options.gas_limit, + }; + Instance::from_code(wasm, backend, options, memory_limit).unwrap() +} + +/// Creates InstanceOptions for testing +pub fn mock_instance_options() -> (InstanceOptions, Option) { + ( + InstanceOptions { + gas_limit: DEFAULT_GAS_LIMIT, + }, + DEFAULT_MEMORY_LIMIT, + ) +} + +/// Runs a series of IO tests, hammering especially on allocate and deallocate. +/// This could be especially useful when run with some kind of leak detector. +pub fn test_io(instance: &mut Instance) +where + A: BackendApi + 'static, + S: Storage + 'static, + Q: Querier + 'static, +{ + let sizes: Vec = vec![0, 1, 3, 10, 200, 2000, 5 * 1024]; + let bytes: Vec = vec![0x00, 0xA5, 0xFF]; + + for size in sizes.into_iter() { + for byte in bytes.iter() { + let original = vec![*byte; size]; + let wasm_ptr = instance + .allocate(original.len()) + .expect("Could not allocate memory"); + instance + .write_memory(wasm_ptr, &original) + .expect("Could not write data"); + let wasm_data = instance.read_memory(wasm_ptr, size).expect("error reading"); + assert_eq!( + original, wasm_data, + "failed for size {size}; expected: {original:?}; actual: {wasm_data:?}" + ); + instance + .deallocate(wasm_ptr) + .expect("Could not deallocate memory"); + } + } +} diff --git a/vm/src/testing/mock.rs b/vm/src/testing/mock.rs new file mode 100644 index 000000000..b52901f7b --- /dev/null +++ b/vm/src/testing/mock.rs @@ -0,0 +1,420 @@ +use bech32::primitives::decode::CheckedHrpstring; +use 
bech32::{encode, Bech32, Hrp}; +use cosmwasm_std::{ + Addr, BlockInfo, Coin, ContractInfo, Env, MessageInfo, Timestamp, TransactionInfo, +}; +use sha2::{Digest, Sha256}; + +use super::querier::MockQuerier; +use super::storage::MockStorage; +use crate::backend::unwrap_or_return_with_gas; +use crate::{Backend, BackendApi, BackendError, BackendResult, GasInfo}; + +pub const MOCK_CONTRACT_ADDR: &str = + "cosmwasm1jpev2csrppg792t22rn8z8uew8h3sjcpglcd0qv9g8gj8ky922tscp8avs"; + +/// Default gas multiplier in wasmd. +/// See https://github.com/CosmWasm/wasmd/blob/v0.51.0/x/wasm/types/gas_register.go#L34 +const WASMD_GAS_MULTIPLIER: u64 = 140_000; +/// See https://github.com/CosmWasm/wasmd/blob/v0.51.0/x/wasm/keeper/api.go#L27 +const GAS_COST_HUMANIZE: u64 = 4 * WASMD_GAS_MULTIPLIER; +/// See https://github.com/CosmWasm/wasmd/blob/v0.51.0/x/wasm/keeper/api.go#L28 +const GAS_COST_CANONICALIZE: u64 = 5 * WASMD_GAS_MULTIPLIER; + +/// Default prefix used when creating Bech32 encoded address. +const BECH32_PREFIX: &str = "cosmwasm"; + +/// All external requirements that can be injected for unit tests. +/// It sets the given balance for the contract itself, nothing else +pub fn mock_backend(contract_balance: &[Coin]) -> Backend { + Backend { + api: MockApi::default(), + storage: MockStorage::default(), + querier: MockQuerier::new(&[(MOCK_CONTRACT_ADDR, contract_balance)]), + } +} + +/// Initializes the querier along with the mock_dependencies. +/// Sets all balances provided (you must explicitly set contract balance if desired) +pub fn mock_backend_with_balances( + balances: &[(&str, &[Coin])], +) -> Backend { + Backend { + api: MockApi::default(), + storage: MockStorage::default(), + querier: MockQuerier::new(balances), + } +} + +/// Zero-pads all human addresses to make them fit the canonical_length and +/// trims off zeros for the reverse operation. +/// This is not really smart, but allows us to see a difference (and consistent length for canonical addresses). 
+#[derive(Copy, Clone)] +pub struct MockApi(MockApiImpl); + +#[derive(Copy, Clone)] +enum MockApiImpl { + /// With this variant, all calls to the API fail with BackendError::Unknown + /// containing the given message + Error(&'static str), + /// This variant implements Bech32 addresses. + Bech32 { + /// Prefix used for creating addresses in Bech32 encoding. + bech32_prefix: &'static str, + }, +} + +impl MockApi { + pub fn new_failing(backend_error: &'static str) -> Self { + Self(MockApiImpl::Error(backend_error)) + } + + /// Returns [MockApi] with Bech32 prefix set to provided value. + /// + /// Bech32 prefix must not be empty. + /// + /// # Example + /// + /// ``` + /// # use cosmwasm_std::Addr; + /// # use cosmwasm_std::testing::MockApi; + /// # + /// let mock_api = MockApi::default().with_prefix("juno"); + /// let addr = mock_api.addr_make("creator"); + /// + /// assert_eq!(addr.as_str(), "juno1h34lmpywh4upnjdg90cjf4j70aee6z8qqfspugamjp42e4q28kqsksmtyp"); + /// ``` + pub fn with_prefix(self, prefix: &'static str) -> Self { + Self(MockApiImpl::Bech32 { + bech32_prefix: prefix, + }) + } + + /// Returns an address built from provided input string. + /// + /// # Example + /// + /// ``` + /// # use cosmwasm_std::Addr; + /// # use cosmwasm_std::testing::MockApi; + /// # + /// let mock_api = MockApi::default(); + /// let addr = mock_api.addr_make("creator"); + /// + /// assert_eq!(addr.as_str(), "cosmwasm1h34lmpywh4upnjdg90cjf4j70aee6z8qqfspugamjp42e4q28kqs8s7vcp"); + /// ``` + /// + /// # Panics + /// + /// This function panics when generating a valid address is not possible, + /// especially when Bech32 prefix set in function [with_prefix](Self::with_prefix) is empty. 
+ /// + pub fn addr_make(&self, input: &str) -> String { + // handle error case + let bech32_prefix = match self.0 { + MockApiImpl::Error(e) => panic!("Generating address failed: {e}"), + MockApiImpl::Bech32 { bech32_prefix } => bech32_prefix, + }; + + let digest = Sha256::digest(input); + let bech32_prefix = Hrp::parse(bech32_prefix).expect("Invalid prefix"); + match encode::(bech32_prefix, &digest) { + Ok(address) => address, + Err(reason) => panic!("Generating address failed with reason: {reason}"), + } + } +} + +impl Default for MockApi { + fn default() -> Self { + Self(MockApiImpl::Bech32 { + bech32_prefix: BECH32_PREFIX, + }) + } +} + +impl BackendApi for MockApi { + fn addr_validate(&self, input: &str) -> BackendResult<()> { + let mut gas_total = GasInfo { + cost: 0, + externally_used: 0, + }; + + let (canonicalize_res, gas_info) = self.addr_canonicalize(input); + gas_total += gas_info; + let canonical = unwrap_or_return_with_gas!(canonicalize_res, gas_total); + + let (humanize_res, gas_info) = self.addr_humanize(&canonical); + gas_total += gas_info; + let normalized = unwrap_or_return_with_gas!(humanize_res, gas_total); + if input != normalized.as_str() { + return ( + Err(BackendError::user_err( + "Invalid input: address not normalized", + )), + gas_total, + ); + } + (Ok(()), gas_total) + } + + fn addr_canonicalize(&self, input: &str) -> BackendResult> { + let gas_total = GasInfo::with_cost(GAS_COST_CANONICALIZE); + + // handle error case + let bech32_prefix = match self.0 { + MockApiImpl::Error(e) => return (Err(BackendError::unknown(e)), gas_total), + MockApiImpl::Bech32 { bech32_prefix } => bech32_prefix, + }; + + let hrp_str = unwrap_or_return_with_gas!( + CheckedHrpstring::new::(input) + .map_err(|_| BackendError::user_err("Error decoding bech32")), + gas_total + ); + + if !hrp_str + .hrp() + .as_bytes() + .eq_ignore_ascii_case(bech32_prefix.as_bytes()) + { + return ( + Err(BackendError::user_err("Wrong bech32 prefix")), + gas_total, + ); + } + + let 
bytes: Vec = hrp_str.byte_iter().collect(); + unwrap_or_return_with_gas!(validate_length(&bytes), gas_total); + (Ok(bytes), gas_total) + } + + fn addr_humanize(&self, canonical: &[u8]) -> BackendResult { + let gas_total = GasInfo::with_cost(GAS_COST_HUMANIZE); + + // handle error case + let bech32_prefix = match self.0 { + MockApiImpl::Error(e) => return (Err(BackendError::unknown(e)), gas_total), + MockApiImpl::Bech32 { bech32_prefix } => bech32_prefix, + }; + + unwrap_or_return_with_gas!(validate_length(canonical), gas_total); + let bech32_prefix = unwrap_or_return_with_gas!( + Hrp::parse(bech32_prefix).map_err(|_| BackendError::user_err("Invalid bech32 prefix")), + gas_total + ); + let result = encode::(bech32_prefix, canonical) + .map_err(|_| BackendError::user_err("Invalid data to be encoded to bech32")); + + (result, gas_total) + } +} + +/// Does basic validation of the number of bytes in a canonical address +fn validate_length(bytes: &[u8]) -> Result<(), BackendError> { + match bytes.len() { + 1..=255 => Ok(()), + _ => Err(BackendError::user_err("Invalid canonical address length")), + } +} + +/// Returns a default environment with height, time, chain_id, and contract address. +/// You can submit as is to most contracts, or modify height/time if you want to +/// test for expiration. +/// +/// This is intended for use in test code only. +/// +/// The contract address uses the same bech32 prefix as [`MockApi`](crate::testing::MockApi). While +/// this is good for the majority of users, you might need to create your `Env`s +/// differently if you need a valid address using a different prefix. 
+///
+/// ## Examples
+///
+/// Create an env:
+///
+/// ```
+/// # use cosmwasm_std::{Addr, BlockInfo, ContractInfo, Env, Timestamp, TransactionInfo};
+/// use cosmwasm_vm::testing::mock_env;
+///
+/// let env = mock_env();
+/// assert_eq!(env, Env {
+///     block: BlockInfo {
+///         height: 12_345,
+///         time: Timestamp::from_nanos(1_571_797_419_879_305_533),
+///         chain_id: "cosmos-testnet-14002".to_string(),
+///     },
+///     transaction: Some(TransactionInfo { index: 3 }),
+///     contract: ContractInfo {
+///         address: Addr::unchecked("cosmwasm1jpev2csrppg792t22rn8z8uew8h3sjcpglcd0qv9g8gj8ky922tscp8avs"),
+///     },
+/// });
+/// ```
+///
+/// Mutate and reuse environment:
+///
+/// ```
+/// # use cosmwasm_std::{Addr, BlockInfo, ContractInfo, Env, Timestamp, TransactionInfo};
+/// use cosmwasm_vm::testing::mock_env;
+///
+/// let env1 = mock_env();
+///
+/// // First test with `env1`
+///
+/// let mut env2 = env1.clone();
+/// env2.block.height += 1;
+/// env2.block.time = env1.block.time.plus_seconds(6);
+///
+/// // `env2` is one block and 6 seconds later
+///
+/// let mut env3 = env2.clone();
+/// env3.block.height += 1;
+/// env3.block.time = env2.block.time.plus_nanos(5_500_000_000);
+///
+/// // `env3` is one block and 5.5 seconds later
+/// ```
+pub fn mock_env() -> Env {
+    // With the default "cosmwasm" bech32 prefix this derivation yields
+    // MOCK_CONTRACT_ADDR — asserted by the mock_env_matches_mock_contract_addr
+    // test below.
+    let contract_addr = MockApi::default().addr_make("cosmos2contract");
+    Env {
+        block: BlockInfo {
+            height: 12_345,
+            time: Timestamp::from_nanos(1_571_797_419_879_305_533),
+            chain_id: "cosmos-testnet-14002".to_string(),
+        },
+        transaction: Some(TransactionInfo { index: 3 }),
+        contract: ContractInfo {
+            address: Addr::unchecked(contract_addr),
+        },
+    }
+}
+
+/// Just set sender and funds for the message.
+/// This is intended for use in test code only.
+pub fn mock_info(sender: &str, funds: &[Coin]) -> MessageInfo { + MessageInfo { + sender: Addr::unchecked(sender), + funds: funds.to_vec(), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use cosmwasm_std::coins; + + #[test] + fn mock_env_matches_mock_contract_addr() { + let contract_address = mock_env().contract.address; + assert_eq!(contract_address, Addr::unchecked(MOCK_CONTRACT_ADDR)); + } + + #[test] + fn mock_info_works() { + let info = mock_info("my name", &coins(100, "atom")); + assert_eq!( + info, + MessageInfo { + sender: Addr::unchecked("my name"), + funds: vec![Coin { + amount: 100u128.into(), + denom: "atom".into(), + }] + } + ); + } + + #[test] + fn addr_canonicalize_works() { + let api = MockApi::default().with_prefix("osmo"); + + api.addr_canonicalize("osmo186kh7c0k0gh4ww0wh4jqc4yhzu7n7dhswe845d") + .0 + .unwrap(); + + // is case insensitive + let data1 = api + .addr_canonicalize("osmo186kh7c0k0gh4ww0wh4jqc4yhzu7n7dhswe845d") + .0 + .unwrap(); + let data2 = api + .addr_canonicalize("OSMO186KH7C0K0GH4WW0WH4JQC4YHZU7N7DHSWE845D") + .0 + .unwrap(); + assert_eq!(data1, data2); + } + + #[test] + fn canonicalize_and_humanize_restores_original() { + let api = MockApi::default().with_prefix("juno"); + + // simple + let original = api.addr_make("shorty"); + let canonical = api.addr_canonicalize(&original).0.unwrap(); + let (recovered, _gas_cost) = api.addr_humanize(&canonical); + assert_eq!(recovered.unwrap(), original); + + // normalizes input + let original = "JUNO1MEPRU9FUQ4E65856ARD6068MFSFRWPGEMD0C3R"; + let canonical = api.addr_canonicalize(original).0.unwrap(); + let recovered = api.addr_humanize(&canonical).0.unwrap(); + assert_eq!(recovered, original.to_lowercase()); + + // Long input (Juno contract address) + let original = + String::from("juno1v82su97skv6ucfqvuvswe0t5fph7pfsrtraxf0x33d8ylj5qnrysdvkc95"); + let canonical = api.addr_canonicalize(&original).0.unwrap(); + let recovered = api.addr_humanize(&canonical).0.unwrap(); + 
assert_eq!(recovered, original); + } + + #[test] + fn addr_humanize_input_length() { + let api = MockApi::default(); + let input = vec![61; 256]; // too long + let (result, _gas_info) = api.addr_humanize(&input); + match result.unwrap_err() { + BackendError::UserErr { .. } => {} + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + fn addr_canonicalize_min_input_length() { + let api = MockApi::default(); + + // empty address should fail + let empty = "cosmwasm1pj90vm"; + assert!(matches!(api + .addr_canonicalize(empty) + .0 + .unwrap_err(), + BackendError::UserErr { msg } if msg.contains("address length"))); + } + + #[test] + fn addr_canonicalize_max_input_length() { + let api = MockApi::default(); + + let too_long = "cosmwasm1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqehqqkz"; + + assert!(matches!(api + .addr_canonicalize(too_long) + .0 + .unwrap_err(), + BackendError::UserErr { msg } if msg.contains("address length"))); + } + + #[test] + fn colon_in_prefix_is_valid() { + let mock_api = MockApi::default().with_prefix("did:com:"); + let bytes = mock_api + .addr_canonicalize("did:com:1jkf0kmeyefvyzpwf56m7sne2000ay53r6upttu") + .0 + .unwrap(); + let humanized = mock_api.addr_humanize(&bytes).0.unwrap(); + + assert_eq!( + humanized.as_str(), + "did:com:1jkf0kmeyefvyzpwf56m7sne2000ay53r6upttu" + ); + } +} diff --git a/vm/src/testing/mod.rs b/vm/src/testing/mod.rs new file mode 100644 index 000000000..495bcb37c --- /dev/null +++ b/vm/src/testing/mod.rs @@ -0,0 +1,24 @@ +// The external interface is `use cosmwasm_vm::testing::X` for all integration testing symbols, no matter where they live 
internally. + +mod calls; +mod instance; +mod mock; +mod querier; +mod storage; + +pub use calls::{execute, instantiate, migrate, migrate_with_info, query, reply, sudo}; +#[cfg(feature = "stargate")] +pub use calls::{ + ibc_channel_close, ibc_channel_connect, ibc_channel_open, ibc_packet_ack, ibc_packet_receive, + ibc_packet_timeout, +}; +pub use instance::{ + mock_instance, mock_instance_options, mock_instance_with_balances, + mock_instance_with_failing_api, mock_instance_with_gas_limit, mock_instance_with_options, + test_io, MockInstanceOptions, +}; +pub use mock::{ + mock_backend, mock_backend_with_balances, mock_env, mock_info, MockApi, MOCK_CONTRACT_ADDR, +}; +pub use querier::MockQuerier; +pub use storage::MockStorage; diff --git a/vm/src/testing/querier.rs b/vm/src/testing/querier.rs new file mode 100644 index 000000000..a8efcb0d4 --- /dev/null +++ b/vm/src/testing/querier.rs @@ -0,0 +1,236 @@ +use serde::de::DeserializeOwned; + +use cosmwasm_std::testing::{MockQuerier as StdMockQuerier, MockQuerierCustomHandlerResult}; +use cosmwasm_std::{ + to_json_binary, to_json_vec, Binary, Coin, ContractResult, CustomQuery, Empty, Querier as _, + QueryRequest, SystemError, SystemResult, +}; + +use crate::{BackendError, BackendResult, GasInfo, Querier}; + +const GAS_COST_QUERY_FLAT: u64 = 100_000; +/// Gas per request byte +const GAS_COST_QUERY_REQUEST_MULTIPLIER: u64 = 0; +/// Gas per response byte +const GAS_COST_QUERY_RESPONSE_MULTIPLIER: u64 = 100; + +/// MockQuerier holds an immutable table of bank balances +pub struct MockQuerier { + querier: StdMockQuerier, +} + +impl MockQuerier { + pub fn new(balances: &[(&str, &[Coin])]) -> Self { + MockQuerier { + querier: StdMockQuerier::new(balances), + } + } + + /// Set a new balance for the given address and return the old balance + pub fn update_balance( + &mut self, + addr: impl Into, + balance: Vec, + ) -> Option> { + self.querier.bank.update_balance(addr, balance) + } + + #[cfg(feature = "staking")] + pub fn 
update_staking( + &mut self, + denom: &str, + validators: &[cosmwasm_std::Validator], + delegations: &[cosmwasm_std::FullDelegation], + ) { + self.querier.staking.update(denom, validators, delegations); + } + + pub fn update_wasm(&mut self, handler: WH) + where + WH: Fn(&cosmwasm_std::WasmQuery) -> cosmwasm_std::QuerierResult + 'static, + { + self.querier.update_wasm(handler) + } + + pub fn with_custom_handler(mut self, handler: CH) -> Self + where + CH: Fn(&C) -> MockQuerierCustomHandlerResult + 'static, + { + self.querier = self.querier.with_custom_handler(handler); + self + } +} + +impl Querier for MockQuerier { + fn query_raw( + &self, + bin_request: &[u8], + gas_limit: u64, + ) -> BackendResult>> { + let response = self.querier.raw_query(bin_request); + let gas_info = GasInfo::with_externally_used( + GAS_COST_QUERY_FLAT + + (GAS_COST_QUERY_REQUEST_MULTIPLIER * (bin_request.len() as u64)) + + (GAS_COST_QUERY_RESPONSE_MULTIPLIER + * (to_json_binary(&response).unwrap().len() as u64)), + ); + + // In a production implementation, this should stop the query execution in the middle of the computation. + // Thus no query response is returned to the caller. 
+ if gas_info.externally_used > gas_limit { + return (Err(BackendError::out_of_gas()), gas_info); + } + + // We don't use FFI in the mock implementation, so BackendResult is always Ok() regardless of error on other levels + (Ok(response), gas_info) + } +} + +impl MockQuerier { + pub fn query( + &self, + request: &QueryRequest, + gas_limit: u64, + ) -> BackendResult>> { + // encode the request, then call raw_query + let request_binary = match to_json_vec(request) { + Ok(raw) => raw, + Err(err) => { + let gas_info = GasInfo::with_externally_used(err.to_string().len() as u64); + return ( + Ok(SystemResult::Err(SystemError::InvalidRequest { + error: format!("Serializing query request: {err}"), + request: b"N/A".into(), + })), + gas_info, + ); + } + }; + self.query_raw(&request_binary, gas_limit) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use cosmwasm_std::{coin, from_json, AllBalanceResponse, BalanceResponse, BankQuery}; + + const DEFAULT_QUERY_GAS_LIMIT: u64 = 300_000; + + #[test] + fn query_raw_fails_when_out_of_gas() { + let addr = String::from("foobar"); + let balance = vec![coin(123, "ELF"), coin(777, "FLY")]; + let querier: MockQuerier = MockQuerier::new(&[(&addr, &balance)]); + + let gas_limit = 20; + let (result, _gas_info) = querier.query_raw(b"broken request", gas_limit); + match result.unwrap_err() { + BackendError::OutOfGas {} => {} + err => panic!("Unexpected error: {err:?}"), + } + } + + #[test] + #[allow(deprecated)] + fn bank_querier_all_balances() { + let addr = String::from("foobar"); + let balance = vec![coin(123, "ELF"), coin(777, "FLY")]; + let querier = MockQuerier::new(&[(&addr, &balance)]); + + // all + let all = querier + .query::( + &BankQuery::AllBalances { address: addr }.into(), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let res: AllBalanceResponse = from_json(all).unwrap(); + assert_eq!(&res.amount, &balance); + } + + #[test] + fn bank_querier_one_balance() { + let addr = String::from("foobar"); + 
let balance = vec![coin(123, "ELF"), coin(777, "FLY")]; + let querier = MockQuerier::new(&[(&addr, &balance)]); + + // one match + let fly = querier + .query::( + &BankQuery::Balance { + address: addr.clone(), + denom: "FLY".to_string(), + } + .into(), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let res: BalanceResponse = from_json(fly).unwrap(); + assert_eq!(res.amount, coin(777, "FLY")); + + // missing denom + let miss = querier + .query::( + &BankQuery::Balance { + address: addr, + denom: "MISS".to_string(), + } + .into(), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let res: BalanceResponse = from_json(miss).unwrap(); + assert_eq!(res.amount, coin(0, "MISS")); + } + + #[test] + #[allow(deprecated)] + fn bank_querier_missing_account() { + let addr = String::from("foobar"); + let balance = vec![coin(123, "ELF"), coin(777, "FLY")]; + let querier = MockQuerier::new(&[(&addr, &balance)]); + + // all balances on empty account is empty vec + let all = querier + .query::( + &BankQuery::AllBalances { + address: String::from("elsewhere"), + } + .into(), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let res: AllBalanceResponse = from_json(all).unwrap(); + assert_eq!(res.amount, vec![]); + + // any denom on balances on empty account is empty coin + let miss = querier + .query::( + &BankQuery::Balance { + address: String::from("elsewhere"), + denom: "ELF".to_string(), + } + .into(), + DEFAULT_QUERY_GAS_LIMIT, + ) + .0 + .unwrap() + .unwrap() + .unwrap(); + let res: BalanceResponse = from_json(miss).unwrap(); + assert_eq!(res.amount, coin(0, "ELF")); + } +} diff --git a/vm/src/testing/storage.rs b/vm/src/testing/storage.rs new file mode 100644 index 000000000..8c58b8a27 --- /dev/null +++ b/vm/src/testing/storage.rs @@ -0,0 +1,342 @@ +use std::collections::BTreeMap; +#[cfg(feature = "iterator")] +use std::collections::HashMap; +#[cfg(feature = "iterator")] +use std::ops::{Bound, 
RangeBounds}; + +#[cfg(feature = "iterator")] +use cosmwasm_std::{Order, Record}; + +#[cfg(feature = "iterator")] +use crate::BackendError; +use crate::{BackendResult, GasInfo, Storage}; + +#[cfg(feature = "iterator")] +const GAS_COST_LAST_ITERATION: u64 = 37; + +#[cfg(feature = "iterator")] +const GAS_COST_RANGE: u64 = 11; + +#[cfg(feature = "iterator")] +#[derive(Default, Debug)] +struct Iter { + data: Vec, + position: usize, +} + +#[derive(Default, Debug)] +pub struct MockStorage { + data: BTreeMap, Vec>, + #[cfg(feature = "iterator")] + iterators: HashMap, +} + +impl MockStorage { + pub fn new() -> Self { + MockStorage::default() + } + + #[cfg(feature = "iterator")] + pub fn all(&mut self, iterator_id: u32) -> BackendResult> { + let mut out: Vec = Vec::new(); + let mut total = GasInfo::free(); + loop { + let (result, info) = self.next(iterator_id); + total += info; + match result { + Err(err) => return (Err(err), total), + Ok(ok) => { + if let Some(v) = ok { + out.push(v); + } else { + break; + } + } + } + } + (Ok(out), total) + } +} + +impl Storage for MockStorage { + fn get(&self, key: &[u8]) -> BackendResult>> { + let gas_info = GasInfo::with_externally_used(key.len() as u64); + (Ok(self.data.get(key).cloned()), gas_info) + } + + #[cfg(feature = "iterator")] + fn scan( + &mut self, + start: Option<&[u8]>, + end: Option<&[u8]>, + order: Order, + ) -> BackendResult { + let gas_info = GasInfo::with_externally_used(GAS_COST_RANGE); + let bounds = range_bounds(start, end); + + let values: Vec = match (bounds.start_bound(), bounds.end_bound()) { + // BTreeMap.range panics if range is start > end. + // However, this cases represent just empty range and we treat it as such. 
+ (Bound::Included(start), Bound::Excluded(end)) if start > end => Vec::new(), + _ => match order { + Order::Ascending => self.data.range(bounds).map(clone_item).collect(), + Order::Descending => self.data.range(bounds).rev().map(clone_item).collect(), + }, + }; + + let last_id: u32 = self + .iterators + .len() + .try_into() + .expect("Found more iterator IDs than supported"); + let new_id = last_id + 1; + let iter = Iter { + data: values, + position: 0, + }; + self.iterators.insert(new_id, iter); + + (Ok(new_id), gas_info) + } + + #[cfg(feature = "iterator")] + fn next(&mut self, iterator_id: u32) -> BackendResult> { + let iterator = match self.iterators.get_mut(&iterator_id) { + Some(i) => i, + None => { + return ( + Err(BackendError::iterator_does_not_exist(iterator_id)), + GasInfo::free(), + ) + } + }; + + let (value, gas_info): (Option, GasInfo) = + if iterator.data.len() > iterator.position { + let item = iterator.data[iterator.position].clone(); + iterator.position += 1; + let gas_cost = (item.0.len() + item.1.len()) as u64; + (Some(item), GasInfo::with_cost(gas_cost)) + } else { + (None, GasInfo::with_externally_used(GAS_COST_LAST_ITERATION)) + }; + + (Ok(value), gas_info) + } + + fn set(&mut self, key: &[u8], value: &[u8]) -> BackendResult<()> { + self.data.insert(key.to_vec(), value.to_vec()); + let gas_info = GasInfo::with_externally_used((key.len() + value.len()) as u64); + (Ok(()), gas_info) + } + + fn remove(&mut self, key: &[u8]) -> BackendResult<()> { + self.data.remove(key); + let gas_info = GasInfo::with_externally_used(key.len() as u64); + (Ok(()), gas_info) + } +} + +#[cfg(feature = "iterator")] +fn range_bounds(start: Option<&[u8]>, end: Option<&[u8]>) -> impl RangeBounds> { + ( + start.map_or(Bound::Unbounded, |x| Bound::Included(x.to_vec())), + end.map_or(Bound::Unbounded, |x| Bound::Excluded(x.to_vec())), + ) +} + +#[cfg(feature = "iterator")] +/// The BTreeMap specific key-value pair reference type, as returned by BTreeMap, Vec>::range. 
+/// This is internal as it can change any time if the map implementation is swapped out. +type BTreeMapRecordRef<'a> = (&'a Vec, &'a Vec); + +#[cfg(feature = "iterator")] +fn clone_item(item_ref: BTreeMapRecordRef) -> Record { + let (key, value) = item_ref; + (key.clone(), value.clone()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn get_and_set() { + let mut store = MockStorage::new(); + assert_eq!(None, store.get(b"foo").0.unwrap()); + store.set(b"foo", b"bar").0.unwrap(); + assert_eq!(Some(b"bar".to_vec()), store.get(b"foo").0.unwrap()); + assert_eq!(None, store.get(b"food").0.unwrap()); + } + + #[test] + fn delete() { + let mut store = MockStorage::new(); + store.set(b"foo", b"bar").0.unwrap(); + store.set(b"food", b"bank").0.unwrap(); + store.remove(b"foo").0.unwrap(); + + assert_eq!(None, store.get(b"foo").0.unwrap()); + assert_eq!(Some(b"bank".to_vec()), store.get(b"food").0.unwrap()); + } + + #[test] + #[cfg(feature = "iterator")] + fn iterator() { + let mut store = MockStorage::new(); + store.set(b"foo", b"bar").0.expect("error setting value"); + + // ensure we had previously set "foo" = "bar" + assert_eq!(store.get(b"foo").0.unwrap(), Some(b"bar".to_vec())); + let iter_id = store.scan(None, None, Order::Ascending).0.unwrap(); + assert_eq!(store.all(iter_id).0.unwrap().len(), 1); + + // setup - add some data, and delete part of it as well + store.set(b"ant", b"hill").0.expect("error setting value"); + store.set(b"ze", b"bra").0.expect("error setting value"); + + // noise that should be ignored + store.set(b"bye", b"bye").0.expect("error setting value"); + store.remove(b"bye").0.expect("error removing key"); + + // unbounded + { + let iter_id = store.scan(None, None, Order::Ascending).0.unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!( + elements, + vec![ + (b"ant".to_vec(), b"hill".to_vec()), + (b"foo".to_vec(), b"bar".to_vec()), + (b"ze".to_vec(), b"bra".to_vec()), + ] + ); + } + + // unbounded (descending) + { + 
let iter_id = store.scan(None, None, Order::Descending).0.unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!( + elements, + vec![ + (b"ze".to_vec(), b"bra".to_vec()), + (b"foo".to_vec(), b"bar".to_vec()), + (b"ant".to_vec(), b"hill".to_vec()), + ] + ); + } + + // bounded + { + let iter_id = store + .scan(Some(b"f"), Some(b"n"), Order::Ascending) + .0 + .unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!(elements, vec![(b"foo".to_vec(), b"bar".to_vec())]); + } + + // bounded (descending) + { + let iter_id = store + .scan(Some(b"air"), Some(b"loop"), Order::Descending) + .0 + .unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!( + elements, + vec![ + (b"foo".to_vec(), b"bar".to_vec()), + (b"ant".to_vec(), b"hill".to_vec()), + ] + ); + } + + // bounded empty [a, a) + { + let iter_id = store + .scan(Some(b"foo"), Some(b"foo"), Order::Ascending) + .0 + .unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!(elements, vec![]); + } + + // bounded empty [a, a) (descending) + { + let iter_id = store + .scan(Some(b"foo"), Some(b"foo"), Order::Descending) + .0 + .unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!(elements, vec![]); + } + + // bounded empty [a, b) with b < a + { + let iter_id = store + .scan(Some(b"z"), Some(b"a"), Order::Ascending) + .0 + .unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!(elements, vec![]); + } + + // bounded empty [a, b) with b < a (descending) + { + let iter_id = store + .scan(Some(b"z"), Some(b"a"), Order::Descending) + .0 + .unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!(elements, vec![]); + } + + // right unbounded + { + let iter_id = store.scan(Some(b"f"), None, Order::Ascending).0.unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!( + elements, + vec![ + (b"foo".to_vec(), b"bar".to_vec()), + (b"ze".to_vec(), b"bra".to_vec()), + ] + ); + } + + // right unbounded (descending) + { + 
let iter_id = store.scan(Some(b"f"), None, Order::Descending).0.unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!( + elements, + vec![ + (b"ze".to_vec(), b"bra".to_vec()), + (b"foo".to_vec(), b"bar".to_vec()), + ] + ); + } + + // left unbounded + { + let iter_id = store.scan(None, Some(b"f"), Order::Ascending).0.unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!(elements, vec![(b"ant".to_vec(), b"hill".to_vec()),]); + } + + // left unbounded (descending) + { + let iter_id = store.scan(None, Some(b"no"), Order::Descending).0.unwrap(); + let elements = store.all(iter_id).0.unwrap(); + assert_eq!( + elements, + vec![ + (b"foo".to_vec(), b"bar".to_vec()), + (b"ant".to_vec(), b"hill".to_vec()), + ] + ); + } + } +} diff --git a/vm/src/wasm_backend/compile.rs b/vm/src/wasm_backend/compile.rs new file mode 100644 index 000000000..d06a1ae05 --- /dev/null +++ b/vm/src/wasm_backend/compile.rs @@ -0,0 +1,23 @@ +use wasmer::{Engine, Module}; + +use crate::errors::VmResult; + +/// Compiles a given Wasm bytecode into a module. 
+pub fn compile(engine: &Engine, code: &[u8]) -> VmResult { + let module = Module::new(&engine, code)?; + Ok(module) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::wasm_backend::make_compiling_engine; + + static CONTRACT: &[u8] = include_bytes!("../../testdata/floaty.wasm"); + + #[test] + fn contract_with_floats_passes_check() { + let engine = make_compiling_engine(None); + assert!(compile(&engine, CONTRACT).is_ok()); + } +} diff --git a/vm/src/wasm_backend/engine.rs b/vm/src/wasm_backend/engine.rs new file mode 100644 index 000000000..6a699b10e --- /dev/null +++ b/vm/src/wasm_backend/engine.rs @@ -0,0 +1,121 @@ +use cosmwasm_vm_derive::hash_function; +use std::sync::Arc; +use wasmer::NativeEngineExt; +use wasmer::{ + sys::BaseTunables, wasmparser::Operator, CompilerConfig, Engine, Pages, Target, WASM_PAGE_SIZE, +}; + +use crate::size::Size; + +use super::gatekeeper::Gatekeeper; +use super::limiting_tunables::LimitingTunables; +use super::metering::{is_accounting, Metering}; + +/// WebAssembly linear memory objects have sizes measured in pages. Each page +/// is 65536 (2^16) bytes. In WebAssembly version 1, a linear memory can have at +/// most 65536 pages, for a total of 2^32 bytes (4 gibibytes). +/// https://github.com/WebAssembly/memory64/blob/master/proposals/memory64/Overview.md +const MAX_WASM_PAGES: u32 = 65536; + +#[hash_function(const_name = "COST_FUNCTION_HASH")] +fn cost(operator: &Operator) -> u64 { + // A flat fee for each operation + // The target is 1 Teragas per second (see GAS.md). + // + // In https://github.com/CosmWasm/cosmwasm/pull/1042 a profiler is developed to + // identify runtime differences between different Wasm operation, but this is not yet + // precise enough to derive insights from it. + // + // Please note that any changes to this function need to be accompanied by a bump of + // `MODULE_SERIALIZATION_VERSION` to avoid cached modules from using different amounts of gas + // compared to newly compiled ones. 
+ const GAS_PER_OPERATION: u64 = 115; + + if is_accounting(operator) { + GAS_PER_OPERATION * 14 + } else { + GAS_PER_OPERATION + } +} + +/// Use Cranelift as the compiler backend if the feature is enabled +pub fn make_compiler_config() -> impl CompilerConfig + Into { + wasmer::Singlepass::new() +} + +/// Creates an engine without a compiler. +/// This is used to run modules compiled before. +pub fn make_runtime_engine(memory_limit: Option) -> Engine { + let mut engine = Engine::headless(); + if let Some(limit) = memory_limit { + let base = BaseTunables::for_target(&Target::default()); + let tunables = LimitingTunables::new(base, limit_to_pages(limit)); + engine.set_tunables(tunables); + } + engine +} + +/// Creates an Engine with a compiler attached. Use this when compiling Wasm to a module. +pub fn make_compiling_engine(memory_limit: Option) -> Engine { + let gas_limit = 0; + let deterministic = Arc::new(Gatekeeper::default()); + let metering = Arc::new(Metering::new(gas_limit, cost)); + + let mut compiler = make_compiler_config(); + compiler.canonicalize_nans(true); + compiler.push_middleware(deterministic); + compiler.push_middleware(metering); + let mut engine: Engine = compiler.into(); + if let Some(limit) = memory_limit { + let base = BaseTunables::for_target(&Target::default()); + let tunables = LimitingTunables::new(base, limit_to_pages(limit)); + engine.set_tunables(tunables); + } + engine +} + +fn limit_to_pages(limit: Size) -> Pages { + // round down to ensure the limit is less than or equal to the config + let limit_in_pages: usize = limit.0 / WASM_PAGE_SIZE; + + let capped = match u32::try_from(limit_in_pages) { + Ok(x) => std::cmp::min(x, MAX_WASM_PAGES), + // The only case where TryFromIntError can happen is when + // limit_in_pages exceeds the u32 range. In this case it is way + // larger than MAX_WASM_PAGES and needs to be capped. 
+ Err(_too_large) => MAX_WASM_PAGES, + }; + Pages(capped) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn cost_works() { + // accounting operator + assert_eq!(cost(&Operator::Br { relative_depth: 3 }), 1610); + assert_eq!(cost(&Operator::Return {}), 1610); + + // anything else + assert_eq!(cost(&Operator::I64Const { value: 7 }), 115); + assert_eq!(cost(&Operator::I64Extend8S {}), 115); + } + + #[test] + fn limit_to_pages_works() { + // rounds down + assert_eq!(limit_to_pages(Size::new(0)), Pages(0)); + assert_eq!(limit_to_pages(Size::new(1)), Pages(0)); + assert_eq!(limit_to_pages(Size::kibi(63)), Pages(0)); + assert_eq!(limit_to_pages(Size::kibi(64)), Pages(1)); + assert_eq!(limit_to_pages(Size::kibi(65)), Pages(1)); + assert_eq!(limit_to_pages(Size::new(u32::MAX as usize)), Pages(65535)); + // caps at 4 GiB + assert_eq!(limit_to_pages(Size::gibi(3)), Pages(49152)); + assert_eq!(limit_to_pages(Size::gibi(4)), Pages(65536)); + assert_eq!(limit_to_pages(Size::gibi(5)), Pages(65536)); + assert_eq!(limit_to_pages(Size::new(usize::MAX)), Pages(65536)); + } +} diff --git a/vm/src/wasm_backend/gatekeeper.rs b/vm/src/wasm_backend/gatekeeper.rs new file mode 100644 index 000000000..ba0433b67 --- /dev/null +++ b/vm/src/wasm_backend/gatekeeper.rs @@ -0,0 +1,822 @@ +use wasmer::wasmparser::Operator; +use wasmer::{ + FunctionMiddleware, LocalFunctionIndex, MiddlewareError, MiddlewareReaderState, + ModuleMiddleware, +}; + +#[derive(Debug, Clone, Copy)] +struct GatekeeperConfig { + /// True iff float operations are allowed. + /// + /// Note: there are float operations in the SIMD block as well and we do not yet handle + /// any combination of `allow_floats` and `allow_feature_simd` properly. + allow_floats: bool, + // + // Standardized features + // + /// True iff operations of the "Bulk memory operations" feature are allowed. + /// See and . + allow_feature_bulk_memory_operations: bool, + /// True iff operations of the "Reference types" feature are allowed. 
+ /// See and . + allow_feature_reference_types: bool, + /// True iff operations of the "Fixed-width SIMD" feature are allowed. + /// See and . + allow_feature_simd: bool, + // + // In-progress proposals + // + /// True iff operations of the "Exception handling" feature are allowed. + /// Note, this feature is not yet standardized! + /// See and . + allow_feature_exception_handling: bool, + /// True iff operations of the "Threads and atomics" feature are allowed. + /// Note, this feature is not yet standardized! + /// See and . + allow_feature_threads: bool, +} + +/// A middleware that ensures only deterministic operations are used (i.e. no floats). +/// It also disallows the use of Wasm features that are not explicitly enabled. +#[derive(Debug)] +#[non_exhaustive] +pub struct Gatekeeper { + config: GatekeeperConfig, +} + +impl Gatekeeper { + /// Creates a new Gatekeeper with a custom config. + /// + /// A custom configuration is potentially dangerous (non-final Wasm proposals, floats in SIMD operation). + /// For this reason, only [`Gatekeeper::default()`] is public. + fn new(config: GatekeeperConfig) -> Self { + Self { config } + } +} + +impl Default for Gatekeeper { + fn default() -> Self { + Self::new(GatekeeperConfig { + allow_floats: true, + allow_feature_bulk_memory_operations: false, + allow_feature_reference_types: false, + allow_feature_simd: false, + allow_feature_exception_handling: false, + allow_feature_threads: false, + }) + } +} + +impl ModuleMiddleware for Gatekeeper { + /// Generates a `FunctionMiddleware` for a given function. 
+ fn generate_function_middleware(&self, _: LocalFunctionIndex) -> Box { + Box::new(FunctionGatekeeper::new(self.config)) + } +} + +#[derive(Debug)] +#[non_exhaustive] +struct FunctionGatekeeper { + config: GatekeeperConfig, +} + +impl FunctionGatekeeper { + fn new(config: GatekeeperConfig) -> Self { + Self { config } + } +} + +/// The name used in errors +const MIDDLEWARE_NAME: &str = "Gatekeeper"; + +impl FunctionMiddleware for FunctionGatekeeper { + fn feed<'a>( + &mut self, + operator: Operator<'a>, + state: &mut MiddlewareReaderState<'a>, + ) -> Result<(), MiddlewareError> { + match operator { + Operator::Unreachable + | Operator::Nop + | Operator::Block { .. } + | Operator::Loop { .. } + | Operator::If { .. } + | Operator::Else + | Operator::End + | Operator::Br { .. } + | Operator::BrIf { .. } + | Operator::BrTable { .. } + | Operator::Return + | Operator::Call { .. } + | Operator::CallIndirect { .. } + | Operator::Drop + | Operator::Select + | Operator::LocalGet { .. } + | Operator::LocalSet { .. } + | Operator::LocalTee { .. } + | Operator::GlobalGet { .. } + | Operator::GlobalSet { .. } + | Operator::I32Load { .. } + | Operator::I64Load { .. } + | Operator::I32Load8S { .. } + | Operator::I32Load8U { .. } + | Operator::I32Load16S { .. } + | Operator::I32Load16U { .. } + | Operator::I64Load8S { .. } + | Operator::I64Load8U { .. } + | Operator::I64Load16S { .. } + | Operator::I64Load16U { .. } + | Operator::I64Load32S { .. } + | Operator::I64Load32U { .. } + | Operator::I32Store { .. } + | Operator::I64Store { .. } + | Operator::I32Store8 { .. } + | Operator::I32Store16 { .. } + | Operator::I64Store8 { .. } + | Operator::I64Store16 { .. } + | Operator::I64Store32 { .. } + | Operator::MemorySize { .. } + | Operator::MemoryGrow { .. } + | Operator::I32Const { .. } + | Operator::I64Const { .. 
} + | Operator::I32Eqz + | Operator::I32Eq + | Operator::I32Ne + | Operator::I32LtS + | Operator::I32LtU + | Operator::I32GtS + | Operator::I32GtU + | Operator::I32LeS + | Operator::I32LeU + | Operator::I32GeS + | Operator::I32GeU + | Operator::I64Eqz + | Operator::I64Eq + | Operator::I64Ne + | Operator::I64LtS + | Operator::I64LtU + | Operator::I64GtS + | Operator::I64GtU + | Operator::I64LeS + | Operator::I64LeU + | Operator::I64GeS + | Operator::I64GeU + | Operator::I32Clz + | Operator::I32Ctz + | Operator::I32Popcnt + | Operator::I32Add + | Operator::I32Sub + | Operator::I32Mul + | Operator::I32DivS + | Operator::I32DivU + | Operator::I32RemS + | Operator::I32RemU + | Operator::I32And + | Operator::I32Or + | Operator::I32Xor + | Operator::I32Shl + | Operator::I32ShrS + | Operator::I32ShrU + | Operator::I32Rotl + | Operator::I32Rotr + | Operator::I64Clz + | Operator::I64Ctz + | Operator::I64Popcnt + | Operator::I64Add + | Operator::I64Sub + | Operator::I64Mul + | Operator::I64DivS + | Operator::I64DivU + | Operator::I64RemS + | Operator::I64RemU + | Operator::I64And + | Operator::I64Or + | Operator::I64Xor + | Operator::I64Shl + | Operator::I64ShrS + | Operator::I64ShrU + | Operator::I64Rotl + | Operator::I64Rotr + | Operator::I32WrapI64 + // Those are part of the MVP + // https://github.com/bytecodealliance/wasm-tools/blob/wasmparser-0.107.0/crates/wasmparser/src/lib.rs#L287-L288 + | Operator::I64ExtendI32S + | Operator::I64ExtendI32U + // Sign-extension + // https://github.com/bytecodealliance/wasm-tools/blob/wasmparser-0.107.0/crates/wasmparser/src/lib.rs#L307-L311 + | Operator::I32Extend8S + | Operator::I32Extend16S + | Operator::I64Extend8S + | Operator::I64Extend16S + | Operator::I64Extend32S => { + state.push_operator(operator); + Ok(()) + } + Operator::RefNull { .. } + | Operator::RefIsNull + | Operator::RefFunc { .. } + | Operator::ReturnCall { .. } + | Operator::ReturnCallIndirect { .. } + | Operator::TypedSelect { .. } + | Operator::TableFill { .. 
} + | Operator::TableGet { .. } + | Operator::TableSet { .. } + | Operator::TableGrow { .. } + | Operator::TableSize { .. } + | Operator::CallRef { .. } + | Operator::ReturnCallRef { .. } + | Operator::RefAsNonNull + | Operator::BrOnNull { .. } + | Operator::BrOnNonNull { .. } => { + if self.config.allow_feature_reference_types { + state.push_operator(operator); + Ok(()) + } else { + let msg = format!("Reference type operation detected: {operator:?}. Reference types are not supported."); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + } + Operator::MemoryAtomicNotify { .. } + | Operator::MemoryAtomicWait32 { .. } + | Operator::MemoryAtomicWait64 { .. } + | Operator::AtomicFence + | Operator::I32AtomicLoad { .. } + | Operator::I64AtomicLoad { .. } + | Operator::I32AtomicLoad8U { .. } + | Operator::I32AtomicLoad16U { .. } + | Operator::I64AtomicLoad8U { .. } + | Operator::I64AtomicLoad16U { .. } + | Operator::I64AtomicLoad32U { .. } + | Operator::I32AtomicStore { .. } + | Operator::I64AtomicStore { .. } + | Operator::I32AtomicStore8 { .. } + | Operator::I32AtomicStore16 { .. } + | Operator::I64AtomicStore8 { .. } + | Operator::I64AtomicStore16 { .. } + | Operator::I64AtomicStore32 { .. } + | Operator::I32AtomicRmwAdd { .. } + | Operator::I64AtomicRmwAdd { .. } + | Operator::I32AtomicRmw8AddU { .. } + | Operator::I32AtomicRmw16AddU { .. } + | Operator::I64AtomicRmw8AddU { .. } + | Operator::I64AtomicRmw16AddU { .. } + | Operator::I64AtomicRmw32AddU { .. } + | Operator::I32AtomicRmwSub { .. } + | Operator::I64AtomicRmwSub { .. } + | Operator::I32AtomicRmw8SubU { .. } + | Operator::I32AtomicRmw16SubU { .. } + | Operator::I64AtomicRmw8SubU { .. } + | Operator::I64AtomicRmw16SubU { .. } + | Operator::I64AtomicRmw32SubU { .. } + | Operator::I32AtomicRmwAnd { .. } + | Operator::I64AtomicRmwAnd { .. } + | Operator::I32AtomicRmw8AndU { .. } + | Operator::I32AtomicRmw16AndU { .. } + | Operator::I64AtomicRmw8AndU { .. } + | Operator::I64AtomicRmw16AndU { .. 
} + | Operator::I64AtomicRmw32AndU { .. } + | Operator::I32AtomicRmwOr { .. } + | Operator::I64AtomicRmwOr { .. } + | Operator::I32AtomicRmw8OrU { .. } + | Operator::I32AtomicRmw16OrU { .. } + | Operator::I64AtomicRmw8OrU { .. } + | Operator::I64AtomicRmw16OrU { .. } + | Operator::I64AtomicRmw32OrU { .. } + | Operator::I32AtomicRmwXor { .. } + | Operator::I64AtomicRmwXor { .. } + | Operator::I32AtomicRmw8XorU { .. } + | Operator::I32AtomicRmw16XorU { .. } + | Operator::I64AtomicRmw8XorU { .. } + | Operator::I64AtomicRmw16XorU { .. } + | Operator::I64AtomicRmw32XorU { .. } + | Operator::I32AtomicRmwXchg { .. } + | Operator::I64AtomicRmwXchg { .. } + | Operator::I32AtomicRmw8XchgU { .. } + | Operator::I32AtomicRmw16XchgU { .. } + | Operator::I64AtomicRmw8XchgU { .. } + | Operator::I64AtomicRmw16XchgU { .. } + | Operator::I64AtomicRmw32XchgU { .. } + | Operator::I32AtomicRmwCmpxchg { .. } + | Operator::I64AtomicRmwCmpxchg { .. } + | Operator::I32AtomicRmw8CmpxchgU { .. } + | Operator::I32AtomicRmw16CmpxchgU { .. } + | Operator::I64AtomicRmw8CmpxchgU { .. } + | Operator::I64AtomicRmw16CmpxchgU { .. } + | Operator::I64AtomicRmw32CmpxchgU { .. } => { + if self.config.allow_feature_threads { + state.push_operator(operator); + Ok(()) + } else { + let msg = format!("Threads operator detected: {operator:?}. The Wasm Threads extension is not supported."); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + } + Operator::V128Load { .. } + | Operator::V128Load8x8S { .. } + | Operator::V128Load8x8U { .. } + | Operator::V128Load16x4S { .. } + | Operator::V128Load16x4U { .. } + | Operator::V128Load32x2S { .. } + | Operator::V128Load32x2U { .. } + | Operator::V128Load8Splat { .. } + | Operator::V128Load16Splat { .. } + | Operator::V128Load32Splat { .. } + | Operator::V128Load64Splat { .. } + | Operator::V128Load32Zero { .. } + | Operator::V128Load64Zero { .. } + | Operator::V128Store { .. } + | Operator::V128Load8Lane { .. } + | Operator::V128Load16Lane { .. 
} + | Operator::V128Load32Lane { .. } + | Operator::V128Load64Lane { .. } + | Operator::V128Store8Lane { .. } + | Operator::V128Store16Lane { .. } + | Operator::V128Store32Lane { .. } + | Operator::V128Store64Lane { .. } + | Operator::V128Const { .. } + | Operator::I8x16Shuffle { .. } + | Operator::I8x16ExtractLaneS { .. } + | Operator::I8x16ExtractLaneU { .. } + | Operator::I8x16ReplaceLane { .. } + | Operator::I16x8ExtractLaneS { .. } + | Operator::I16x8ExtractLaneU { .. } + | Operator::I16x8ReplaceLane { .. } + | Operator::I32x4ExtractLane { .. } + | Operator::I32x4ReplaceLane { .. } + | Operator::I64x2ExtractLane { .. } + | Operator::I64x2ReplaceLane { .. } + | Operator::F32x4ExtractLane { .. } + | Operator::F32x4ReplaceLane { .. } + | Operator::F64x2ExtractLane { .. } + | Operator::F64x2ReplaceLane { .. } + | Operator::I8x16Swizzle + | Operator::I8x16Splat + | Operator::I16x8Splat + | Operator::I32x4Splat + | Operator::I64x2Splat + | Operator::F32x4Splat + | Operator::F64x2Splat + | Operator::I8x16Eq + | Operator::I8x16Ne + | Operator::I8x16LtS + | Operator::I8x16LtU + | Operator::I8x16GtS + | Operator::I8x16GtU + | Operator::I8x16LeS + | Operator::I8x16LeU + | Operator::I8x16GeS + | Operator::I8x16GeU + | Operator::I16x8Eq + | Operator::I16x8Ne + | Operator::I16x8LtS + | Operator::I16x8LtU + | Operator::I16x8GtS + | Operator::I16x8GtU + | Operator::I16x8LeS + | Operator::I16x8LeU + | Operator::I16x8GeS + | Operator::I16x8GeU + | Operator::I32x4Eq + | Operator::I32x4Ne + | Operator::I32x4LtS + | Operator::I32x4LtU + | Operator::I32x4GtS + | Operator::I32x4GtU + | Operator::I32x4LeS + | Operator::I32x4LeU + | Operator::I32x4GeS + | Operator::I32x4GeU + | Operator::I64x2Eq + | Operator::I64x2Ne + | Operator::I64x2LtS + | Operator::I64x2GtS + | Operator::I64x2LeS + | Operator::I64x2GeS + | Operator::F32x4Eq + | Operator::F32x4Ne + | Operator::F32x4Lt + | Operator::F32x4Gt + | Operator::F32x4Le + | Operator::F32x4Ge + | Operator::F64x2Eq + | Operator::F64x2Ne + | 
Operator::F64x2Lt + | Operator::F64x2Gt + | Operator::F64x2Le + | Operator::F64x2Ge + | Operator::V128Not + | Operator::V128And + | Operator::V128AndNot + | Operator::V128Or + | Operator::V128Xor + | Operator::V128Bitselect + | Operator::V128AnyTrue + | Operator::I8x16Abs + | Operator::I8x16Neg + | Operator::I8x16Popcnt + | Operator::I8x16AllTrue + | Operator::I8x16Bitmask + | Operator::I8x16NarrowI16x8S + | Operator::I8x16NarrowI16x8U + | Operator::I8x16Shl + | Operator::I8x16ShrS + | Operator::I8x16ShrU + | Operator::I8x16Add + | Operator::I8x16AddSatS + | Operator::I8x16AddSatU + | Operator::I8x16Sub + | Operator::I8x16SubSatS + | Operator::I8x16SubSatU + | Operator::I8x16MinS + | Operator::I8x16MinU + | Operator::I8x16MaxS + | Operator::I8x16MaxU + | Operator::I8x16AvgrU + | Operator::I16x8ExtAddPairwiseI8x16S + | Operator::I16x8ExtAddPairwiseI8x16U + | Operator::I16x8Abs + | Operator::I16x8Neg + | Operator::I16x8Q15MulrSatS + | Operator::I16x8AllTrue + | Operator::I16x8Bitmask + | Operator::I16x8NarrowI32x4S + | Operator::I16x8NarrowI32x4U + | Operator::I16x8ExtendLowI8x16S + | Operator::I16x8ExtendHighI8x16S + | Operator::I16x8ExtendLowI8x16U + | Operator::I16x8ExtendHighI8x16U + | Operator::I16x8Shl + | Operator::I16x8ShrS + | Operator::I16x8ShrU + | Operator::I16x8Add + | Operator::I16x8AddSatS + | Operator::I16x8AddSatU + | Operator::I16x8Sub + | Operator::I16x8SubSatS + | Operator::I16x8SubSatU + | Operator::I16x8Mul + | Operator::I16x8MinS + | Operator::I16x8MinU + | Operator::I16x8MaxS + | Operator::I16x8MaxU + | Operator::I16x8AvgrU + | Operator::I16x8ExtMulLowI8x16S + | Operator::I16x8ExtMulHighI8x16S + | Operator::I16x8ExtMulLowI8x16U + | Operator::I16x8ExtMulHighI8x16U + | Operator::I32x4ExtAddPairwiseI16x8S + | Operator::I32x4ExtAddPairwiseI16x8U + | Operator::I32x4Abs + | Operator::I32x4Neg + | Operator::I32x4AllTrue + | Operator::I32x4Bitmask + | Operator::I32x4ExtendLowI16x8S + | Operator::I32x4ExtendHighI16x8S + | Operator::I32x4ExtendLowI16x8U 
+ | Operator::I32x4ExtendHighI16x8U + | Operator::I32x4Shl + | Operator::I32x4ShrS + | Operator::I32x4ShrU + | Operator::I32x4Add + | Operator::I32x4Sub + | Operator::I32x4Mul + | Operator::I32x4MinS + | Operator::I32x4MinU + | Operator::I32x4MaxS + | Operator::I32x4MaxU + | Operator::I32x4DotI16x8S + | Operator::I32x4ExtMulLowI16x8S + | Operator::I32x4ExtMulHighI16x8S + | Operator::I32x4ExtMulLowI16x8U + | Operator::I32x4ExtMulHighI16x8U + | Operator::I64x2Abs + | Operator::I64x2Neg + | Operator::I64x2AllTrue + | Operator::I64x2Bitmask + | Operator::I64x2ExtendLowI32x4S + | Operator::I64x2ExtendHighI32x4S + | Operator::I64x2ExtendLowI32x4U + | Operator::I64x2ExtendHighI32x4U + | Operator::I64x2Shl + | Operator::I64x2ShrS + | Operator::I64x2ShrU + | Operator::I64x2Add + | Operator::I64x2Sub + | Operator::I64x2Mul + | Operator::I64x2ExtMulLowI32x4S + | Operator::I64x2ExtMulHighI32x4S + | Operator::I64x2ExtMulLowI32x4U + | Operator::I64x2ExtMulHighI32x4U + | Operator::F32x4Ceil + | Operator::F32x4Floor + | Operator::F32x4Trunc + | Operator::F32x4Nearest + | Operator::F32x4Abs + | Operator::F32x4Neg + | Operator::F32x4Sqrt + | Operator::F32x4Add + | Operator::F32x4Sub + | Operator::F32x4Mul + | Operator::F32x4Div + | Operator::F32x4Min + | Operator::F32x4Max + | Operator::F32x4PMin + | Operator::F32x4PMax + | Operator::F64x2Ceil + | Operator::F64x2Floor + | Operator::F64x2Trunc + | Operator::F64x2Nearest + | Operator::F64x2Abs + | Operator::F64x2Neg + | Operator::F64x2Sqrt + | Operator::F64x2Add + | Operator::F64x2Sub + | Operator::F64x2Mul + | Operator::F64x2Div + | Operator::F64x2Min + | Operator::F64x2Max + | Operator::F64x2PMin + | Operator::F64x2PMax + | Operator::I32x4TruncSatF32x4S + | Operator::I32x4TruncSatF32x4U + | Operator::F32x4ConvertI32x4S + | Operator::F32x4ConvertI32x4U + | Operator::I32x4TruncSatF64x2SZero + | Operator::I32x4TruncSatF64x2UZero + | Operator::F64x2ConvertLowI32x4S + | Operator::F64x2ConvertLowI32x4U + | Operator::F32x4DemoteF64x2Zero + 
| Operator::F64x2PromoteLowF32x4 => { + if self.config.allow_feature_simd { + state.push_operator(operator); + Ok(()) + } else { + let msg = format!( + "SIMD operator detected: {operator:?}. The Wasm SIMD extension is not supported." + ); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + } + // Relaxed SIMD operators + Operator::I8x16RelaxedSwizzle + | Operator::I32x4RelaxedTruncF32x4S + | Operator::I32x4RelaxedTruncF32x4U + | Operator::I32x4RelaxedTruncF64x2SZero + | Operator::I32x4RelaxedTruncF64x2UZero + | Operator::F32x4RelaxedMadd + | Operator::F32x4RelaxedNmadd + | Operator::F64x2RelaxedMadd + | Operator::F64x2RelaxedNmadd + | Operator::I8x16RelaxedLaneselect + | Operator::I16x8RelaxedLaneselect + | Operator::I32x4RelaxedLaneselect + | Operator::I64x2RelaxedLaneselect + | Operator::F32x4RelaxedMin + | Operator::F32x4RelaxedMax + | Operator::F64x2RelaxedMin + | Operator::F64x2RelaxedMax + | Operator::I16x8RelaxedQ15mulrS + | Operator::I16x8RelaxedDotI8x16I7x16S + | Operator::I32x4RelaxedDotI8x16I7x16AddS => { + let msg = format!( + "Relaxed SIMD operator detected: {operator:?}. The Wasm Relaxed SIMD extension is not supported." + ); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + Operator::F32Load { .. } + | Operator::F64Load { .. } + | Operator::F32Store { .. } + | Operator::F64Store { .. } + | Operator::F32Const { .. } + | Operator::F64Const { .. 
} + | Operator::F32Eq + | Operator::F32Ne + | Operator::F32Lt + | Operator::F32Gt + | Operator::F32Le + | Operator::F32Ge + | Operator::F64Eq + | Operator::F64Ne + | Operator::F64Lt + | Operator::F64Gt + | Operator::F64Le + | Operator::F64Ge + | Operator::F32Abs + | Operator::F32Neg + | Operator::F32Ceil + | Operator::F32Floor + | Operator::F32Trunc + | Operator::F32Nearest + | Operator::F32Sqrt + | Operator::F32Add + | Operator::F32Sub + | Operator::F32Mul + | Operator::F32Div + | Operator::F32Min + | Operator::F32Max + | Operator::F32Copysign + | Operator::F64Abs + | Operator::F64Neg + | Operator::F64Ceil + | Operator::F64Floor + | Operator::F64Trunc + | Operator::F64Nearest + | Operator::F64Sqrt + | Operator::F64Add + | Operator::F64Sub + | Operator::F64Mul + | Operator::F64Div + | Operator::F64Min + | Operator::F64Max + | Operator::F64Copysign + | Operator::I32TruncF32S + | Operator::I32TruncF32U + | Operator::I32TruncF64S + | Operator::I32TruncF64U + | Operator::I64TruncF32S + | Operator::I64TruncF32U + | Operator::I64TruncF64S + | Operator::I64TruncF64U + | Operator::F32ConvertI32S + | Operator::F32ConvertI32U + | Operator::F32ConvertI64S + | Operator::F32ConvertI64U + | Operator::F32DemoteF64 + | Operator::F64ConvertI32S + | Operator::F64ConvertI32U + | Operator::F64ConvertI64S + | Operator::F64ConvertI64U + | Operator::F64PromoteF32 + | Operator::I32ReinterpretF32 + | Operator::I64ReinterpretF64 + | Operator::F32ReinterpretI32 + | Operator::F64ReinterpretI64 + | Operator::I32TruncSatF32S + | Operator::I32TruncSatF32U + | Operator::I32TruncSatF64S + | Operator::I32TruncSatF64U + | Operator::I64TruncSatF32S + | Operator::I64TruncSatF32U + | Operator::I64TruncSatF64S + | Operator::I64TruncSatF64U => { + if self.config.allow_floats { + state.push_operator(operator); + Ok(()) + } else { + let msg = format!( + "Float operator detected: {operator:?}. The use of floats is not supported." 
+ ); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + } + Operator::MemoryInit { .. } + | Operator::DataDrop { .. } + | Operator::MemoryCopy { .. } + | Operator::MemoryFill { .. } + | Operator::TableInit { .. } + | Operator::ElemDrop { .. } + | Operator::TableCopy { .. } => { + if self.config.allow_feature_bulk_memory_operations { + state.push_operator(operator); + Ok(()) + } else { + let msg = format!("Bulk memory operation detected: {operator:?}. Bulk memory operations are not supported."); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + } + Operator::Try { .. } + | Operator::TryTable { .. } + | Operator::Catch { .. } + | Operator::Throw { .. } + | Operator::ThrowRef { .. } + | Operator::Rethrow { .. } + | Operator::Delegate { .. } + | Operator::CatchAll => { + if self.config.allow_feature_exception_handling { + state.push_operator(operator); + Ok(()) + } else { + let msg = format!("Exception handling operation detected: {operator:?}. Exception handling is not supported."); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + } + Operator::RefEq { .. } | + Operator::StructNew { .. } | + Operator::StructNewDefault { .. } | + Operator::StructGet { .. } | + Operator::StructGetS { .. } | + Operator::StructGetU { .. } | + Operator::StructSet { .. } | + Operator::ArrayNew { .. } | + Operator::ArrayNewDefault { .. } | + Operator::ArrayNewFixed { .. } | + Operator::ArrayNewData { .. } | + Operator::ArrayNewElem { .. } | + Operator::ArrayGet { .. } | + Operator::ArrayGetS { .. } | + Operator::ArrayGetU { .. } | + Operator::ArraySet { .. } | + Operator::ArrayLen | + Operator::ArrayFill { .. } | + Operator::ArrayCopy { .. } | + Operator::ArrayInitData { .. } | + Operator::ArrayInitElem { .. } | + Operator::RefTestNonNull { .. } | + Operator::RefTestNullable { .. } | + Operator::RefCastNonNull { .. } | + Operator::RefCastNullable { .. } | + Operator::BrOnCast { .. } | + Operator::BrOnCastFail { .. 
} | + Operator::AnyConvertExtern | + Operator::ExternConvertAny | + Operator::RefI31 | + Operator::I31GetS | + Operator::I31GetU => { + let msg = format!("GC operation detected: {operator:?}. GC Proposal is not supported."); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + }, + Operator::MemoryDiscard { .. } => { + let msg = format!("Memory control operation detected: {operator:?}. Memory control is not supported."); + Err(MiddlewareError::new(MIDDLEWARE_NAME, msg)) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::wasm_backend::make_compiler_config; + use std::sync::Arc; + use wasmer::{CompilerConfig, Module, Store}; + + #[test] + fn valid_wasm_instance_sanity() { + let wasm = wat::parse_str( + r#" + (module + (func (export "sum") (param i32 i32) (result i32) + local.get 0 + local.get 1 + i32.add + )) + "#, + ) + .unwrap(); + + let deterministic = Arc::new(Gatekeeper::default()); + let mut compiler = make_compiler_config(); + compiler.push_middleware(deterministic); + let store = Store::new(compiler); + let result = Module::new(&store, wasm); + assert!(result.is_ok()); + } + + #[test] + fn parser_floats_are_supported() { + let wasm = wat::parse_str( + r#" + (module + (func $to_float (param i32) (result f32) + local.get 0 + f32.convert_i32_u + )) + "#, + ) + .unwrap(); + + let deterministic = Arc::new(Gatekeeper::default()); + let mut compiler = make_compiler_config(); + compiler.push_middleware(deterministic); + let store = Store::new(compiler); + let result = Module::new(&store, wasm); + assert!(result.is_ok()); + } + + #[test] + fn bulk_operations_not_supported() { + let wasm = wat::parse_str( + r#" + (module + (memory (export "memory") 1) + (func (param $dst i32) (param $src i32) (param $size i32) (result i32) + local.get $dst + local.get $src + local.get $size + memory.copy + local.get $dst)) + "#, + ) + .unwrap(); + + let deterministic = Arc::new(Gatekeeper::default()); + let mut compiler = make_compiler_config(); + 
compiler.push_middleware(deterministic); + let store = Store::new(compiler); + let result = Module::new(&store, wasm); + assert!(result + .unwrap_err() + .to_string() + .contains("Bulk memory operation")); + } +} diff --git a/vm/src/wasm_backend/limiting_tunables.rs b/vm/src/wasm_backend/limiting_tunables.rs new file mode 100644 index 000000000..b1e817c39 --- /dev/null +++ b/vm/src/wasm_backend/limiting_tunables.rs @@ -0,0 +1,214 @@ +use std::ptr::NonNull; + +use wasmer::{ + vm::{ + MemoryError, MemoryStyle, TableStyle, VMMemory, VMMemoryDefinition, VMTable, + VMTableDefinition, + }, + MemoryType, Pages, TableType, Tunables, +}; + +/// A custom tunables that allows you to set a memory limit. +/// +/// After adjusting the memory limits, it delegates all other logic +/// to the base tunables. +pub struct LimitingTunables { + /// The maximum a linear memory is allowed to be (in Wasm pages, 65 KiB each). + /// Since Wasmer ensures there is only none or one memory, this is practically + /// an upper limit for the guest memory. + limit: Pages, + /// The base implementation we delegate all the logic to + base: T, +} + +impl LimitingTunables { + pub fn new(base: T, limit: Pages) -> Self { + Self { limit, base } + } + + /// Takes in input memory type as requested by the guest and sets + /// a maximum if missing. The resulting memory type is final if + /// valid. However, this can produce invalid types, such that + /// validate_memory must be called before creating the memory. + fn adjust_memory(&self, requested: &MemoryType) -> MemoryType { + let mut adjusted = *requested; + if requested.maximum.is_none() { + adjusted.maximum = Some(self.limit); + } + adjusted + } + + /// Ensures the a given memory type does not exceed the memory limit. + /// Call this after adjusting the memory. 
+ fn validate_memory(&self, ty: &MemoryType) -> Result<(), MemoryError> { + if ty.minimum > self.limit { + return Err(MemoryError::Generic( + "Minimum exceeds the allowed memory limit".to_string(), + )); + } + + if let Some(max) = ty.maximum { + if max > self.limit { + return Err(MemoryError::Generic( + "Maximum exceeds the allowed memory limit".to_string(), + )); + } + } else { + return Err(MemoryError::Generic("Maximum unset".to_string())); + } + + Ok(()) + } +} + +impl Tunables for LimitingTunables { + /// Construct a `MemoryStyle` for the provided `MemoryType` + /// + /// Delegated to base. + fn memory_style(&self, memory: &MemoryType) -> MemoryStyle { + let adjusted = self.adjust_memory(memory); + self.base.memory_style(&adjusted) + } + + /// Construct a `TableStyle` for the provided `TableType` + /// + /// Delegated to base. + fn table_style(&self, table: &TableType) -> TableStyle { + self.base.table_style(table) + } + + /// Create a memory owned by the host given a [`MemoryType`] and a [`MemoryStyle`]. + /// + /// The requested memory type is validated, adjusted to the limited and then passed to base. + fn create_host_memory( + &self, + ty: &MemoryType, + style: &MemoryStyle, + ) -> Result { + let adjusted = self.adjust_memory(ty); + self.validate_memory(&adjusted)?; + self.base.create_host_memory(&adjusted, style) + } + + /// Create a memory owned by the VM given a [`MemoryType`] and a [`MemoryStyle`]. + /// + /// Delegated to base. + unsafe fn create_vm_memory( + &self, + ty: &MemoryType, + style: &MemoryStyle, + vm_definition_location: NonNull, + ) -> Result { + let adjusted = self.adjust_memory(ty); + self.validate_memory(&adjusted)?; + self.base + .create_vm_memory(&adjusted, style, vm_definition_location) + } + + /// Create a table owned by the host given a [`TableType`] and a [`TableStyle`]. + /// + /// Delegated to base. 
+ fn create_host_table(&self, ty: &TableType, style: &TableStyle) -> Result { + self.base.create_host_table(ty, style) + } + + /// Create a table owned by the VM given a [`TableType`] and a [`TableStyle`]. + /// + /// Delegated to base. + unsafe fn create_vm_table( + &self, + ty: &TableType, + style: &TableStyle, + vm_definition_location: NonNull, + ) -> Result { + self.base.create_vm_table(ty, style, vm_definition_location) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use wasmer::{sys::BaseTunables, Target}; + + #[test] + fn adjust_memory_works() { + let limit = Pages(12); + let limiting = LimitingTunables::new(BaseTunables::for_target(&Target::default()), limit); + + // No maximum + let requested = MemoryType::new(3, None, true); + let adjusted = limiting.adjust_memory(&requested); + assert_eq!(adjusted, MemoryType::new(3, Some(12), true)); + + // Maximum smaller than limit + let requested = MemoryType::new(3, Some(7), true); + let adjusted = limiting.adjust_memory(&requested); + assert_eq!(adjusted, requested); + + // Maximum equal to limit + let requested = MemoryType::new(3, Some(12), true); + let adjusted = limiting.adjust_memory(&requested); + assert_eq!(adjusted, requested); + + // Maximum greater than limit + let requested = MemoryType::new(3, Some(20), true); + let adjusted = limiting.adjust_memory(&requested); + assert_eq!(adjusted, requested); + + // Minimum greater than maximum (not our problem) + let requested = MemoryType::new(5, Some(3), true); + let adjusted = limiting.adjust_memory(&requested); + assert_eq!(adjusted, requested); + + // Minimum greater than limit + let requested = MemoryType::new(20, Some(20), true); + let adjusted = limiting.adjust_memory(&requested); + assert_eq!(adjusted, requested); + } + + #[test] + fn validate_memory_works() { + let limit = Pages(12); + let limiting = LimitingTunables::new(BaseTunables::for_target(&Target::default()), limit); + + // Maximum smaller than limit + let memory = MemoryType::new(3, 
Some(7), true); + limiting.validate_memory(&memory).unwrap(); + + // Maximum equal to limit + let memory = MemoryType::new(3, Some(12), true); + limiting.validate_memory(&memory).unwrap(); + + // Maximum greater than limit + let memory = MemoryType::new(3, Some(20), true); + let result = limiting.validate_memory(&memory); + match result.unwrap_err() { + MemoryError::Generic(msg) => { + assert_eq!(msg, "Maximum exceeds the allowed memory limit") + } + err => panic!("Unexpected error: {err:?}"), + } + + // Maximum not set + let memory = MemoryType::new(3, None, true); + let result = limiting.validate_memory(&memory); + match result.unwrap_err() { + MemoryError::Generic(msg) => assert_eq!(msg, "Maximum unset"), + err => panic!("Unexpected error: {err:?}"), + } + + // Minimum greater than maximum (not our problem) + let memory = MemoryType::new(5, Some(3), true); + limiting.validate_memory(&memory).unwrap(); + + // Minimum greater than limit + let memory = MemoryType::new(20, Some(20), true); + let result = limiting.validate_memory(&memory); + match result.unwrap_err() { + MemoryError::Generic(msg) => { + assert_eq!(msg, "Minimum exceeds the allowed memory limit") + } + err => panic!("Unexpected error: {err:?}"), + } + } +} diff --git a/vm/src/wasm_backend/metering.rs b/vm/src/wasm_backend/metering.rs new file mode 100644 index 000000000..0155b8868 --- /dev/null +++ b/vm/src/wasm_backend/metering.rs @@ -0,0 +1,265 @@ +use std::fmt; +use std::sync::{Arc, Mutex}; +use wasmer::wasmparser::{BlockType as WpTypeOrFuncType, Operator}; +use wasmer::{ + ExportIndex, FunctionMiddleware, GlobalInit, GlobalType, LocalFunctionIndex, MiddlewareError, + MiddlewareReaderState, ModuleMiddleware, Mutability, Type, +}; +use wasmer_types::{GlobalIndex, ModuleInfo}; + +#[derive(Clone)] +struct MeteringGlobalIndexes(GlobalIndex, GlobalIndex); + +impl MeteringGlobalIndexes { + /// The global index in the current module for remaining points. 
+ fn remaining_points(&self) -> GlobalIndex { + self.0 + } + + /// The global index in the current module for a boolean indicating whether points are exhausted + /// or not. + /// This boolean is represented as an i32 global: + /// * 0: there are remaining points + /// * 1: points have been exhausted + fn points_exhausted(&self) -> GlobalIndex { + self.1 + } +} + +impl fmt::Debug for MeteringGlobalIndexes { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("MeteringGlobalIndexes") + .field("remaining_points", &self.remaining_points()) + .field("points_exhausted", &self.points_exhausted()) + .finish() + } +} + +/// The module-level metering middleware. +/// +/// # Panic +/// +/// An instance of `Metering` should _not_ be shared among different +/// modules, since it tracks module-specific information like the +/// global index to store metering state. Attempts to use a `Metering` +/// instance from multiple modules will result in a panic. +/// +/// # Example +/// +/// ```rust +/// use std::sync::Arc; +/// use wasmer::{wasmparser::Operator, CompilerConfig}; +/// use wasmer_middlewares::Metering; +/// +/// fn create_metering_middleware(compiler_config: &mut dyn CompilerConfig) { +/// // Let's define a dummy cost function, +/// // which counts 1 for all operators. +/// let cost_function = |_operator: &Operator| -> u64 { 1 }; +/// +/// // Let's define the initial limit. +/// let initial_limit = 10; +/// +/// // Let's create the metering middleware. +/// let metering = Arc::new(Metering::new( +/// initial_limit, +/// cost_function +/// )); +/// +/// // Finally, let's push the middleware. +/// compiler_config.push_middleware(metering); +/// } +/// ``` +pub struct Metering<F: Fn(&Operator) -> u64 + Send + Sync> { + /// Initial limit of points. + initial_limit: u64, + + /// Function that maps each operator to a cost in "points". + cost_function: Arc<F>, + + /// The global indexes for metering points.
+ global_indexes: Mutex<Option<MeteringGlobalIndexes>>, +} + +/// The function-level metering middleware. +pub struct FunctionMetering<F: Fn(&Operator) -> u64 + Send + Sync> { + /// Function that maps each operator to a cost in "points". + cost_function: Arc<F>, + + /// The global indexes for metering points. + global_indexes: MeteringGlobalIndexes, + + /// Accumulated cost of the current basic block. + accumulated_cost: u64, +} + +impl<F: Fn(&Operator) -> u64 + Send + Sync> Metering<F> { + /// Creates a `Metering` middleware. + pub fn new(initial_limit: u64, cost_function: F) -> Self { + Self { + initial_limit, + cost_function: Arc::new(cost_function), + global_indexes: Mutex::new(None), + } + } +} + +/// Returns `true` if and only if the given operator is an accounting operator. +/// Accounting operators do additional work to track the metering points. +pub fn is_accounting(operator: &Operator) -> bool { + matches!( + operator, + Operator::Loop { .. } // loop headers are branch targets + | Operator::End // block ends are branch targets + | Operator::If { .. } // branch source, "if" can branch to else branch + | Operator::Else // "else" is the "end" of an if branch + | Operator::Br { .. } // branch source + | Operator::BrTable { .. } // branch source + | Operator::BrIf { .. } // branch source + | Operator::Call { .. } // function call - branch source + | Operator::CallIndirect { .. } // function call - branch source + | Operator::Return // end of function - branch source + // exceptions proposal + | Operator::Throw { .. } // branch source + | Operator::ThrowRef // branch source + | Operator::Rethrow { .. } // branch source + | Operator::Delegate { .. } // branch source + | Operator::Catch { .. } // branch target + // tail_call proposal + | Operator::ReturnCall { .. } // branch source + | Operator::ReturnCallIndirect { .. } // branch source + // gc proposal + | Operator::BrOnCast { .. } // branch source + | Operator::BrOnCastFail { .. } // branch source + // function_references proposal + | Operator::CallRef { ..
} // branch source + | Operator::ReturnCallRef { .. } // branch source + | Operator::BrOnNull { .. } // branch source + | Operator::BrOnNonNull { .. } // branch source + ) +} + +impl<F: Fn(&Operator) -> u64 + Send + Sync> fmt::Debug for Metering<F> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Metering") + .field("initial_limit", &self.initial_limit) + .field("cost_function", &"<function>") + .field("global_indexes", &self.global_indexes) + .finish() + } +} + +impl<F: Fn(&Operator) -> u64 + Send + Sync + 'static> ModuleMiddleware for Metering<F> { + /// Generates a `FunctionMiddleware` for a given function. + fn generate_function_middleware(&self, _: LocalFunctionIndex) -> Box<dyn FunctionMiddleware> { + Box::new(FunctionMetering { + cost_function: self.cost_function.clone(), + global_indexes: self.global_indexes.lock().unwrap().clone().unwrap(), + accumulated_cost: 0, + }) + } + + /// Transforms a `ModuleInfo` struct in-place. This is called before application on functions begins. + fn transform_module_info(&self, module_info: &mut ModuleInfo) -> Result<(), MiddlewareError> { + let mut global_indexes = self.global_indexes.lock().unwrap(); + + if global_indexes.is_some() { + panic!("Metering::transform_module_info: Attempting to use a `Metering` middleware from multiple modules."); + } + + // Append a global for remaining points and initialize it. + let remaining_points_global_index = module_info + .globals + .push(GlobalType::new(Type::I64, Mutability::Var)); + + module_info + .global_initializers + .push(GlobalInit::I64Const(self.initial_limit as i64)); + + module_info.exports.insert( + "wasmer_metering_remaining_points".to_string(), + ExportIndex::Global(remaining_points_global_index), + ); + + // Append a global for the exhausted points boolean and initialize it.
+ let points_exhausted_global_index = module_info + .globals + .push(GlobalType::new(Type::I32, Mutability::Var)); + + module_info + .global_initializers + .push(GlobalInit::I32Const(0)); + + module_info.exports.insert( + "wasmer_metering_points_exhausted".to_string(), + ExportIndex::Global(points_exhausted_global_index), + ); + + *global_indexes = Some(MeteringGlobalIndexes( + remaining_points_global_index, + points_exhausted_global_index, + )); + + Ok(()) + } +} + +impl<F: Fn(&Operator) -> u64 + Send + Sync> fmt::Debug for FunctionMetering<F> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("FunctionMetering") + .field("cost_function", &"<function>") + .field("global_indexes", &self.global_indexes) + .finish() + } +} + +impl<F: Fn(&Operator) -> u64 + Send + Sync> FunctionMiddleware for FunctionMetering<F> { + fn feed<'a>( + &mut self, + operator: Operator<'a>, + state: &mut MiddlewareReaderState<'a>, + ) -> Result<(), MiddlewareError> { + // Get the cost of the current operator, and add it to the accumulator. + // This needs to be done before the metering logic, to prevent operators like `Call` from escaping metering in some + // corner cases. + self.accumulated_cost += (self.cost_function)(&operator); + + // Possible sources and targets of a branch. Finalize the cost of the previous basic block and perform necessary checks.
+ if is_accounting(&operator) && self.accumulated_cost > 0 { + state.extend(&[ + // if unsigned(globals[remaining_points_index]) < unsigned(self.accumulated_cost) { throw(); } + Operator::GlobalGet { + global_index: self.global_indexes.remaining_points().as_u32(), + }, + Operator::I64Const { + value: self.accumulated_cost as i64, + }, + Operator::I64LtU, + Operator::If { + blockty: WpTypeOrFuncType::Empty, + }, + Operator::I32Const { value: 1 }, + Operator::GlobalSet { + global_index: self.global_indexes.points_exhausted().as_u32(), + }, + Operator::Unreachable, + Operator::End, + // globals[remaining_points_index] -= self.accumulated_cost; + Operator::GlobalGet { + global_index: self.global_indexes.remaining_points().as_u32(), + }, + Operator::I64Const { + value: self.accumulated_cost as i64, + }, + Operator::I64Sub, + Operator::GlobalSet { + global_index: self.global_indexes.remaining_points().as_u32(), + }, + ]); + + self.accumulated_cost = 0; + } + state.push_operator(operator); + + Ok(()) + } +} diff --git a/vm/src/wasm_backend/mod.rs b/vm/src/wasm_backend/mod.rs new file mode 100644 index 000000000..abde2cb34 --- /dev/null +++ b/vm/src/wasm_backend/mod.rs @@ -0,0 +1,11 @@ +mod compile; +mod engine; +mod gatekeeper; +mod limiting_tunables; +mod metering; + +#[cfg(test)] +pub use engine::make_compiler_config; + +pub use compile::compile; +pub use engine::{make_compiling_engine, make_runtime_engine, COST_FUNCTION_HASH}; diff --git a/vm/testdata/README.md b/vm/testdata/README.md new file mode 100644 index 000000000..e14865967 --- /dev/null +++ b/vm/testdata/README.md @@ -0,0 +1,16 @@ +# Test data + +The contracts here are compilations of the Hackatom contract. + +## contract.wasm + +Is a symbolic link to a recent hackatom contract. 
+ +## corrupted.wasm + +A corrupted contract file, created by + +```sh +cp contract.wasm corrupted.wasm +printf '\x11\x11\x11\x11\x11\x11\x11\x11' | dd of=corrupted.wasm bs=1 seek=1000 count=8 conv=notrunc +``` diff --git a/vm/testdata/corrupted.wasm b/vm/testdata/corrupted.wasm new file mode 100644 index 000000000..d69a963a6 Binary files /dev/null and b/vm/testdata/corrupted.wasm differ diff --git a/vm/testdata/cyberpunk.wasm b/vm/testdata/cyberpunk.wasm new file mode 100644 index 000000000..885a1dbdf Binary files /dev/null and b/vm/testdata/cyberpunk.wasm differ diff --git a/vm/testdata/cyberpunk_rust170.wasm b/vm/testdata/cyberpunk_rust170.wasm new file mode 100755 index 000000000..3bd0b471c Binary files /dev/null and b/vm/testdata/cyberpunk_rust170.wasm differ diff --git a/vm/testdata/empty.wasm b/vm/testdata/empty.wasm new file mode 100644 index 000000000..fe3ef9689 Binary files /dev/null and b/vm/testdata/empty.wasm differ diff --git a/vm/testdata/floaty.wasm b/vm/testdata/floaty.wasm new file mode 100644 index 000000000..653c3e884 Binary files /dev/null and b/vm/testdata/floaty.wasm differ diff --git a/vm/testdata/floaty_0.16.wasm b/vm/testdata/floaty_0.16.wasm new file mode 100644 index 000000000..8334867cf Binary files /dev/null and b/vm/testdata/floaty_0.16.wasm differ diff --git a/vm/testdata/floaty_1.0.wasm b/vm/testdata/floaty_1.0.wasm new file mode 100644 index 000000000..aa7062053 Binary files /dev/null and b/vm/testdata/floaty_1.0.wasm differ diff --git a/vm/testdata/floaty_1.2.wasm b/vm/testdata/floaty_1.2.wasm new file mode 100644 index 000000000..653c3e884 Binary files /dev/null and b/vm/testdata/floaty_1.2.wasm differ diff --git a/vm/testdata/floaty_2.0.wasm b/vm/testdata/floaty_2.0.wasm new file mode 100755 index 000000000..3dd07a913 Binary files /dev/null and b/vm/testdata/floaty_2.0.wasm differ diff --git a/vm/testdata/hackatom.wasm b/vm/testdata/hackatom.wasm new file mode 100644 index 000000000..580f9cf13 Binary files /dev/null and
b/vm/testdata/hackatom.wasm differ diff --git a/vm/testdata/hackatom_0.10.wasm b/vm/testdata/hackatom_0.10.wasm new file mode 100644 index 000000000..4bfc3b3b1 Binary files /dev/null and b/vm/testdata/hackatom_0.10.wasm differ diff --git a/vm/testdata/hackatom_0.11.wasm b/vm/testdata/hackatom_0.11.wasm new file mode 100644 index 000000000..5277729db Binary files /dev/null and b/vm/testdata/hackatom_0.11.wasm differ diff --git a/vm/testdata/hackatom_0.12.wasm b/vm/testdata/hackatom_0.12.wasm new file mode 100644 index 000000000..249a76ecf Binary files /dev/null and b/vm/testdata/hackatom_0.12.wasm differ diff --git a/vm/testdata/hackatom_0.14.wasm b/vm/testdata/hackatom_0.14.wasm new file mode 100644 index 000000000..188ed0115 Binary files /dev/null and b/vm/testdata/hackatom_0.14.wasm differ diff --git a/vm/testdata/hackatom_0.15.wasm b/vm/testdata/hackatom_0.15.wasm new file mode 100644 index 000000000..92f5db409 Binary files /dev/null and b/vm/testdata/hackatom_0.15.wasm differ diff --git a/vm/testdata/hackatom_0.16.wasm b/vm/testdata/hackatom_0.16.wasm new file mode 100644 index 000000000..1787fc84a Binary files /dev/null and b/vm/testdata/hackatom_0.16.wasm differ diff --git a/vm/testdata/hackatom_0.7.wasm b/vm/testdata/hackatom_0.7.wasm new file mode 100644 index 000000000..2ba5901c5 Binary files /dev/null and b/vm/testdata/hackatom_0.7.wasm differ diff --git a/vm/testdata/hackatom_0.8.wasm b/vm/testdata/hackatom_0.8.wasm new file mode 100644 index 000000000..2fab736ed Binary files /dev/null and b/vm/testdata/hackatom_0.8.wasm differ diff --git a/vm/testdata/hackatom_0.9.wasm b/vm/testdata/hackatom_0.9.wasm new file mode 100644 index 000000000..284120d1c Binary files /dev/null and b/vm/testdata/hackatom_0.9.wasm differ diff --git a/vm/testdata/hackatom_1.0.wasm b/vm/testdata/hackatom_1.0.wasm new file mode 100644 index 000000000..aeac737c7 Binary files /dev/null and b/vm/testdata/hackatom_1.0.wasm differ diff --git a/vm/testdata/hackatom_1.2.wasm 
b/vm/testdata/hackatom_1.2.wasm new file mode 100644 index 000000000..f15360139 Binary files /dev/null and b/vm/testdata/hackatom_1.2.wasm differ diff --git a/vm/testdata/hackatom_1.3.wasm b/vm/testdata/hackatom_1.3.wasm new file mode 100644 index 000000000..580f9cf13 Binary files /dev/null and b/vm/testdata/hackatom_1.3.wasm differ diff --git a/vm/testdata/ibc_callbacks.wasm b/vm/testdata/ibc_callbacks.wasm new file mode 100644 index 000000000..63519505a Binary files /dev/null and b/vm/testdata/ibc_callbacks.wasm differ diff --git a/vm/testdata/ibc_reflect.wasm b/vm/testdata/ibc_reflect.wasm new file mode 100644 index 000000000..228bdfbff Binary files /dev/null and b/vm/testdata/ibc_reflect.wasm differ diff --git a/vm/testdata/ibc_reflect_0.14.wasm b/vm/testdata/ibc_reflect_0.14.wasm new file mode 100644 index 000000000..e23d34604 Binary files /dev/null and b/vm/testdata/ibc_reflect_0.14.wasm differ diff --git a/vm/testdata/ibc_reflect_0.15.wasm b/vm/testdata/ibc_reflect_0.15.wasm new file mode 100644 index 000000000..e80ecfe4a Binary files /dev/null and b/vm/testdata/ibc_reflect_0.15.wasm differ diff --git a/vm/testdata/ibc_reflect_0.16.wasm b/vm/testdata/ibc_reflect_0.16.wasm new file mode 100644 index 000000000..5c4ced504 Binary files /dev/null and b/vm/testdata/ibc_reflect_0.16.wasm differ diff --git a/vm/testdata/ibc_reflect_1.0.wasm b/vm/testdata/ibc_reflect_1.0.wasm new file mode 100644 index 000000000..7eb9d9999 Binary files /dev/null and b/vm/testdata/ibc_reflect_1.0.wasm differ diff --git a/vm/testdata/ibc_reflect_1.2.wasm b/vm/testdata/ibc_reflect_1.2.wasm new file mode 100644 index 000000000..228bdfbff Binary files /dev/null and b/vm/testdata/ibc_reflect_1.2.wasm differ diff --git a/wazero/wasmruntime.go b/wazero/wasmruntime.go new file mode 100644 index 000000000..d67cde19d --- /dev/null +++ b/wazero/wasmruntime.go @@ -0,0 +1,44 @@ +// file: internal/runtime/wasm_runtime.go +package wazero + +import "github.com/CosmWasm/wasmvm/v2/types" + +type 
WasmRuntime interface { + // InitCache sets up any runtime-specific cache or resources. Returns a handle. + InitCache(config types.VMConfig) (any, error) + + // ReleaseCache frees resources created by InitCache. + ReleaseCache(handle any) + + // Compilation and code storage + StoreCode(code []byte, persist bool) (checksum []byte, err error) + StoreCodeUnchecked(code []byte) ([]byte, error) + GetCode(checksum []byte) ([]byte, error) + RemoveCode(checksum []byte) error + Pin(checksum []byte) error + Unpin(checksum []byte) error + AnalyzeCode(checksum []byte) (*types.AnalysisReport, error) + + // Execution lifecycles + Instantiate(checksum []byte, env []byte, info []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Execute(checksum []byte, env []byte, info []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Migrate(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + MigrateWithInfo(checksum []byte, env []byte, msg []byte, migrateInfo []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Sudo(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Reply(checksum []byte, env []byte, reply []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + Query(checksum []byte, env []byte, query []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + + // IBC entry points + IBCChannelOpen(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCChannelConnect(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCChannelClose(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketReceive(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketAck(checksum []byte, 
env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCPacketTimeout(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCSourceCallback(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + IBCDestinationCallback(checksum []byte, env []byte, msg []byte, otherParams ...interface{}) ([]byte, types.GasReport, error) + + // Metrics + GetMetrics() (*types.Metrics, error) + GetPinnedMetrics() (*types.PinnedMetrics, error) +}