diff --git a/docs/radix-memory.md b/docs/radix-memory.md
index 92528668..1bdd193b 100644
--- a/docs/radix-memory.md
+++ b/docs/radix-memory.md
@@ -35,7 +35,7 @@ New benchmark suite is added, which measures the latency of the following operat
 - Memory read / write to random addresses
 - Memory read / write to contiguous address
-- Memory write to sparse memory addresse
+- Memory write to sparse memory addresses
 - Memory write to dense memory addresses
 - Merkle proof generation
 - Merkle root calculation
@@ -112,4 +112,4 @@ Usually, sparse region would utilize smaller branching factor for memory optimiz
 - use larger branching factors at the upper address level to reduce the trie traversal depth
 - use smaller branching factors at the lower address level to reduce computation for each node.
 
-In addition, we can apply pgo as mentioned above. To apply pgo to asterisc builds, we can run asterisc with cpu pprof enabled, and ship asterisc with `default.pgo` in the build path. This way, whenever the user builds Asterisc, pgo will be enabled by default, leading to addition 5+% improvement in speed.
\ No newline at end of file
+In addition, we can apply PGO as mentioned above. To apply PGO to Asterisc builds, we can run Asterisc with CPU pprof enabled, and ship Asterisc with `default.pgo` in the build path. This way, whenever the user builds Asterisc, PGO will be enabled by default, leading to an additional 5+% improvement in speed.
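As an aside on the PGO workflow that paragraph describes, here is a minimal, hypothetical sketch of the collection step (`workload` is a stand-in for a representative Asterisc run; since Go 1.21, `go build` automatically applies a `default.pgo` file found in the main package directory):

```go
package main

import (
	"os"
	"runtime/pprof"
)

// workload is a hypothetical stand-in for a representative Asterisc execution.
func workload() {
	sum := 0
	for i := 0; i < 100_000_000; i++ {
		sum += i
	}
	_ = sum
}

func main() {
	// Collect a CPU profile and write it as default.pgo; shipping this file in
	// the main package directory is what makes later `go build` runs enable
	// PGO by default (Go 1.21+).
	f, err := os.Create("default.pgo")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := pprof.StartCPUProfile(f); err != nil {
		panic(err)
	}
	defer pprof.StopCPUProfile()

	workload()
}
```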
diff --git a/docs/riscv.md b/docs/riscv.md
index 54740ac5..40bff5f0 100644
--- a/docs/riscv.md
+++ b/docs/riscv.md
@@ -3,7 +3,7 @@
 ## Helpful learning resources
 
 - rv32 instruction set cheat sheet: http://blog.translusion.com/images/posts/RISC-V-cheatsheet-RV32I-4-3.pdf
-- rv32: reference card: https://github.com/jameslzhu/riscv-card/blob/master/riscv-card.pdf
+- rv32 reference card: https://github.com/jameslzhu/riscv-card/releases/download/latest/riscv-card.pdf
 - online riscv32 interpreter: https://www.cs.cornell.edu/courses/cs3410/2019sp/riscv/interpreter/#
 - specs: https://riscv.org/technical/specifications/
 - Berkely riscv card: https://inst.eecs.berkeley.edu/~cs61c/fa18/img/riscvcard.pdf
diff --git a/rvgo/fast/instrumented.go b/rvgo/fast/instrumented.go
index 80187cc2..c2a4c9de 100644
--- a/rvgo/fast/instrumented.go
+++ b/rvgo/fast/instrumented.go
@@ -73,7 +73,7 @@ func (m *InstrumentedState) Step(proof bool) (wit *StepWitness, err error) {
 
 func (m *InstrumentedState) readPreimage(key [32]byte, offset uint64) (dat [32]byte, datLen uint64, err error) {
 	preimage := m.lastPreimage
-	if key != m.lastPreimageKey {
+	if preimage == nil || key != m.lastPreimageKey {
 		m.lastPreimageKey = key
 		data := m.preimageOracle.GetPreimage(key)
 		// add the length prefix
diff --git a/rvgo/fast/instrumented_test.go b/rvgo/fast/instrumented_test.go
new file mode 100644
index 00000000..636bd352
--- /dev/null
+++ b/rvgo/fast/instrumented_test.go
@@ -0,0 +1,43 @@
+package fast
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+type MockPreimageOracle struct {
+}
+
+func (oracle *MockPreimageOracle) Hint(v []byte) {
+}
+
+func (oracle *MockPreimageOracle) GetPreimage(k [32]byte) []byte {
+	return make([]byte, 32)
+}
+
+func (oracle *MockPreimageOracle) ReadPreimagePart(key [32]byte, offset uint64) ([32]byte, uint8, error) {
+	return [32]byte{}, 32, nil
+}
+
+func TestReadPreimage(t *testing.T) {
+
+	vmState := VMState{
+		PC:        0,
+		Memory:    NewMemory(),
+		Registers: [32]uint64{},
+		ExitCode:  0,
+		Exited:    false,
+		Heap:      0x7f_00_00_00_00_00,
+	}
+
+	// instruction ecall
+	vmState.Memory.SetUnaligned(0, []byte{0x73})
+	vmState.Registers[17] = 63
+	vmState.Registers[10] = 5
+
+	instState := NewInstrumentedState(&vmState, &MockPreimageOracle{}, nil, nil)
+
+	_, err := instState.Step(true)
+	require.NoError(t, err)
+}
diff --git a/rvgo/fast/memory.go b/rvgo/fast/memory.go
index e239fcdd..daad30d0 100644
--- a/rvgo/fast/memory.go
+++ b/rvgo/fast/memory.go
@@ -128,7 +128,7 @@ func (m *Memory) SetUnaligned(addr uint64, dat []byte) {
 		m.Invalidate(addr) // invalidate this branch of memory, now that the value changed
 	}
 
-	copy(p.Data[pageAddr:], dat)
+	copy(p.Data[pageAddr:], dat[d:])
 }
 
 func (m *Memory) GetUnaligned(addr uint64, dest []byte) {
@@ -140,7 +140,7 @@ func (m *Memory) GetUnaligned(addr uint64, dest []byte) {
 	p, ok := m.pageLookup(pageIndex)
 	var d int
 	if !ok {
-		l := pageSize - pageAddr
+		l := PageSize - pageAddr
 		if l > 32 {
 			l = 32
 		}
@@ -160,7 +160,7 @@ func (m *Memory) GetUnaligned(addr uint64, dest []byte) {
 	pageAddr = addr & PageAddrMask
 	p, ok = m.pageLookup(pageIndex)
 	if !ok {
-		l := pageSize - pageAddr
+		l := PageSize - pageAddr
 		if l > 32 {
 			l = 32
 		}
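The `dat[d:]` change above fixes a cross-page write bug: the first copy earlier in `SetUnaligned` (outside this hunk) already consumed `d` bytes, so passing `dat` again duplicated the leading bytes into the next page. A self-contained sketch of the corrected split copy, assuming the fast VM's 4096-byte pages:

```go
package main

import "fmt"

func main() {
	pageA := make([]byte, 4096) // stand-ins for two adjacent memory pages
	pageB := make([]byte, 4096)
	dat := []byte{0xaa, 0xbb, 0xcc, 0xdd}

	// A 4-byte write starting 2 bytes before the end of pageA spans both pages.
	d := copy(pageA[4094:], dat) // writes 0xaa 0xbb; d == 2
	copy(pageB, dat[d:])         // continues with 0xcc 0xdd; plain `dat` would restart at 0xaa

	fmt.Printf("%x %x\n", pageA[4094:], pageB[:2]) // aabb ccdd
}
```

The new cross-page test below asserts exactly this layout against the real `Memory` implementation.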
diff --git a/rvgo/fast/memory_test.go b/rvgo/fast/memory_test.go
index 8653a6fc..9829ab96 100644
--- a/rvgo/fast/memory_test.go
+++ b/rvgo/fast/memory_test.go
@@ -412,3 +412,12 @@ func TestMemoryBinary(t *testing.T) {
 	m.GetUnaligned(8, dest[:])
 	require.Equal(t, uint8(123), dest[0])
 }
+
+func TestMemorySetUnalignedCrossPage(t *testing.T) {
+	t.Run("SetUnaligned writes across a page boundary", func(t *testing.T) {
+		m := NewMemory()
+		m.SetUnaligned(0x0FFE, []byte{0xaa, 0xbb, 0xcc, 0xdd})
+		require.Equal(t, []byte{0xaa, 0xbb}, m.pages[0].Data[4094:])
+		require.Equal(t, []byte{0xcc, 0xdd}, m.pages[1].Data[0:2])
+	})
+}
diff --git a/rvgo/fast/state.go b/rvgo/fast/state.go
index 3f970e3f..7b6e29a1 100644
--- a/rvgo/fast/state.go
+++ b/rvgo/fast/state.go
@@ -13,16 +13,6 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 )
 
-// page size must be at least 32 bytes (one merkle node)
-// memory merkleization will look the same regardless of page size past 32.
-const (
-	pageAddrSize = 10
-	pageKeySize  = 64 - pageAddrSize
-	pageSize     = 1 << pageAddrSize
-	pageAddrMask = pageSize - 1
-	maxPageCount = 1 << pageKeySize
-)
-
 type VMState struct {
 	Memory *Memory `json:"memory"`
 
@@ -111,6 +101,9 @@ func (state *VMState) Instr() uint32 {
 
 type StateWitness []byte
 
+const STATE_WITNESS_SIZE = 362 // STATE_WITNESS_SIZE is the size of the state witness encoding in bytes.
+const EXITCODE_WITNESS_OFFSET = 32 + 32 + 8 + 8 // mem-root, preimage-key, preimage-offset, PC
+
 const (
 	VMStatusValid      = 0
 	VMStatusInvalid    = 1
@@ -119,14 +112,13 @@ const (
 )
 
 func (sw StateWitness) StateHash() (common.Hash, error) {
-	offset := 32 + 32 + 8 + 8 // mem-root, preimage-key, preimage-offset, PC
-	if len(sw) <= offset+1 {
-		return common.Hash{}, fmt.Errorf("state must at least be %d bytes, but got %d", offset, len(sw))
+	if len(sw) != STATE_WITNESS_SIZE {
+		return common.Hash{}, fmt.Errorf("invalid witness length: got %d, expected %d", len(sw), STATE_WITNESS_SIZE)
 	}
 	hash := crypto.Keccak256Hash(sw)
-	exitCode := sw[offset]
-	exited := sw[offset+1]
+	exitCode := sw[EXITCODE_WITNESS_OFFSET]
+	exited := sw[EXITCODE_WITNESS_OFFSET+1]
 	status := vmStatus(exited == 1, exitCode)
 	hash[0] = status
 	return hash, nil
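The two new constants follow directly from the packed witness layout, which the `stateOffset*` helpers in `rvsol/src/RISCV.sol` further down in this diff spell out byte by byte; the arithmetic checks out as:

```go
package main

import "fmt"

func main() {
	// Field widths of the packed state witness, in bytes.
	memRoot, preimageKey, preimageOffset, pc := 32, 32, 8, 8
	exitCode, exited, step, heap, loadReservation := 1, 1, 8, 8, 8
	registers := 32 * 8 // 32 registers, 8 bytes each

	fmt.Println(memRoot + preimageKey + preimageOffset + pc)                       // 80 == EXITCODE_WITNESS_OFFSET
	fmt.Println(80 + exitCode + exited + step + heap + loadReservation + registers) // 362 == STATE_WITNESS_SIZE
}
```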
diff --git a/rvgo/fast/vm.go b/rvgo/fast/vm.go
index 3e1755d6..9036f03b 100644
--- a/rvgo/fast/vm.go
+++ b/rvgo/fast/vm.go
@@ -587,6 +587,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 	switch opcode {
 	case 0x03: // 000_0011: memory loading
 		// LB, LH, LW, LD, LBU, LHU, LWU
+
+		// bits[14:12] set to 111 are reserved
+		if eq64(funct3, toU64(0x7)) != 0 {
+			revertWithCode(riscv.ErrInvalidSyscall, fmt.Errorf("illegal instruction %d: reserved instruction encoding", instr))
+		}
+
 		imm := parseImmTypeI(instr)
 		signed := iszero64(and64(funct3, toU64(4)))      // 4 = 100 -> bitflag
 		size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
@@ -631,6 +637,12 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 			// So it's really 13 bits with a hardcoded 0 bit.
 			pc = add64(pc, imm)
 		}
+
+		// The PC must be aligned to 4 bytes.
+		if pc&3 != 0 {
+			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", pc))
+		}
+
 		// not like the other opcodes: nothing to write to rd register, and PC has already changed
 		setPC(pc)
 	case 0x13: // 001_0011: immediate arithmetic and logic
@@ -760,7 +772,7 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 		setPC(add64(pc, toU64(4)))
 	case 0x3B: // 011_1011: register arithmetic and logic in 32 bits
 		rs1Value := getRegister(rs1)
-		rs2Value := getRegister(rs2)
+		rs2Value := and64(getRegister(rs2), u32Mask())
 		var rdValue U64
 		switch funct7 {
 		case 1: // RV M extension
@@ -833,13 +845,23 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 		imm := parseImmTypeJ(instr)
 		rdValue := add64(pc, toU64(4))
 		setRegister(rd, rdValue)
-		setPC(add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))) // signed offset in multiples of 2 bytes (last bit is there, but ignored)
+
+		newPC := add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))
+		if newPC&3 != 0 { // quick target alignment check
+			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
+		}
+		setPC(newPC) // signed offset in multiples of 2 bytes (last bit is there, but ignored)
 	case 0x67: // 110_0111: JALR = Jump and link register
 		rs1Value := getRegister(rs1)
 		imm := parseImmTypeI(instr)
 		rdValue := add64(pc, toU64(4))
 		setRegister(rd, rdValue)
-		setPC(and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))) // least significant bit is set to 0
+
+		newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))
+		if newPC&3 != 0 { // quick addr alignment check
+			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
+		}
+		setPC(newPC) // least significant bit is set to 0
 	case 0x73: // 111_0011: environment things
 		switch funct3 {
 		case 0: // 000 = ECALL/EBREAK
@@ -867,11 +889,11 @@ func (inst *InstrumentedState) riscvStep() (outErr error) {
 		// 0b010 == RV32A W variants
 		// 0b011 == RV64A D variants
 		size := shl64(funct3, toU64(1))
-		if lt64(size, toU64(4)) != 0 {
+		if lt64(size, toU64(4)) != 0 || gt64(size, toU64(8)) != 0 {
 			revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
 		}
 		addr := getRegister(rs1)
-		if addr&3 != 0 { // quick addr alignment check
+		if mod64(addr, size) != 0 { // quick addr alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr))
 		}
 
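The JAL/JALR changes above compute the target first so it can be validated before `setPC`; JALR additionally clears the least significant bit as the ISA requires. A plain-Go sketch of the JALR path with made-up operand values (the 4-byte requirement presumably reflects that the VM does not implement the compressed extension, i.e. IALIGN=32):

```go
package main

import "fmt"

func main() {
	rs1Value := uint64(0x1000)
	imm := uint64(0x7) // made-up, already sign-extended immediate

	newPC := (rs1Value + imm) &^ 1 // JALR: least significant bit forced to 0
	fmt.Printf("newPC=%#x\n", newPC)
	fmt.Println(newPC&3 != 0) // true -> would revert with riscv.ErrNotAlignedAddr
}
```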
diff --git a/rvgo/slow/vm.go b/rvgo/slow/vm.go
index b5ec8df2..66fc81e4 100644
--- a/rvgo/slow/vm.go
+++ b/rvgo/slow/vm.go
@@ -1,6 +1,7 @@
 package slow
 
 import (
+	"bytes"
 	"encoding/binary"
 	"fmt"
 
@@ -121,6 +122,12 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		return
 	}
 
+	// First 4 bytes of keccak256("step(bytes,bytes,bytes32)")
+	expectedSelector := []byte{0xe1, 0x4c, 0xed, 0x32}
+	if len(calldata) < 4 || !bytes.Equal(calldata[:4], expectedSelector) {
+		panic("invalid function selector")
+	}
+
 	stateContentOffset := uint16(4 + 32 + 32 + 32 + 32)
 	if iszero(eq(add(b32asBEWord(calldataload(toU64(4))), shortToU256(32+4)), shortToU256(stateContentOffset))) {
 		// _stateData.offset = _stateData.pointer + 32 + 4
@@ -464,6 +471,10 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		setMemoryB32(rightAddr, beWordAsB32(right), proofIndexR)
 	}
 	storeMem := func(addr U64, size U64, value U64, proofIndexL uint8, proofIndexR uint8) {
+		if size.val() > 8 {
+			revertWithCode(riscv.ErrStoreExceeds8Bytes, fmt.Errorf("cannot store more than 8 bytes: %d", size))
+		}
+
 		storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR)
 	}
 
@@ -771,6 +782,12 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 	switch opcode.val() {
 	case 0x03: // 000_0011: memory loading
 		// LB, LH, LW, LD, LBU, LHU, LWU
+
+		// bits[14:12] set to 111 are reserved
+		if eq64(funct3, toU64(0x7)) != (U64{}) {
+			revertWithCode(riscv.ErrInvalidSyscall, fmt.Errorf("illegal instruction %d: reserved instruction encoding", instr))
+		}
+
 		imm := parseImmTypeI(instr)
 		signed := iszero64(and64(funct3, toU64(4)))      // 4 = 100 -> bitflag
 		size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
@@ -815,6 +832,12 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 			// So it's really 13 bits with a hardcoded 0 bit.
 			pc = add64(pc, imm)
 		}
+
+		// The PC must be aligned to 4 bytes.
+		if and64(pc, toU64(3)) != (U64{}) {
+			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", pc))
+		}
+
 		// not like the other opcodes: nothing to write to rd register, and PC has already changed
 		setPC(pc)
 	case 0x13: // 001_0011: immediate arithmetic and logic
@@ -944,7 +967,7 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		setPC(add64(pc, toU64(4)))
 	case 0x3B: // 011_1011: register arithmetic and logic in 32 bits
 		rs1Value := getRegister(rs1)
-		rs2Value := getRegister(rs2)
+		rs2Value := and64(getRegister(rs2), u32Mask())
 		var rdValue U64
 		switch funct7.val() {
 		case 1: // RV M extension
@@ -1017,13 +1040,23 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		imm := parseImmTypeJ(instr)
 		rdValue := add64(pc, toU64(4))
 		setRegister(rd, rdValue)
-		setPC(add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))) // signed offset in multiples of 2 bytes (last bit is there, but ignored)
+
+		newPC := add64(pc, signExtend64(shl64(toU64(1), imm), toU64(20)))
+		if and64(newPC, toU64(3)) != (U64{}) { // quick target alignment check
+			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
+		}
+		setPC(newPC) // signed offset in multiples of 2 bytes (last bit is there, but ignored)
 	case 0x67: // 110_0111: JALR = Jump and link register
 		rs1Value := getRegister(rs1)
 		imm := parseImmTypeI(instr)
 		rdValue := add64(pc, toU64(4))
 		setRegister(rd, rdValue)
-		setPC(and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))) // least significant bit is set to 0
+
+		newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))
+		if and64(newPC, toU64(3)) != (U64{}) { // quick target alignment check
+			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("pc %d not aligned with 4 bytes", newPC))
+		}
+		setPC(newPC) // least significant bit is set to 0
 	case 0x73: // 111_0011: environment things
 		switch funct3.val() {
 		case 0: // 000 = ECALL/EBREAK
@@ -1051,11 +1084,11 @@ func Step(calldata []byte, po PreimageOracle) (stateHash common.Hash, outErr err
 		// 0b010 == RV32A W variants
 		// 0b011 == RV64A D variants
 		size := shl64(funct3, toU64(1))
-		if lt64(size, toU64(4)) != (U64{}) {
+		if or64(lt64(size, toU64(4)), gt64(size, toU64(8))) != (U64{}) {
 			revertWithCode(riscv.ErrBadAMOSize, fmt.Errorf("bad AMO size: %d", size))
 		}
 		addr := getRegister(rs1)
-		if and64(addr, toU64(3)) != (U64{}) { // quick addr alignment check
+		if mod64(addr, size) != (U64{}) { // quick addr alignment check
 			revertWithCode(riscv.ErrNotAlignedAddr, fmt.Errorf("addr %d not aligned with 4 bytes", addr))
 		}
 
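The hard-coded `expectedSelector` bytes above can be re-derived with the go-ethereum crypto package this repo already imports:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// step(bytes,bytes,bytes32) is the IBigStepper entrypoint encoded in the calldata.
	sel := crypto.Keccak256([]byte("step(bytes,bytes,bytes32)"))[:4]
	fmt.Printf("%x\n", sel) // e14ced32, matching {0xe1, 0x4c, 0xed, 0x32}
}
```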
diff --git a/rvsol/README.md b/rvsol/README.md
index 24c8abc9..f6cf70a8 100644
--- a/rvsol/README.md
+++ b/rvsol/README.md
@@ -40,7 +40,7 @@ forge test -vvv --ffi
 - There are few issues with Foundry.
   - Run script directly without manual build does not work with the current version of Foundry (2024-03-15 `3fa0270`).
     You **must run** `make build` **before** running the deploy script. ([issue](https://github.com/foundry-rs/foundry/issues/6572))
-  - Some older version(2024-02-01 `2f4b5db`) of Foundry makes a dependency error reproted above issue.
+  - Some older versions (2024-02-01 `2f4b5db`) of Foundry hit the dependency error reported in the above issue.
     Use the **latest version** of Foundry!
 - The deploy script can be run only once on the devnet because of the `create2` salt.
-  To rerun the script for dev purpose, you must restart the devnet with `make devnet-clean && make devnet-up` command on the monorepo.
\ No newline at end of file
+  To rerun the script for dev purposes, you must restart the devnet with `make devnet-clean && make devnet-up` on the monorepo.
diff --git a/rvsol/src/RISCV.sol b/rvsol/src/RISCV.sol
index 1bbb681e..5fb7f096 100644
--- a/rvsol/src/RISCV.sol
+++ b/rvsol/src/RISCV.sol
@@ -285,34 +285,44 @@ contract RISCV is IBigStepper {
                 out := 0
             }
             function stateOffsetPreimageKey() -> out {
-                out := add(stateOffsetMemRoot(), stateSizeMemRoot())
+                out := 32 // 0 + 32
+                // out := add(stateOffsetMemRoot(), stateSizeMemRoot())
             }
             function stateOffsetPreimageOffset() -> out {
-                out := add(stateOffsetPreimageKey(), stateSizePreimageKey())
+                out := 64 // 32 + 32
+                // out := add(stateOffsetPreimageKey(), stateSizePreimageKey())
             }
             function stateOffsetPC() -> out {
-                out := add(stateOffsetPreimageOffset(), stateSizePreimageOffset())
+                out := 72 // 64 + 8
+                // out := add(stateOffsetPreimageOffset(), stateSizePreimageOffset())
             }
             function stateOffsetExitCode() -> out {
-                out := add(stateOffsetPC(), stateSizePC())
+                out := 80 // 72 + 8
+                // out := add(stateOffsetPC(), stateSizePC())
             }
             function stateOffsetExited() -> out {
-                out := add(stateOffsetExitCode(), stateSizeExitCode())
+                out := 81 // 80 + 1
+                // out := add(stateOffsetExitCode(), stateSizeExitCode())
             }
             function stateOffsetStep() -> out {
-                out := add(stateOffsetExited(), stateSizeExited())
+                out := 82 // 81 + 1
+                // out := add(stateOffsetExited(), stateSizeExited())
             }
             function stateOffsetHeap() -> out {
-                out := add(stateOffsetStep(), stateSizeStep())
+                out := 90 // 82 + 8
+                // out := add(stateOffsetStep(), stateSizeStep())
             }
             function stateOffsetLoadReservation() -> out {
-                out := add(stateOffsetHeap(), stateSizeHeap())
+                out := 98 // 90 + 8
+                // out := add(stateOffsetHeap(), stateSizeHeap())
             }
             function stateOffsetRegisters() -> out {
-                out := add(stateOffsetLoadReservation(), stateSizeLoadReservation())
+                out := 106 // 98 + 8
+                // out := add(stateOffsetLoadReservation(), stateSizeLoadReservation())
             }
             function stateSize() -> out {
-                out := add(stateOffsetRegisters(), stateSizeRegisters())
+                out := 362 // 106 + 256
+                // out := add(stateOffsetRegisters(), stateSizeRegisters())
             }
 
             //
@@ -738,6 +748,8 @@ contract RISCV is IBigStepper {
             }
 
             function storeMem(addr, size, value, proofIndexL, proofIndexR) {
+                if gt(size, 8) { revertWithCode(0xbad512e8) } // cannot store more than 8 bytes
+
                 storeMemUnaligned(addr, size, u64ToU256(value), proofIndexL, proofIndexR)
             }
 
@@ -888,7 +900,7 @@ contract RISCV is IBigStepper {
                 let errCode := 0
 
                 // ensure MAP_ANONYMOUS is set and fd == -1
-                switch or(iszero(and(flags, 0x20)), not(eq(fd, u64Mask())))
+                switch or(iszero(and(flags, 0x20)), iszero(eq(fd, u64Mask())))
                 case 1 {
                     addr := u64Mask()
                     errCode := toU64(0x4d)
@@ -1147,6 +1159,10 @@ contract RISCV is IBigStepper {
                 let pc_ := _pc
                 // 000_0011: memory loading
                 // LB, LH, LW, LD, LBU, LHU, LWU
+
+                // bits[14:12] set to 111 are reserved
+                if eq64(funct3, toU64(0x7)) { revertWithCode(0xf001ca11) }
+
                 let imm := parseImmTypeI(instr)
                 let signed := iszero64(and64(funct3, toU64(4))) // 4 = 100 -> bitflag
                 let size := shl64(and64(funct3, toU64(3)), toU64(1)) // 3 = 11 -> 1, 2, 4, 8 bytes size
@@ -1206,6 +1222,10 @@ contract RISCV is IBigStepper {
                     // So it's really 13 bits with a hardcoded 0 bit.
                     _pc := add64(_pc, imm)
                 }
+
+                // The PC must be aligned to 4 bytes.
+                if and64(_pc, toU64(3)) { revertWithCode(0xbad10ad0) } // target not aligned with 4 bytes
+
                 // not like the other opcodes: nothing to write to rd register, and PC has already changed
                 setPC(_pc)
             }
@@ -1399,7 +1419,7 @@ contract RISCV is IBigStepper {
             case 0x3B {
                 // 011_1011: register arithmetic and logic in 32 bits
                 let rs1Value := getRegister(rs1)
-                let rs2Value := getRegister(rs2)
+                let rs2Value := and64(getRegister(rs2), u32Mask())
                 let rdValue := 0
                 switch funct7
                 case 1 {
@@ -1496,7 +1516,13 @@ contract RISCV is IBigStepper {
                 let imm := parseImmTypeJ(instr)
                 let rdValue := add64(_pc, toU64(4))
                 setRegister(rd, rdValue)
-                setPC(add64(_pc, signExtend64(shl64(toU64(1), imm), toU64(20)))) // signed offset in multiples of 2
+
+                let newPC := add64(_pc, signExtend64(shl64(toU64(1), imm), toU64(20)))
+                if and64(newPC, toU64(3)) {
+                    // quick target alignment check
+                    revertWithCode(0xbad10ad0) // target not aligned with 4 bytes
+                }
+                setPC(newPC) // signed offset in multiples of 2
                 // bytes (last bit is there, but ignored)
             }
             case 0x67 {
@@ -1505,8 +1531,13 @@ contract RISCV is IBigStepper {
                 let imm := parseImmTypeI(instr)
                 let rdValue := add64(_pc, toU64(4))
                 setRegister(rd, rdValue)
-                setPC(and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))) // least
-                // significant bit is set to 0
+
+                let newPC := and64(add64(rs1Value, signExtend64(imm, toU64(11))), xor64(u64Mask(), toU64(1)))
+                if and64(newPC, toU64(3)) {
+                    // quick target alignment check
+                    revertWithCode(0xbad10ad0) // target not aligned with 4 bytes
+                }
+                setPC(newPC) // least significant bit is set to 0
             }
             case 0x73 {
                 // 111_0011: environment things
@@ -1548,7 +1579,7 @@ contract RISCV is IBigStepper {
                 if or(lt64(size, toU64(4)), gt64(size, toU64(8))) { revertWithCode(0xbada70) } // bad AMO size
 
                 let addr := getRegister(rs1)
-                if and64(addr, toU64(3)) {
+                if mod64(addr, size) {
                     // quick addr alignment check
                     revertWithCode(0xbad10ad0) // addr not aligned with 4 bytes
                 }
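All three implementations now enforce natural alignment for atomics: `size = 1 << funct3`, so W variants (funct3=2) need 4-byte and D variants (funct3=3) need 8-byte aligned addresses. That is why the D-variant test addresses below change from `0x...4` to `0x...0`; a quick check:

```go
package main

import "fmt"

func main() {
	for _, funct3 := range []uint64{2, 3} { // RV32A W variants, RV64A D variants
		size := uint64(1) << funct3 // 4 or 8 bytes
		for _, addr := range []uint64{0xeae426a36ff2bb64, 0xeae426a36ff2bb60} {
			// mod64(addr, size) != 0 is the condition that now reverts.
			fmt.Printf("size=%d addr=%#x aligned=%v\n", size, addr, addr%size == 0)
		}
	}
}
```

`0x...64` passes the old fixed 4-byte check but fails natural 8-byte alignment, so the 64-bit AMO tests had to move to `0x...60`.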
diff --git a/rvsol/test/RISCV.t.sol b/rvsol/test/RISCV.t.sol
index 79849ff4..644d62d8 100644
--- a/rvsol/test/RISCV.t.sol
+++ b/rvsol/test/RISCV.t.sol
@@ -607,6 +607,25 @@ contract RISCV_Test is CommonTest {
         assertEq(postState, outputState(expect), "unexpected post state");
     }
 
+    function test_remw_by_zero_succeeds() public {
+        uint32 insn = encodeRType(0x3b, 27, 6, 22, 21, 1); // remw x27, x22, x21
+        (State memory state, bytes memory proof) = constructRISCVState(0, insn);
+        state.registers[22] = 0x100f00000; // bits > 32 should be ignored
+        state.registers[21] = 0x200000000; // bits > 32 should be ignored, resulting in division by zero
+        bytes memory encodedState = encodeState(state);
+
+        State memory expect;
+        expect.memRoot = state.memRoot;
+        expect.pc = state.pc + 4;
+        expect.step = state.step + 1;
+        expect.registers[27] = 0x00f00000; // should return original dividend (least 32 bits)
+        expect.registers[22] = state.registers[22];
+        expect.registers[21] = state.registers[21];
+
+        bytes32 postState = riscv.step(encodedState, proof, 0);
+        assertEq(postState, outputState(expect), "unexpected post state");
+    }
+
     function test_remuw_succeeds() public {
         uint32 insn = encodeRType(0x3b, 30, 7, 27, 9, 1); // remuw x30, x27, x9
         (State memory state, bytes memory proof) = constructRISCVState(0, insn);
@@ -989,7 +1008,7 @@ contract RISCV_Test is CommonTest {
 
     function test_lrd_succeeds() public {
         bytes32 value = hex"a0b1df92a49eec39";
-        uint64 addr = 0xb86a394544c084ec;
+        uint64 addr = 0xb86a394544c084e0;
         uint8 funct3 = 0x3;
         uint8 funct7 = encodeFunct7(0x2, 0x0, 0x0);
         uint8 size = uint8(1 << (funct3 & 0x3));
@@ -1069,7 +1088,7 @@ contract RISCV_Test is CommonTest {
     }
 
     function test_amoaddd_succeeds() public {
-        uint64 addr = 0xeae426a36ff2bb64;
+        uint64 addr = 0xeae426a36ff2bb60;
         uint32 insn;
         uint8 size;
         {
@@ -1101,7 +1120,7 @@ contract RISCV_Test is CommonTest {
     }
 
     function test_amoxord_succeeds() public {
-        uint64 addr = 0x2d5ba68f57f1c564;
+        uint64 addr = 0x2d5ba68f57f1c560;
         uint32 insn;
         uint8 size;
         {
@@ -1164,7 +1183,7 @@ contract RISCV_Test is CommonTest {
     }
 
     function test_amoord_succeeds() public {
-        uint64 addr = 0xa0d7a5ea65b35664;
+        uint64 addr = 0xa0d7a5ea65b35660;
         uint32 insn;
         uint8 size;
         {
@@ -1260,7 +1279,7 @@ contract RISCV_Test is CommonTest {
     }
 
     function test_amominud_succeeds() public {
-        uint64 addr = 0xe094be571f4baca4;
+        uint64 addr = 0xe094be571f4baca0;
         uint32 insn;
         uint8 size;
         {
@@ -2034,7 +2053,7 @@ contract RISCV_Test is CommonTest {
     function test_beq_succeeds() public {
         uint16 imm = 0x19cd;
         uint32 insn = encodeBType(0x63, 0, 23, 20, imm); // beq x23, x20, offset
-        (State memory state, bytes memory proof) = constructRISCVState(0x139a, insn);
+        (State memory state, bytes memory proof) = constructRISCVState(0x139c, insn);
         state.registers[23] = 0x2152;
         state.registers[20] = 0x2152;
         bytes memory encodedState = encodeState(state);
@@ -2060,7 +2079,7 @@ contract RISCV_Test is CommonTest {
     }
 
     function test_bne_succeeds() public {
-        uint16 imm = 0x1d7e;
+        uint16 imm = 0x1d7c;
         uint32 insn = encodeBType(0x63, 1, 20, 26, imm); // bne x20, x26, offset
         (State memory state, bytes memory proof) = constructRISCVState(0x1afc, insn);
         state.registers[20] = 0x14b6;
@@ -2144,9 +2163,9 @@ contract RISCV_Test is CommonTest {
     }
 
     function test_bltu_succeeds() public {
-        uint16 imm = 0x171d;
+        uint16 imm = 0x171c;
         uint32 insn = encodeBType(0x63, 6, 13, 22, imm); // bltu x13, x22, offset
-        (State memory state, bytes memory proof) = constructRISCVState(0x2e3a, insn);
+        (State memory state, bytes memory proof) = constructRISCVState(0x2e3c, insn);
         state.registers[13] = 0xa0cc;
         state.registers[22] = 0xffffffff_ffff795c;
         bytes memory encodedState = encodeState(state);
@@ -2174,7 +2193,7 @@ contract RISCV_Test is CommonTest {
     function test_bgeu_succeeds() public {
         uint16 imm = 0x14b5;
         uint32 insn = encodeBType(0x63, 7, 7, 16, imm); // bgeu x7, x16, offset
-        (State memory state, bytes memory proof) = constructRISCVState(0x296a, insn);
+        (State memory state, bytes memory proof) = constructRISCVState(0x296c, insn);
         state.registers[7] = 0xffffffff_ffff35e5;
         state.registers[16] = 0x7c3c;
         bytes memory encodedState = encodeState(state);
@@ -2246,7 +2265,7 @@ contract RISCV_Test is CommonTest {
 
     /* J Type instructions */
     function test_jal_succeeds() public {
-        uint32 imm = 0xbef054ae;
+        uint32 imm = 0xbef054ac;
         uint32 insn = encodeJType(0x6f, 5, imm); // jal x5, imm
         (State memory state, bytes memory proof) = constructRISCVState(0, insn);
         bytes memory encodedState = encodeState(state);
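The pc/imm constants in the branch and jump tests above moved because of the new target-alignment revert. Assuming the `encodeBType`/`encodeJType` helpers take a byte offset whose lowest bit is simply dropped (which is how the changed values behave), the `test_beq_succeeds` numbers work out like this:

```go
package main

import "fmt"

func main() {
	imm := uint64(0x19cd) // immediate from test_beq_succeeds; bit 0 cannot be encoded
	off := imm &^ 1       // branch offsets are multiples of 2 bytes

	for _, pc := range []uint64{0x139a, 0x139c} { // old vs. fixed starting pc
		// Sign extension only adds a multiple of 4, so it cannot change target % 4.
		target := pc + off
		fmt.Printf("pc=%#x target=%#x aligned=%v\n", pc, target, target%4 == 0)
	}
}
```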
@@ -2440,7 +2459,7 @@ contract RISCV_Test is CommonTest {
     }
 
     function test_unknown_atomic_operation() public {
-        uint64 addr = 0xeae426a36ff2bb64;
+        uint64 addr = 0xeae426a36ff2bb68;
         uint32 insn;
         uint8 size;
         {
@@ -2460,6 +2479,30 @@ contract RISCV_Test is CommonTest {
         riscv.step(encodedState, proof, 0);
     }
 
+    function test_reserved_load_instruction() public {
+        bytes32 value = hex"61fb11d66dcc9d48";
+        uint16 offset = 0x6bf;
+        uint64 addr = 0xd34d + offset;
+        uint32 insn = encodeIType(0x3, 21, 0x7, 4, offset); // load x21, offset(x4) with reserved funct3 0x7
+        (State memory state, bytes memory proof) = constructRISCVState(0, insn, addr, value);
+        state.registers[4] = 0xd34d;
+        bytes memory encodedState = encodeState(state);
+
+        vm.expectRevert(hex"00000000000000000000000000000000000000000000000000000000f001ca11");
+        riscv.step(encodedState, proof, 0);
+    }
+
+    function test_revert_unaligned_jal_instruction() public {
+        // 0xbef054ae % 4 != 0
+        uint32 imm = 0xbef054ae;
+        uint32 insn = encodeJType(0x6f, 5, imm); // jal x5, imm
+        (State memory state, bytes memory proof) = constructRISCVState(0, insn);
+        bytes memory encodedState = encodeState(state);
+
+        vm.expectRevert(hex"00000000000000000000000000000000000000000000000000000000bad10ad0");
+        riscv.step(encodedState, proof, 0);
+    }
+
     /* Helper methods */
 
     function encodeState(State memory state) internal pure returns (bytes memory) {