diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..29f1c22
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,6 @@
+/target
+/Cargo.lock
+*.dot
+*.png
+.idea
+*.sdb
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..362368d
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,23 @@
+workspace = { members = ["example"] }
+
+[package]
+name = "spacedb"
+version = "0.1.0"
+edition = "2021"
+description = "A cryptographically verifiable data store and universal accumulator for the Spaces protocol."
+repository = "https://github.com/spacesprotocol/spacedb"
+license = "Apache-2.0"
+
+[dependencies]
+libc = { version = "0.2.150", optional = true }
+bincode = { version = "2.0.0-rc.3", default-features = false, features = ["alloc"] }
+hex = { version = "0.4.3", optional = true }
+
+[dependencies.sha2]
+git = "https://github.com/risc0/RustCrypto-hashes"
+tag = "sha2-v0.10.6-risczero.0"
+default-features = false
+
+[features]
+default = ["std"]
+std = ["libc", "hex", "bincode/derive", "bincode/std"]
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bd16047
--- /dev/null
+++ b/README.md
@@ -0,0 +1,138 @@
+# SpaceDB
+
+Note: this project is still under active development and should be considered experimental.
+
+SpaceDB is a cryptographically verifiable data store and universal accumulator for the [Spaces protocol](https://spacesprotocol.com). It's a Merkle-ized binary trie described in the [Merklix](https://blog.vermorel.com/pdf/merklix-tree-for-bitcoin-2018-07.pdf) paper and explained in detail [here](https://spacesprotocol.org/#binary-trie).
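+
+Conceptually, every node in the trie is either an internal node whose hash commits to its two children, or a leaf committing to a key/value pair. A minimal sketch of the idea (illustrative only, not SpaceDB's actual node layout or hashing scheme):
+
+```rust
+use sha2::{Digest, Sha256};
+
+// A Merklix-style binary trie node. Internal nodes hash their children's
+// hashes; leaves hash the key and value. The domain-separation tags are
+// an assumption to keep leaf and internal encodings from colliding.
+enum Node {
+    Internal { left: [u8; 32], right: [u8; 32] },
+    Leaf { key: [u8; 32], value: Vec<u8> },
+}
+
+impl Node {
+    fn hash(&self) -> [u8; 32] {
+        let mut h = Sha256::new();
+        match self {
+            Node::Internal { left, right } => {
+                h.update([0x01]);
+                h.update(left);
+                h.update(right);
+            }
+            Node::Leaf { key, value } => {
+                h.update([0x00]);
+                h.update(key);
+                h.update(value);
+            }
+        }
+        h.finalize().into()
+    }
+}
+```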
+
+
+## Features
+
+- Fast, portable, single-file database.
+- MVCC-based concurrency control with multi-reader/single-writer lock-free access.
+- Provides compact proofs of membership/non-membership for batches of elements through subtrees.
+- Subtrees act as cryptographic accumulators and can be updated independently.
+- `no_std` support, primarily for use within the RISC0 zkVM, where it leverages SHA256 acceleration.
+- The accumulator maintains a constant-size state: a single 32-byte tree root.
+
+
+
+## Usage
+
+```rust
+use spacedb::db::Database;
+
+let db = Database::open("example.sdb")?;
+
+// Insert some data
+let mut tx = db.begin_write()?;
+for i in 0..100 {
+    let key = format!("key{}", i);
+    let value = format!("value{}", i);
+    tx.insert(db.hash(key.as_bytes()), value.into_bytes())?;
+}
+tx.commit()?;
+
+let mut snapshot = db.begin_read()?;
+println!("Tree root: {}", hex::encode(snapshot.root()?));
+
+// Prove a subset of the keys
+let keys_to_prove: Vec<_> = (0..10)
+    .map(|i| format!("key{}", i))
+    // prove exclusion of some other keys
+    .chain((0..5).map(|i| format!("other{}", i)))
+    .map(|key| db.hash(key.as_bytes()))
+    .collect();
+
+// Reveal the relevant nodes needed to prove the specified set of keys
+let mut subtree = snapshot.prove_all(&keys_to_prove)?;
+
+// The subtree has the exact same root as the snapshot
+println!("Subtree root: {}", hex::encode(subtree.root().unwrap()));
+
+// Inclusion and exclusion proofs
+assert!(subtree.contains(&db.hash("key0".as_bytes())).unwrap());
+assert!(!subtree.contains(&db.hash("other0".as_bytes())).unwrap());
+
+// Proving exclusion of "other100" fails since we didn't reveal
+// the relevant branches needed to traverse its path in this subtree
+assert!(subtree.contains(&db.hash("other100".as_bytes())).is_err());
+```
+
+
+
+## Subtrees
+
+Subtrees can function as cryptographic accumulators, allowing clients to verify and update their state without keeping a database.
+
+```rust
+// Client maintains a 32-byte tree root
+let mut accumulator_root = snapshot.root()?;
+assert_eq!(accumulator_root, subtree.root().unwrap(), "Roots must match");
+
+// Update leaves
+for (_key, value) in subtree.iter_mut() {
+    *value = "new value".to_string().into_bytes();
+}
+
+// Insert a non-existent key (must be provably absent)
+let key = subtree.hash("other0".as_bytes());
+subtree.insert(key, "new value".to_string().into_bytes()).unwrap();
+
+// Update the accumulator root
+accumulator_root = subtree.root().unwrap();
+```
+
+## Using in RISC0 zkVM
+
+Subtrees work in `no_std` environments and take advantage of the SHA256 accelerator when running inside the RISC0 zkVM.
+
+```toml
+[dependencies]
+spacedb = { version = "0.1", default-features = false }
+```
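+
+Inside a guest, verification reduces to recomputing the subtree's root and comparing it against a trusted commitment. A minimal sketch of that check, assuming the host has already passed in an `expected_root`, a hashed `key` to verify, and a decoded `subtree` (how the subtree is serialized and transferred to the guest is up to the host and is not shown here); only `root()` and `contains()` from the examples above are used:
+
+```rust
+// Hypothetical guest-side check: `expected_root` is a 32-byte root the
+// verifier trusts (e.g. committed to the journal), `key` is a hashed key,
+// and `subtree` was decoded from host-provided bytes.
+let root = subtree.root().expect("subtree should compute a root");
+assert_eq!(root, expected_root, "proof does not match the trusted root");
+
+// Membership check; errors if the subtree doesn't reveal this key's path
+assert!(subtree.contains(&key).expect("key path must be revealed"));
+```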
+
+
+
+
+## Key Iteration
+
+Iterate over all keys in a given snapshot:
+
+```rust
+let db = Database::open("my.sdb")?;
+let snapshot = db.begin_read()?;
+
+for (key, value) in snapshot.iter().filter_map(Result::ok) {
+    // do something ...
+}
+```
+
+
+
+## Snapshot Iteration
+
+Iterate over all snapshots:
+
+```rust
+let db = Database::open("my.sdb")?;
+
+for mut snapshot in db.iter().filter_map(Result::ok) {
+    let root = snapshot.root()?;
+    println!("Snapshot Root: {}", hex::encode(root));
+}
+```
+
+## Prior Art
+
+Merkle-ized tries, including variations like Patricia tries and Merkle prefix trees, are foundational structures used in numerous projects and cryptocurrencies. Other libraries implementing some form of Merkle-ized binary trie include [liburkel](https://github.com/chjj/liburkel), from which this library initially drew some inspiration (SpaceDB is generally around 20% faster), and [multiproof](https://github.com/gballet/multiproof-rs/tree/master), but they either lack memory safety, lack core features such as the subtrees/accumulators needed for the Spaces protocol, or are unmaintained. Other popular cryptographically verifiable data stores include [Trillian](https://github.com/google/trillian), used for [Certificate Transparency](https://www.certificate-transparency.org/).
+
+
+## License
+
+This project is licensed under the [Apache 2.0 license](LICENSE).
\ No newline at end of file
diff --git a/example/Cargo.toml b/example/Cargo.toml
new file mode 100644
index 0000000..7a3d47e
--- /dev/null
+++ b/example/Cargo.toml
@@ -0,0 +1,11 @@
+[package]
+name = "example"
+version = "0.1.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+spacedb = { version = "*", path = ".." }
+hex = "0.4.3"
+
diff --git a/example/src/main.rs b/example/src/main.rs
new file mode 100644
index 0000000..d696f8a
--- /dev/null
+++ b/example/src/main.rs
@@ -0,0 +1,43 @@
+use spacedb::db::Database;
+
+fn main() -> Result<(), std::io::Error> {
+    let db = Database::memory()?;
+
+    // Insert some data
+    let mut tx = db.begin_write()?;
+    for i in 0..100 {
+        let key = format!("key{}", i);
+        let value = format!("value{}", i);
+        tx.insert(db.hash(key.as_bytes()), value.into_bytes())?;
+    }
+    tx.commit()?;
+
+    // Get the committed snapshot
+    let mut snapshot = db.begin_read()?;
+    println!("Tree root: {}", hex::encode(snapshot.root()?));
+
+    // Prove a subset of the keys
+    let keys_to_prove: Vec<_> = (0..10)
+        .map(|i| format!("key{}", i))
+        // prove exclusion of some other keys
+        .chain((0..5).map(|i| format!("other{}", i)))
+        .map(|key| db.hash(key.as_bytes()))
+        .collect();
+
+    // Reveal the relevant nodes needed to prove the specified set of keys
+    let subtree = snapshot.prove_all(&keys_to_prove)?;
+
+    // The subtree has the exact same root as the snapshot
+    println!("Subtree root: {}", hex::encode(subtree.root().unwrap()));
+
+    // Prove inclusion
+    assert!(subtree.contains(&db.hash("key0".as_bytes())).unwrap());
+
+    // Prove exclusion
+    assert!(!subtree.contains(&db.hash("other0".as_bytes())).unwrap());
+
+    // We don't have enough data to prove key "other100" is not in the subtree
+    // as the relevant branches needed to prove it are not included
+    assert!(subtree.contains(&db.hash("other100".as_bytes())).is_err());
+    Ok(())
+}
diff --git a/src/db.rs b/src/db.rs
new file mode 100644
index 0000000..dbc5e43
--- /dev/null
+++ b/src/db.rs
@@ -0,0 +1,304 @@
+use crate::{
+    fs::{FileBackend, StorageBackend},
+    node::NodeInner,
+    tx::{ReadTransaction, WriteTransaction},
+    Configuration, Hash, NodeHasher, Sha256Hasher,
+};
+use bincode::{config, error::DecodeError, Decode, Encode};
+use sha2::{Digest as _, Sha256};
+use std::{
+    fs::OpenOptions,
+    io,
+    sync::{Arc, Mutex},
+};
+
+const HEADER_MAGIC: [u8; 9] = [b's', b'p', b'a', b'c', b'e', b':', b'/', b'/', b'.'];
+pub(crate) const PAGE_SIZE: usize = 4096;
+
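+// On-disk header layout (64 bytes, fixed-int little-endian encoding):
+//   9-byte magic | 1-byte version | 24-byte savepoint (two Records of
+//   u64 offset + u32 size) | 26 bytes of zero padding | 4-byte SHA256
+//   checksum over the preceding 60 bytes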
+#[derive(Debug, Encode, Decode, PartialEq, Eq)]
+pub struct DatabaseHeader {
+    pub magic: [u8; 9],
+    pub version: u8,
+    pub savepoint: SavePoint,
+}
+
+pub struct Database<H: NodeHasher> {
+    pub(crate) header: Arc<Mutex<DatabaseHeader>>,
+    pub(crate) file: Box<dyn StorageBackend>,
+    pub config: Configuration<H>,
+}
+
+#[derive(Copy, Clone, Encode, Decode, Debug, Eq, PartialEq, Hash)]
+pub struct SavePoint {
+    pub root: Record,
+    pub previous_save_point: Record,
+}
+
+#[derive(Copy, Clone, Encode, Decode, Debug, Eq, PartialEq, Hash)]
+pub struct Record {
+    pub offset: u64,
+    pub size: u32,
+}
+
+pub const EMPTY_RECORD: Record = Record { offset: 0, size: 0 };
+
+impl DatabaseHeader {
+    pub fn new() -> Self {
+        Self {
+            magic: HEADER_MAGIC,
+            version: 0,
+            savepoint: SavePoint {
+                root: EMPTY_RECORD,
+                previous_save_point: EMPTY_RECORD,
+            },
+        }
+    }
+
+    pub(crate) fn to_bytes(&self) -> Vec<u8> {
+        let config = config::standard()
+            .with_fixed_int_encoding()
+            .with_little_endian();
+        let mut raw = bincode::encode_to_vec(self, config).unwrap();
+        // Pad the 34-byte encoding to 60 bytes, then append a 4-byte
+        // checksum for a 64-byte header
+        raw.extend_from_slice(&[0; 26]);
+        let mut hasher = Sha256::new();
+        hasher.update(&raw);
+        let checksum = hasher.finalize();
+        raw.extend_from_slice(&checksum[..4]);
+        raw
+    }
+
+    fn from_bytes(bytes: &[u8]) -> Result<Self, DecodeError> {
+        // Verify the checksum over the first 60 bytes
+        let mut hasher = Sha256::new();
+        hasher.update(&bytes[..60]);
+        let checksum = hasher.finalize();
+
+        if bytes[60..64] != checksum[..4] {
+            return Err(DecodeError::Other("Checksum mismatch"));
+        }
+
+        let config = config::standard()
+            .with_fixed_int_encoding()
+            .with_little_endian();
+        let (h, _) = bincode::decode_from_slice(bytes, config)?;
+
+        Ok(h)
+    }
+
+    pub(crate) fn len(&self) -> u64 {
+        if self.savepoint.is_empty() {
+            return (PAGE_SIZE * 2) as u64;
+        }
+
+        // Round the savepoint length up to the next page boundary
+        let save_point_len = self.savepoint.len();
+        (save_point_len + PAGE_SIZE as u64 - 1) / PAGE_SIZE as u64 * PAGE_SIZE as u64
+    }
+}
+
+impl Database<Sha256Hasher> {
+    pub fn open(path: &str) -> Result<Self, io::Error> {
+        let file = OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .open(path)?;
+        let config = Configuration::standard();
+        Self::new(Box::new(FileBackend::new(file)?), config)
+    }
+
+    pub fn memory() -> Result<Self, io::Error> {
+        let file = Box::new(crate::fs::MemoryBackend::new());
+        let config = Configuration::standard();
+        Self::new(file, config)
+    }
+}
+
+impl<H: NodeHasher> Database<H> {
+    pub fn new(file: Box<dyn StorageBackend>, config: Configuration<H>) -> Result<Self, io::Error> {
+        let header;
+        let mut has_header = false;
+
+        if file.len()? > 0 {
+            let result = Self::recover_header(&file)?;
+            header = result.0;
+            has_header = true;
+        } else {
+            header = DatabaseHeader::new();
+            let bytes = header.to_bytes();
+            file.set_len(bytes.len() as u64)?;
+            file.write(0, &bytes)?;
+            file.sync_data()?;
+        }
+
+        let db = Self {
+            header: Arc::new(Mutex::new(header)),
+            file,
+            config,
+        };
+
+        if !has_header {
+            db.write_header(&db.header.lock().unwrap())?;
+        }
+
+        Ok(db)
+    }
+
+    #[inline(always)]
+    pub fn hash(&self, data: &[u8]) -> Hash {
+        H::hash(data)
+    }
+
+    pub(crate) fn recover_header(
+        file: &Box<dyn StorageBackend>,
+    ) -> Result<(DatabaseHeader, bool), io::Error> {
+        // Attempt to read from slot 0
+        let bytes = file.read(0, 64)?;
+        if let Ok(header) = DatabaseHeader::from_bytes(&bytes) {
+            return Ok((header, false));
+        }
+
+        // Didn't work, try slot 1
+        let bytes = file.read(PAGE_SIZE as u64, 64)?;
+        let header = DatabaseHeader::from_bytes(&bytes)
+            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+
+        Ok((header, true))
+    }
+
+    pub(crate) fn write_header(&self, hdr: &DatabaseHeader) -> Result<(), io::Error> {
+        // The database reserves the first two pages for metadata:
+        // page 0 (slot 0) holds the header, and page 1 (slot 1)
+        // holds a backup of the header
+        if self.file.len()? < PAGE_SIZE as u64 * 2 {
+            self.file.set_len(PAGE_SIZE as u64 * 2)?;
+        }
+
+        let mut bytes = hdr.to_bytes();
+        assert_eq!(bytes.len(), 64);
+
+        bytes.extend_from_slice(&[0; PAGE_SIZE - 64]);
+
+        self.file.write(0, &bytes)?;
+        self.file.sync_data()?;
+
+        // Write the backup header; syncing slot 0 first ensures a crash
+        // can corrupt at most one of the two header slots
+        self.file.write(PAGE_SIZE as u64, &bytes)?;
+        self.file.sync_data()?;
+        Ok(())
+    }
+
+    fn read_save_point(&self, record: Record) -> Result<SavePoint, io::Error> {
+        let raw = self.file.read(record.offset, record.size as usize)?;
+        let (save_point, _) = bincode::decode_from_slice(&raw, config::standard())
+            .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
+        Ok(save_point)
+    }
+
+    pub fn begin_write(&self) -> Result<WriteTransaction<H>, io::Error> {
+        Ok(WriteTransaction::new(self))
+    }
+
+    pub fn begin_read(&self) -> Result<ReadTransaction<H>, io::Error> {
+        // Re-read the header so the snapshot reflects the latest commit
+        let result = Self::recover_header(&self.file)?;
+        Ok(ReadTransaction::new(self, result.0.savepoint))
+    }
+
+    pub(crate) fn load_node(&self, id: Record) -> Result<NodeInner, io::Error> {
+        let raw = self.file.read(id.offset, id.size as usize)?;
+        let config = config::standard();
+        let (inner, _): (NodeInner, usize) = bincode::decode_from_slice(&raw, config)
+            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))?;
+        Ok(inner)
+    }
+
+    pub fn iter(&self) -> SnapshotIterator<H> {
+        SnapshotIterator::new(self)
+    }
+}
+
+pub struct SnapshotIterator<'db, H: NodeHasher> {
+    current: Option<SavePoint>,
+    started: bool,
+    db: &'db Database<H>,
+}
+
+impl<'db, H: NodeHasher> SnapshotIterator<'db, H> {
+    pub fn new(db: &'db Database<H>) -> Self {
+        SnapshotIterator {
+            current: None,
+            started: false,
+            db,
+        }
+    }
+
+    fn prev(&mut self) -> Result