From 4d6385b4646ef9f6ac2606667c2567267c955366 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Wed, 29 May 2024 14:16:24 -0700 Subject: [PATCH 01/19] Wasmtime: Implement the custom-page-sizes proposal This commit adds support for the custom-page-sizes proposal to Wasmtime: https://github.com/WebAssembly/custom-page-sizes I've migrated, fixed some bugs within, and extended the `*.wast` tests for this proposal from the `wasm-tools` repository. I intend to upstream them into the proposal shortly. There is a new `wasmtime::Config::wasm_custom_page_sizes_proposal` method to enable or disable the proposal. It is disabled by default. Our fuzzing config has been updated to turn this feature on/off as dictated by the arbitrary input given to us from the fuzzer. Additionally, there were getting to be so many constructors for `wasmtime::MemoryType` that I added a builder rather than add yet another constructor. In general, we store the `log2(page_size)` rather than the page size directly. This helps cut down on invalid states and properties we need to assert. I've also intentionally written this code such that supporting any power of two page size (rather than just the exact values `1` and `65536` that are currently valid) will essentially just involve updating `wasmparser`'s validation and removing some debug asserts in Wasmtime. 
--- Cargo.lock | 2 + cranelift/codegen/src/isa/aarch64/mod.rs | 17 ++ cranelift/codegen/src/isa/mod.rs | 13 + cranelift/codegen/src/isa/riscv64/mod.rs | 5 + cranelift/codegen/src/isa/s390x/mod.rs | 5 + cranelift/codegen/src/isa/x64/mod.rs | 5 + cranelift/frontend/src/frontend.rs | 1 + .../wasm/src/code_translator/bounds_checks.rs | 29 +- cranelift/wasm/src/heap.rs | 3 + cranelift/wasm/src/sections_translator.rs | 20 +- crates/cranelift/src/func_environ.rs | 52 ++-- crates/environ/src/compile/module_environ.rs | 11 +- crates/environ/src/lib.rs | 89 +----- crates/environ/src/module.rs | 75 ++--- crates/environ/src/vmoffsets.rs | 2 +- crates/fuzzing/src/generators/config.rs | 1 + crates/fuzzing/src/generators/memory.rs | 34 ++- crates/fuzzing/src/oracles/memory.rs | 7 +- crates/types/Cargo.toml | 1 + crates/types/src/lib.rs | 62 +++-- crates/types/src/prelude.rs | 86 ++++++ crates/wasmtime/Cargo.toml | 1 + crates/wasmtime/src/config.rs | 31 ++- crates/wasmtime/src/engine/serialization.rs | 9 +- .../runtime/component/bindgen_examples/mod.rs | 3 +- crates/wasmtime/src/runtime/memory.rs | 142 ++++++++-- .../wasmtime/src/runtime/trampoline/memory.rs | 13 +- crates/wasmtime/src/runtime/types.rs | 262 +++++++++++++++--- crates/wasmtime/src/runtime/types/matching.rs | 8 + crates/wasmtime/src/runtime/vm.rs | 24 +- crates/wasmtime/src/runtime/vm/cow.rs | 30 +- crates/wasmtime/src/runtime/vm/instance.rs | 5 + .../src/runtime/vm/instance/allocator.rs | 13 +- .../instance/allocator/pooling/memory_pool.rs | 30 +- .../instance/allocator/pooling/table_pool.rs | 4 +- .../allocator/pooling/unix_stack_pool.rs | 4 +- crates/wasmtime/src/runtime/vm/libcalls.rs | 2 +- crates/wasmtime/src/runtime/vm/memory.rs | 176 +++++++++--- crates/wasmtime/src/runtime/vm/mmap.rs | 10 +- crates/wasmtime/src/runtime/vm/mpk/sys.rs | 4 +- .../wasmtime/src/runtime/vm/sys/miri/mmap.rs | 4 +- .../src/runtime/vm/sys/unix/signals.rs | 2 +- .../src/runtime/vm/sys/unix/unwind.rs | 2 +- 
.../src/runtime/vm/threads/shared_memory.rs | 4 + .../vm/threads/shared_memory_disabled.rs | 4 + crates/wasmtime/src/runtime/vm/vmcontext.rs | 19 +- tests/all/limits.rs | 2 +- tests/all/memory.rs | 83 +++++- tests/all/memory_creator.rs | 10 +- .../custom-page-sizes-invalid.wast | 110 ++++++++ .../custom-page-sizes/custom-page-sizes.wast | 108 ++++++++ tests/wast.rs | 11 +- winch/codegen/src/codegen/env.rs | 25 +- winch/codegen/src/codegen/mod.rs | 12 +- 54 files changed, 1310 insertions(+), 377 deletions(-) create mode 100644 crates/types/src/prelude.rs create mode 100644 tests/misc_testsuite/custom-page-sizes/custom-page-sizes-invalid.wast create mode 100644 tests/misc_testsuite/custom-page-sizes/custom-page-sizes.wast diff --git a/Cargo.lock b/Cargo.lock index 54440eb882b6..8e70161d55b4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3381,6 +3381,7 @@ dependencies = [ "cc", "cfg-if", "encoding_rs", + "env_logger", "fxprof-processed-profile", "gimli", "hashbrown 0.14.3", @@ -3795,6 +3796,7 @@ dependencies = [ name = "wasmtime-types" version = "23.0.0" dependencies = [ + "anyhow", "cranelift-entity", "serde", "serde_derive", diff --git a/cranelift/codegen/src/isa/aarch64/mod.rs b/cranelift/codegen/src/isa/aarch64/mod.rs index 78458739dc1d..c1da4e3f1b27 100644 --- a/cranelift/codegen/src/isa/aarch64/mod.rs +++ b/cranelift/codegen/src/isa/aarch64/mod.rs @@ -179,6 +179,23 @@ impl TargetIsa for AArch64Backend { inst::Inst::function_alignment() } + fn page_size_align_log2(&self) -> u8 { + use target_lexicon::*; + match self.triple().operating_system { + OperatingSystem::MacOSX { .. 
} + | OperatingSystem::Darwin + | OperatingSystem::Ios + | OperatingSystem::Tvos => { + debug_assert_eq!(1 << 14, 0x4000); + 14 + } + _ => { + debug_assert_eq!(1 << 16, 0x10000); + 16 + } + } + } + #[cfg(feature = "disas")] fn to_capstone(&self) -> Result { use capstone::prelude::*; diff --git a/cranelift/codegen/src/isa/mod.rs b/cranelift/codegen/src/isa/mod.rs index 662e80302887..03ecdd2d543c 100644 --- a/cranelift/codegen/src/isa/mod.rs +++ b/cranelift/codegen/src/isa/mod.rs @@ -238,6 +238,12 @@ pub struct TargetFrontendConfig { /// The pointer width of the target. pub pointer_width: PointerWidth, + + /// The log2 of the target's page size and alignment. + /// + /// Note that this may be an upper-bound that is larger than necessary for + /// some platforms since it may depend on runtime configuration. + pub page_size_align_log2: u8, } impl TargetFrontendConfig { @@ -333,6 +339,12 @@ pub trait TargetIsa: fmt::Display + Send + Sync { /// alignment, for performance, required by this ISA. fn function_alignment(&self) -> FunctionAlignment; + /// The log2 of the target's page size and alignment. + /// + /// Note that this may be an upper-bound that is larger than necessary for + /// some platforms since it may depend on runtime configuration. + fn page_size_align_log2(&self) -> u8; + /// Create a polymorphic TargetIsa from this specific implementation. 
fn wrapped(self) -> OwnedTargetIsa where @@ -433,6 +445,7 @@ impl<'a> dyn TargetIsa + 'a { TargetFrontendConfig { default_call_conv: self.default_call_conv(), pointer_width: self.pointer_width(), + page_size_align_log2: self.page_size_align_log2(), } } } diff --git a/cranelift/codegen/src/isa/riscv64/mod.rs b/cranelift/codegen/src/isa/riscv64/mod.rs index 214960f39a8e..0035a721048c 100644 --- a/cranelift/codegen/src/isa/riscv64/mod.rs +++ b/cranelift/codegen/src/isa/riscv64/mod.rs @@ -155,6 +155,11 @@ impl TargetIsa for Riscv64Backend { inst::Inst::function_alignment() } + fn page_size_align_log2(&self) -> u8 { + debug_assert_eq!(1 << 12, 0x1000); + 12 + } + #[cfg(feature = "disas")] fn to_capstone(&self) -> Result { use capstone::prelude::*; diff --git a/cranelift/codegen/src/isa/s390x/mod.rs b/cranelift/codegen/src/isa/s390x/mod.rs index 5840450dd7ed..842cc4cdf11e 100644 --- a/cranelift/codegen/src/isa/s390x/mod.rs +++ b/cranelift/codegen/src/isa/s390x/mod.rs @@ -156,6 +156,11 @@ impl TargetIsa for S390xBackend { inst::Inst::function_alignment() } + fn page_size_align_log2(&self) -> u8 { + debug_assert_eq!(1 << 12, 0x1000); + 12 + } + #[cfg(feature = "disas")] fn to_capstone(&self) -> Result { use capstone::prelude::*; diff --git a/cranelift/codegen/src/isa/x64/mod.rs b/cranelift/codegen/src/isa/x64/mod.rs index dc96e7fb535a..0b6371582b43 100644 --- a/cranelift/codegen/src/isa/x64/mod.rs +++ b/cranelift/codegen/src/isa/x64/mod.rs @@ -142,6 +142,11 @@ impl TargetIsa for X64Backend { Inst::function_alignment() } + fn page_size_align_log2(&self) -> u8 { + debug_assert_eq!(1 << 12, 0x1000); + 12 + } + #[cfg(feature = "disas")] fn to_capstone(&self) -> Result { use capstone::prelude::*; diff --git a/cranelift/frontend/src/frontend.rs b/cranelift/frontend/src/frontend.rs index 54372af86c81..d747e994d514 100644 --- a/cranelift/frontend/src/frontend.rs +++ b/cranelift/frontend/src/frontend.rs @@ -1501,6 +1501,7 @@ mod tests { TargetFrontendConfig { default_call_conv: 
CallConv::SystemV, pointer_width: PointerWidth::U64, + page_size_align_log2: 12, } } diff --git a/cranelift/wasm/src/code_translator/bounds_checks.rs b/cranelift/wasm/src/code_translator/bounds_checks.rs index 9bd0f99f5278..fd2d54f7493a 100644 --- a/cranelift/wasm/src/code_translator/bounds_checks.rs +++ b/cranelift/wasm/src/code_translator/bounds_checks.rs @@ -62,6 +62,9 @@ where let spectre_mitigations_enabled = env.heap_access_spectre_mitigation(); let pcc = env.proof_carrying_code(); + let host_page_size_log2 = env.target_config().page_size_align_log2; + let can_use_virtual_memory = heap.page_size_log2 >= host_page_size_log2; + let make_compare = |builder: &mut FunctionBuilder, compare_kind: IntCC, lhs: ir::Value, @@ -141,6 +144,7 @@ where // index + 1 > bound // ==> index >= bound HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => { + log::trace!("FITZGEN: bounds checking case 1"); let bound = get_dynamic_heap_bound(builder, env, heap); let oob = make_compare( builder, @@ -188,7 +192,10 @@ where // offset immediates -- which is a common code pattern when accessing // multiple fields in the same struct that is in linear memory -- // will all emit the same `index > bound` check, which we can GVN. 
- HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.offset_guard_size => { + HeapStyle::Dynamic { bound_gv } + if can_use_virtual_memory && offset_and_size <= heap.offset_guard_size => + { + log::trace!("FITZGEN: bounds checking case 2"); let bound = get_dynamic_heap_bound(builder, env, heap); let oob = make_compare( builder, @@ -219,6 +226,7 @@ where // index + offset + access_size > bound // ==> index > bound - (offset + access_size) HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.min_size.into() => { + log::trace!("FITZGEN: bounds checking case 3"); let bound = get_dynamic_heap_bound(builder, env, heap); let adjustment = offset_and_size as i64; let adjustment_value = builder.ins().iconst(env.pointer_type(), adjustment); @@ -261,6 +269,7 @@ where // // And we have to handle the overflow case in the left-hand side. HeapStyle::Dynamic { bound_gv } => { + log::trace!("FITZGEN: bounds checking case 4"); let access_size_val = builder .ins() // Explicit cast from u64 to i64: we just want the raw @@ -313,6 +322,11 @@ where // bound`, since we will end up being out-of-bounds regardless of the // given `index`. HeapStyle::Static { bound } if offset_and_size > bound.into() => { + log::trace!("FITZGEN: bounds checking case 5"); + assert!( + can_use_virtual_memory, + "static memories require the ability to use virtual memory" + ); env.before_unconditionally_trapping_memory_access(builder)?; builder.ins().trap(ir::TrapCode::HeapOutOfBounds); Unreachable @@ -357,10 +371,16 @@ where // within the guard page region, neither of which require emitting an // explicit bounds check. 
HeapStyle::Static { bound } - if heap.index_type == ir::types::I32 + if can_use_virtual_memory + && heap.index_type == ir::types::I32 && u64::from(u32::MAX) <= u64::from(bound) + u64::from(heap.offset_guard_size) - offset_and_size => { + log::trace!("FITZGEN: bounds checking case 6"); + assert!( + can_use_virtual_memory, + "static memories require the ability to use virtual memory" + ); Reachable(compute_addr( &mut builder.cursor(), heap, @@ -386,6 +406,11 @@ where // precise, not rely on the virtual memory subsystem at all, and not // factor in the guard pages here. HeapStyle::Static { bound } => { + log::trace!("FITZGEN: bounds checking case 7"); + assert!( + can_use_virtual_memory, + "static memories require the ability to use virtual memory" + ); // NB: this subtraction cannot wrap because we didn't hit the first // special case. let adjusted_bound = u64::from(bound) - offset_and_size; diff --git a/cranelift/wasm/src/heap.rs b/cranelift/wasm/src/heap.rs index 85d5f1d687e0..c8adc66727e3 100644 --- a/cranelift/wasm/src/heap.rs +++ b/cranelift/wasm/src/heap.rs @@ -90,6 +90,9 @@ pub struct HeapData { /// The memory type for the pointed-to memory, if using proof-carrying code. pub memory_type: Option, + + /// The log2 of this memory's page size. + pub page_size_log2: u8, } /// Style of heap including style-specific information. 
diff --git a/cranelift/wasm/src/sections_translator.rs b/cranelift/wasm/src/sections_translator.rs index 1ad9d88b129f..29aec74a5db2 100644 --- a/cranelift/wasm/src/sections_translator.rs +++ b/cranelift/wasm/src/sections_translator.rs @@ -10,7 +10,7 @@ use crate::environ::ModuleEnvironment; use crate::wasm_unsupported; use crate::{ - DataIndex, ElemIndex, FuncIndex, GlobalIndex, Memory, MemoryIndex, TableIndex, Tag, TagIndex, + DataIndex, ElemIndex, FuncIndex, GlobalIndex, MemoryIndex, TableIndex, Tag, TagIndex, TypeIndex, WasmError, WasmResult, }; use cranelift_entity::packed_option::ReservedValue; @@ -20,20 +20,11 @@ use std::vec::Vec; use wasmparser::{ Data, DataKind, DataSectionReader, Element, ElementItems, ElementKind, ElementSectionReader, Export, ExportSectionReader, ExternalKind, FunctionSectionReader, GlobalSectionReader, - ImportSectionReader, MemorySectionReader, MemoryType, Operator, TableSectionReader, - TagSectionReader, TagType, TypeRef, TypeSectionReader, + ImportSectionReader, MemorySectionReader, Operator, TableSectionReader, TagSectionReader, + TagType, TypeRef, TypeSectionReader, }; use wasmtime_types::ConstExpr; -fn memory(ty: MemoryType) -> Memory { - Memory { - minimum: ty.initial, - maximum: ty.maximum, - shared: ty.shared, - memory64: ty.memory64, - } -} - fn tag(e: TagType) -> Tag { match e.kind { wasmparser::TagKind::Exception => Tag { @@ -75,7 +66,7 @@ pub fn parse_import_section<'data>( )?; } TypeRef::Memory(ty) => { - environ.declare_memory_import(memory(ty), import.module, import.name)?; + environ.declare_memory_import(ty.into(), import.module, import.name)?; } TypeRef::Tag(e) => { environ.declare_tag_import(tag(e), import.module, import.name)?; @@ -139,8 +130,7 @@ pub fn parse_memory_section( environ.reserve_memories(memories.count())?; for entry in memories { - let memory = memory(entry?); - environ.declare_memory(memory)?; + environ.declare_memory(entry?.into())?; } Ok(()) diff --git a/crates/cranelift/src/func_environ.rs 
b/crates/cranelift/src/func_environ.rs index d6618c93ae24..b724cb270bb0 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -20,7 +20,7 @@ use std::mem; use wasmparser::Operator; use wasmtime_environ::{ BuiltinFunctionIndex, MemoryPlan, MemoryStyle, Module, ModuleTranslation, ModuleTypesBuilder, - PtrSize, TableStyle, Tunables, TypeConvert, VMOffsets, WASM_PAGE_SIZE, + PtrSize, TableStyle, Tunables, TypeConvert, VMOffsets, }; use wasmtime_environ::{FUNCREF_INIT_BIT, FUNCREF_MASK}; @@ -680,6 +680,16 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } } + /// Convert the target pointer-sized integer `val` into the given memory's + /// index type. + /// + /// This might involve extending or truncating it depending on the memory's + /// index type and the target's pointer type. Note that when extending, we + /// do an unsigned extend, *except* if `val == -1`, in which case we do a + /// sign extend. This edge case makes this helper suitable for use with + /// translating the results of a `memory.grow` libcall, for example, where + /// `-1` indicates failure but the success value is otherwise unsigned and + /// might have the high bit set. fn cast_pointer_to_memory_index( &self, mut pos: FuncCursor<'_>, @@ -698,18 +708,14 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } else if pointer_type.bits() > desired_type.bits() { pos.ins().ireduce(desired_type, val) } else { - // Note that we `sextend` instead of the probably expected - // `uextend`. This function is only used within the contexts of - // `memory.size` and `memory.grow` where we're working with units of - // pages instead of actual bytes, so we know that the upper bit is - // always cleared for "valid values". The one case we care about - // sextend would be when the return value of `memory.grow` is `-1`, - // in which case we want to copy the sign bit. 
- // - // This should only come up on 32-bit hosts running wasm64 modules, - // which at some point also makes you question various assumptions - // made along the way... - pos.ins().sextend(desired_type, val) + // We have a 64-bit memory on a 32-bit host -- this combo doesn't + // really make a whole lot of sense to do from a user perspective + // but that is neither here nor there. We want to unsigned extend + // unless `val` is `-1`, as described in the doc comment above. + let extended = pos.ins().uextend(desired_type, val); + let neg_one = pos.ins().iconst(desired_type, -1); + let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1); + pos.ins().select(is_failure, neg_one, extended) } } @@ -2001,21 +2007,21 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let min_size = self.module.memory_plans[index] .memory - .minimum - .checked_mul(u64::from(WASM_PAGE_SIZE)) - .unwrap_or_else(|| { + .minimum_byte_size() + .unwrap_or_else(|_| { // The only valid Wasm memory size that won't fit in a 64-bit // integer is the maximum memory64 size (2^64) which is one // larger than `u64::MAX` (2^64 - 1). In this case, just say the // minimum heap size is `u64::MAX`. debug_assert_eq!(self.module.memory_plans[index].memory.minimum, 1 << 48); + debug_assert_eq!(self.module.memory_plans[index].memory.page_size(), 1 << 16); u64::MAX }); let max_size = self.module.memory_plans[index] .memory - .maximum - .and_then(|max| max.checked_mul(u64::from(WASM_PAGE_SIZE))); + .maximum_byte_size() + .ok(); let (ptr, base_offset, current_length_offset, ptr_memtype) = { let vmctx = self.vmctx(func); @@ -2069,6 +2075,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m } }; + let page_size_log2 = self.module.memory_plans[index].memory.page_size_log2; + // If we have a declared maximum, we can make this a "static" heap, which is // allocated up front and never moved. 
let (offset_guard_size, heap_style, readonly_base, base_fact, memory_type) = @@ -2233,6 +2241,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m style: heap_style, index_type: self.memory_index_type(index), memory_type, + page_size_log2, })) } @@ -2469,9 +2478,10 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m } } }; - let current_length_in_pages = pos - .ins() - .udiv_imm(current_length_in_bytes, i64::from(WASM_PAGE_SIZE)); + + let page_size: u64 = self.module.memory_plans[index].memory.page_size(); + let page_size = i64::try_from(page_size).unwrap(); + let current_length_in_pages = pos.ins().udiv_imm(current_length_in_bytes, page_size); Ok(self.cast_pointer_to_memory_index(pos, current_length_in_pages, index)) } diff --git a/crates/environ/src/compile/module_environ.rs b/crates/environ/src/compile/module_environ.rs index 111e7f094970..4548095b8946 100644 --- a/crates/environ/src/compile/module_environ.rs +++ b/crates/environ/src/compile/module_environ.rs @@ -22,7 +22,7 @@ use wasmparser::{ FuncToValidate, FunctionBody, KnownCustom, NameSectionReader, Naming, Operator, Parser, Payload, TypeRef, Validator, ValidatorResources, }; -use wasmtime_types::{ConstExpr, ConstOp, ModuleInternedTypeIndex, WasmHeapTopType}; +use wasmtime_types::{ConstExpr, ConstOp, ModuleInternedTypeIndex, SizeOverflow, WasmHeapTopType}; /// Object containing the standalone environment information. 
pub struct ModuleEnvironment<'a, 'data> { @@ -1044,8 +1044,13 @@ impl ModuleTranslation<'_> { idx: usize, } impl InitMemory for InitMemoryAtCompileTime<'_> { - fn memory_size_in_pages(&mut self, memory_index: MemoryIndex) -> u64 { - self.module.memory_plans[memory_index].memory.minimum + fn memory_size_in_bytes( + &mut self, + memory_index: MemoryIndex, + ) -> Result { + self.module.memory_plans[memory_index] + .memory + .minimum_byte_size() } fn eval_offset(&mut self, memory_index: MemoryIndex, expr: &ConstExpr) -> Option { diff --git a/crates/environ/src/lib.rs b/crates/environ/src/lib.rs index 5ff1eeccd9b4..7768330b736f 100644 --- a/crates/environ/src/lib.rs +++ b/crates/environ/src/lib.rs @@ -12,94 +12,7 @@ extern crate std; extern crate alloc; -/// Rust module prelude for Wasmtime crates. -/// -/// Wasmtime crates that use `no_std` use `core::prelude::*` by default which -/// does not include `alloc`-related functionality such as `String` and `Vec`. -/// To have similar ergonomics to `std` and additionally group up some common -/// functionality this module is intended to be imported at the top of all -/// modules with: -/// -/// ```rust,ignore -/// use crate::*; -/// ``` -/// -/// Externally for crates that depend on `wasmtime-environ` they should have -/// this in the root of the crate: -/// -/// ```rust,ignore -/// use wasmtime_environ::prelude; -/// ``` -/// -/// and then `use crate::*` works as usual. -pub mod prelude { - pub use crate::{Err2Anyhow, IntoAnyhow}; - pub use alloc::borrow::ToOwned; - pub use alloc::boxed::Box; - pub use alloc::format; - pub use alloc::string::{String, ToString}; - pub use alloc::vec; - pub use alloc::vec::Vec; - pub use wasmparser::collections::{IndexMap, IndexSet}; -} - -/// Convenience trait for converting `Result` into `anyhow::Result` -/// -/// Typically this is automatically done with the `?` operator in Rust and -/// by default this trait isn't necessary. 
With the `anyhow` crate's `std` -/// feature disabled, however, the `?` operator won't work because the `Error` -/// trait is not defined. This trait helps to bridge this gap. -/// -/// This does the same thing as `?` when the `std` feature is enabled, and when -/// `std` is disabled it'll use different trait bounds to create an -/// `anyhow::Error`. -/// -/// This trait is not suitable as a public interface because features change -/// what implements the trait. It's good enough for a wasmtime internal -/// implementation detail, however. -pub trait Err2Anyhow { - /// Convert `self` to `anyhow::Result`. - fn err2anyhow(self) -> anyhow::Result; -} - -impl Err2Anyhow for Result { - fn err2anyhow(self) -> anyhow::Result { - match self { - Ok(e) => Ok(e), - Err(e) => Err(e.into_anyhow()), - } - } -} - -/// Convenience trait to convert a value into `anyhow::Error` -/// -/// This trait is not a suitable public interface of Wasmtime so it's just an -/// internal implementation detail for now. This trait is conditionally -/// implemented on the `std` feature with different bounds. -pub trait IntoAnyhow { - /// Converts `self` into an `anyhow::Error`. - fn into_anyhow(self) -> anyhow::Error; -} - -#[cfg(feature = "std")] -impl IntoAnyhow for T -where - T: Into, -{ - fn into_anyhow(self) -> anyhow::Error { - self.into() - } -} - -#[cfg(not(feature = "std"))] -impl IntoAnyhow for T -where - T: core::fmt::Display + core::fmt::Debug + Send + Sync + 'static, -{ - fn into_anyhow(self) -> anyhow::Error { - anyhow::Error::msg(self) - } -} +pub use wasmtime_types::prelude; mod address_map; mod builtin; diff --git a/crates/environ/src/module.rs b/crates/environ/src/module.rs index cd82115de1c2..724d0aad5578 100644 --- a/crates/environ/src/module.rs +++ b/crates/environ/src/module.rs @@ -1,7 +1,7 @@ //! Data structures for representing decoded wasm modules. 
use crate::prelude::*; -use crate::{PrimaryMap, Tunables, WASM_PAGE_SIZE}; +use crate::{PrimaryMap, Tunables}; use alloc::collections::BTreeMap; use core::ops::Range; use cranelift_entity::{packed_option::ReservedValue, EntityRef}; @@ -28,25 +28,30 @@ pub enum MemoryStyle { impl MemoryStyle { /// Decide on an implementation style for the given `Memory`. pub fn for_memory(memory: Memory, tunables: &Tunables) -> (Self, u64) { - let is_static = match memory.maximum_byte_size() { - Ok(mut maximum) => { - if tunables.static_memory_bound_is_maximum { - maximum = maximum.min(tunables.static_memory_reservation); - } + let is_static = + // Ideally we would compare against (an upper bound on) the target's + // page size, but unfortunately that is a little hard to plumb + // through here. + memory.page_size_log2 >= Memory::DEFAULT_PAGE_SIZE_LOG2 + && match memory.maximum_byte_size() { + Ok(mut maximum) => { + if tunables.static_memory_bound_is_maximum { + maximum = maximum.min(tunables.static_memory_reservation); + } - // Ensure the minimum is less than the maximum; the minimum might exceed the maximum - // when the memory is artificially bounded via `static_memory_bound_is_maximum` above - memory.minimum_byte_size().unwrap() <= maximum - && maximum <= tunables.static_memory_reservation - } + // Ensure the minimum is less than the maximum; the minimum might exceed the maximum + // when the memory is artificially bounded via `static_memory_bound_is_maximum` above + memory.minimum_byte_size().unwrap() <= maximum + && maximum <= tunables.static_memory_reservation + } - // If the maximum size of this memory is not representable with - // `u64` then use the `static_memory_bound_is_maximum` to indicate - // whether it's a static memory or not. It should be ok to discard - // the linear memory's maximum size here as growth to the maximum is - // always fallible and never guaranteed. 
- Err(_) => tunables.static_memory_bound_is_maximum, - }; + // If the maximum size of this memory is not representable with + // `u64` then use the `static_memory_bound_is_maximum` to indicate + // whether it's a static memory or not. It should be ok to discard + // the linear memory's maximum size here as growth to the maximum is + // always fallible and never guaranteed. + Err(_) => tunables.static_memory_bound_is_maximum, + }; if is_static { return ( @@ -253,22 +258,20 @@ impl MemoryInitialization { None => return false, }; - let cur_size_in_pages = state.memory_size_in_pages(memory_index); - - // Note that this `minimum` can overflow if `minimum` is - // `1 << 48`, the maximum number of minimum pages for 64-bit - // memories. If this overflow happens, though, then there's no need - // to check the `end` value since `end` fits in a `u64` and it is - // naturally less than the overflowed value. - // - // This is a bit esoteric though because it's impossible to actually - // create a memory of `u64::MAX + 1` bytes, so this is largely just - // here to avoid having the multiplication here overflow in debug - // mode. - if let Some(max) = cur_size_in_pages.checked_mul(u64::from(WASM_PAGE_SIZE)) { - if end > max { - return false; + match state.memory_size_in_bytes(memory_index) { + Ok(max) => { + if end > max { + return false; + } } + + // Note that computing the minimum can overflow if the page size + // is the default 64KiB and the memory's minimum size in pages + // is `1 << 48`, the maximum number of minimum pages for 64-bit + // memories. We don't return `false` to signal an error here and + // instead defer the error to runtime, when it will be + // impossible to allocate that much memory anyways. + Err(_) => {} } // The limits of the data segment have been validated at this point @@ -291,9 +294,9 @@ impl MemoryInitialization { /// The various callbacks provided here are used to drive the smaller bits of /// memory initialization. 
pub trait InitMemory { - /// Returns the size, in wasm pages, of the memory specified. For - /// compile-time purposes this would be the memory type's minimum size. - fn memory_size_in_pages(&mut self, memory_index: MemoryIndex) -> u64; + /// Returns the size, in bytes, of the memory specified. For compile-time + /// purposes this would be the memory type's minimum size. + fn memory_size_in_bytes(&mut self, memory_index: MemoryIndex) -> Result; /// Returns the value of the constant expression, as a `u64`. Note that /// this may involve zero-extending a 32-bit global to a 64-bit number. May diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 569e3e5ff6a3..775ce27f4433 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -208,7 +208,7 @@ pub trait PtrSize { /// Return the size of `VMMemoryDefinition`. #[inline] fn size_of_vmmemory_definition(&self) -> u8 { - 2 * self.size() + 3 * self.size() } /// Return the size of `*mut VMMemoryDefinition`. 
diff --git a/crates/fuzzing/src/generators/config.rs b/crates/fuzzing/src/generators/config.rs index d25d204d0637..38bea01e69c9 100644 --- a/crates/fuzzing/src/generators/config.rs +++ b/crates/fuzzing/src/generators/config.rs @@ -166,6 +166,7 @@ impl Config { .wasm_simd(self.module_config.config.simd_enabled) .wasm_memory64(self.module_config.config.memory64_enabled) .wasm_tail_call(self.module_config.config.tail_call_enabled) + .wasm_custom_page_sizes(self.module_config.config.custom_page_sizes_enabled) .wasm_threads(self.module_config.config.threads_enabled) .native_unwind_info(cfg!(target_os = "windows") || self.wasmtime.native_unwind_info) .cranelift_nan_canonicalization(self.wasmtime.canonicalize_nans) diff --git a/crates/fuzzing/src/generators/memory.rs b/crates/fuzzing/src/generators/memory.rs index 125d07ddc4dc..282adc3dc964 100644 --- a/crates/fuzzing/src/generators/memory.rs +++ b/crates/fuzzing/src/generators/memory.rs @@ -22,13 +22,26 @@ pub struct MemoryAccesses { impl<'a> Arbitrary<'a> for MemoryAccesses { fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result { + let image = HeapImage::arbitrary(u)?; + + // Don't grow too much, since oss-fuzz/asan get upset if we try, + // even if we allow it to fail. + let one_mib = 1 << 20; // 1 MiB + let max_growth = one_mib / (1 << image.page_size_log2.unwrap_or(16)); + let mut growth: u32 = u.int_in_range(0..=max_growth)?; + + // Occasionally, round to a power of two, since these tend to be + // interesting numbers that overlap with the host page size and things + // like that. + if growth > 0 && u.ratio(1, 20)? { + growth = (growth - 1).next_power_of_two(); + } + Ok(MemoryAccesses { config: u.arbitrary()?, - image: u.arbitrary()?, + image, offset: u.arbitrary()?, - // Don't grow too much, since oss-fuzz/asan get upset if we try, - // even if we allow it to fail. 
- growth: u.int_in_range(0..=10)?, + growth, }) } } @@ -41,6 +54,8 @@ pub struct HeapImage { pub maximum: Option, /// Whether this memory should be indexed with `i64` (rather than `i32`). pub memory64: bool, + /// The log2 of the page size for this memory. + pub page_size_log2: Option, /// Data segments for this memory. pub segments: Vec<(u32, Vec)>, } @@ -58,6 +73,7 @@ impl std::fmt::Debug for HeapImage { .field("minimum", &self.minimum) .field("maximum", &self.maximum) .field("memory64", &self.memory64) + .field("page_size_log2", &self.page_size_log2) .field("segments", &Segments(&self.segments)) .finish() } @@ -72,11 +88,16 @@ impl<'a> Arbitrary<'a> for HeapImage { None }; let memory64 = u.arbitrary()?; + let page_size_log2 = match u.int_in_range(0..=2)? { + 0 => None, + 1 => Some(0), + 2 => Some(16), + _ => unreachable!(), + }; let mut segments = vec![]; if minimum > 0 { for _ in 0..u.int_in_range(0..=4)? { - const WASM_PAGE_SIZE: u32 = 65536; - let last_addressable = WASM_PAGE_SIZE * minimum - 1; + let last_addressable = (1u32 << page_size_log2.unwrap_or(16)) * minimum - 1; let offset = u.int_in_range(0..=last_addressable)?; let max_len = std::cmp::min(u.len(), usize::try_from(last_addressable - offset).unwrap()); @@ -89,6 +110,7 @@ impl<'a> Arbitrary<'a> for HeapImage { minimum, maximum, memory64, + page_size_log2, segments, }) } diff --git a/crates/fuzzing/src/oracles/memory.rs b/crates/fuzzing/src/oracles/memory.rs index 60f7400992a7..88d37a1614cb 100644 --- a/crates/fuzzing/src/oracles/memory.rs +++ b/crates/fuzzing/src/oracles/memory.rs @@ -17,10 +17,13 @@ pub fn check_memory_accesses(input: MemoryAccesses) { let mut config = input.config.to_wasmtime(); - // Force-enable the memory64 proposal if the heap image wants it. + // Force-enable proposals if the heap image needs them. 
if input.image.memory64 { config.wasm_memory64(true); } + if input.image.page_size_log2.is_some() { + config.wasm_custom_page_sizes(true); + } let engine = Engine::new(&config).unwrap(); let module = match Module::new(&engine, &wasm) { @@ -273,7 +276,7 @@ fn build_wasm(image: &HeapImage, offset: u32) -> Vec { maximum: image.maximum.map(Into::into), memory64: image.memory64, shared: false, - page_size_log2: None, + page_size_log2: image.page_size_log2, }); module.section(&memories); } diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 411860d8e652..697cb3e9267d 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -9,6 +9,7 @@ documentation = "https://docs.rs/wasmtime-types" edition.workspace = true [dependencies] +anyhow = { workspace = true } cranelift-entity = { workspace = true, features = ['enable-serde'] } serde = { workspace = true } serde_derive = { workspace = true } diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 91de8d239853..fdfafda033cd 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -12,6 +12,8 @@ pub use wasmparser; #[doc(hidden)] pub use alloc::format as __format; +pub mod prelude; + use alloc::borrow::Cow; use alloc::boxed::Box; use core::{fmt, ops::Range}; @@ -1487,25 +1489,27 @@ pub struct Memory { pub shared: bool, /// Whether or not this is a 64-bit memory pub memory64: bool, + /// The log2 of this memory's page size, in bytes. + /// + /// By default the page size is 64KiB (0x10000; 2**16; 1<<16; 65536) but the + /// custom-page-sizes proposal allows opting into a page size of `1`. + pub page_size_log2: u8, } -/// WebAssembly page sizes are defined to be 64KiB. -pub const WASM_PAGE_SIZE: u32 = 0x10000; - /// Maximum size, in bytes, of 32-bit memories (4G) pub const WASM32_MAX_SIZE: u64 = 1 << 32; -/// Maximum size, in bytes, of 64-bit memories. -/// -/// Note that the true maximum size of a 64-bit linear memory, in bytes, cannot -/// be represented in a `u64`. 
That would require a u65 to store `1<<64`. -/// Despite that no system can actually allocate a full 64-bit linear memory so -/// this is instead emulated as "what if the kernel fit in a single wasm page -/// of linear memory". Shouldn't ever actually be possible but it provides a -/// number to serve as an effective maximum. -pub const WASM64_MAX_SIZE: u64 = 0u64.wrapping_sub(0x10000); - impl Memory { + /// WebAssembly page sizes are 64KiB by default. + pub const DEFAULT_PAGE_SIZE: u32 = 0x10000; + + /// WebAssembly page sizes are 64KiB (or `2**16`) by default. + pub const DEFAULT_PAGE_SIZE_LOG2: u8 = { + let log2 = 16; + assert!(1 << log2 == Memory::DEFAULT_PAGE_SIZE); + log2 + }; + /// Returns the minimum size, in bytes, that this memory must be. /// /// # Errors @@ -1515,7 +1519,7 @@ impl Memory { /// it's deferred to the caller to how to deal with that. pub fn minimum_byte_size(&self) -> Result { self.minimum - .checked_mul(u64::from(WASM_PAGE_SIZE)) + .checked_mul(self.page_size()) .ok_or(SizeOverflow) } @@ -1535,9 +1539,7 @@ impl Memory { /// it's deferred to the caller to how to deal with that. pub fn maximum_byte_size(&self) -> Result { match self.maximum { - Some(max) => max - .checked_mul(u64::from(WASM_PAGE_SIZE)) - .ok_or(SizeOverflow), + Some(max) => max.checked_mul(self.page_size()).ok_or(SizeOverflow), None => { let min = self.minimum_byte_size()?; Ok(min.max(self.max_size_based_on_index_type())) @@ -1545,13 +1547,30 @@ impl Memory { } } + /// Get the size of this memory's pages, in bytes. + pub fn page_size(&self) -> u64 { + debug_assert!( + self.page_size_log2 == 16 || self.page_size_log2 == 0, + "invalid page_size_log2: {}; must be 16 or 0", + self.page_size_log2 + ); + 1 << self.page_size_log2 + } + /// Returns the maximum size memory is allowed to be only based on the /// index type used by this memory. /// /// For example 32-bit linear memories return `1<<32` from this method. 
pub fn max_size_based_on_index_type(&self) -> u64 { if self.memory64 { - WASM64_MAX_SIZE + // Note that the true maximum size of a 64-bit linear memory, in + // bytes, cannot be represented in a `u64`. That would require a u65 + // to store `1<<64`. Despite that no system can actually allocate a + // full 64-bit linear memory so this is instead emulated as "what if + // the kernel fit in a single Wasm page of linear memory". Shouldn't + // ever actually be possible but it provides a number to serve as an + // effective maximum. + 0_u64.wrapping_sub(self.page_size()) } else { WASM32_MAX_SIZE } @@ -1572,11 +1591,18 @@ impl std::error::Error for SizeOverflow {} impl From for Memory { fn from(ty: wasmparser::MemoryType) -> Memory { + let page_size_log2 = u8::try_from(ty.page_size_log2.unwrap_or(16)).unwrap(); + debug_assert!( + page_size_log2 == 16 || page_size_log2 == 0, + "invalid page_size_log2: {}; must be 16 or 0", + page_size_log2 + ); Memory { minimum: ty.initial, maximum: ty.maximum, shared: ty.shared, memory64: ty.memory64, + page_size_log2, } } } diff --git a/crates/types/src/prelude.rs b/crates/types/src/prelude.rs new file mode 100644 index 000000000000..70a430820584 --- /dev/null +++ b/crates/types/src/prelude.rs @@ -0,0 +1,86 @@ +//! Rust module prelude for Wasmtime crates. +//! +//! Wasmtime crates that use `no_std` use `core::prelude::*` by default which +//! does not include `alloc`-related functionality such as `String` and `Vec`. +//! To have similar ergonomics to `std` and additionally group up some common +//! functionality this module is intended to be imported at the top of all +//! modules with: +//! +//! ```rust,ignore +//! use crate::*; +//! ``` +//! +//! Externally for crates that depend on `wasmtime-types` they should have this +//! in the root of the crate: +//! +//! ```rust,ignore +//! use wasmtime_types::prelude; +//! ``` +//! +//! and then `use crate::*` works as usual. 
+ +pub use alloc::borrow::ToOwned; +pub use alloc::boxed::Box; +pub use alloc::format; +pub use alloc::string::{String, ToString}; +pub use alloc::vec; +pub use alloc::vec::Vec; +pub use wasmparser::collections::{IndexMap, IndexSet}; + +/// Convenience trait for converting `Result` into `anyhow::Result` +/// +/// Typically this is automatically done with the `?` operator in Rust and +/// by default this trait isn't necessary. With the `anyhow` crate's `std` +/// feature disabled, however, the `?` operator won't work because the `Error` +/// trait is not defined. This trait helps to bridge this gap. +/// +/// This does the same thing as `?` when the `std` feature is enabled, and when +/// `std` is disabled it'll use different trait bounds to create an +/// `anyhow::Error`. +/// +/// This trait is not suitable as a public interface because features change +/// what implements the trait. It's good enough for a wasmtime internal +/// implementation detail, however. +pub trait Err2Anyhow { + /// Convert `self` to `anyhow::Result`. + fn err2anyhow(self) -> anyhow::Result; +} + +impl Err2Anyhow for Result { + fn err2anyhow(self) -> anyhow::Result { + match self { + Ok(e) => Ok(e), + Err(e) => Err(e.into_anyhow()), + } + } +} + +/// Convenience trait to convert a value into `anyhow::Error` +/// +/// This trait is not a suitable public interface of Wasmtime so it's just an +/// internal implementation detail for now. This trait is conditionally +/// implemented on the `std` feature with different bounds. +pub trait IntoAnyhow { + /// Converts `self` into an `anyhow::Error`. 
+ fn into_anyhow(self) -> anyhow::Error; +} + +#[cfg(feature = "std")] +impl IntoAnyhow for T +where + T: Into, +{ + fn into_anyhow(self) -> anyhow::Error { + self.into() + } +} + +#[cfg(not(feature = "std"))] +impl IntoAnyhow for T +where + T: core::fmt::Display + core::fmt::Debug + Send + Sync + 'static, +{ + fn into_anyhow(self) -> anyhow::Error { + anyhow::Error::msg(self) + } +} diff --git a/crates/wasmtime/Cargo.toml b/crates/wasmtime/Cargo.toml index 5e00f9a63159..bd9f2722a297 100644 --- a/crates/wasmtime/Cargo.toml +++ b/crates/wasmtime/Cargo.toml @@ -89,6 +89,7 @@ rustix = { workspace = true, optional = true } psm = { workspace = true, optional = true } [dev-dependencies] +env_logger = { workspace = true } proptest = { workspace = true } rand = { workspace = true } tempfile = { workspace = true } diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index c0bc8d06de72..86b7d0525252 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -731,6 +731,31 @@ impl Config { self } + /// Configures whether the WebAssembly custom-page-sizes proposal will be + /// enabled for compilation or not. + /// + /// The [WebAssembly custom-page-sizes proposal] allows a memory to + /// customize its page sizes. By default, Wasm page sizes are 64KiB + /// large. This proposal allows the memory to opt into smaller page sizes + /// instead, allowing Wasm to run in environments with less than 64KiB RAM + /// available, for example. + /// + /// Note that the page size is part of the memory's type, and because + /// different memories may have different types, they may also have + /// different page sizes. + /// + /// Currently the only valid page sizes are 64KiB (the default) and 1 + /// byte. Future extensions may relax this constraint and allow all powers + /// of two. + /// + /// Support for this proposal is disabled by default. 
+ /// + /// [WebAssembly custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes + pub fn wasm_custom_page_sizes(&mut self, enable: bool) -> &mut Self { + self.features.set(WasmFeatures::CUSTOM_PAGE_SIZES, enable); + self + } + /// Configures whether the WebAssembly [threads] proposal will be enabled /// for compilation. /// @@ -2120,11 +2145,7 @@ fn round_up_to_pages(val: u64) -> u64 { #[cfg(feature = "runtime")] fn round_up_to_pages(val: u64) -> u64 { - let page_size = crate::runtime::vm::page_size() as u64; - debug_assert!(page_size.is_power_of_two()); - val.checked_add(page_size - 1) - .map(|val| val & !(page_size - 1)) - .unwrap_or(u64::MAX / page_size + 1) + crate::runtime::vm::round_u64_up_to_host_pages(val) } impl Default for Config { diff --git a/crates/wasmtime/src/engine/serialization.rs b/crates/wasmtime/src/engine/serialization.rs index a82c6437dffe..28e8bdb0f328 100644 --- a/crates/wasmtime/src/engine/serialization.rs +++ b/crates/wasmtime/src/engine/serialization.rs @@ -202,6 +202,7 @@ struct WasmFeatures { extended_const: bool, function_references: bool, gc: bool, + custom_page_sizes: bool, } impl Metadata<'_> { @@ -241,7 +242,6 @@ impl Metadata<'_> { assert!(!memory_control); assert!(!component_model_values); assert!(!component_model_nested_names); - assert!(!custom_page_sizes); assert!(!shared_everything_threads); Metadata { @@ -264,6 +264,7 @@ impl Metadata<'_> { extended_const, function_references, gc, + custom_page_sizes, }, } } @@ -480,6 +481,7 @@ impl Metadata<'_> { extended_const, function_references, gc, + custom_page_sizes, } = self.features; use wasmparser::WasmFeatures as F; @@ -556,6 +558,11 @@ impl Metadata<'_> { other.contains(F::RELAXED_SIMD), "WebAssembly relaxed-simd support", )?; + Self::check_bool( + custom_page_sizes, + other.contains(F::CUSTOM_PAGE_SIZES), + "WebAssembly custom-page-sizes support", + )?; Ok(()) } diff --git a/crates/wasmtime/src/runtime/component/bindgen_examples/mod.rs 
b/crates/wasmtime/src/runtime/component/bindgen_examples/mod.rs index aac9ab36bfbe..683408975de1 100644 --- a/crates/wasmtime/src/runtime/component/bindgen_examples/mod.rs +++ b/crates/wasmtime/src/runtime/component/bindgen_examples/mod.rs @@ -205,11 +205,10 @@ pub mod _1_world_imports; /// // ... /// } /// +/// # mod rand { pub fn thread_rng() -> G { G } pub struct G; impl G { pub fn gen(&self) -> u32 { 0 } } } /// // Note that the trait here is per-interface and within a submodule now. /// impl my::project::host::Host for MyState { /// fn gen_random_integer(&mut self) -> u32 { -/// # panic!(); -/// # #[cfg(FALSE)] /// rand::thread_rng().gen() /// } /// diff --git a/crates/wasmtime/src/runtime/memory.rs b/crates/wasmtime/src/runtime/memory.rs index c47703d934de..437e3c775120 100644 --- a/crates/wasmtime/src/runtime/memory.rs +++ b/crates/wasmtime/src/runtime/memory.rs @@ -431,7 +431,17 @@ impl Memory { /// Returns the byte length of this memory. /// - /// The returned value will be a multiple of the wasm page size, 64k. + /// WebAssembly memories are made up of a whole number of pages, so the byte + /// size returned will always be a multiple of this memory's page size. Note + /// that different Wasm memories may have different page sizes. You can get + /// a memory's page size via the [`Memory::page_size`] method. + /// + /// By default the page size is 64KiB (aka `0x10000`, `2**16`, `1<<16`, or + /// `65536`) but [the custom-page-sizes proposal] allows a memory to opt + /// into a page size of `1`. Future extensions might allow any power of two + /// as a page size. + /// + /// [the custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes /// /// For more information and examples see the documentation on the /// [`Memory`] type. @@ -447,7 +457,19 @@ impl Memory { unsafe { (*store[self.0].definition).current_length() } } - /// Returns the size, in WebAssembly pages, of this wasm memory. 
+ /// Returns the size, in units of pages, of this Wasm memory. + /// + /// WebAssembly memories are made up of a whole number of pages, so the byte + /// size returned will always be a multiple of this memory's page size. Note + /// that different Wasm memories may have different page sizes. You can get + /// a memory's page size via the [`Memory::page_size`] method. + /// + /// By default the page size is 64KiB (aka `0x10000`, `2**16`, `1<<16`, or + /// `65536`) but [the custom-page-sizes proposal] allows a memory to opt + /// into a page size of `1`. Future extensions might allow any power of two + /// as a page size. + /// + /// [the custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes /// /// # Panics /// @@ -457,7 +479,48 @@ impl Memory { } pub(crate) fn internal_size(&self, store: &StoreOpaque) -> u64 { - (self.internal_data_size(store) / wasmtime_environ::WASM_PAGE_SIZE as usize) as u64 + let byte_size = self.internal_data_size(store); + let page_size = usize::try_from(self._page_size(store)).unwrap(); + u64::try_from(byte_size / page_size).unwrap() + } + + /// Returns the size of a page, in bytes, for this memory. + /// + /// WebAssembly memories are made up of a whole number of pages, so the byte + /// size (as returned by [`Memory::data_size`]) will always be a multiple of + /// their page size. Different Wasm memories may have different page sizes. + /// + /// By default this is 64KiB (aka `0x10000`, `2**16`, `1<<16`, or `65536`) + /// but [the custom-page-sizes proposal] allows opting into a page size of + /// `1`. Future extensions might allow any power of two as a page size. 
+ /// + /// [the custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes + pub fn page_size(&self, store: impl AsContext) -> u64 { + self._page_size(store.as_context().0) + } + + pub(crate) fn _page_size(&self, store: &StoreOpaque) -> u64 { + store[self.0].memory.memory.page_size() + } + + /// Returns the log2 of this memory's page size, in bytes. + /// + /// WebAssembly memories are made up of a whole number of pages, so the byte + /// size (as returned by [`Memory::data_size`]) will always be a multiple of + /// their page size. Different Wasm memories may have different page sizes. + /// + /// By default the page size is 64KiB (aka `0x10000`, `2**16`, `1<<16`, or + /// `65536`) but [the custom-page-sizes proposal] allows opting into a page + /// size of `1`. Future extensions might allow any power of two as a page + /// size. + /// + /// [the custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes + pub fn page_size_log2(&self, store: impl AsContext) -> u8 { + self._page_size_log2(store.as_context().0) + } + + pub(crate) fn _page_size_log2(&self, store: &StoreOpaque) -> u8 { + store[self.0].memory.memory.page_size_log2 } /// Grows this WebAssembly memory by `delta` pages. @@ -470,6 +533,13 @@ impl Memory { /// On success returns the number of pages this memory previously had /// before the growth succeeded. /// + /// Note that, by default, a WebAssembly memory's page size is 64KiB (aka + /// 65536 or 216). The [custom-page-sizes proposal] allows Wasm + /// memories to opt into a page size of one byte (and this may be further + /// relaxed to any power of two in a future extension). 
+ /// + /// [custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes + /// /// # Errors /// /// Returns an error if memory could not be grown, for example if it exceeds @@ -514,7 +584,8 @@ impl Memory { Some(size) => { let vm = (*mem).vmmemory(); *store[self.0].definition = vm; - Ok(u64::try_from(size).unwrap() / u64::from(wasmtime_environ::WASM_PAGE_SIZE)) + let page_size = (*mem).page_size(); + Ok(u64::try_from(size).unwrap() / page_size) } None => bail!("failed to grow memory by `{}`", delta), } @@ -720,7 +791,11 @@ pub unsafe trait MemoryCreator: Send + Sync { /// # } /// ``` #[derive(Clone)] -pub struct SharedMemory(crate::runtime::vm::SharedMemory, Engine); +pub struct SharedMemory { + vm: crate::runtime::vm::SharedMemory, + engine: Engine, + page_size_log2: u8, +} impl SharedMemory { /// Construct a [`SharedMemory`] by providing both the `minimum` and @@ -735,18 +810,38 @@ impl SharedMemory { let tunables = engine.tunables(); let plan = MemoryPlan::for_memory(ty.wasmtime_memory().clone(), tunables); + let page_size_log2 = plan.memory.page_size_log2; let memory = crate::runtime::vm::SharedMemory::new(plan)?; - Ok(Self(memory, engine.clone())) + + Ok(Self { + vm: memory, + engine: engine.clone(), + page_size_log2, + }) } /// Return the type of the shared memory. pub fn ty(&self) -> MemoryType { - MemoryType::from_wasmtime_memory(&self.0.ty()) + MemoryType::from_wasmtime_memory(&self.vm.ty()) } /// Returns the size, in WebAssembly pages, of this wasm memory. pub fn size(&self) -> u64 { - (self.data_size() / wasmtime_environ::WASM_PAGE_SIZE as usize) as u64 + let byte_size = u64::try_from(self.data_size()).unwrap(); + let page_size = u64::from(self.page_size()); + byte_size / page_size + } + + /// Returns the size of a page, in bytes, for this memory. + /// + /// By default this is 64KiB (aka `0x10000`, `2**16`, `1<<16`, or `65536`) + /// but [the custom-page-sizes proposal] allows opting into a page size of + /// `1`. 
Future extensions might allow any power of two as a page size. + /// + /// [the custom-page-sizes proposal]: https://github.com/WebAssembly/custom-page-sizes + pub fn page_size(&self) -> u32 { + debug_assert!(self.page_size_log2 == 0 || self.page_size_log2 == 16); + 1 << self.page_size_log2 } /// Returns the byte length of this memory. @@ -756,7 +851,7 @@ impl SharedMemory { /// For more information and examples see the documentation on the /// [`Memory`] type. pub fn data_size(&self) -> usize { - self.0.byte_size() + self.vm.byte_size() } /// Return access to the available portion of the shared memory. @@ -781,7 +876,7 @@ impl SharedMemory { /// currently be done unsafely. pub fn data(&self) -> &[UnsafeCell] { unsafe { - let definition = &*self.0.vmmemory_ptr(); + let definition = &*self.vm.vmmemory_ptr(); slice::from_raw_parts(definition.base.cast(), definition.current_length()) } } @@ -803,11 +898,11 @@ impl SharedMemory { /// [`ResourceLimiter`](crate::ResourceLimiter) is another example of /// preventing a memory to grow. pub fn grow(&self, delta: u64) -> Result { - match self.0.grow(delta, None)? { + match self.vm.grow(delta, None)? { Some((old_size, _new_size)) => { // For shared memory, the `VMMemoryDefinition` is updated inside // the locked region. - Ok(u64::try_from(old_size).unwrap() / u64::from(wasmtime_environ::WASM_PAGE_SIZE)) + Ok(u64::try_from(old_size).unwrap() / u64::from(self.page_size())) } None => bail!("failed to grow memory by `{}`", delta), } @@ -830,7 +925,7 @@ impl SharedMemory { /// This function will return an error if `addr` is not within bounds or /// not aligned to a 4-byte boundary. 
pub fn atomic_notify(&self, addr: u64, count: u32) -> Result { - self.0.atomic_notify(addr, count) + self.vm.atomic_notify(addr, count) } /// Equivalent of the WebAssembly `memory.atomic.wait32` instruction for @@ -872,7 +967,7 @@ impl SharedMemory { expected: u32, timeout: Option, ) -> Result { - self.0.atomic_wait32(addr, expected, timeout) + self.vm.atomic_wait32(addr, expected, timeout) } /// Equivalent of the WebAssembly `memory.atomic.wait64` instruction for @@ -890,19 +985,19 @@ impl SharedMemory { expected: u64, timeout: Option, ) -> Result { - self.0.atomic_wait64(addr, expected, timeout) + self.vm.atomic_wait64(addr, expected, timeout) } /// Return a reference to the [`Engine`] used to configure the shared /// memory. pub(crate) fn engine(&self) -> &Engine { - &self.1 + &self.engine } /// Construct a single-memory instance to provide a way to import /// [`SharedMemory`] into other modules. pub(crate) fn vmimport(&self, store: &mut StoreOpaque) -> crate::runtime::vm::VMMemoryImport { - let export_memory = generate_memory_export(store, &self.ty(), Some(&self.0)).unwrap(); + let export_memory = generate_memory_export(store, &self.ty(), Some(&self.vm)).unwrap(); VMMemoryImport { from: export_memory.definition, vmctx: export_memory.vmctx, @@ -917,14 +1012,23 @@ impl SharedMemory { wasmtime_export: crate::runtime::vm::ExportMemory, store: &mut StoreOpaque, ) -> Self { + #[cfg_attr(not(feature = "threads"), allow(unused_variables, unreachable_code))] crate::runtime::vm::Instance::from_vmctx(wasmtime_export.vmctx, |handle| { + let memory_index = handle.module().memory_index(wasmtime_export.index); + let page_size = handle.memory_page_size(memory_index); + debug_assert!(page_size.is_power_of_two()); + let page_size_log2 = u8::try_from(page_size.ilog2()).unwrap(); + let memory = handle .get_defined_memory(wasmtime_export.index) .as_mut() .unwrap(); match memory.as_shared_memory() { - #[cfg_attr(not(feature = "threads"), allow(unreachable_code))] - Some(mem) => 
Self(mem.clone(), store.engine().clone()), + Some(mem) => Self { + vm: mem.clone(), + engine: store.engine().clone(), + page_size_log2, + }, None => panic!("unable to convert from a shared memory"), } }) diff --git a/crates/wasmtime/src/runtime/trampoline/memory.rs b/crates/wasmtime/src/runtime/trampoline/memory.rs index bf5b470fffeb..9bf3709c5fc5 100644 --- a/crates/wasmtime/src/runtime/trampoline/memory.rs +++ b/crates/wasmtime/src/runtime/trampoline/memory.rs @@ -80,9 +80,14 @@ pub fn create_memory( struct LinearMemoryProxy { mem: Box, + page_size_log2: u8, } impl RuntimeLinearMemory for LinearMemoryProxy { + fn page_size_log2(&self) -> u8 { + self.page_size_log2 + } + fn byte_size(&self) -> usize { self.mem.byte_size() } @@ -99,6 +104,7 @@ impl RuntimeLinearMemory for LinearMemoryProxy { VMMemoryDefinition { base: self.mem.as_ptr(), current_length: self.mem.byte_size().into(), + page_size_log2: self.page_size_log2, } } @@ -141,7 +147,12 @@ impl RuntimeMemoryCreator for MemoryCreatorProxy { reserved_size_in_bytes, usize::try_from(plan.offset_guard_size).unwrap(), ) - .map(|mem| Box::new(LinearMemoryProxy { mem }) as Box) + .map(|mem| { + Box::new(LinearMemoryProxy { + mem, + page_size_log2: plan.memory.page_size_log2, + }) as Box + }) .map_err(|e| anyhow!(e)) } } diff --git a/crates/wasmtime/src/runtime/types.rs b/crates/wasmtime/src/runtime/types.rs index f890d0ecee57..352efbd07b9a 100644 --- a/crates/wasmtime/src/runtime/types.rs +++ b/crates/wasmtime/src/runtime/types.rs @@ -1,5 +1,5 @@ use crate::prelude::*; -use anyhow::{bail, ensure, Result}; +use anyhow::{bail, ensure, Context, Result}; use core::fmt::{self, Display, Write}; use wasmtime_environ::{ EngineOrModuleTypeIndex, EntityType, Global, Memory, ModuleTypes, Table, TypeTrace, @@ -2346,6 +2346,169 @@ impl TableType { // Memory Types +/// A builder for [`MemoryType`][crate::MemoryType]s. +/// +/// A new builder can be constructed via its `Default` implementation. 
+/// +/// When you're done configuring, get the underlying +/// [`MemoryType`][crate::MemoryType] by calling the +/// [`build`][crate::MemoryTypeBuilder::build] method. +/// +/// # Example +/// +/// ``` +/// # fn foo() -> wasmtime::Result<()> { +/// use wasmtime::MemoryTypeBuilder; +/// +/// let memory_type = MemoryTypeBuilder::default() +/// // Set the minimum size, in pages. +/// .min(4096) +/// // Set the maximum size, in pages. +/// .max(Some(4096)) +/// // Set the page size to 1 byte (aka 2**0). +/// .page_size_log2(0) +/// // Get the underlying memory type. +/// .build()?; +/// # Ok(()) +/// # } +/// ``` +pub struct MemoryTypeBuilder { + ty: Memory, +} + +impl Default for MemoryTypeBuilder { + fn default() -> Self { + MemoryTypeBuilder { + ty: Memory { + minimum: 0, + maximum: None, + shared: false, + memory64: false, + page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2, + }, + } + } +} + +impl MemoryTypeBuilder { + fn validate(&self) -> Result<()> { + if self.ty.maximum.map_or(false, |max| max < self.ty.minimum) { + bail!("maximum page size cannot be smaller than the minimum page size"); + } + + match self.ty.page_size_log2 { + 0 | Memory::DEFAULT_PAGE_SIZE_LOG2 => {} + x => bail!( + "page size must be 2**16 or 2**0, but was given 2**{x}; note \ + that future Wasm extensions might allow any power of two page \ + size, but only 2**16 and 2**0 are currently valid", + ), + } + + if self.ty.shared && self.ty.maximum.is_none() { + bail!("shared memories must have a maximum size"); + } + + let absolute_max = self.ty.max_size_based_on_index_type(); + let min = self + .ty + .minimum_byte_size() + .err2anyhow() + .context("memory's minimum byte size must fit in a u64")?; + if min > absolute_max { + bail!("minimum size is too large for this memory type's index type"); + } + if self + .ty + .maximum_byte_size() + .map_or(false, |max| max > absolute_max) + { + bail!("maximum size is too large for this memory type's index type"); + } + + Ok(()) + } + + /// Set the minimum 
size, in units of pages, for the memory type being + /// built. + /// + /// The default minimum is `0`. + pub fn min(&mut self, minimum: u64) -> &mut Self { + self.ty.minimum = minimum; + self + } + + /// Set the maximum size, in units of pages, for the memory type being + /// built. + /// + /// The default maximum is `None`. + pub fn max(&mut self, maximum: Option) -> &mut Self { + self.ty.maximum = maximum; + self + } + + /// Set whether this is a 64-bit memory or not. + /// + /// If a memory is not a 64-bit memory, then it is a 32-bit memory. + /// + /// The default is `false`, aka 32-bit memories. + /// + /// Note that 64-bit memories are part of [the memory64 + /// proposal](https://github.com/WebAssembly/memory64) for WebAssembly which + /// is not fully standardized yet. + pub fn memory64(&mut self, memory64: bool) -> &mut Self { + self.ty.memory64 = memory64; + self + } + + /// Set the sharedness for the memory type being built. + /// + /// The default is `false`, aka unshared. + /// + /// Note that shared memories are part of [the threads + /// proposal](https://github.com/WebAssembly/threads) for WebAssembly which + /// is not fully standardized yet. + pub fn shared(&mut self, shared: bool) -> &mut Self { + self.ty.shared = shared; + self + } + + /// Set the log base 2 of the page size, in bytes, for the memory type being + /// built. + /// + /// The default value is `16`, which results in the default Wasm page size + /// of 64KiB (aka 216 or 65536). + /// + /// Other than `16`, the only valid value is `0`, which results in a page + /// size of one byte (aka 20). Single-byte page sizes can be used + /// to get fine-grained control over a Wasm memory's resource consumption + /// and run Wasm in embedded environments with less than 64KiB of RAM, for + /// example. 
+ /// + /// Future extensions to the core WebAssembly language might relax these + /// constraints and introduce more valid page sizes, such as any power of + /// two between 1 and 65536 inclusive. + /// + /// Note that non-default page sizes are part of [the custom-page-sizes + /// proposal](https://github.com/WebAssembly/custom-page-sizes) for + /// WebAssembly which is not fully standardized yet. + pub fn page_size_log2(&mut self, page_size_log2: u8) -> &mut Self { + self.ty.page_size_log2 = page_size_log2; + self + } + + /// Get the underlying memory type that this builder has been building. + /// + /// # Errors + /// + /// Returns an error if the configured memory type is invalid, for example + /// if the maximum size is smaller than the minimum size. + pub fn build(&self) -> Result { + self.validate()?; + Ok(MemoryType { ty: self.ty }) + } +} + /// A descriptor for a WebAssembly memory type. /// /// Memories are described in units of pages (64KB) and represent contiguous @@ -2359,55 +2522,74 @@ impl MemoryType { /// Creates a new descriptor for a 32-bit WebAssembly memory given the /// specified limits of the memory. /// - /// The `minimum` and `maximum` values here are specified in units of - /// WebAssembly pages, which are 64k. + /// The `minimum` and `maximum` values here are specified in units of + /// WebAssembly pages, which are 64KiB by default. Use + /// [`MemoryTypeBuilder`][crate::MemoryTypeBuilder] if you want a + /// non-default page size. + /// + /// # Panics + /// + /// Panics if the minimum is greater than the maximum or if the minimum or + /// maximum number of pages can result in a byte size that is not + /// addressable with a 32-bit integer. 
pub fn new(minimum: u32, maximum: Option) -> MemoryType { - MemoryType { - ty: Memory { - memory64: false, - shared: false, - minimum: minimum.into(), - maximum: maximum.map(|i| i.into()), - }, - } + MemoryTypeBuilder::default() + .min(minimum.into()) + .max(maximum.map(Into::into)) + .build() + .unwrap() } /// Creates a new descriptor for a 64-bit WebAssembly memory given the /// specified limits of the memory. /// - /// The `minimum` and `maximum` values here are specified in units of - /// WebAssembly pages, which are 64k. + /// The `minimum` and `maximum` values here are specified in units of + /// WebAssembly pages, which are 64KiB by default. Use + /// [`MemoryTypeBuilder`][crate::MemoryTypeBuilder] if you want a + /// non-default page size. /// - /// Note that 64-bit memories are part of the memory64 proposal for - /// WebAssembly which is not standardized yet. + /// Note that 64-bit memories are part of [the memory64 + /// proposal](https://github.com/WebAssembly/memory64) for WebAssembly which + /// is not fully standardized yet. + /// + /// # Panics + /// + /// Panics if the minimum is greater than the maximum or if the minimum or + /// maximum number of pages can result in a byte size that is not + /// addressable with a 64-bit integer. pub fn new64(minimum: u64, maximum: Option) -> MemoryType { - MemoryType { - ty: Memory { - memory64: true, - shared: false, - minimum, - maximum, - }, - } + MemoryTypeBuilder::default() + .memory64(true) + .min(minimum) + .max(maximum) + .build() + .unwrap() } /// Creates a new descriptor for shared WebAssembly memory given the /// specified limits of the memory. /// - /// The `minimum` and `maximum` values here are specified in units of - /// WebAssembly pages, which are 64k. + /// The `minimum` and `maximum` values here are specified in units of + /// WebAssembly pages, which are 64KiB by default. Use + /// [`MemoryTypeBuilder`][crate::MemoryTypeBuilder] if you want a + /// non-default page size. 
/// - /// Note that shared memories are part of the threads proposal for - /// WebAssembly which is not standardized yet. + /// Note that shared memories are part of [the threads + /// proposal](https://github.com/WebAssembly/threads) for WebAssembly which + /// is not fully standardized yet. + /// + /// # Panics + /// + /// Panics if the minimum is greater than the maximum or if the minimum or + /// maximum number of pages can result in a byte size that is not + /// addressable with a 32-bit integer. pub fn shared(minimum: u32, maximum: u32) -> MemoryType { - MemoryType { - ty: Memory { - memory64: false, - shared: true, - minimum: minimum.into(), - maximum: Some(maximum.into()), - }, - } + MemoryTypeBuilder::default() + .shared(true) + .min(minimum.into()) + .max(Some(maximum.into())) + .build() + .unwrap() } /// Returns whether this is a 64-bit memory or not. @@ -2445,6 +2627,16 @@ impl MemoryType { self.ty.maximum } + /// This memory's page size, in bytes. + pub fn page_size(&self) -> u64 { + self.ty.page_size() + } + + /// The log2 of this memory's page size, in bytes. + pub fn page_size_log2(&self) -> u8 { + self.ty.page_size_log2 + } + pub(crate) fn from_wasmtime_memory(memory: &Memory) -> MemoryType { MemoryType { ty: memory.clone() } } diff --git a/crates/wasmtime/src/runtime/types/matching.rs b/crates/wasmtime/src/runtime/types/matching.rs index 6cd470e88d93..3b123f8c10de 100644 --- a/crates/wasmtime/src/runtime/types/matching.rs +++ b/crates/wasmtime/src/runtime/types/matching.rs @@ -228,6 +228,14 @@ fn memory_ty(expected: &Memory, actual: &Memory, actual_runtime_size: Option usize { +pub fn host_page_size() -> usize { static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0); return match PAGE_SIZE.load(Ordering::Relaxed) { @@ -344,6 +344,28 @@ pub fn page_size() -> usize { }; } +/// Is `bytes` a multiple of the host page size? 
+pub fn usize_is_multiple_of_host_page_size(bytes: usize) -> bool { + bytes % host_page_size() == 0 +} + +/// Round the given byte size up to a multiple of the host OS page size. +pub fn round_u64_up_to_host_pages(bytes: u64) -> u64 { + let page_size = u64::try_from(crate::runtime::vm::host_page_size()).unwrap(); + debug_assert!(page_size.is_power_of_two()); + bytes + .checked_add(page_size - 1) + .map(|val| val & !(page_size - 1)) + .unwrap_or(u64::MAX / page_size + 1) +} + +/// Same as `round_u64_up_to_host_pages` but for `usize`s. +pub fn round_usize_up_to_host_pages(bytes: usize) -> usize { + let bytes = u64::try_from(bytes).unwrap(); + let rounded = round_u64_up_to_host_pages(bytes); + usize::try_from(rounded).unwrap() +} + /// Result of `Memory::atomic_wait32` and `Memory::atomic_wait64` #[derive(Copy, Clone, PartialEq, Eq, Debug)] pub enum WaitResult { diff --git a/crates/wasmtime/src/runtime/vm/cow.rs b/crates/wasmtime/src/runtime/vm/cow.rs index b86fba30d818..d5325ca88f0f 100644 --- a/crates/wasmtime/src/runtime/vm/cow.rs +++ b/crates/wasmtime/src/runtime/vm/cow.rs @@ -164,7 +164,7 @@ impl ModuleMemoryImages { _ => return Ok(None), }; let mut memories = PrimaryMap::with_capacity(map.len()); - let page_size = crate::runtime::vm::page_size() as u32; + let page_size = crate::runtime::vm::host_page_size() as u32; for (memory_index, init) in map { // mmap-based-initialization only works for defined memories with a // known starting point of all zeros, so bail out if the mmeory is @@ -189,6 +189,23 @@ impl ModuleMemoryImages { // creation files then we fail creating `ModuleMemoryImages` since this // memory couldn't be represented. let data = &wasm_data[init.data.start as usize..init.data.end as usize]; + if module.memory_plans[memory_index] + .memory + .minimum_byte_size() + .map_or(false, |mem_initial_len| { + init.offset + u64::try_from(data.len()).unwrap() > mem_initial_len + }) + { + // The image is rounded up to multiples of the host OS page + // size. 
But if Wasm is using a custom page size, the Wasm page + // size might be smaller than the host OS page size, and that + // rounding might have made the image larger than the Wasm + // memory's initial length. This is *probably* okay, since the + // rounding would have just introduced new runs of zeroes in the + // image, but out of an abundance of caution we don't generate + // CoW images in this scenario. + return Ok(None); + } let image = match MemoryImage::new(page_size, init.offset, data, mmap)? { Some(image) => image, None => return Ok(None), @@ -728,8 +745,8 @@ impl Drop for MemoryImageSlot { #[cfg(all(test, target_os = "linux", not(miri)))] mod test { use super::{MemoryImage, MemoryImageSlot, MemoryImageSource, MemoryPlan, MemoryStyle}; + use crate::runtime::vm::host_page_size; use crate::runtime::vm::mmap::Mmap; - use crate::runtime::vm::page_size; use crate::runtime::vm::sys::vm::decommit_pages; use anyhow::Result; use std::sync::Arc; @@ -737,7 +754,7 @@ mod test { fn create_memfd_with_data(offset: usize, data: &[u8]) -> Result { // Offset must be page-aligned. 
- let page_size = page_size(); + let page_size = host_page_size(); assert_eq!(offset & (page_size - 1), 0); // The image length is rounded up to the nearest page size @@ -759,6 +776,7 @@ mod test { maximum: None, shared: false, memory64: false, + page_size_log2: Memory::DEFAULT_PAGE_SIZE_LOG2, }, pre_guard_size: 0, offset_guard_size: 0, @@ -804,7 +822,7 @@ mod test { #[test] fn instantiate_image() { - let page_size = page_size(); + let page_size = host_page_size(); let plan = dummy_memory_plan(MemoryStyle::Static { byte_reservation: 4 << 30, }); @@ -865,7 +883,7 @@ mod test { #[test] #[cfg(target_os = "linux")] fn memset_instead_of_madvise() { - let page_size = page_size(); + let page_size = host_page_size(); let plan = dummy_memory_plan(MemoryStyle::Static { byte_reservation: 100 << 16, }); @@ -914,7 +932,7 @@ mod test { #[test] #[cfg(target_os = "linux")] fn dynamic() { - let page_size = page_size(); + let page_size = host_page_size(); let plan = dummy_memory_plan(MemoryStyle::Dynamic { reserve: 200 }); let mut mmap = Mmap::accessible_reserved(0, 4 << 20).unwrap(); diff --git a/crates/wasmtime/src/runtime/vm/instance.rs b/crates/wasmtime/src/runtime/vm/instance.rs index 6ee7f8bd8a49..a46ffb508bdd 100644 --- a/crates/wasmtime/src/runtime/vm/instance.rs +++ b/crates/wasmtime/src/runtime/vm/instance.rs @@ -602,6 +602,11 @@ impl Instance { index } + /// Get the given memory's page size, in bytes. + pub(crate) fn memory_page_size(&self, index: MemoryIndex) -> usize { + usize::try_from(self.module().memory_plans[index].memory.page_size()).unwrap() + } + /// Grow memory by the specified amount of pages. 
/// /// Returns `None` if memory can't be grown by the specified amount diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator.rs b/crates/wasmtime/src/runtime/vm/instance/allocator.rs index b73f53e2fafb..9aa881972daa 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator.rs @@ -10,8 +10,8 @@ use anyhow::{bail, Result}; use core::{any::Any, mem, ptr}; use wasmtime_environ::{ DefinedMemoryIndex, DefinedTableIndex, HostPtr, InitMemory, MemoryInitialization, - MemoryInitializer, MemoryPlan, Module, PrimaryMap, TableInitialValue, TablePlan, Trap, - VMOffsets, WasmHeapTopType, WASM_PAGE_SIZE, + MemoryInitializer, MemoryPlan, Module, PrimaryMap, SizeOverflow, TableInitialValue, TablePlan, + Trap, VMOffsets, WasmHeapTopType, }; #[cfg(feature = "gc")] @@ -690,8 +690,13 @@ fn initialize_memories(instance: &mut Instance, module: &Module) -> Result<()> { } impl InitMemory for InitMemoryAtInstantiation<'_> { - fn memory_size_in_pages(&mut self, memory: wasmtime_environ::MemoryIndex) -> u64 { - (self.instance.get_memory(memory).current_length() as u64) / u64::from(WASM_PAGE_SIZE) + fn memory_size_in_bytes( + &mut self, + memory: wasmtime_environ::MemoryIndex, + ) -> Result { + let len = self.instance.get_memory(memory).current_length(); + let len = u64::try_from(len).unwrap(); + Ok(len) } fn eval_offset( diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs index 28c4dc179092..d64d463f0b4a 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs @@ -64,9 +64,7 @@ use anyhow::{anyhow, bail, Context, Result}; use std::ffi::c_void; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::Mutex; -use wasmtime_environ::{ - DefinedMemoryIndex, MemoryPlan, MemoryStyle, Module, Tunables, 
WASM_PAGE_SIZE, -}; +use wasmtime_environ::{DefinedMemoryIndex, MemoryPlan, MemoryStyle, Module, Tunables}; /// A set of allocator slots. /// @@ -269,7 +267,6 @@ impl MemoryPool { ); } - let max_memory_pages = self.layout.max_memory_bytes / WASM_PAGE_SIZE as usize; for (i, plan) in module .memory_plans .iter() @@ -288,12 +285,18 @@ impl MemoryPool { } MemoryStyle::Dynamic { .. } => {} } - if plan.memory.minimum > u64::try_from(max_memory_pages).unwrap() { + let min = plan.memory.minimum_byte_size().with_context(|| { + format!( + "memory index {} has a minimum byte size that cannot be represented in a u64", + i.as_u32() + ) + })?; + if min > u64::try_from(self.layout.max_memory_bytes).unwrap() { bail!( - "memory index {} has a minimum page size of {} which exceeds the limit of {}", + "memory index {} has a minimum byte size of {} which exceeds the limit of {} bytes", i.as_u32(), - plan.memory.minimum, - max_memory_pages, + min, + self.layout.max_memory_bytes, ); } } @@ -358,7 +361,10 @@ impl MemoryPool { let mut slot = self.take_memory_image_slot(allocation_index); let image = request.runtime_info.memory_image(memory_index)?; - let initial_size = memory_plan.memory.minimum * WASM_PAGE_SIZE as u64; + let initial_size = memory_plan + .memory + .minimum_byte_size() + .expect("min size checked in validation"); // If instantiation fails, we can propagate the error // upward and drop the slot. This will cause the Drop @@ -719,7 +725,7 @@ fn calculate(constraints: &SlabConstraints) -> Result { }; // The page-aligned slot size; equivalent to `memory_and_guard_size`. 
- let page_alignment = crate::runtime::vm::page_size() - 1; + let page_alignment = crate::runtime::vm::host_page_size() - 1; let slot_bytes = slot_bytes .checked_add(page_alignment) .and_then(|slot_bytes| Some(slot_bytes & !page_alignment)) @@ -753,6 +759,8 @@ mod tests { use super::*; use proptest::prelude::*; + const WASM_PAGE_SIZE: u32 = wasmtime_environ::Memory::DEFAULT_PAGE_SIZE; + #[cfg(target_pointer_width = "64")] #[test] fn test_memory_pool() -> Result<()> { @@ -970,6 +978,6 @@ mod tests { } fn is_aligned(bytes: usize) -> bool { - bytes % crate::runtime::vm::page_size() == 0 + bytes % crate::runtime::vm::host_page_size() == 0 } } diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs index db8772460255..7215f56d1f6e 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs @@ -30,7 +30,7 @@ pub struct TablePool { impl TablePool { /// Create a new `TablePool`. 
pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result { - let page_size = crate::runtime::vm::page_size(); + let page_size = crate::runtime::vm::host_page_size(); let table_size = round_up_to_pow2( mem::size_of::<*mut u8>() @@ -225,7 +225,7 @@ mod tests { ..Default::default() })?; - let host_page_size = crate::runtime::vm::page_size(); + let host_page_size = crate::runtime::vm::host_page_size(); assert_eq!(pool.table_size, host_page_size); assert_eq!(pool.max_total_tables, 7); diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs index 43a404b7f086..be81ba8952d8 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs @@ -33,7 +33,7 @@ impl StackPool { pub fn new(config: &PoolingInstanceAllocatorConfig) -> Result { use rustix::mm::{mprotect, MprotectFlags}; - let page_size = crate::runtime::vm::page_size(); + let page_size = crate::runtime::vm::host_page_size(); // Add a page to the stack size for the guard page when using fiber stacks let stack_size = if config.stack_size == 0 { @@ -228,7 +228,7 @@ mod tests { }; let pool = StackPool::new(&config)?; - let native_page_size = crate::runtime::vm::page_size(); + let native_page_size = crate::runtime::vm::host_page_size(); assert_eq!(pool.stack_size, 2 * native_page_size); assert_eq!(pool.max_stacks, 10); assert_eq!(pool.page_size, native_page_size); diff --git a/crates/wasmtime/src/runtime/vm/libcalls.rs b/crates/wasmtime/src/runtime/vm/libcalls.rs index 6e27f6d2b98c..e8ee64dcfd97 100644 --- a/crates/wasmtime/src/runtime/vm/libcalls.rs +++ b/crates/wasmtime/src/runtime/vm/libcalls.rs @@ -197,7 +197,7 @@ fn memory32_grow( error, needs_backtrace: true, })? 
{ - Some(size_in_bytes) => size_in_bytes / (wasmtime_environ::WASM_PAGE_SIZE as usize), + Some(size_in_bytes) => size_in_bytes / instance.memory_page_size(memory_index), None => usize::max_value(), }; Ok(result as *mut _) diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index fcfb774b4b30..ceab8e764377 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -14,9 +14,7 @@ use anyhow::{bail, format_err, Result}; use core::ops::Range; use core::ptr::NonNull; use core::time::Duration; -use wasmtime_environ::{MemoryPlan, MemoryStyle, Trap, WASM32_MAX_SIZE, WASM64_MAX_SIZE}; - -const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize; +use wasmtime_environ::{MemoryPlan, MemoryStyle, Trap}; /// A memory allocator pub trait RuntimeMemoryCreator: Send + Sync { @@ -52,8 +50,20 @@ impl RuntimeMemoryCreator for DefaultMemoryCreator { } } -/// A linear memory +/// A linear memory's backing storage. +/// +/// This does not a full Wasm linear memory, as it may pub trait RuntimeLinearMemory: Send + Sync { + /// Returns the log2 of this memory's page size, in bytes. + fn page_size_log2(&self) -> u8; + + /// Returns this memory's page size, in bytes. + fn page_size(&self) -> u64 { + let log2 = self.page_size_log2(); + debug_assert!(log2 == 16 || log2 == 0); + 1 << self.page_size_log2() + } + /// Returns the number of allocated bytes. fn byte_size(&self) -> usize; @@ -82,24 +92,23 @@ pub trait RuntimeLinearMemory: Send + Sync { return Ok(Some((old_byte_size, old_byte_size))); } + let page_size = usize::try_from(self.page_size()).unwrap(); + // The largest wasm-page-aligned region of memory is possible to // represent in a `usize`. This will be impossible for the system to // actually allocate. - let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE); + let absolute_max = 0usize.wrapping_sub(page_size); // Calculate the byte size of the new allocation. 
Let it overflow up to // `usize::MAX`, then clamp it down to `absolute_max`. let new_byte_size = usize::try_from(delta_pages) .unwrap_or(usize::MAX) - .saturating_mul(WASM_PAGE_SIZE) - .saturating_add(old_byte_size); - let new_byte_size = if new_byte_size > absolute_max { - absolute_max - } else { - new_byte_size - }; + .saturating_mul(page_size) + .saturating_add(old_byte_size) + .min(absolute_max); let maximum = self.maximum_byte_size(); + // Store limiter gets first chance to reject memory_growing. if let Some(store) = &mut store { if !store.memory_growing(old_byte_size, new_byte_size, maximum)? { @@ -167,18 +176,29 @@ pub struct MmapMemory { // The underlying allocation. mmap: Mmap, - // The number of bytes that are accessible in `mmap` and available for - // reading and writing. + // The current length of this Wasm memory, in bytes. // - // This region starts at `pre_guard_size` offset from the base of `mmap`. - accessible: usize, + // This region starts at `pre_guard_size` offset from the base of `mmap`. It + // is always accessible, which means that if the Wasm page size is smaller + // than the host page size, there may be some trailing region in the `mmap` + // that is accessible but should not be accessed. (We rely on explicit + // bounds checks in the compiled code to protect this region.) + len: usize, // The optional maximum accessible size, in bytes, for this linear memory. // // Note that this maximum does not factor in guard pages, so this isn't the // maximum size of the linear address space reservation for this memory. + // + // This is *not* always a multiple of the host page size, and + // `self.accessible()` may go past `self.maximum` when Wasm is using a small + // custom page size due to `self.accessible()`'s rounding up to the host + // page size. maximum: Option, + // The log2 of this Wasm memory's page size, in bytes. + page_size_log2: u8, + // The amount of extra bytes to reserve whenever memory grows. 
This is // specified so that the cost of repeated growth is amortized. extra_to_reserve_on_growth: usize, @@ -208,10 +228,18 @@ impl MmapMemory { let offset_guard_bytes = usize::try_from(plan.offset_guard_size).unwrap(); let pre_guard_bytes = usize::try_from(plan.pre_guard_size).unwrap(); + // Ensure that our guard regions are multiples of the host page size. + let offset_guard_bytes = + crate::runtime::vm::round_usize_up_to_host_pages(offset_guard_bytes); + let pre_guard_bytes = crate::runtime::vm::round_usize_up_to_host_pages(pre_guard_bytes); + let (alloc_bytes, extra_to_reserve_on_growth) = match plan.style { // Dynamic memories start with the minimum size plus the `reserve` // amount specified to grow into. - MemoryStyle::Dynamic { reserve } => (minimum, usize::try_from(reserve).unwrap()), + MemoryStyle::Dynamic { reserve } => ( + crate::runtime::vm::round_usize_up_to_host_pages(minimum), + crate::runtime::vm::round_usize_up_to_host_pages(usize::try_from(reserve).unwrap()), + ), // Static memories will never move in memory and consequently get // their entire allocation up-front with no extra room to grow into. 
@@ -225,6 +253,7 @@ impl MmapMemory { (bound_bytes, 0) } }; + assert_eq!(alloc_bytes % crate::runtime::vm::host_page_size(), 0); let request_bytes = pre_guard_bytes .checked_add(alloc_bytes) @@ -234,7 +263,8 @@ impl MmapMemory { let mut mmap = Mmap::accessible_reserved(0, request_bytes)?; if minimum > 0 { - mmap.make_accessible(pre_guard_bytes, minimum)?; + let accessible = crate::runtime::vm::round_usize_up_to_host_pages(minimum); + mmap.make_accessible(pre_guard_bytes, accessible)?; } // If a memory image was specified, try to create the MemoryImageSlot on @@ -259,19 +289,33 @@ impl MmapMemory { Ok(Self { mmap, - accessible: minimum, + len: minimum, maximum, + page_size_log2: plan.memory.page_size_log2, pre_guard_size: pre_guard_bytes, offset_guard_size: offset_guard_bytes, extra_to_reserve_on_growth, memory_image, }) } + + /// Get the length of the accessible portion of the underlying `mmap`. This + /// is the same region as `self.len` but rounded up to a multiple of the + /// host page size. 
+ fn accessible(&self) -> usize { + let accessible = crate::runtime::vm::round_usize_up_to_host_pages(self.len); + debug_assert!(accessible <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size); + accessible + } } impl RuntimeLinearMemory for MmapMemory { + fn page_size_log2(&self) -> u8 { + self.page_size_log2 + } + fn byte_size(&self) -> usize { - self.accessible + self.len } fn maximum_byte_size(&self) -> Option { @@ -279,26 +323,37 @@ impl RuntimeLinearMemory for MmapMemory { } fn grow_to(&mut self, new_size: usize) -> Result<()> { - if new_size > self.mmap.len() - self.offset_guard_size - self.pre_guard_size { + assert!(crate::runtime::vm::usize_is_multiple_of_host_page_size( + self.offset_guard_size + )); + assert!(crate::runtime::vm::usize_is_multiple_of_host_page_size( + self.pre_guard_size + )); + assert!(crate::runtime::vm::usize_is_multiple_of_host_page_size( + self.mmap.len() + )); + + let new_accessible = crate::runtime::vm::round_usize_up_to_host_pages(new_size); + if new_accessible > self.mmap.len() - self.offset_guard_size - self.pre_guard_size { // If the new size of this heap exceeds the current size of the // allocation we have, then this must be a dynamic heap. Use // `new_size` to calculate a new size of an allocation, allocate it, // and then copy over the memory from before. let request_bytes = self .pre_guard_size - .checked_add(new_size) + .checked_add(new_accessible) .and_then(|s| s.checked_add(self.extra_to_reserve_on_growth)) .and_then(|s| s.checked_add(self.offset_guard_size)) .ok_or_else(|| format_err!("overflow calculating size of memory allocation"))?; let mut new_mmap = Mmap::accessible_reserved(0, request_bytes)?; - new_mmap.make_accessible(self.pre_guard_size, new_size)?; + new_mmap.make_accessible(self.pre_guard_size, new_accessible)?; // This method has an exclusive reference to `self.mmap` and just // created `new_mmap` so it should be safe to acquire references // into both of them and copy between them. 
unsafe { - let range = self.pre_guard_size..self.pre_guard_size + self.accessible; + let range = self.pre_guard_size..self.pre_guard_size + self.len; let src = self.mmap.slice(range.clone()); let dst = new_mmap.slice_mut(range); dst.copy_from_slice(src); @@ -323,14 +378,28 @@ impl RuntimeLinearMemory for MmapMemory { // or "dynamic" heaps which have some space reserved after the // initial allocation to grow into before the heap is moved in // memory. - assert!(new_size > self.accessible); - self.mmap.make_accessible( - self.pre_guard_size + self.accessible, - new_size - self.accessible, - )?; + assert!(new_size > self.len); + assert!(self.maximum.map_or(true, |max| new_size <= max)); + assert!(new_size <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size); + + let new_accessible = crate::runtime::vm::round_usize_up_to_host_pages(new_size); + assert!( + new_accessible <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size, + ); + + // If the Wasm memory's page size is smaller than the host's page + // size, then we might not need to actually change permissions, + // since we are forced to round our accessible range up to the + // host's page size. + if new_accessible > self.accessible() { + self.mmap.make_accessible( + self.pre_guard_size + self.accessible(), + new_accessible - self.accessible(), + )?; + } } - self.accessible = new_size; + self.len = new_size; Ok(()) } @@ -338,7 +407,8 @@ impl RuntimeLinearMemory for MmapMemory { fn vmmemory(&mut self) -> VMMemoryDefinition { VMMemoryDefinition { base: unsafe { self.mmap.as_mut_ptr().add(self.pre_guard_size) }, - current_length: self.accessible.into(), + current_length: self.len.into(), + page_size_log2: self.page_size_log2, } } @@ -372,6 +442,9 @@ struct StaticMemory { /// The current size, in bytes, of this memory. size: usize, + /// The log2 of this memory's page size. 
+ page_size_log2: u8, + /// The size, in bytes, of the virtual address allocation starting at `base` /// and going to the end of the guard pages at the end of the linear memory. memory_and_guard_size: usize, @@ -387,6 +460,7 @@ impl StaticMemory { base_capacity: usize, initial_size: usize, maximum_size: Option, + page_size_log2: u8, memory_image: MemoryImageSlot, memory_and_guard_size: usize, ) -> Result { @@ -409,6 +483,7 @@ impl StaticMemory { base: SendSyncPtr::new(NonNull::new(base_ptr).unwrap()), capacity: base_capacity, size: initial_size, + page_size_log2, memory_image, memory_and_guard_size, }) @@ -416,6 +491,10 @@ impl StaticMemory { } impl RuntimeLinearMemory for StaticMemory { + fn page_size_log2(&self) -> u8 { + self.page_size_log2 + } + fn byte_size(&self) -> usize { self.size } @@ -440,6 +519,7 @@ impl RuntimeLinearMemory for StaticMemory { VMMemoryDefinition { base: self.base.as_ptr(), current_length: self.size.into(), + page_size_log2: self.page_size_log2, } } @@ -494,6 +574,7 @@ impl Memory { base_capacity, minimum, maximum, + plan.memory.page_size_log2, memory_image, memory_and_guard_size, )?; @@ -512,24 +593,13 @@ impl Memory { /// Calls the `store`'s limiter to optionally prevent a memory from being allocated. /// - /// Returns the minimum size and optional maximum size of the memory, in - /// bytes. + /// Returns a tuple of the minimum size, optional maximum size, and log(page + /// size) of the memory, all in bytes. pub(crate) fn limit_new( plan: &MemoryPlan, store: Option<&mut dyn Store>, ) -> Result<(usize, Option)> { - // Sanity-check what should already be true from wasm module validation. 
- let absolute_max = if plan.memory.memory64 { - WASM64_MAX_SIZE - } else { - WASM32_MAX_SIZE - }; - if let Ok(size) = plan.memory.minimum_byte_size() { - assert!(size <= absolute_max); - } - if let Ok(max) = plan.memory.maximum_byte_size() { - assert!(max <= absolute_max); - } + let page_size = usize::try_from(plan.memory.page_size()).unwrap(); // This is the absolute possible maximum that the module can try to // allocate, which is our entire address space minus a wasm page. That @@ -542,7 +612,15 @@ impl Memory { // here. To actually faithfully represent the byte requests of modules // we'd have to represent things as `u128`, but that's kinda // overkill for this purpose. - let absolute_max = 0usize.wrapping_sub(WASM_PAGE_SIZE); + let absolute_max = 0usize.wrapping_sub(page_size); + + // Sanity-check what should already be true from wasm module validation. + if let Ok(size) = plan.memory.minimum_byte_size() { + assert!(size <= u64::try_from(absolute_max).unwrap()); + } + if let Ok(max) = plan.memory.maximum_byte_size() { + assert!(max <= u64::try_from(absolute_max).unwrap()); + } // If the minimum memory size overflows the size of our own address // space, then we can't satisfy this request, but defer the error to @@ -594,9 +672,15 @@ impl Memory { plan.memory.minimum ) })?; + Ok((minimum, maximum)) } + /// Returns this memory's page size, in bytes. + pub fn page_size(&self) -> u64 { + self.0.page_size() + } + /// Returns the number of allocated wasm pages. pub fn byte_size(&self) -> usize { self.0.byte_size() diff --git a/crates/wasmtime/src/runtime/vm/mmap.rs b/crates/wasmtime/src/runtime/vm/mmap.rs index c2bbf9f84f4f..000721f54c95 100644 --- a/crates/wasmtime/src/runtime/vm/mmap.rs +++ b/crates/wasmtime/src/runtime/vm/mmap.rs @@ -21,7 +21,7 @@ impl Mmap { /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned /// accessible memory. 
pub fn with_at_least(size: usize) -> Result { - let page_size = crate::runtime::vm::page_size(); + let page_size = crate::runtime::vm::host_page_size(); let rounded_size = (size + (page_size - 1)) & !(page_size - 1); Self::accessible_reserved(rounded_size, rounded_size) } @@ -53,7 +53,7 @@ impl Mmap { /// This function will panic if `accessible_size` is greater than /// `mapping_size` or if either of them are not page-aligned. pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result { - let page_size = crate::runtime::vm::page_size(); + let page_size = crate::runtime::vm::host_page_size(); assert!(accessible_size <= mapping_size); assert_eq!(mapping_size & (page_size - 1), 0); assert_eq!(accessible_size & (page_size - 1), 0); @@ -96,7 +96,7 @@ impl Mmap { /// This function will panic if `start` or `len` is not page aligned or if /// either are outside the bounds of this mapping. pub fn make_accessible(&mut self, start: usize, len: usize) -> Result<()> { - let page_size = crate::runtime::vm::page_size(); + let page_size = crate::runtime::vm::host_page_size(); assert_eq!(start & (page_size - 1), 0); assert_eq!(len & (page_size - 1), 0); assert!(len <= self.len()); @@ -185,7 +185,7 @@ impl Mmap { assert!(range.end <= self.len()); assert!(range.start <= range.end); assert!( - range.start % crate::runtime::vm::page_size() == 0, + range.start % crate::runtime::vm::host_page_size() == 0, "changing of protections isn't page-aligned", ); self.sys @@ -199,7 +199,7 @@ impl Mmap { assert!(range.end <= self.len()); assert!(range.start <= range.end); assert!( - range.start % crate::runtime::vm::page_size() == 0, + range.start % crate::runtime::vm::host_page_size() == 0, "changing of protections isn't page-aligned", ); self.sys diff --git a/crates/wasmtime/src/runtime/vm/mpk/sys.rs b/crates/wasmtime/src/runtime/vm/mpk/sys.rs index 833d97c18fe7..c3799b07bb1b 100644 --- a/crates/wasmtime/src/runtime/vm/mpk/sys.rs +++ 
b/crates/wasmtime/src/runtime/vm/mpk/sys.rs @@ -9,7 +9,7 @@ //! [`pkey_mprotect`]: https://man7.org/linux/man-pages/man2/pkey_mprotect.2.html //! [`pkeys`]: https://man7.org/linux/man-pages/man7/pkeys.7.html -use crate::runtime::vm::page_size; +use crate::runtime::vm::host_page_size; use anyhow::Result; use std::io::Error; @@ -56,7 +56,7 @@ pub fn pkey_free(key: u32) -> Result<()> { /// /// [docs]: https://man7.org/linux/man-pages/man2/pkey_mprotect.2.html pub fn pkey_mprotect(addr: usize, len: usize, prot: u32, key: u32) -> Result<()> { - let page_size = page_size(); + let page_size = host_page_size(); if addr % page_size != 0 { log::warn!( "memory must be page-aligned for MPK (addr = {addr:#x}, page size = {page_size}" diff --git a/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs b/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs index 2796e1fa5ace..18a8c8bf6f8b 100644 --- a/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs +++ b/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs @@ -32,7 +32,7 @@ impl Mmap { } pub fn reserve(size: usize) -> Result { - let layout = Layout::from_size_align(size, crate::runtime::vm::page_size()).unwrap(); + let layout = Layout::from_size_align(size, crate::runtime::vm::host_page_size()).unwrap(); let ptr = unsafe { alloc::alloc(layout) }; if ptr.is_null() { bail!("failed to allocate memory"); @@ -88,7 +88,7 @@ impl Drop for Mmap { } unsafe { let layout = - Layout::from_size_align(self.len(), crate::runtime::vm::page_size()).unwrap(); + Layout::from_size_align(self.len(), crate::runtime::vm::host_page_size()).unwrap(); alloc::dealloc(self.as_mut_ptr(), layout); } } diff --git a/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs b/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs index 855b5dc7dbdc..9a12bd460843 100644 --- a/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs +++ b/crates/wasmtime/src/runtime/vm/sys/unix/signals.rs @@ -355,7 +355,7 @@ pub fn lazy_per_thread_init() { // ... 
but failing that we need to allocate our own, so do all that // here. - let page_size = crate::runtime::vm::page_size(); + let page_size = crate::runtime::vm::host_page_size(); let guard_size = page_size; let alloc_size = guard_size + MIN_STACK_SIZE; diff --git a/crates/wasmtime/src/runtime/vm/sys/unix/unwind.rs b/crates/wasmtime/src/runtime/vm/sys/unix/unwind.rs index 957dda05e26a..f9ff4aa91fec 100644 --- a/crates/wasmtime/src/runtime/vm/sys/unix/unwind.rs +++ b/crates/wasmtime/src/runtime/vm/sys/unix/unwind.rs @@ -89,7 +89,7 @@ impl UnwindRegistration { unwind_len: usize, ) -> Result { debug_assert_eq!( - unwind_info as usize % crate::runtime::vm::page_size(), + unwind_info as usize % crate::runtime::vm::host_page_size(), 0, "The unwind info must always be aligned to a page" ); diff --git a/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs b/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs index 5394f685566a..7e5eb906c2eb 100644 --- a/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs +++ b/crates/wasmtime/src/runtime/vm/threads/shared_memory.rs @@ -191,6 +191,10 @@ unsafe impl Sync for LongTermVMMemoryDefinition {} /// Proxy all calls through the [`RwLock`]. 
impl RuntimeLinearMemory for SharedMemory { + fn page_size_log2(&self) -> u8 { + self.0.memory.read().unwrap().page_size_log2() + } + fn byte_size(&self) -> usize { self.0.memory.read().unwrap().byte_size() } diff --git a/crates/wasmtime/src/runtime/vm/threads/shared_memory_disabled.rs b/crates/wasmtime/src/runtime/vm/threads/shared_memory_disabled.rs index 4b9e8b1d7800..0ce4cf33a0bf 100644 --- a/crates/wasmtime/src/runtime/vm/threads/shared_memory_disabled.rs +++ b/crates/wasmtime/src/runtime/vm/threads/shared_memory_disabled.rs @@ -63,6 +63,10 @@ impl SharedMemory { } impl RuntimeLinearMemory for SharedMemory { + fn page_size_log2(&self) -> u8 { + match *self {} + } + fn byte_size(&self) -> usize { match *self {} } diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index 37ef6b929a3a..f6692139bde6 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -266,16 +266,20 @@ pub struct VMMemoryDefinition { /// atomically. For relaxed access, see /// [`VMMemoryDefinition::current_length()`]. pub current_length: AtomicUsize, + + /// The log2 of this memory's page size, in bytes. + pub page_size_log2: u8, } impl VMMemoryDefinition { - /// Return the current length of the [`VMMemoryDefinition`] by performing a - /// relaxed load; do not use this function for situations in which a precise - /// length is needed. Owned memories (i.e., non-shared) will always return a - /// precise result (since no concurrent modification is possible) but shared - /// memories may see an imprecise value--a `current_length` potentially - /// smaller than what some other thread observes. Since Wasm memory only - /// grows, this under-estimation may be acceptable in certain cases. + /// Return the current length (in bytes) of the [`VMMemoryDefinition`] by + /// performing a relaxed load; do not use this function for situations in + /// which a precise length is needed. 
Owned memories (i.e., non-shared) will + /// always return a precise result (since no concurrent modification is + /// possible) but shared memories may see an imprecise value--a + /// `current_length` potentially smaller than what some other thread + /// observes. Since Wasm memory only grows, this under-estimation may be + /// acceptable in certain cases. pub fn current_length(&self) -> usize { self.current_length.load(Ordering::Relaxed) } @@ -287,6 +291,7 @@ impl VMMemoryDefinition { VMMemoryDefinition { base: other.base, current_length: other.current_length().into(), + page_size_log2: other.page_size_log2, } } } diff --git a/tests/all/limits.rs b/tests/all/limits.rs index 8971fea828b9..9150ce20de78 100644 --- a/tests/all/limits.rs +++ b/tests/all/limits.rs @@ -1,6 +1,6 @@ use wasmtime::*; -const WASM_PAGE_SIZE: usize = wasmtime_environ::WASM_PAGE_SIZE as usize; +const WASM_PAGE_SIZE: usize = wasmtime_environ::Memory::DEFAULT_PAGE_SIZE as usize; #[test] #[cfg_attr(miri, ignore)] diff --git a/tests/all/memory.rs b/tests/all/memory.rs index 3e3aaa1396ed..7120d9556a14 100644 --- a/tests/all/memory.rs +++ b/tests/all/memory.rs @@ -357,7 +357,7 @@ fn massive_64_bit_still_limited() -> Result<()> { let mut store = Store::new(&engine, MyLimiter { hit: false }); store.limiter(|x| x); - let ty = MemoryType::new64(1 << 48, None); + let ty = MemoryType::new64(1 << 46, None); assert!(Memory::new(&mut store, ty).is_err()); assert!(store.data().hit); @@ -506,7 +506,24 @@ fn memory64_maximum_minimum() -> Result<()> { let engine = Engine::new(&config)?; let mut store = Store::new(&engine, ()); - assert!(Memory::new(&mut store, MemoryType::new64(1 << 48, None)).is_err()); + assert!(MemoryTypeBuilder::default() + .memory64(true) + .min(1 << 48) + .build() + .is_err()); + + let module = Module::new( + &engine, + format!(r#"(module (import "" "" (memory i64 {})))"#, 1u64 << 48), + )?; + let mem_ty = module + .imports() + .next() + .unwrap() + .ty() + .unwrap_memory() + .clone(); + 
assert!(Memory::new(&mut store, mem_ty).is_err()); let module = Module::new( &engine, @@ -545,7 +562,12 @@ fn shared_memory_basics() -> Result<()> { assert!(SharedMemory::new(&engine, MemoryType::new(1, Some(1))).is_err()); assert!(SharedMemory::new(&engine, MemoryType::new64(1, None)).is_err()); assert!(SharedMemory::new(&engine, MemoryType::new64(1, Some(1))).is_err()); - assert!(SharedMemory::new(&engine, MemoryType::shared(1, 0)).is_err()); + assert!(MemoryTypeBuilder::default() + .shared(true) + .min(1) + .max(Some(0)) + .build() + .is_err()); let memory = SharedMemory::new(&engine, MemoryType::shared(1, 1))?; assert!(memory.ty().is_shared()); @@ -669,3 +691,58 @@ fn non_page_aligned_static_memory() -> Result<()> { Memory::new(&mut Store::new(&engine, ()), ty)?; Ok(()) } + +#[test] +fn new_memory_with_custom_page_size() -> Result<()> { + let engine = Engine::default(); + let mut store = Store::new(&engine, ()); + + let ty = MemoryTypeBuilder::default() + .page_size_log2(0) + .min(4096) + .max(Some(9000)) + .build()?; + + let mem = Memory::new(&mut store, ty)?; + assert_eq!(mem.data_size(&store), 4096); + assert_eq!(mem.size(&store), 4096); + + mem.grow(&mut store, 9000 - 4096)?; + assert_eq!(mem.data_size(&store), 9000); + assert_eq!(mem.size(&store), 9000); + + assert!(mem.grow(&mut store, 1).is_err()); + assert_eq!(mem.data_size(&store), 9000); + assert_eq!(mem.size(&store), 9000); + + Ok(()) +} + +#[test] +#[cfg_attr(miri, ignore)] +fn get_memory_type_with_custom_page_size_from_wasm() -> Result<()> { + let mut config = Config::new(); + config.wasm_custom_page_sizes(true); + let engine = Engine::new(&config)?; + let mut store = Store::new(&engine, ()); + + let module = Module::new( + &engine, + r#" + (module + (memory (export "memory") 1 0xffffffff (pagesize 1)) + ) + "#, + )?; + + let instance = Instance::new(&mut store, &module, &[])?; + let memory = instance.get_memory(&mut store, "memory").unwrap(); + let mem_ty = memory.ty(&store); + + 
assert_eq!(mem_ty.minimum(), 1); + assert_eq!(mem_ty.maximum(), Some(0xffffffff)); + assert_eq!(mem_ty.page_size(), 1); + assert_eq!(mem_ty.page_size_log2(), 0); + + Ok(()) +} diff --git a/tests/all/memory_creator.rs b/tests/all/memory_creator.rs index e289ab895920..e3c3c272da05 100644 --- a/tests/all/memory_creator.rs +++ b/tests/all/memory_creator.rs @@ -1,7 +1,7 @@ #[cfg(all(not(target_os = "windows"), not(miri)))] mod not_for_windows { use wasmtime::*; - use wasmtime_environ::{WASM32_MAX_SIZE, WASM_PAGE_SIZE}; + use wasmtime_environ::WASM32_MAX_SIZE; use rustix::mm::{mmap_anonymous, mprotect, munmap, MapFlags, MprotectFlags, ProtFlags}; @@ -22,7 +22,8 @@ mod not_for_windows { let page_size = rustix::param::page_size(); let guard_size = page_size; let size = maximum + guard_size; - assert_eq!(size % page_size, 0); // we rely on WASM_PAGE_SIZE being multiple of host page size + // We rely on the Wasm page size being multiple of host page size. + assert_eq!(size % page_size, 0); let mem = mmap_anonymous(null_mut(), size, ProtFlags::empty(), MapFlags::PRIVATE) .expect("mmap failed"); @@ -180,7 +181,10 @@ mod not_for_windows { // we take the lock outside the assert, so it won't get poisoned on assert failure let tot_pages = *mem_creator.num_total_bytes.lock().unwrap(); - assert_eq!(tot_pages, (4 * WASM_PAGE_SIZE) as usize); + assert_eq!( + tot_pages, + (4 * wasmtime_environ::Memory::DEFAULT_PAGE_SIZE) as usize + ); drop(store); let tot_pages = *mem_creator.num_total_bytes.lock().unwrap(); diff --git a/tests/misc_testsuite/custom-page-sizes/custom-page-sizes-invalid.wast b/tests/misc_testsuite/custom-page-sizes/custom-page-sizes-invalid.wast new file mode 100644 index 000000000000..80cbcff65812 --- /dev/null +++ b/tests/misc_testsuite/custom-page-sizes/custom-page-sizes-invalid.wast @@ -0,0 +1,110 @@ +;; Page size that is not a power of two. 
+(assert_malformed + (module quote "(memory 0 (pagesize 3))") + "invalid custom page size" +) + +;; Power-of-two page sizes that are not 1 or 64KiB. +(assert_invalid + (module (memory 0 (pagesize 2))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 4))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 8))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 16))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 32))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 64))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 128))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 256))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 512))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 1024))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 2048))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 4096))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 8192))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 16384))) + "invalid custom page size" +) +(assert_invalid + (module (memory 0 (pagesize 32768))) + "invalid custom page size" +) + +;; Power-of-two page size that is larger than 64KiB. +(assert_invalid + (module (memory 0 (pagesize 0x20000))) + "invalid custom page size" +) + +;; Power of two page size that cannot fit in a u64 to exercise checks against +;; shift overflow. +(assert_malformed + (module binary + "\00asm" "\01\00\00\00" + "\05\04\01" ;; Memory section + + ;; memory 0 + "\08" ;; flags w/ custom page size + "\00" ;; minimum = 0 + "\41" ;; pagesize = 2**65 + ) + "invalid custom page size" +) + +;; Importing a memory with the wrong page size. 
+ +(module $m + (memory (export "small-pages-memory") 0 (pagesize 1)) + (memory (export "large-pages-memory") 0 (pagesize 65536)) +) +(register "m" $m) + +(assert_unlinkable + (module + (memory (import "m" "small-pages-memory") 0 (pagesize 65536)) + ) + "memory types incompatible" +) + +(assert_unlinkable + (module + (memory (import "m" "large-pages-memory") 0 (pagesize 1)) + ) + "memory types incompatible" +) diff --git a/tests/misc_testsuite/custom-page-sizes/custom-page-sizes.wast b/tests/misc_testsuite/custom-page-sizes/custom-page-sizes.wast new file mode 100644 index 000000000000..b312f1f94390 --- /dev/null +++ b/tests/misc_testsuite/custom-page-sizes/custom-page-sizes.wast @@ -0,0 +1,108 @@ +;; Check all the valid custom page sizes. +(module (memory 1 (pagesize 1))) +(module (memory 1 (pagesize 65536))) + +;; Check them all again with maximums specified. +(module (memory 1 2 (pagesize 1))) +(module (memory 1 2 (pagesize 65536))) + +;; Check the behavior of memories with page size 1. 
+(module + (memory 0 (pagesize 1)) + (func (export "size") (result i32) + memory.size + ) + (func (export "grow") (param i32) (result i32) + (memory.grow (local.get 0)) + ) + (func (export "load") (param i32) (result i32) + (i32.load8_u (local.get 0)) + ) + (func (export "store") (param i32 i32) + (i32.store8 (local.get 0) (local.get 1)) + ) +) + +(assert_return (invoke "size") (i32.const 0)) +(assert_trap (invoke "load" (i32.const 0)) "out of bounds memory access") + +(assert_return (invoke "grow" (i32.const 65536)) (i32.const 0)) +(assert_return (invoke "size") (i32.const 65536)) +(assert_return (invoke "load" (i32.const 65535)) (i32.const 0)) +(assert_return (invoke "store" (i32.const 65535) (i32.const 1))) +(assert_return (invoke "load" (i32.const 65535)) (i32.const 1)) +(assert_trap (invoke "load" (i32.const 65536)) "out of bounds memory access") + +(assert_return (invoke "grow" (i32.const 65536)) (i32.const 65536)) +(assert_return (invoke "size") (i32.const 131072)) +(assert_return (invoke "load" (i32.const 131071)) (i32.const 0)) +(assert_return (invoke "store" (i32.const 131071) (i32.const 1))) +(assert_return (invoke "load" (i32.const 131071)) (i32.const 1)) +(assert_trap (invoke "load" (i32.const 131072)) "out of bounds memory access") + +;; Although smaller page sizes let us get to memories larger than 2**16 pages, +;; we can't do that with the default page size, even if we explicitly state it +;; as a custom page size. +(module + (memory 0 (pagesize 65536)) + (func (export "size") (result i32) + memory.size + ) + (func (export "grow") (param i32) (result i32) + (memory.grow (local.get 0)) + ) +) +(assert_return (invoke "size") (i32.const 0)) +(assert_return (invoke "grow" (i32.const 65537)) (i32.const -1)) +(assert_return (invoke "size") (i32.const 0)) + +;; Can copy between memories of different page sizes. 
+(module + (memory $small 10 (pagesize 1)) + (memory $large 1 (pagesize 65536)) + + (data (memory $small) (i32.const 0) "\11\22\33\44") + (data (memory $large) (i32.const 0) "\55\66\77\88") + + (func (export "copy-small-to-large") (param i32 i32 i32) + (memory.copy $large $small (local.get 0) (local.get 1) (local.get 2)) + ) + + (func (export "copy-large-to-small") (param i32 i32 i32) + (memory.copy $small $large (local.get 0) (local.get 1) (local.get 2)) + ) + + (func (export "load8-small") (param i32) (result i32) + (i32.load8_u $small (local.get 0)) + ) + + (func (export "load8-large") (param i32) (result i32) + (i32.load8_u $large (local.get 0)) + ) +) + +(assert_return (invoke "copy-small-to-large" (i32.const 6) (i32.const 0) (i32.const 2))) +(assert_return (invoke "load8-large" (i32.const 6)) (i32.const 0x11)) +(assert_return (invoke "load8-large" (i32.const 7)) (i32.const 0x22)) + +(assert_return (invoke "copy-large-to-small" (i32.const 4) (i32.const 1) (i32.const 3))) +(assert_return (invoke "load8-small" (i32.const 4)) (i32.const 0x66)) +(assert_return (invoke "load8-small" (i32.const 5)) (i32.const 0x77)) +(assert_return (invoke "load8-small" (i32.const 6)) (i32.const 0x88)) + +;; Can link together modules that export and import memories with custom page +;; sizes. 
+ +(module $m + (memory (export "small-pages-memory") 0 (pagesize 1)) + (memory (export "large-pages-memory") 0 (pagesize 65536)) +) +(register "m" $m) + +(module + (memory (import "m" "small-pages-memory") 0 (pagesize 1)) +) + +(module + (memory (import "m" "large-pages-memory") 0 (pagesize 65536)) +) diff --git a/tests/wast.rs b/tests/wast.rs index 2f5c1c8aae6b..8e5b38c85084 100644 --- a/tests/wast.rs +++ b/tests/wast.rs @@ -8,7 +8,7 @@ use wasmtime::{ Config, Engine, InstanceAllocationStrategy, MpkEnabled, PoolingAllocationConfig, Store, Strategy, }; -use wasmtime_environ::WASM_PAGE_SIZE; +use wasmtime_environ::Memory; use wasmtime_wast::{SpectestConfig, WastContext}; fn main() { @@ -194,8 +194,10 @@ fn run_wast(wast: &Path, strategy: Strategy, pooling: bool) -> anyhow::Result<() let wast = Path::new(wast); let memory64 = feature_found(wast, "memory64"); - let multi_memory = - feature_found(wast, "multi-memory") || feature_found(wast, "component-model"); + let custom_page_sizes = feature_found(wast, "custom-page-sizes"); + let multi_memory = feature_found(wast, "multi-memory") + || feature_found(wast, "component-model") + || custom_page_sizes; let threads = feature_found(wast, "threads"); let gc = feature_found(wast, "gc"); let function_references = gc || feature_found(wast, "function-references"); @@ -224,6 +226,7 @@ fn run_wast(wast: &Path, strategy: Strategy, pooling: bool) -> anyhow::Result<() .wasm_reference_types(reference_types) .wasm_relaxed_simd(relaxed_simd) .wasm_tail_call(tail_call) + .wasm_custom_page_sizes(custom_page_sizes) .strategy(strategy); if is_cranelift { @@ -263,7 +266,7 @@ fn run_wast(wast: &Path, strategy: Strategy, pooling: bool) -> anyhow::Result<() // also don't reserve lots of memory after dynamic memories for growth // (makes growth slower). 
if use_shared_memory { - cfg.static_memory_maximum_size(2 * WASM_PAGE_SIZE as u64); + cfg.static_memory_maximum_size(2 * u64::from(Memory::DEFAULT_PAGE_SIZE)); } else { cfg.static_memory_maximum_size(0); } diff --git a/winch/codegen/src/codegen/env.rs b/winch/codegen/src/codegen/env.rs index 4e8504f65733..772aff519080 100644 --- a/winch/codegen/src/codegen/env.rs +++ b/winch/codegen/src/codegen/env.rs @@ -13,7 +13,7 @@ use wasmparser::BlockType; use wasmtime_environ::{ BuiltinFunctionIndex, FuncIndex, GlobalIndex, MemoryIndex, MemoryPlan, MemoryStyle, ModuleTranslation, ModuleTypesBuilder, PrimaryMap, PtrSize, TableIndex, TablePlan, TypeConvert, - TypeIndex, VMOffsets, WasmHeapType, WasmValType, WASM_PAGE_SIZE, + TypeIndex, VMOffsets, WasmHeapType, WasmValType, }; #[derive(Debug, Clone, Copy)] @@ -79,6 +79,11 @@ pub struct HeapData { pub min_size: u64, /// The maximum heap size in bytes. pub max_size: Option, + /// The log2 of this memory's page size, in bytes. + /// + /// By default the page size is 64KiB (0x10000; 2**16; 1<<16; 65536) but the + /// custom-page-sizes proposal allows opting into a page size of `1`. + pub page_size_log2: u8, /// Size in bytes of the offset guard pages, located after the heap bounds. pub offset_guard_size: u64, } @@ -300,6 +305,7 @@ impl<'a, 'translation, 'data, P: PtrSize> FuncEnv<'a, 'translation, 'data, P> { }, min_size, max_size, + page_size_log2: plan.memory.page_size_log2, offset_guard_size, }) } @@ -433,16 +439,11 @@ fn heap_style_and_offset_guard_size(plan: &MemoryPlan) -> (HeapStyle, u64) { fn heap_limits(plan: &MemoryPlan) -> (u64, Option) { ( - plan.memory - .minimum - .checked_mul(u64::from(WASM_PAGE_SIZE)) - .unwrap_or_else(|| { - // 2^64 as a minimum doesn't fin in a 64 bit integer. - // So in this case, the minimum is clamped to u64::MAX. 
-                u64::MAX
-            }),
-        plan.memory
-            .maximum
-            .and_then(|max| max.checked_mul(u64::from(WASM_PAGE_SIZE))),
+        plan.memory.minimum_byte_size().unwrap_or_else(|_| {
+            // 2^64 as a minimum doesn't fit in a 64 bit integer.
+            // So in this case, the minimum is clamped to u64::MAX.
+            u64::MAX
+        }),
+        plan.memory.maximum_byte_size().ok(),
     )
 }
diff --git a/winch/codegen/src/codegen/mod.rs b/winch/codegen/src/codegen/mod.rs
index 4b01077b90e5..07d60ed69208 100644
--- a/winch/codegen/src/codegen/mod.rs
+++ b/winch/codegen/src/codegen/mod.rs
@@ -14,7 +14,7 @@ use wasmparser::{
 };
 use wasmtime_environ::{
     GlobalIndex, MemoryIndex, PtrSize, TableIndex, TypeIndex, WasmHeapType, WasmValType,
-    FUNCREF_MASK, WASM_PAGE_SIZE,
+    FUNCREF_MASK,
 };
 
 use cranelift_codegen::{
@@ -854,17 +854,13 @@ where
             .stack
             .push(TypedReg::new(heap_data.ty, size_reg).into());
 
-        // Since the page size is a power-of-two, verify that 2^16, equals the
-        // defined constant. This is mostly a safeguard in case the constant
-        // value ever changes.
-        let pow = 16;
-        debug_assert_eq!(2u32.pow(pow), WASM_PAGE_SIZE);
+        let pow = heap_data.page_size_log2;
 
         // Ensure that the constant is correctly typed according to the heap
        // type to reduce register pressure when emitting the shift operation.
match heap_data.ty { - WasmValType::I32 => self.context.stack.push(Val::i32(pow as i32)), - WasmValType::I64 => self.context.stack.push(Val::i64(pow as i64)), + WasmValType::I32 => self.context.stack.push(Val::i32(i32::from(pow))), + WasmValType::I64 => self.context.stack.push(Val::i64(i64::from(pow))), _ => unreachable!(), } From c1e57a9f25a5a599e9c164d5cbf76b5aec482ae4 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Mon, 10 Jun 2024 16:48:11 -0700 Subject: [PATCH 02/19] Update error string expectation --- tests/all/pooling_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs index 9d4c62a6720e..33bd8eb9b015 100644 --- a/tests/all/pooling_allocator.rs +++ b/tests/all/pooling_allocator.rs @@ -49,7 +49,7 @@ fn memory_limit() -> Result<()> { Ok(_) => panic!("module instantiation should fail"), Err(e) => assert_eq!( e.to_string(), - "memory index 0 has a minimum page size of 4 which exceeds the limit of 3", + "memory index 0 has a minimum byte size of 262144 which exceeds the limit of 196608", ), } From 1657a7842048f19370372dbef85eb37d21bddc46 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Mon, 10 Jun 2024 16:50:21 -0700 Subject: [PATCH 03/19] Remove debug logging --- cranelift/wasm/src/code_translator/bounds_checks.rs | 7 ------- 1 file changed, 7 deletions(-) diff --git a/cranelift/wasm/src/code_translator/bounds_checks.rs b/cranelift/wasm/src/code_translator/bounds_checks.rs index fd2d54f7493a..c66eb8001acb 100644 --- a/cranelift/wasm/src/code_translator/bounds_checks.rs +++ b/cranelift/wasm/src/code_translator/bounds_checks.rs @@ -144,7 +144,6 @@ where // index + 1 > bound // ==> index >= bound HeapStyle::Dynamic { bound_gv } if offset_and_size == 1 => { - log::trace!("FITZGEN: bounds checking case 1"); let bound = get_dynamic_heap_bound(builder, env, heap); let oob = make_compare( builder, @@ -195,7 +194,6 @@ where HeapStyle::Dynamic { bound_gv } if 
can_use_virtual_memory && offset_and_size <= heap.offset_guard_size => { - log::trace!("FITZGEN: bounds checking case 2"); let bound = get_dynamic_heap_bound(builder, env, heap); let oob = make_compare( builder, @@ -226,7 +224,6 @@ where // index + offset + access_size > bound // ==> index > bound - (offset + access_size) HeapStyle::Dynamic { bound_gv } if offset_and_size <= heap.min_size.into() => { - log::trace!("FITZGEN: bounds checking case 3"); let bound = get_dynamic_heap_bound(builder, env, heap); let adjustment = offset_and_size as i64; let adjustment_value = builder.ins().iconst(env.pointer_type(), adjustment); @@ -269,7 +266,6 @@ where // // And we have to handle the overflow case in the left-hand side. HeapStyle::Dynamic { bound_gv } => { - log::trace!("FITZGEN: bounds checking case 4"); let access_size_val = builder .ins() // Explicit cast from u64 to i64: we just want the raw @@ -322,7 +318,6 @@ where // bound`, since we will end up being out-of-bounds regardless of the // given `index`. HeapStyle::Static { bound } if offset_and_size > bound.into() => { - log::trace!("FITZGEN: bounds checking case 5"); assert!( can_use_virtual_memory, "static memories require the ability to use virtual memory" @@ -376,7 +371,6 @@ where && u64::from(u32::MAX) <= u64::from(bound) + u64::from(heap.offset_guard_size) - offset_and_size => { - log::trace!("FITZGEN: bounds checking case 6"); assert!( can_use_virtual_memory, "static memories require the ability to use virtual memory" @@ -406,7 +400,6 @@ where // precise, not rely on the virtual memory subsystem at all, and not // factor in the guard pages here. 
HeapStyle::Static { bound } => { - log::trace!("FITZGEN: bounds checking case 7"); assert!( can_use_virtual_memory, "static memories require the ability to use virtual memory" From 25737d993957cb9cf1d03896d9172bfa7d29453c Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Mon, 10 Jun 2024 16:52:29 -0700 Subject: [PATCH 04/19] Use a right shift instead of a division --- crates/cranelift/src/func_environ.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index b724cb270bb0..74e23c70ef27 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -2479,9 +2479,8 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m } }; - let page_size: u64 = self.module.memory_plans[index].memory.page_size(); - let page_size = i64::try_from(page_size).unwrap(); - let current_length_in_pages = pos.ins().udiv_imm(current_length_in_bytes, page_size); + let page_size_log2 = i64::from(self.module.memory_plans[index].memory.page_size_log2); + let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2); Ok(self.cast_pointer_to_memory_index(pos, current_length_in_pages, index)) } From a48835de0fc1bd5893f1b871e462589ecd4100b1 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Mon, 10 Jun 2024 16:53:06 -0700 Subject: [PATCH 05/19] fix error message expectation again --- tests/all/pooling_allocator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/all/pooling_allocator.rs b/tests/all/pooling_allocator.rs index 33bd8eb9b015..e6f51e5acac5 100644 --- a/tests/all/pooling_allocator.rs +++ b/tests/all/pooling_allocator.rs @@ -49,7 +49,7 @@ fn memory_limit() -> Result<()> { Ok(_) => panic!("module instantiation should fail"), Err(e) => assert_eq!( e.to_string(), - "memory index 0 has a minimum byte size of 262144 which exceeds the limit of 196608", + "memory index 0 has a minimum 
byte size of 262144 which exceeds the limit of 196608 bytes", ), } From 3b3d883ca71a65380a435435432c8816ec0996da Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Mon, 10 Jun 2024 16:54:57 -0700 Subject: [PATCH 06/19] remove page size from VMMemoryDefinition --- crates/wasmtime/src/runtime/trampoline/memory.rs | 1 - crates/wasmtime/src/runtime/vm/memory.rs | 2 -- crates/wasmtime/src/runtime/vm/vmcontext.rs | 4 ---- 3 files changed, 7 deletions(-) diff --git a/crates/wasmtime/src/runtime/trampoline/memory.rs b/crates/wasmtime/src/runtime/trampoline/memory.rs index 9bf3709c5fc5..b0046d310119 100644 --- a/crates/wasmtime/src/runtime/trampoline/memory.rs +++ b/crates/wasmtime/src/runtime/trampoline/memory.rs @@ -104,7 +104,6 @@ impl RuntimeLinearMemory for LinearMemoryProxy { VMMemoryDefinition { base: self.mem.as_ptr(), current_length: self.mem.byte_size().into(), - page_size_log2: self.page_size_log2, } } diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index ceab8e764377..7406b4945881 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -408,7 +408,6 @@ impl RuntimeLinearMemory for MmapMemory { VMMemoryDefinition { base: unsafe { self.mmap.as_mut_ptr().add(self.pre_guard_size) }, current_length: self.len.into(), - page_size_log2: self.page_size_log2, } } @@ -519,7 +518,6 @@ impl RuntimeLinearMemory for StaticMemory { VMMemoryDefinition { base: self.base.as_ptr(), current_length: self.size.into(), - page_size_log2: self.page_size_log2, } } diff --git a/crates/wasmtime/src/runtime/vm/vmcontext.rs b/crates/wasmtime/src/runtime/vm/vmcontext.rs index f6692139bde6..234cd87429d6 100644 --- a/crates/wasmtime/src/runtime/vm/vmcontext.rs +++ b/crates/wasmtime/src/runtime/vm/vmcontext.rs @@ -266,9 +266,6 @@ pub struct VMMemoryDefinition { /// atomically. For relaxed access, see /// [`VMMemoryDefinition::current_length()`]. 
pub current_length: AtomicUsize, - - /// The log2 of this memory's page size, in bytes. - pub page_size_log2: u8, } impl VMMemoryDefinition { @@ -291,7 +288,6 @@ impl VMMemoryDefinition { VMMemoryDefinition { base: other.base, current_length: other.current_length().into(), - page_size_log2: other.page_size_log2, } } } From 6506f277c096a64e5304c66c5e8b5b25d5f8070c Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Mon, 10 Jun 2024 16:58:53 -0700 Subject: [PATCH 07/19] fix size of VMMemoryDefinition again --- crates/environ/src/vmoffsets.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/environ/src/vmoffsets.rs b/crates/environ/src/vmoffsets.rs index 775ce27f4433..569e3e5ff6a3 100644 --- a/crates/environ/src/vmoffsets.rs +++ b/crates/environ/src/vmoffsets.rs @@ -208,7 +208,7 @@ pub trait PtrSize { /// Return the size of `VMMemoryDefinition`. #[inline] fn size_of_vmmemory_definition(&self) -> u8 { - 3 * self.size() + 2 * self.size() } /// Return the size of `*mut VMMemoryDefinition`. From f3c5a01b586196576a4a551c41a751456111e8ec Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Tue, 11 Jun 2024 09:24:55 -0700 Subject: [PATCH 08/19] Only dynamically check for `-1` sentinel for 1-byte page sizes --- crates/cranelift/src/func_environ.rs | 46 ++++++++++++++++++---------- tests/disas/pcc-imported-memory.wat | 34 ++++++++++---------- 2 files changed, 46 insertions(+), 34 deletions(-) diff --git a/crates/cranelift/src/func_environ.rs b/crates/cranelift/src/func_environ.rs index 74e23c70ef27..8adf8245409a 100644 --- a/crates/cranelift/src/func_environ.rs +++ b/crates/cranelift/src/func_environ.rs @@ -680,17 +680,13 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } } - /// Convert the target pointer-sized integer `val` into the given memory's + /// Convert the target pointer-sized integer `val` that is holding a memory + /// length (or the `-1` `memory.grow`-failed sentinel) into the memory's /// index type. 
/// /// This might involve extending or truncating it depending on the memory's - /// index type and the target's pointer type. Note that when extending, we - /// do an unsigned extend, *except* if `val == -1`, in which case we do a - /// sign extend. This edge case makes this helper suitable for use with - /// translating the results of a `memory.grow` libcall, for example, where - /// `-1` indicates failure but the success value is otherwise unsigned and - /// might have the high bit set. - fn cast_pointer_to_memory_index( + /// index type and the target's pointer type. + fn convert_memory_length_to_index_type( &self, mut pos: FuncCursor<'_>, val: ir::Value, @@ -710,12 +706,30 @@ impl<'module_environment> FuncEnvironment<'module_environment> { } else { // We have a 64-bit memory on a 32-bit host -- this combo doesn't // really make a whole lot of sense to do from a user perspective - // but that is neither here nor there. We want to unsigned extend - // unless `val` is `-1`, as described in the doc comment above. - let extended = pos.ins().uextend(desired_type, val); - let neg_one = pos.ins().iconst(desired_type, -1); - let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1); - pos.ins().select(is_failure, neg_one, extended) + // but that is neither here nor there. We want to logically do an + // unsigned extend *except* when we are given the `-1` sentinel, + // which we must preserve as `-1` in the wider type. + match self.module.memory_plans[index].memory.page_size_log2 { + 16 => { + // In the case that we have default page sizes, we can + // always sign extend, since valid memory lengths (in pages) + // never have their sign bit set, and so if the sign bit is + // set then this must be the `-1` sentinel, which we want to + // preserve through the extension. 
+ pos.ins().sextend(desired_type, val) + } + 0 => { + // For single-byte pages, we have to explicitly check for + // `-1` and choose whether to do an unsigned extension or + // return a larger `-1` because there are valid memory + // lengths (in pages) that have the sign bit set. + let extended = pos.ins().uextend(desired_type, val); + let neg_one = pos.ins().iconst(desired_type, -1); + let is_failure = pos.ins().icmp_imm(IntCC::Equal, val, -1); + pos.ins().select(is_failure, neg_one, extended) + } + _ => unreachable!("only page sizes 2**0 and 2**16 are currently valid"), + } } } @@ -2406,7 +2420,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let val = self.cast_memory_index_to_i64(&mut pos, val, index); let call_inst = pos.ins().call(memory_grow, &[vmctx, val, memory_index]); let result = *pos.func.dfg.inst_results(call_inst).first().unwrap(); - Ok(self.cast_pointer_to_memory_index(pos, result, index)) + Ok(self.convert_memory_length_to_index_type(pos, result, index)) } fn translate_memory_size( @@ -2482,7 +2496,7 @@ impl<'module_environment> cranelift_wasm::FuncEnvironment for FuncEnvironment<'m let page_size_log2 = i64::from(self.module.memory_plans[index].memory.page_size_log2); let current_length_in_pages = pos.ins().ushr_imm(current_length_in_bytes, page_size_log2); - Ok(self.cast_pointer_to_memory_index(pos, current_length_in_pages, index)) + Ok(self.convert_memory_length_to_index_type(pos, current_length_in_pages, index)) } fn translate_memory_copy( diff --git a/tests/disas/pcc-imported-memory.wat b/tests/disas/pcc-imported-memory.wat index c09251f86aeb..616cb5bc3e26 100644 --- a/tests/disas/pcc-imported-memory.wat +++ b/tests/disas/pcc-imported-memory.wat @@ -37,24 +37,22 @@ ;; wasm[0]::function[0]: ;; pushq %rbp ;; movq %rsp, %rbp -;; movq 0x58(%rdi), %r9 -;; movq 8(%r9), %rax -;; movl $0x10000, %r9d -;; xorq %rdx, %rdx -;; divq %r9 -;; movq %rax, %r8 -;; shll $0x10, %r8d -;; leal 4(%rax), %r9d -;; cmpl %r9d, %r8d 
-;; jbe 0x45 -;; 2c: testl %eax, %eax -;; jle 0x45 -;; 34: movq 0x58(%rdi), %rcx -;; movq (%rcx), %rcx -;; movl %eax, %eax -;; movl (%rcx, %rax), %esi -;; jmp 0x47 -;; 45: xorl %esi, %esi +;; movq 0x58(%rdi), %rcx +;; movq 8(%rcx), %rax +;; shrq $0x10, %rax +;; movq %rax, %rcx +;; shll $0x10, %ecx +;; leal 4(%rax), %edx +;; cmpl %edx, %ecx +;; jbe 0x3b +;; 21: testl %eax, %eax +;; jle 0x3b +;; 29: movq 0x58(%rdi), %rsi +;; movq (%rsi), %rsi +;; movl %eax, %edi +;; movl (%rsi, %rdi), %r10d +;; jmp 0x3e +;; 3b: xorl %r10d, %r10d ;; movq %rbp, %rsp ;; popq %rbp ;; retq From ff8630b1afdb7832317e71989b3215d3f52e0185 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Tue, 11 Jun 2024 09:25:56 -0700 Subject: [PATCH 09/19] Import functions that are used a few times --- crates/wasmtime/src/runtime/vm/memory.rs | 36 ++++++++++-------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index 7406b4945881..2390c050f443 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -6,7 +6,8 @@ use crate::prelude::*; use crate::runtime::vm::mmap::Mmap; use crate::runtime::vm::vmcontext::VMMemoryDefinition; use crate::runtime::vm::{ - MemoryImage, MemoryImageSlot, SendSyncPtr, SharedMemory, Store, WaitResult, + host_page_size, round_usize_up_to_host_pages, usize_is_multiple_of_host_page_size, MemoryImage, + MemoryImageSlot, SendSyncPtr, SharedMemory, Store, WaitResult, }; use alloc::sync::Arc; use anyhow::Error; @@ -229,16 +230,15 @@ impl MmapMemory { let pre_guard_bytes = usize::try_from(plan.pre_guard_size).unwrap(); // Ensure that our guard regions are multiples of the host page size. 
- let offset_guard_bytes = - crate::runtime::vm::round_usize_up_to_host_pages(offset_guard_bytes); - let pre_guard_bytes = crate::runtime::vm::round_usize_up_to_host_pages(pre_guard_bytes); + let offset_guard_bytes = round_usize_up_to_host_pages(offset_guard_bytes); + let pre_guard_bytes = round_usize_up_to_host_pages(pre_guard_bytes); let (alloc_bytes, extra_to_reserve_on_growth) = match plan.style { // Dynamic memories start with the minimum size plus the `reserve` // amount specified to grow into. MemoryStyle::Dynamic { reserve } => ( - crate::runtime::vm::round_usize_up_to_host_pages(minimum), - crate::runtime::vm::round_usize_up_to_host_pages(usize::try_from(reserve).unwrap()), + round_usize_up_to_host_pages(minimum), + round_usize_up_to_host_pages(usize::try_from(reserve).unwrap()), ), // Static memories will never move in memory and consequently get @@ -253,7 +253,7 @@ impl MmapMemory { (bound_bytes, 0) } }; - assert_eq!(alloc_bytes % crate::runtime::vm::host_page_size(), 0); + assert_eq!(alloc_bytes % host_page_size(), 0); let request_bytes = pre_guard_bytes .checked_add(alloc_bytes) @@ -263,7 +263,7 @@ impl MmapMemory { let mut mmap = Mmap::accessible_reserved(0, request_bytes)?; if minimum > 0 { - let accessible = crate::runtime::vm::round_usize_up_to_host_pages(minimum); + let accessible = round_usize_up_to_host_pages(minimum); mmap.make_accessible(pre_guard_bytes, accessible)?; } @@ -303,7 +303,7 @@ impl MmapMemory { /// is the same region as `self.len` but rounded up to a multiple of the /// host page size. 
fn accessible(&self) -> usize { - let accessible = crate::runtime::vm::round_usize_up_to_host_pages(self.len); + let accessible = round_usize_up_to_host_pages(self.len); debug_assert!(accessible <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size); accessible } @@ -323,17 +323,11 @@ impl RuntimeLinearMemory for MmapMemory { } fn grow_to(&mut self, new_size: usize) -> Result<()> { - assert!(crate::runtime::vm::usize_is_multiple_of_host_page_size( - self.offset_guard_size - )); - assert!(crate::runtime::vm::usize_is_multiple_of_host_page_size( - self.pre_guard_size - )); - assert!(crate::runtime::vm::usize_is_multiple_of_host_page_size( - self.mmap.len() - )); - - let new_accessible = crate::runtime::vm::round_usize_up_to_host_pages(new_size); + assert!(usize_is_multiple_of_host_page_size(self.offset_guard_size)); + assert!(usize_is_multiple_of_host_page_size(self.pre_guard_size)); + assert!(usize_is_multiple_of_host_page_size(self.mmap.len())); + + let new_accessible = round_usize_up_to_host_pages(new_size); if new_accessible > self.mmap.len() - self.offset_guard_size - self.pre_guard_size { // If the new size of this heap exceeds the current size of the // allocation we have, then this must be a dynamic heap. 
Use @@ -382,7 +376,7 @@ impl RuntimeLinearMemory for MmapMemory { assert!(self.maximum.map_or(true, |max| new_size <= max)); assert!(new_size <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size); - let new_accessible = crate::runtime::vm::round_usize_up_to_host_pages(new_size); + let new_accessible = round_usize_up_to_host_pages(new_size); assert!( new_accessible <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size, ); From 3ce470c74690f0cdb431181c6742e2c0b6b05d7a Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Tue, 11 Jun 2024 09:51:55 -0700 Subject: [PATCH 10/19] Better handle overflows when rounding up to the host page size Propagate errors instead of returning a value that is not actually a rounded up version of the input. Delay rounding up various config sizes until runtime instead of eagerly doing it at config time (which isn't even guaranteed to work, so we already had to have a backup plan to round up at runtime, since we might be cross-compiling wasm or not have the runtime feature enabled). --- crates/wasmtime/src/config.rs | 22 ++-------------------- crates/wasmtime/src/runtime/vm.rs | 20 ++++++++++++-------- crates/wasmtime/src/runtime/vm/memory.rs | 17 +++++++++-------- 3 files changed, 23 insertions(+), 36 deletions(-) diff --git a/crates/wasmtime/src/config.rs b/crates/wasmtime/src/config.rs index 86b7d0525252..a6aac0bd39c4 100644 --- a/crates/wasmtime/src/config.rs +++ b/crates/wasmtime/src/config.rs @@ -1394,7 +1394,7 @@ impl Config { /// for pooling allocation by using memory protection; see /// `PoolingAllocatorConfig::memory_protection_keys` for details. 
pub fn static_memory_maximum_size(&mut self, max_size: u64) -> &mut Self { - self.tunables.static_memory_reservation = Some(round_up_to_pages(max_size)); + self.tunables.static_memory_reservation = Some(max_size); self } @@ -1465,7 +1465,6 @@ impl Config { /// The `Engine::new` method will return an error if this option is smaller /// than the value configured for [`Config::dynamic_memory_guard_size`]. pub fn static_memory_guard_size(&mut self, guard_size: u64) -> &mut Self { - let guard_size = round_up_to_pages(guard_size); self.tunables.static_memory_offset_guard_size = Some(guard_size); self } @@ -1498,7 +1497,6 @@ impl Config { /// The `Engine::new` method will return an error if this option is larger /// than the value configured for [`Config::static_memory_guard_size`]. pub fn dynamic_memory_guard_size(&mut self, guard_size: u64) -> &mut Self { - let guard_size = round_up_to_pages(guard_size); self.tunables.dynamic_memory_offset_guard_size = Some(guard_size); self } @@ -1538,7 +1536,7 @@ impl Config { /// For 64-bit platforms this defaults to 2GB, and for 32-bit platforms this /// defaults to 1MB. pub fn dynamic_memory_reserved_for_growth(&mut self, reserved: u64) -> &mut Self { - self.tunables.dynamic_memory_growth_reserve = Some(round_up_to_pages(reserved)); + self.tunables.dynamic_memory_growth_reserve = Some(reserved); self } @@ -2135,19 +2133,6 @@ impl Config { } } -/// If building without the runtime feature we can't determine the page size of -/// the platform where the execution will happen so just keep the original -/// values. -#[cfg(not(feature = "runtime"))] -fn round_up_to_pages(val: u64) -> u64 { - val -} - -#[cfg(feature = "runtime")] -fn round_up_to_pages(val: u64) -> u64 { - crate::runtime::vm::round_u64_up_to_host_pages(val) -} - impl Default for Config { fn default() -> Config { Config::new() @@ -2505,7 +2490,6 @@ impl PoolingAllocationConfig { /// never be decommitted. 
#[cfg(feature = "async")] pub fn async_stack_keep_resident(&mut self, size: usize) -> &mut Self { - let size = round_up_to_pages(size as u64) as usize; self.config.async_stack_keep_resident = size; self } @@ -2522,7 +2506,6 @@ impl PoolingAllocationConfig { /// which can, in some configurations, reduce the number of page faults /// taken when a slot is reused. pub fn linear_memory_keep_resident(&mut self, size: usize) -> &mut Self { - let size = round_up_to_pages(size as u64) as usize; self.config.linear_memory_keep_resident = size; self } @@ -2537,7 +2520,6 @@ impl PoolingAllocationConfig { /// [`PoolingAllocationConfig::linear_memory_keep_resident`] except that it /// is applicable to tables instead. pub fn table_keep_resident(&mut self, size: usize) -> &mut Self { - let size = round_up_to_pages(size as u64) as usize; self.config.table_keep_resident = size; self } diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs index e7c8715e51eb..4b4508e5ffbc 100644 --- a/crates/wasmtime/src/runtime/vm.rs +++ b/crates/wasmtime/src/runtime/vm.rs @@ -5,7 +5,7 @@ use crate::prelude::*; use alloc::sync::Arc; -use anyhow::{Error, Result}; +use anyhow::{anyhow, Error, Result}; use core::fmt; use core::mem; use core::ptr::NonNull; @@ -350,20 +350,24 @@ pub fn usize_is_multiple_of_host_page_size(bytes: usize) -> bool { } /// Round the given byte size up to a multiple of the host OS page size. -pub fn round_u64_up_to_host_pages(bytes: u64) -> u64 { - let page_size = u64::try_from(crate::runtime::vm::host_page_size()).unwrap(); +/// +/// Returns an error if rounding up overflows. 
+pub fn round_u64_up_to_host_pages(bytes: u64) -> Result<u64> { + let page_size = u64::try_from(crate::runtime::vm::host_page_size())?; debug_assert!(page_size.is_power_of_two()); bytes .checked_add(page_size - 1) + .ok_or_else(|| anyhow!( + "{bytes} is too large to be rounded up to a multiple of the host page size of {page_size}" + )) .map(|val| val & !(page_size - 1)) - .unwrap_or(u64::MAX / page_size + 1) } /// Same as `round_u64_up_to_host_pages` but for `usize`s. -pub fn round_usize_up_to_host_pages(bytes: usize) -> usize { - let bytes = u64::try_from(bytes).unwrap(); - let rounded = round_u64_up_to_host_pages(bytes); - usize::try_from(rounded).unwrap() +pub fn round_usize_up_to_host_pages(bytes: usize) -> Result<usize> { + let bytes = u64::try_from(bytes)?; + let rounded = round_u64_up_to_host_pages(bytes)?; + Ok(usize::try_from(rounded)?) } /// Result of `Memory::atomic_wait32` and `Memory::atomic_wait64` diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index 2390c050f443..acb20dea2838 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -230,15 +230,15 @@ impl MmapMemory { let pre_guard_bytes = usize::try_from(plan.pre_guard_size).unwrap(); // Ensure that our guard regions are multiples of the host page size. - let offset_guard_bytes = round_usize_up_to_host_pages(offset_guard_bytes); - let pre_guard_bytes = round_usize_up_to_host_pages(pre_guard_bytes); + let offset_guard_bytes = round_usize_up_to_host_pages(offset_guard_bytes)?; + let pre_guard_bytes = round_usize_up_to_host_pages(pre_guard_bytes)?; let (alloc_bytes, extra_to_reserve_on_growth) = match plan.style { // Dynamic memories start with the minimum size plus the `reserve` // amount specified to grow into. 
MemoryStyle::Dynamic { reserve } => ( - round_usize_up_to_host_pages(minimum), - round_usize_up_to_host_pages(usize::try_from(reserve).unwrap()), + round_usize_up_to_host_pages(minimum)?, + round_usize_up_to_host_pages(usize::try_from(reserve).unwrap())?, ), // Static memories will never move in memory and consequently get @@ -263,7 +263,7 @@ impl MmapMemory { let mut mmap = Mmap::accessible_reserved(0, request_bytes)?; if minimum > 0 { - let accessible = round_usize_up_to_host_pages(minimum); + let accessible = round_usize_up_to_host_pages(minimum)?; mmap.make_accessible(pre_guard_bytes, accessible)?; } @@ -303,7 +303,8 @@ impl MmapMemory { /// is the same region as `self.len` but rounded up to a multiple of the /// host page size. fn accessible(&self) -> usize { - let accessible = round_usize_up_to_host_pages(self.len); + let accessible = + round_usize_up_to_host_pages(self.len).expect("accessible region always fits in usize"); debug_assert!(accessible <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size); accessible } @@ -327,7 +328,7 @@ impl RuntimeLinearMemory for MmapMemory { assert!(usize_is_multiple_of_host_page_size(self.pre_guard_size)); assert!(usize_is_multiple_of_host_page_size(self.mmap.len())); - let new_accessible = round_usize_up_to_host_pages(new_size); + let new_accessible = round_usize_up_to_host_pages(new_size)?; if new_accessible > self.mmap.len() - self.offset_guard_size - self.pre_guard_size { // If the new size of this heap exceeds the current size of the // allocation we have, then this must be a dynamic heap. 
Use @@ -376,7 +377,7 @@ impl RuntimeLinearMemory for MmapMemory { assert!(self.maximum.map_or(true, |max| new_size <= max)); assert!(new_size <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size); - let new_accessible = round_usize_up_to_host_pages(new_size); + let new_accessible = round_usize_up_to_host_pages(new_size)?; assert!( new_accessible <= self.mmap.len() - self.offset_guard_size - self.pre_guard_size, ); From 83a5d78296efe616a81b724a6e0f8d1a70297695 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Tue, 11 Jun 2024 11:24:17 -0700 Subject: [PATCH 11/19] Fix some anyhow and nostd errors --- crates/wasmtime/src/runtime/vm.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs index 4b4508e5ffbc..96d8be5a4d37 100644 --- a/crates/wasmtime/src/runtime/vm.rs +++ b/crates/wasmtime/src/runtime/vm.rs @@ -11,7 +11,7 @@ use core::mem; use core::ptr::NonNull; use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use wasmtime_environ::{ - DefinedFuncIndex, DefinedMemoryIndex, HostPtr, ModuleInternedTypeIndex, VMOffsets, + prelude::*, DefinedFuncIndex, DefinedMemoryIndex, HostPtr, ModuleInternedTypeIndex, VMOffsets, VMSharedTypeIndex, }; @@ -353,7 +353,7 @@ pub fn usize_is_multiple_of_host_page_size(bytes: usize) -> bool { /// /// Returns an error if rounding up overflows. pub fn round_u64_up_to_host_pages(bytes: u64) -> Result<u64> { - let page_size = u64::try_from(crate::runtime::vm::host_page_size())?; + let page_size = u64::try_from(crate::runtime::vm::host_page_size()).err2anyhow()?; debug_assert!(page_size.is_power_of_two()); bytes .checked_add(page_size - 1) @@ -365,9 +365,9 @@ pub fn round_u64_up_to_host_pages(bytes: u64) -> Result<u64> { /// Same as `round_u64_up_to_host_pages` but for `usize`s. 
pub fn round_usize_up_to_host_pages(bytes: usize) -> Result<usize> { - let bytes = u64::try_from(bytes)?; + let bytes = u64::try_from(bytes).err2anyhow()?; let rounded = round_u64_up_to_host_pages(bytes)?; - Ok(usize::try_from(rounded)?) + Ok(usize::try_from(rounded).err2anyhow()?) } /// Result of `Memory::atomic_wait32` and `Memory::atomic_wait64` From 621f29dbad1fbc1297e66207288a63af0af5a566 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Tue, 11 Jun 2024 11:39:08 -0700 Subject: [PATCH 12/19] Add missing rounding up to host page size at runtime --- crates/wasmtime/src/runtime/vm/memory.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index acb20dea2838..43b5820a58c1 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -249,6 +249,7 @@ impl MmapMemory { MemoryStyle::Static { byte_reservation } => { assert!(byte_reservation >= plan.memory.minimum_byte_size().unwrap()); let bound_bytes = usize::try_from(byte_reservation).unwrap(); + let bound_bytes = round_usize_up_to_host_pages(bound_bytes)?; maximum = Some(bound_bytes.min(maximum.unwrap_or(usize::MAX))); (bound_bytes, 0) } From c9de8019b42e1f1a8fdd38cc4c80d02710955e9e Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Tue, 11 Jun 2024 12:55:40 -0700 Subject: [PATCH 13/19] Add validate feature to wasmparser dep --- crates/types/Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/types/Cargo.toml b/crates/types/Cargo.toml index 697cb3e9267d..bd5c88331751 100644 --- a/crates/types/Cargo.toml +++ b/crates/types/Cargo.toml @@ -14,7 +14,7 @@ cranelift-entity = { workspace = true, features = ['enable-serde'] } serde = { workspace = true } serde_derive = { workspace = true } smallvec = { workspace = true, features = ["serde"] } -wasmparser = { workspace = true } +wasmparser = { workspace = true, features = ["validate"] } [lints] workspace = true From 
30ed095e4ad1bef1fd0acdc269d75305c271ff34 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Wed, 12 Jun 2024 10:27:08 -0700 Subject: [PATCH 14/19] Add some new rounding in a few places, due to no longer rounding in config methods --- .../allocator/pooling/decommit_queue.rs | 7 ++- .../instance/allocator/pooling/memory_pool.rs | 46 +++++++++++-------- .../instance/allocator/pooling/table_pool.rs | 4 +- .../allocator/pooling/unix_stack_pool.rs | 6 ++- crates/wasmtime/src/runtime/vm/memory.rs | 7 ++- crates/wasmtime/src/runtime/vm/mmap.rs | 10 ++-- 6 files changed, 48 insertions(+), 32 deletions(-) diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/decommit_queue.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/decommit_queue.rs index 03de145d28b1..8844086111c2 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/decommit_queue.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/decommit_queue.rs @@ -152,7 +152,12 @@ impl DecommitQueue { for iovec in self.raw.drain(..) 
{ unsafe { crate::vm::sys::vm::decommit_pages(iovec.0.iov_base.cast(), iovec.0.iov_len) - .expect("failed to decommit pages"); + .unwrap_or_else(|e| { + panic!( + "failed to decommit ptr={:#p}, len={:#x}: {e}", + iovec.0.iov_base, iovec.0.iov_len + ) + }); } } } diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs index d64d463f0b4a..ce7b68814008 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/memory_pool.rs @@ -54,12 +54,12 @@ use super::{ index_allocator::{MemoryInModule, ModuleAffinityIndexAllocator, SlotId}, MemoryAllocationIndex, }; -use crate::prelude::*; use crate::runtime::vm::mpk::{self, ProtectionKey, ProtectionMask}; use crate::runtime::vm::{ CompiledModuleId, InstanceAllocationRequest, InstanceLimits, Memory, MemoryImageSlot, Mmap, MpkEnabled, PoolingInstanceAllocatorConfig, }; +use crate::{prelude::*, vm::round_usize_up_to_host_pages}; use anyhow::{anyhow, bail, Context, Result}; use std::ffi::c_void; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -239,7 +239,7 @@ impl MemoryPool { image_slots, layout, memories_per_instance: usize::try_from(config.limits.max_memories_per_module).unwrap(), - keep_resident: config.linear_memory_keep_resident, + keep_resident: round_usize_up_to_host_pages(config.linear_memory_keep_resident)?, next_available_pkey: AtomicUsize::new(0), }; @@ -561,30 +561,38 @@ impl SlabConstraints { tunables: &Tunables, num_pkeys_available: usize, ) -> Result { - // `static_memory_bound` is the configured number of Wasm pages for a + // `static_memory_reservation` is the configured number of bytes for a // static memory slot (see `Config::static_memory_maximum_size`); even // if the memory never grows to this size (e.g., it has a lower memory // maximum), codegen will assume that this unused memory is mapped - // `PROT_NONE`. 
Typically `static_memory_bound` is 4G which helps elide - // most bounds checks. `MemoryPool` must respect this bound, though not - // explicitly: if we can achieve the same effect via MPK-protected - // stripes, the slot size can be lower than the `static_memory_bound`. - let expected_slot_bytes = tunables.static_memory_reservation; + // `PROT_NONE`. Typically `static_memory_bound` is 4GiB which helps + // elide most bounds checks. `MemoryPool` must respect this bound, + // though not explicitly: if we can achieve the same effect via + // MPK-protected stripes, the slot size can be lower than the + // `static_memory_bound`. + let expected_slot_bytes: usize = tunables + .static_memory_reservation + .try_into() + .context("static memory bound is too large")?; + let expected_slot_bytes = round_usize_up_to_host_pages(expected_slot_bytes)?; + + let guard_bytes: usize = tunables + .static_memory_offset_guard_size + .try_into() + .context("guard region is too large")?; + let guard_bytes = round_usize_up_to_host_pages(guard_bytes)?; + + let num_slots = limits + .total_memories + .try_into() + .context("too many memories")?; let constraints = SlabConstraints { max_memory_bytes: limits.max_memory_size, - num_slots: limits - .total_memories - .try_into() - .context("too many memories")?, - expected_slot_bytes: expected_slot_bytes - .try_into() - .context("static memory bound is too large")?, + num_slots, + expected_slot_bytes, num_pkeys_available, - guard_bytes: tunables - .static_memory_offset_guard_size - .try_into() - .context("guard region is too large")?, + guard_bytes, guard_before_slots: tunables.guard_before_linear_memory, }; Ok(constraints) diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs index 7215f56d1f6e..1bbeafc1d6be 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs +++ 
b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/table_pool.rs @@ -2,10 +2,10 @@ use super::{ index_allocator::{SimpleIndexAllocator, SlotId}, round_up_to_pow2, TableAllocationIndex, }; -use crate::runtime::vm::sys::vm::commit_pages; use crate::runtime::vm::{ InstanceAllocationRequest, Mmap, PoolingInstanceAllocatorConfig, SendSyncPtr, Table, }; +use crate::{runtime::vm::sys::vm::commit_pages, vm::round_usize_up_to_host_pages}; use anyhow::{anyhow, bail, Context, Result}; use std::mem; use std::ptr::NonNull; @@ -56,7 +56,7 @@ impl TablePool { max_total_tables, tables_per_instance, page_size, - keep_resident: config.table_keep_resident, + keep_resident: round_usize_up_to_host_pages(config.table_keep_resident)?, table_elements: usize::try_from(config.limits.table_elements).unwrap(), }) } diff --git a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs index be81ba8952d8..ce6bebf13140 100644 --- a/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs +++ b/crates/wasmtime/src/runtime/vm/instance/allocator/pooling/unix_stack_pool.rs @@ -5,7 +5,7 @@ use super::{ round_up_to_pow2, }; use crate::runtime::vm::sys::vm::commit_pages; -use crate::runtime::vm::{Mmap, PoolingInstanceAllocatorConfig}; +use crate::runtime::vm::{round_usize_up_to_host_pages, Mmap, PoolingInstanceAllocatorConfig}; use anyhow::{anyhow, bail, Context, Result}; /// Represents a pool of execution stacks (used for the async fiber implementation). 
@@ -71,7 +71,9 @@ impl StackPool { max_stacks, page_size, async_stack_zeroing: config.async_stack_zeroing, - async_stack_keep_resident: config.async_stack_keep_resident, + async_stack_keep_resident: round_usize_up_to_host_pages( + config.async_stack_keep_resident, + )?, index_allocator: SimpleIndexAllocator::new(config.limits.total_stacks), }) } diff --git a/crates/wasmtime/src/runtime/vm/memory.rs b/crates/wasmtime/src/runtime/vm/memory.rs index 43b5820a58c1..6425593b7d1e 100644 --- a/crates/wasmtime/src/runtime/vm/memory.rs +++ b/crates/wasmtime/src/runtime/vm/memory.rs @@ -6,7 +6,7 @@ use crate::prelude::*; use crate::runtime::vm::mmap::Mmap; use crate::runtime::vm::vmcontext::VMMemoryDefinition; use crate::runtime::vm::{ - host_page_size, round_usize_up_to_host_pages, usize_is_multiple_of_host_page_size, MemoryImage, + round_usize_up_to_host_pages, usize_is_multiple_of_host_page_size, MemoryImage, MemoryImageSlot, SendSyncPtr, SharedMemory, Store, WaitResult, }; use alloc::sync::Arc; @@ -254,13 +254,15 @@ impl MmapMemory { (bound_bytes, 0) } }; - assert_eq!(alloc_bytes % host_page_size(), 0); + assert!(usize_is_multiple_of_host_page_size(alloc_bytes)); let request_bytes = pre_guard_bytes .checked_add(alloc_bytes) .and_then(|i| i.checked_add(extra_to_reserve_on_growth)) .and_then(|i| i.checked_add(offset_guard_bytes)) .ok_or_else(|| format_err!("cannot allocate {} with guard regions", minimum))?; + assert!(usize_is_multiple_of_host_page_size(request_bytes)); + let mut mmap = Mmap::accessible_reserved(0, request_bytes)?; if minimum > 0 { @@ -341,6 +343,7 @@ impl RuntimeLinearMemory for MmapMemory { .and_then(|s| s.checked_add(self.extra_to_reserve_on_growth)) .and_then(|s| s.checked_add(self.offset_guard_size)) .ok_or_else(|| format_err!("overflow calculating size of memory allocation"))?; + assert!(usize_is_multiple_of_host_page_size(request_bytes)); let mut new_mmap = Mmap::accessible_reserved(0, request_bytes)?; new_mmap.make_accessible(self.pre_guard_size, 
new_accessible)?; diff --git a/crates/wasmtime/src/runtime/vm/mmap.rs b/crates/wasmtime/src/runtime/vm/mmap.rs index 000721f54c95..1135733bd095 100644 --- a/crates/wasmtime/src/runtime/vm/mmap.rs +++ b/crates/wasmtime/src/runtime/vm/mmap.rs @@ -1,8 +1,8 @@ //! Low-level abstraction for allocating and managing zero-filled pages //! of memory. -use crate::prelude::*; use crate::runtime::vm::sys::mmap; +use crate::{prelude::*, vm::usize_is_multiple_of_host_page_size}; use anyhow::{Context, Result}; use core::ops::Range; #[cfg(feature = "std")] @@ -21,8 +21,7 @@ impl Mmap { /// Create a new `Mmap` pointing to at least `size` bytes of page-aligned /// accessible memory. pub fn with_at_least(size: usize) -> Result<Self> { - let page_size = crate::runtime::vm::host_page_size(); - let rounded_size = (size + (page_size - 1)) & !(page_size - 1); + let rounded_size = crate::runtime::vm::round_usize_up_to_host_pages(size)?; Self::accessible_reserved(rounded_size, rounded_size) } @@ -53,10 +52,9 @@ impl Mmap { /// This function will panic if `accessible_size` is greater than /// `mapping_size` or if either of them are not page-aligned.
pub fn accessible_reserved(accessible_size: usize, mapping_size: usize) -> Result<Self> { - let page_size = crate::runtime::vm::host_page_size(); assert!(accessible_size <= mapping_size); - assert_eq!(mapping_size & (page_size - 1), 0); - assert_eq!(accessible_size & (page_size - 1), 0); + assert!(usize_is_multiple_of_host_page_size(mapping_size)); + assert!(usize_is_multiple_of_host_page_size(accessible_size)); if mapping_size == 0 { Ok(Mmap { From 0b2ed81e66737c65fda7ae74929709d2074820de Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Wed, 12 Jun 2024 13:43:09 -0700 Subject: [PATCH 15/19] Avoid actually trying to allocate the whole address space in the `massive_64_bit_still_limited` test The point of the test is to ensure that we hit the limiter, so just cancel the allocation from the limiter, and otherwise avoid MIRI attempting to allocate a bunch of memory after we hit the limiter. --- tests/all/memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/all/memory.rs b/tests/all/memory.rs index 7120d9556a14..aac0f81bd719 100644 --- a/tests/all/memory.rs +++ b/tests/all/memory.rs @@ -375,7 +375,7 @@ fn massive_64_bit_still_limited() -> Result<()> { _max: Option<usize>, ) -> Result<bool> { self.hit = true; - Ok(true) + Ok(false) } fn table_growing( &mut self, From 6fe8f3e8b785d07f07728b4bcafbcf1339b9f751 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Wed, 12 Jun 2024 14:03:24 -0700 Subject: [PATCH 16/19] prtest:full From 67c35062b2efbdb288132fb5852275bc123a7b26 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Wed, 12 Jun 2024 14:08:43 -0700 Subject: [PATCH 17/19] Revert "Avoid actually trying to allocate the whole address space in the `massive_64_bit_still_limited` test" This reverts commit ccfa34a78dd3d53e49a6158ca03077d42ce8bcd7.
--- tests/all/memory.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/all/memory.rs b/tests/all/memory.rs index aac0f81bd719..7120d9556a14 100644 --- a/tests/all/memory.rs +++ b/tests/all/memory.rs @@ -375,7 +375,7 @@ fn massive_64_bit_still_limited() -> Result<()> { _max: Option<usize>, ) -> Result<bool> { self.hit = true; - Ok(false) + Ok(true) } fn table_growing( &mut self, From 68ad4ca30a831fd5d5289f95922c49e1de2da8f3 Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Wed, 12 Jun 2024 14:11:55 -0700 Subject: [PATCH 18/19] miri: don't attempt to allocate more than 4GiB of memory It seems that rather than returning a null pointer from `std::alloc::alloc`, miri will sometimes choose to simply crash the whole program. --- crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs b/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs index 18a8c8bf6f8b..1a4d330795bc 100644 --- a/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs +++ b/crates/wasmtime/src/runtime/vm/sys/miri/mmap.rs @@ -32,6 +32,9 @@ impl Mmap { } pub fn reserve(size: usize) -> Result<Self> { + if size > 1 << 32 { bail!("failed to allocate memory"); } let layout = Layout::from_size_align(size, crate::runtime::vm::host_page_size()).unwrap(); let ptr = unsafe { alloc::alloc(layout) }; if ptr.is_null() { From 3732ec8713b2cec475307b48fbe333905e479edf Mon Sep 17 00:00:00 2001 From: Nick Fitzgerald Date: Wed, 12 Jun 2024 14:32:28 -0700 Subject: [PATCH 19/19] remove duplicate prelude import after rebasing --- crates/wasmtime/src/runtime/vm.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/crates/wasmtime/src/runtime/vm.rs b/crates/wasmtime/src/runtime/vm.rs index 96d8be5a4d37..181de2773215 100644 --- a/crates/wasmtime/src/runtime/vm.rs +++ b/crates/wasmtime/src/runtime/vm.rs @@ -11,7 +11,7 @@ use core::mem; use core::ptr::NonNull; use core::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use
wasmtime_environ::{ - prelude::*, DefinedFuncIndex, DefinedMemoryIndex, HostPtr, ModuleInternedTypeIndex, VMOffsets, + DefinedFuncIndex, DefinedMemoryIndex, HostPtr, ModuleInternedTypeIndex, VMOffsets, VMSharedTypeIndex, };