Put MemoryPool and MmapMemory mmaps behind Arc (#9682)
In upcoming work we're going to centralize memory management inside `Mmap`.  In
order to do that, we need memory that logically borrows from the `Mmap` to have
access to it. That turns out to be possible in some situations but very
difficult in others (see #9681 for an attempt that was a real hassle to get
working).

To prepare for this work, start moving these uses to be behind `Arc`. These
aren't cloned at the moment, but they will be in the future.
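
To make the motivation concrete, here is a minimal sketch (not code from this commit; `MmapView` and its fields are hypothetical stand-ins) of what "logically borrows from the `Mmap`" can look like once the mapping sits behind an `Arc`: the borrowing structure holds a clone of the `Arc` plus a byte range, so it keeps the mapping alive without a lifetime parameter.

```rust
use std::ops::Range;
use std::sync::Arc;

/// Stand-in for wasmtime's `Mmap`, just enough to keep the sketch self-contained.
#[derive(Debug)]
struct Mmap {
    len: usize,
}

/// Hypothetical "logical borrow" of a region of an `Mmap`.
///
/// Holding a clone of the `Arc` (rather than a `&'a Mmap`) keeps the mapping
/// alive for as long as the view exists, with no lifetime parameter to thread
/// through every type that owns such a view.
#[derive(Debug)]
struct MmapView {
    mmap: Arc<Mmap>,
    range: Range<usize>,
}

impl MmapView {
    fn new(mmap: &Arc<Mmap>, range: Range<usize>) -> Self {
        assert!(range.end <= mmap.len, "view must stay within the mapping");
        Self {
            mmap: Arc::clone(mmap),
            range,
        }
    }
}

fn main() {
    let mmap = Arc::new(Mmap { len: 4 << 20 });
    let view = MmapView::new(&mmap, 0..65536);
    // Dropping the original handle is fine: the view's clone keeps the mapping alive.
    drop(mmap);
    println!("view covers {:?} of a {}-byte mapping", view.range, view.mmap.len);
}
```

The diff below only does the wrapping half of this: the `MemoryPool` and `MmapMemory` fields change to `Arc<Mmap<AlignedLength>>`, and nothing clones them yet.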
sunshowers authored Dec 3, 2024
1 parent 90e31cd commit c737a48
Showing 8 changed files with 168 additions and 80 deletions.
149 changes: 107 additions & 42 deletions crates/wasmtime/src/runtime/vm/cow.rs
@@ -817,9 +817,33 @@ mod test {
}
}

fn mmap_4mib_inaccessible() -> Mmap<AlignedLength> {
fn mmap_4mib_inaccessible() -> Arc<Mmap<AlignedLength>> {
let four_mib = HostAlignedByteCount::new(4 << 20).expect("4 MiB is page aligned");
Mmap::accessible_reserved(HostAlignedByteCount::ZERO, four_mib).unwrap()
Arc::new(Mmap::accessible_reserved(HostAlignedByteCount::ZERO, four_mib).unwrap())
}

/// Presents a part of an mmap as a mutable slice within a callback.
///
/// The callback ensures that the reference no longer lives after the
/// function is done.
///
/// # Safety
///
/// The caller must ensure that during this function call, the only way this
/// region of memory is accessed (read from or written to) is via the
/// reference. Making the callback `'static` goes some way towards ensuring
/// that, but it's still possible to squirrel away a reference into global
/// state. So don't do that.
unsafe fn with_slice_mut(
mmap: &Arc<Mmap<AlignedLength>>,
range: Range<usize>,
f: impl FnOnce(&mut [u8]) + 'static,
) {
let ptr = mmap.as_ptr().cast_mut();
let slice = unsafe {
core::slice::from_raw_parts_mut(ptr.add(range.start), range.end - range.start)
};
f(slice);
}

#[test]
@@ -830,7 +854,7 @@ mod test {
..Tunables::default_miri()
};
// 4 MiB mmap'd area, not accessible
let mut mmap = mmap_4mib_inaccessible();
let mmap = mmap_4mib_inaccessible();
// Create a MemoryImageSlot on top of it
let mut memfd = MemoryImageSlot::create(
mmap.as_mut_ptr() as *mut _,
@@ -842,13 +866,18 @@
// instantiate with 64 KiB initial size
memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
assert!(memfd.is_dirty());

// We should be able to access this 64 KiB (try both ends) and
// it should consist of zeroes.
let slice = unsafe { mmap.slice_mut(0..65536) };
assert_eq!(0, slice[0]);
assert_eq!(0, slice[65535]);
slice[1024] = 42;
assert_eq!(42, slice[1024]);
unsafe {
with_slice_mut(&mmap, 0..65536, |slice| {
assert_eq!(0, slice[0]);
assert_eq!(0, slice[65535]);
slice[1024] = 42;
assert_eq!(42, slice[1024]);
});
}

// grow the heap
memfd.set_heap_limit(128 << 10).unwrap();
let slice = unsafe { mmap.slice(0..1 << 20) };
@@ -876,7 +905,7 @@ mod test {
..Tunables::default_miri()
};
// 4 MiB mmap'd area, not accessible
let mut mmap = mmap_4mib_inaccessible();
let mmap = mmap_4mib_inaccessible();
// Create a MemoryImageSlot on top of it
let mut memfd = MemoryImageSlot::create(
mmap.as_mut_ptr() as *mut _,
@@ -891,9 +920,14 @@
.instantiate(64 << 10, Some(&image), &ty, &tunables)
.unwrap();
assert!(memfd.has_image());
let slice = unsafe { mmap.slice_mut(0..65536) };
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
slice[page_size] = 5;

unsafe {
with_slice_mut(&mmap, 0..65536, move |slice| {
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
slice[page_size] = 5;
});
}

// Clear and re-instantiate same image
memfd
.clear_and_remain_ready(HostAlignedByteCount::ZERO, |ptr, len| unsafe {
@@ -903,9 +937,9 @@
memfd
.instantiate(64 << 10, Some(&image), &ty, &tunables)
.unwrap();
let slice = unsafe { mmap.slice_mut(0..65536) };
// Should not see mutation from above
let slice = unsafe { mmap.slice(0..65536) };
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

// Clear and re-instantiate no image
memfd
.clear_and_remain_ready(HostAlignedByteCount::ZERO, |ptr, len| unsafe {
@@ -914,8 +948,9 @@
.unwrap();
memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
assert!(!memfd.has_image());
let slice = unsafe { mmap.slice_mut(0..65536) };
let slice = unsafe { mmap.slice(0..65536) };
assert_eq!(&[0, 0, 0, 0], &slice[page_size..][..4]);

// Clear and re-instantiate image again
memfd
.clear_and_remain_ready(HostAlignedByteCount::ZERO, |ptr, len| unsafe {
@@ -925,8 +960,9 @@
memfd
.instantiate(64 << 10, Some(&image), &ty, &tunables)
.unwrap();
let slice = unsafe { mmap.slice_mut(0..65536) };
let slice = unsafe { mmap.slice(0..65536) };
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

// Create another image with different data.
let image2 = Arc::new(create_memfd_with_data(page_size, &[10, 11, 12, 13]).unwrap());
memfd
@@ -937,8 +973,9 @@
memfd
.instantiate(128 << 10, Some(&image2), &ty, &tunables)
.unwrap();
let slice = unsafe { mmap.slice_mut(0..65536) };
let slice = unsafe { mmap.slice(0..65536) };
assert_eq!(&[10, 11, 12, 13], &slice[page_size..][..4]);

// Instantiate the original image again; we should notice it's
// a different image and not reuse the mappings.
memfd
@@ -949,7 +986,7 @@
memfd
.instantiate(64 << 10, Some(&image), &ty, &tunables)
.unwrap();
let slice = unsafe { mmap.slice_mut(0..65536) };
let slice = unsafe { mmap.slice(0..65536) };
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
}

@@ -962,7 +999,7 @@
memory_reservation: 100 << 16,
..Tunables::default_miri()
};
let mut mmap = mmap_4mib_inaccessible();
let mmap = mmap_4mib_inaccessible();
let mut memfd = MemoryImageSlot::create(
mmap.as_mut_ptr() as *mut _,
HostAlignedByteCount::ZERO,
@@ -979,14 +1016,19 @@
.instantiate(64 << 10, Some(&image), &ty, &tunables)
.unwrap();
assert!(memfd.has_image());
let slice = unsafe { mmap.slice_mut(0..64 << 10) };
if image_off > 0 {
assert_eq!(slice[image_off - 1], 0);
}
assert_eq!(slice[image_off + 5], 0);
assert_eq!(&[1, 2, 3, 4], &slice[image_off..][..4]);
slice[image_off] = 5;
assert_eq!(&[5, 2, 3, 4], &slice[image_off..][..4]);

unsafe {
with_slice_mut(&mmap, 0..64 << 10, move |slice| {
if image_off > 0 {
assert_eq!(slice[image_off - 1], 0);
}
assert_eq!(slice[image_off + 5], 0);
assert_eq!(&[1, 2, 3, 4], &slice[image_off..][..4]);
slice[image_off] = 5;
assert_eq!(&[5, 2, 3, 4], &slice[image_off..][..4]);
})
};

memfd
.clear_and_remain_ready(amt_to_memset, |ptr, len| unsafe {
decommit_pages(ptr, len).unwrap()
@@ -999,10 +1041,14 @@
for amt_to_memset in [0, page_size, page_size * 10, 1 << 20, 10 << 20] {
let amt_to_memset = HostAlignedByteCount::new(amt_to_memset).unwrap();
memfd.instantiate(64 << 10, None, &ty, &tunables).unwrap();
let mem = unsafe { mmap.slice_mut(0..64 << 10) };
for chunk in mem.chunks_mut(1024) {
assert_eq!(chunk[0], 0);
chunk[0] = 5;

unsafe {
with_slice_mut(&mmap, 0..64 << 10, |slice| {
for chunk in slice.chunks_mut(1024) {
assert_eq!(chunk[0], 0);
chunk[0] = 5;
}
});
}
memfd
.clear_and_remain_ready(amt_to_memset, |ptr, len| unsafe {
@@ -1023,7 +1069,7 @@
..Tunables::default_miri()
};

let mut mmap = mmap_4mib_inaccessible();
let mmap = mmap_4mib_inaccessible();
let mut memfd = MemoryImageSlot::create(
mmap.as_mut_ptr() as *mut _,
HostAlignedByteCount::ZERO,
@@ -1039,15 +1085,21 @@
.instantiate(initial, Some(&image), &ty, &tunables)
.unwrap();
assert!(memfd.has_image());
let slice = unsafe { mmap.slice_mut(0..(64 << 10) + page_size) };
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
slice[page_size] = 5;
assert_eq!(&[5, 2, 3, 4], &slice[page_size..][..4]);

unsafe {
with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);
slice[page_size] = 5;
assert_eq!(&[5, 2, 3, 4], &slice[page_size..][..4]);
});
}

memfd
.clear_and_remain_ready(HostAlignedByteCount::ZERO, |ptr, len| unsafe {
decommit_pages(ptr, len).unwrap()
})
.unwrap();
let slice = unsafe { mmap.slice(0..(64 << 10) + page_size) };
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

// Re-instantiate and make sure it preserves memory. Grow a bit and set data
@@ -1056,10 +1108,17 @@
.instantiate(initial, Some(&image), &ty, &tunables)
.unwrap();
assert_eq!(&[1, 2, 3, 4], &slice[page_size..][..4]);

memfd.set_heap_limit(initial * 2).unwrap();
assert_eq!(&[0, 0], &slice[initial..initial + 2]);
slice[initial] = 100;
assert_eq!(&[100, 0], &slice[initial..initial + 2]);

unsafe {
with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
assert_eq!(&[0, 0], &slice[initial..initial + 2]);
slice[initial] = 100;
assert_eq!(&[100, 0], &slice[initial..initial + 2]);
});
}

memfd
.clear_and_remain_ready(HostAlignedByteCount::ZERO, |ptr, len| unsafe {
decommit_pages(ptr, len).unwrap()
@@ -1076,9 +1135,15 @@
.unwrap();
assert_eq!(&[0, 0], &slice[initial..initial + 2]);
memfd.set_heap_limit(initial * 2).unwrap();
assert_eq!(&[0, 0], &slice[initial..initial + 2]);
slice[initial] = 100;
assert_eq!(&[100, 0], &slice[initial..initial + 2]);

unsafe {
with_slice_mut(&mmap, 0..(64 << 10) + page_size, move |slice| {
assert_eq!(&[0, 0], &slice[initial..initial + 2]);
slice[initial] = 100;
assert_eq!(&[100, 0], &slice[initial..initial + 2]);
});
}

memfd
.clear_and_remain_ready(HostAlignedByteCount::ZERO, |ptr, len| unsafe {
decommit_pages(ptr, len).unwrap()
@@ -65,7 +65,7 @@ use crate::{
};
use std::ffi::c_void;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
use std::sync::{Arc, Mutex};
use wasmtime_environ::{DefinedMemoryIndex, Module, Tunables};

/// A set of allocator slots.
@@ -104,7 +104,7 @@ struct Stripe {
/// ```
#[derive(Debug)]
pub struct MemoryPool {
mapping: Mmap<AlignedLength>,
mapping: Arc<Mmap<AlignedLength>>,
/// This memory pool is stripe-aware. If using memory protection keys, this
/// will contain one stripe per available key; otherwise, a single stripe
/// with an empty key.
@@ -244,7 +244,7 @@ impl MemoryPool {

let pool = Self {
stripes,
mapping,
mapping: Arc::new(mapping),
image_slots,
layout,
memories_per_instance: usize::try_from(config.limits.max_memories_per_module).unwrap(),
38 changes: 25 additions & 13 deletions crates/wasmtime/src/runtime/vm/memory/mmap.rs
@@ -4,13 +4,14 @@
use crate::prelude::*;
use crate::runtime::vm::memory::RuntimeLinearMemory;
use crate::runtime::vm::{mmap::AlignedLength, HostAlignedByteCount, Mmap};
use alloc::sync::Arc;
use wasmtime_environ::Tunables;

/// A linear memory instance.
#[derive(Debug)]
pub struct MmapMemory {
// The underlying allocation.
mmap: Mmap<AlignedLength>,
mmap: Arc<Mmap<AlignedLength>>,

// The current length of this Wasm memory, in bytes.
//
@@ -105,15 +106,18 @@ impl MmapMemory {
.err2anyhow()
.with_context(|| format!("cannot allocate {minimum} with guard regions"))?;

let mut mmap = Mmap::accessible_reserved(HostAlignedByteCount::ZERO, request_bytes)?;
let mmap = Mmap::accessible_reserved(HostAlignedByteCount::ZERO, request_bytes)?;

if minimum > 0 {
let accessible = HostAlignedByteCount::new_rounded_up(minimum).err2anyhow()?;
mmap.make_accessible(pre_guard_bytes, accessible)?;
// SAFETY: mmap is not in use right now so it's safe to make it accessible.
unsafe {
mmap.make_accessible(pre_guard_bytes, accessible)?;
}
}

Ok(Self {
mmap,
mmap: Arc::new(mmap),
len: minimum,
maximum,
pre_guard_size: pre_guard_bytes,
@@ -169,7 +173,11 @@ impl RuntimeLinearMemory for MmapMemory {

let mut new_mmap =
Mmap::accessible_reserved(HostAlignedByteCount::ZERO, request_bytes)?;
new_mmap.make_accessible(self.pre_guard_size, new_accessible)?;
// SAFETY: new_mmap is not in use right now so it's safe to make it
// accessible.
unsafe {
new_mmap.make_accessible(self.pre_guard_size, new_accessible)?;
}

// This method has an exclusive reference to `self.mmap` and just
// created `new_mmap` so it should be safe to acquire references
@@ -182,7 +190,7 @@
dst.copy_from_slice(src);
}

self.mmap = new_mmap;
self.mmap = Arc::new(new_mmap);
} else {
// If the new size of this heap fits within the existing allocation
// then all we need to do is to make the new pages accessible. This
@@ -198,13 +206,17 @@
// since we are forced to round our accessible range up to the
// host's page size.
if let Ok(difference) = new_accessible.checked_sub(self.accessible()) {
self.mmap.make_accessible(
self.pre_guard_size
.checked_add(self.accessible())
.err2anyhow()
.context("overflow calculating new accessible region")?,
difference,
)?;
// SAFETY: the difference was previously inaccessible so we
// never handed out any references to within it.
unsafe {
self.mmap.make_accessible(
self.pre_guard_size
.checked_add(self.accessible())
.err2anyhow()
.context("overflow calculating new accessible region")?,
difference,
)?;
}
}
}
