Auto merge of #3951 - RalfJung:release-clock, r=RalfJung
fix behavior of release_clock()

Fixes #3947

Previously, release_clock() incremented the thread's clock and only then handed out a borrow of it, so the released clock already contained the release increment. A thread that later acquired that clock would also synchronize with events that happened after the release point, which could mask data races (see the new regression test below). The clock is now passed to a callback before the increment happens.

Thanks to `@FrankReh` for noticing this and suggesting the right approach for the fix!
bors committed Oct 8, 2024
2 parents 5c2f574 + bddd6f3 commit 9045930
Showing 9 changed files with 90 additions and 30 deletions.
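In short, the fix replaces a borrow-returning interface with a callback interface. The signatures, as they appear in the diff below:

// Before: hands out a read-only borrow of the clock, taken after the release increment.
fn release_clock<'a>(&'a self) -> Option<Ref<'a, VClock>>
where
    'tcx: 'a;

// After: runs the closure on the clock before the increment; returns None
// when data race handling is disabled.
fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R>;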
2 changes: 1 addition & 1 deletion src/alloc_addresses/mod.rs
@@ -453,7 +453,7 @@ impl<'tcx> MiriMachine<'tcx> {
let thread = self.threads.active_thread();
global_state.reuse.add_addr(rng, addr, size, align, kind, thread, || {
if let Some(data_race) = &self.data_race {
- data_race.release_clock(&self.threads).clone()
+ data_race.release_clock(&self.threads, |clock| clock.clone())
} else {
VClock::default()
}
33 changes: 17 additions & 16 deletions src/concurrency/data_race.rs
@@ -828,15 +828,14 @@ pub trait EvalContextExt<'tcx>: MiriInterpCxExt<'tcx> {
}
}

- /// Returns the `release` clock of the current thread.
+ /// Calls the callback with the "release" clock of the current thread.
/// Other threads can acquire this clock in the future to establish synchronization
/// with this program point.
- fn release_clock<'a>(&'a self) -> Option<Ref<'a, VClock>>
- where
-     'tcx: 'a,
- {
+ ///
+ /// The closure will only be invoked if data race handling is on.
+ fn release_clock<R>(&self, callback: impl FnOnce(&VClock) -> R) -> Option<R> {
let this = self.eval_context_ref();
- Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads))
+ Some(this.machine.data_race.as_ref()?.release_clock(&this.machine.threads, callback))
}

/// Acquire the given clock into the current thread, establishing synchronization with
@@ -1728,7 +1727,7 @@ impl GlobalState {
let current_index = self.active_thread_index(thread_mgr);

// Store the termination clock.
- let terminaion_clock = self.release_clock(thread_mgr).clone();
+ let terminaion_clock = self.release_clock(thread_mgr, |clock| clock.clone());
self.thread_info.get_mut()[current_thread].termination_vector_clock =
Some(terminaion_clock);

@@ -1778,21 +1777,23 @@ impl GlobalState {
clocks.clock.join(clock);
}

- /// Returns the `release` clock of the current thread.
+ /// Calls the given closure with the "release" clock of the current thread.
/// Other threads can acquire this clock in the future to establish synchronization
/// with this program point.
- pub fn release_clock<'tcx>(&self, threads: &ThreadManager<'tcx>) -> Ref<'_, VClock> {
+ pub fn release_clock<'tcx, R>(
+     &self,
+     threads: &ThreadManager<'tcx>,
+     callback: impl FnOnce(&VClock) -> R,
+ ) -> R {
let thread = threads.active_thread();
let span = threads.active_thread_ref().current_span();
- // We increment the clock each time this happens, to ensure no two releases
- // can be confused with each other.
let (index, mut clocks) = self.thread_state_mut(thread);
+ let r = callback(&clocks.clock);
+ // Increment the clock, so that all following events cannot be confused with anything that
+ // occurred before the release. Crucially, the callback is invoked on the *old* clock!
clocks.increment_clock(index, span);
- drop(clocks);
- // To return a read-only view, we need to release the RefCell
- // and borrow it again.
- let (_index, clocks) = self.thread_state(thread);
- Ref::map(clocks, |c| &c.clock)
+
+ r
}

fn thread_index(&self, thread: ThreadId) -> VectorIdx {
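To make the ordering concrete, here is a minimal self-contained sketch of the fixed protocol, using toy types rather than Miri's actual VClock and ThreadManager: the snapshot handed to the callback deliberately excludes the release increment.

use std::cell::RefCell;

// Toy model of a per-thread vector clock (one counter per thread slot).
struct ToyClocks {
    clock: RefCell<Vec<u32>>,
    index: usize, // this thread's slot
}

impl ToyClocks {
    // Mirrors the fixed release_clock: snapshot first, then increment.
    fn release_clock<R>(&self, callback: impl FnOnce(&[u32]) -> R) -> R {
        let mut clock = self.clock.borrow_mut();
        // The callback sees the clock *before* the release increment...
        let r = callback(clock.as_slice());
        // ...so events after this point, tagged with the incremented value,
        // are not covered by the released clock.
        clock[self.index] += 1;
        r
    }
}

fn main() {
    let t = ToyClocks { clock: RefCell::new(vec![0, 0]), index: 0 };
    let released = t.release_clock(|c| c.to_vec());
    // The released snapshot excludes the increment; acquiring it will not
    // synchronize with the releasing thread's subsequent events.
    assert_eq!(released, vec![0, 0]);
    assert_eq!(*t.clock.borrow(), vec![1, 0]);
}

Under the old interface, the borrow was taken after the increment, so in this sketch the released snapshot would have been [1, 0], and an acquirer would wrongly treat the releasing thread's subsequent events as already synchronized.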
6 changes: 4 additions & 2 deletions src/concurrency/init_once.rs
@@ -93,7 +93,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {

// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
- init_once.clock.clone_from(&data_race.release_clock(&this.machine.threads));
+ data_race
+     .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
}

// Wake up everyone.
@@ -119,7 +120,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {

// Each complete happens-before the end of the wait
if let Some(data_race) = &this.machine.data_race {
- init_once.clock.clone_from(&data_race.release_clock(&this.machine.threads));
+ data_race
+     .release_clock(&this.machine.threads, |clock| init_once.clock.clone_from(clock));
}

// Wake up one waiting thread, so they can go ahead and try to init this.
16 changes: 11 additions & 5 deletions src/concurrency/sync.rs
@@ -444,7 +444,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
// The mutex is completely unlocked. Try transferring ownership
// to another thread.
if let Some(data_race) = &this.machine.data_race {
- mutex.clock.clone_from(&data_race.release_clock(&this.machine.threads));
+ data_race.release_clock(&this.machine.threads, |clock| {
+     mutex.clock.clone_from(clock)
+ });
}
if let Some(thread) = this.machine.sync.mutexes[id].queue.pop_front() {
this.unblock_thread(thread, BlockReason::Mutex(id))?;
@@ -553,7 +555,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
}
if let Some(data_race) = &this.machine.data_race {
// Add this to the shared-release clock of all concurrent readers.
- rwlock.clock_current_readers.join(&data_race.release_clock(&this.machine.threads));
+ data_race.release_clock(&this.machine.threads, |clock| {
+     rwlock.clock_current_readers.join(clock)
+ });
}

// The thread was a reader. If the lock is not held any more, give it to a writer.
@@ -632,7 +636,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, thread);
// Record release clock for next lock holder.
if let Some(data_race) = &this.machine.data_race {
- rwlock.clock_unlocked.clone_from(&*data_race.release_clock(&this.machine.threads));
+ data_race.release_clock(&this.machine.threads, |clock| {
+     rwlock.clock_unlocked.clone_from(clock)
+ });
}
// The thread was a writer.
//
@@ -764,7 +770,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {

// Each condvar signal happens-before the end of the condvar wake
if let Some(data_race) = data_race {
- condvar.clock.clone_from(&*data_race.release_clock(&this.machine.threads));
+ data_race.release_clock(&this.machine.threads, |clock| condvar.clock.clone_from(clock));
}
let Some(waiter) = condvar.waiters.pop_front() else {
return interp_ok(false);
@@ -837,7 +843,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {

// Each futex-wake happens-before the end of the futex wait
if let Some(data_race) = data_race {
- futex.clock.clone_from(&*data_race.release_clock(&this.machine.threads));
+ data_race.release_clock(&this.machine.threads, |clock| futex.clock.clone_from(clock));
}

// Wake up the first thread in the queue that matches any of the bits in the bitset.
4 changes: 2 additions & 2 deletions src/shims/unix/linux/epoll.rs
@@ -568,9 +568,9 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
let epoll = epfd.downcast::<Epoll>().unwrap();

// Synchronize running thread to the epoll ready list.
- if let Some(clock) = &this.release_clock() {
+ this.release_clock(|clock| {
epoll.ready_list.clock.borrow_mut().join(clock);
- }
+ });

if let Some(thread_id) = epoll.thread_id.borrow_mut().pop() {
waiter.push(thread_id);
4 changes: 2 additions & 2 deletions src/shims/unix/linux/eventfd.rs
@@ -140,9 +140,9 @@ impl FileDescription for Event {
match self.counter.get().checked_add(num) {
Some(new_count @ 0..=MAX_COUNTER) => {
// Future `read` calls will synchronize with this write, so update the FD clock.
- if let Some(clock) = &ecx.release_clock() {
+ ecx.release_clock(|clock| {
self.clock.borrow_mut().join(clock);
- }
+ });
self.counter.set(new_count);
}
None | Some(u64::MAX) =>
4 changes: 2 additions & 2 deletions src/shims/unix/unnamed_socket.rs
@@ -234,9 +234,9 @@ impl FileDescription for AnonSocket {
}
}
// Remember this clock so `read` can synchronize with us.
- if let Some(clock) = &ecx.release_clock() {
+ ecx.release_clock(|clock| {
writebuf.clock.join(clock);
- }
+ });
// Do full write / partial write based on the space available.
let actual_write_size = len.min(available_space);
let bytes = ecx.read_bytes_ptr_strip_provenance(ptr, Size::from_bytes(len))?;
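These shim updates are the release half of a release/acquire pairing: the writer stashes its released clock in the buffer, and a later `read` joins (acquires) it. As a sketch of what that join does, here is the pointwise-maximum operation on toy vector clocks (not Miri's real API, where `VClock::join` plays this role):

// Toy vector clocks: acquiring joins the released snapshot into the
// acquirer's own clock via a pointwise maximum.
fn acquire_clock(own: &mut Vec<u32>, released: &[u32]) {
    for (o, r) in own.iter_mut().zip(released) {
        *o = (*o).max(*r); // keep the larger timestamp per thread
    }
}

fn main() {
    let mut reader_clock = vec![0, 2];
    let writebuf_clock = [3, 1]; // snapshot the writer released into the buffer
    acquire_clock(&mut reader_clock, &writebuf_clock);
    // The reader now sees everything the writer did before the release.
    assert_eq!(reader_clock, vec![3, 2]);
}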
31 changes: 31 additions & 0 deletions tests/fail-dep/libc/socketpair-data-race.rs
@@ -0,0 +1,31 @@
//! This is a regression test for <https://github.com/rust-lang/miri/issues/3947>: we had some
//! faulty logic around `release_clock` that led to this code not reporting a data race.
//@ignore-target: windows # no libc socketpair on Windows
//@compile-flags: -Zmiri-preemption-rate=0
use std::thread;

fn main() {
static mut VAL: u8 = 0;
let mut fds = [-1, -1];
let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) };
assert_eq!(res, 0);
let thread1 = thread::spawn(move || {
let data = "a".as_bytes().as_ptr();
let res = unsafe { libc::write(fds[0], data as *const libc::c_void, 1) };
assert_eq!(res, 1);
// The write to VAL is *after* the write to the socket, so there's no proper synchronization.
unsafe { VAL = 1 };
});
thread::yield_now();

let mut buf: [u8; 1] = [0; 1];
let res: i32 = unsafe {
libc::read(fds[1], buf.as_mut_ptr().cast(), buf.len() as libc::size_t).try_into().unwrap()
};
assert_eq!(res, 1);
assert_eq!(buf, "a".as_bytes());

unsafe { assert_eq!({ VAL }, 1) }; //~ERROR: Data race

thread1.join().unwrap();
}
20 changes: 20 additions & 0 deletions tests/fail-dep/libc/socketpair-data-race.stderr
@@ -0,0 +1,20 @@
error: Undefined Behavior: Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
--> tests/fail-dep/libc/socketpair-data-race.rs:LL:CC
|
LL | unsafe { assert_eq!({ VAL }, 1) };
| ^^^ Data race detected between (1) non-atomic write on thread `unnamed-ID` and (2) non-atomic read on thread `main` at ALLOC. (2) just happened here
|
help: and (1) occurred earlier here
--> tests/fail-dep/libc/socketpair-data-race.rs:LL:CC
|
LL | unsafe { VAL = 1 };
| ^^^^^^^
= help: this indicates a bug in the program: it performed an invalid operation, and caused Undefined Behavior
= help: see https://doc.rust-lang.org/nightly/reference/behavior-considered-undefined.html for further information
= note: BACKTRACE (of the first span):
= note: inside `main` at tests/fail-dep/libc/socketpair-data-race.rs:LL:CC

note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace

error: aborting due to 1 previous error
