Fix false positives wrt RwLock deadlock detection (#2121)

* add a test suite demonstrating the issue

* fix false positives in RwLock, thus passing the new test suite

* friendlier output

* augmented test suite with RwLock specifics (failing as expected!)

* full support for RwLocks

* implement support for guard remappings

* Add some newlines

* join `use` statements

* pass cranky

* addressing PR comments

Co-authored-by: Emil Ernerfeldt <emil.ernerfeldt@gmail.com>
Clement Rey 2022-10-10 12:07:44 +02:00 committed by GitHub
parent f4d2aa5b4a
commit 367378d75d
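
Before the diff, a minimal sketch (not part of this commit) of the kind of false positive being fixed: the old assertion fired whenever the lock was write-locked at all, so a reader on a *different* thread could trip the deadlock detector even though it would simply block. `RwLock` here is assumed to be this crate's deadlock-detecting wrapper (`crate::mutex::RwLock`):

fn reader_blocks_instead_of_panicking() {
    use std::sync::Arc;
    use std::time::Duration;

    let lock = Arc::new(RwLock::new(0_i32));

    // Another thread takes and briefly holds the write lock.
    let writer = {
        let lock = Arc::clone(&lock);
        std::thread::spawn(move || {
            let _w = lock.write();
            std::thread::sleep(Duration::from_millis(100)); // hold the write lock for a while
        })
    };

    std::thread::sleep(Duration::from_millis(10)); // give the writer time to acquire the lock
    let _r = lock.read(); // previously could panic with "DEAD-LOCK DETECTED"; now it just waits
    writer.join().unwrap();
}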


@@ -146,11 +146,98 @@ mod rw_lock_impl {
#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "deadlock_detection")]
mod rw_lock_impl {
use std::{
ops::{Deref, DerefMut},
sync::Arc,
thread::ThreadId,
};
use ahash::HashMap;
use parking_lot::{MappedRwLockReadGuard, MappedRwLockWriteGuard};
/// The lock you get from [`RwLock::read`].
pub struct RwLockReadGuard<'a, T> {
// The option is used only because we need to `take()` the guard out of self
// when doing remappings (`map()`), i.e. it's used as a safe `ManuallyDrop`.
guard: Option<MappedRwLockReadGuard<'a, T>>,
holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
}
impl<'a, T> RwLockReadGuard<'a, T> {
#[inline]
pub fn map<U, F>(mut s: Self, f: F) -> RwLockReadGuard<'a, U>
where
F: FnOnce(&T) -> &U,
{
RwLockReadGuard {
guard: s
.guard
.take()
.map(|g| parking_lot::MappedRwLockReadGuard::map(g, f)),
holders: Arc::clone(&s.holders),
}
}
}
impl<'a, T> Deref for RwLockReadGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&**self.guard.as_ref().unwrap()
}
}
impl<'a, T> Drop for RwLockReadGuard<'a, T> {
fn drop(&mut self) {
let tid = std::thread::current().id();
self.holders.lock().remove(&tid);
}
}
/// The lock you get from [`RwLock::write`].
pub struct RwLockWriteGuard<'a, T> {
// The option is used only because we need to `take()` the guard out of self
// when doing remappings (`map()`), i.e. it's used as a safe `ManuallyDrop`.
guard: Option<MappedRwLockWriteGuard<'a, T>>,
holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
}
impl<'a, T> RwLockWriteGuard<'a, T> {
#[inline]
pub fn map<U, F>(mut s: Self, f: F) -> RwLockWriteGuard<'a, U>
where
F: FnOnce(&mut T) -> &mut U,
{
RwLockWriteGuard {
guard: s
.guard
.take()
.map(|g| parking_lot::MappedRwLockWriteGuard::map(g, f)),
holders: Arc::clone(&s.holders),
}
}
}
impl<'a, T> Deref for RwLockWriteGuard<'a, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
&**self.guard.as_ref().unwrap()
}
}
impl<'a, T> DerefMut for RwLockWriteGuard<'a, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut **self.guard.as_mut().unwrap()
}
}
impl<'a, T> Drop for RwLockWriteGuard<'a, T> {
fn drop(&mut self) {
let tid = std::thread::current().id();
self.holders.lock().remove(&tid);
}
}
/// Provides interior mutability.
///
@@ -158,39 +245,74 @@ mod rw_lock_impl {
#[derive(Default)]
pub struct RwLock<T> {
lock: parking_lot::RwLock<T>,
// Technically we'd need a list of backtraces per thread-id since parking_lot's
// read-locks are reentrant.
// In practice it's not that useful to have the whole list though, so we only
// keep track of the first backtrace for now.
holders: Arc<parking_lot::Mutex<HashMap<ThreadId, backtrace::Backtrace>>>,
}
impl<T> RwLock<T> {
pub fn new(val: T) -> Self {
Self {
lock: parking_lot::RwLock::new(val),
holders: Default::default(),
}
}
pub fn read(&self) -> RwLockReadGuard<'_, T> {
let tid = std::thread::current().id();
// If it is write-locked, and we locked it (re-entrancy deadlock)
let would_deadlock =
self.lock.is_locked_exclusive() && self.holders.lock().contains_key(&tid);
assert!(
!would_deadlock,
"{} DEAD-LOCK DETECTED ({:?})!\n\
Trying to grab read-lock at:\n{}\n\
which is already exclusively held by current thread at:\n{}\n\n",
std::any::type_name::<Self>(),
tid,
format_backtrace(&mut make_backtrace()),
format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
);
self.holders
.lock()
.entry(tid)
.or_insert_with(make_backtrace);
RwLockReadGuard {
guard: parking_lot::RwLockReadGuard::map(self.lock.read(), |v| v).into(),
holders: Arc::clone(&self.holders),
}
}
pub fn write(&self) -> RwLockWriteGuard<'_, T> {
let tid = std::thread::current().id();
// If it is locked in any way, and we locked it (re-entrancy deadlock)
let would_deadlock = self.lock.is_locked() && self.holders.lock().contains_key(&tid);
assert!(
!would_deadlock,
"{} DEAD-LOCK DETECTED ({:?})!\n\
Trying to grab write-lock at:\n{}\n\
which is already held by current thread at:\n{}\n\n",
std::any::type_name::<Self>(),
tid,
format_backtrace(&mut make_backtrace()),
format_backtrace(self.holders.lock().get_mut(&tid).unwrap())
);
self.holders
.lock()
.entry(tid)
.or_insert_with(make_backtrace);
RwLockWriteGuard {
guard: parking_lot::RwLockWriteGuard::map(self.lock.write(), |v| v).into(),
holders: Arc::clone(&self.holders),
}
}
}
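
As a usage note (not part of the diff): the guard remapping added above mirrors parking_lot's `map` API. A minimal sketch, using the `RwLock`, `RwLockReadGuard` and `RwLockWriteGuard` types defined in this module:

fn remap_guards_sketch() {
    let lock = RwLock::new((String::from("tag"), 0_u32));

    // Narrow a write guard down to one field; `map` consumes the original guard.
    let mut counter = RwLockWriteGuard::map(lock.write(), |t| &mut t.1);
    *counter += 1; // the mapped guard derefs to the selected field
    drop(counter); // releases the underlying parking_lot guard

    // Same idea for read guards.
    let tag = RwLockReadGuard::map(lock.read(), |t| &t.0);
    assert_eq!(&*tag, "tag");
}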
@@ -335,3 +457,123 @@ mod tests {
other_thread.join().unwrap();
}
}
#[cfg(not(target_arch = "wasm32"))]
#[cfg(feature = "deadlock_detection")]
#[cfg(test)]
mod tests_rwlock {
use crate::mutex::RwLock;
use std::time::Duration;
#[test]
fn lock_two_different_rwlocks_single_thread() {
let one = RwLock::new(());
let two = RwLock::new(());
let _a = one.write();
let _b = two.write();
}
#[test]
fn rwlock_multiple_threads() {
use std::sync::Arc;
let one = Arc::new(RwLock::new(()));
let our_lock = one.write();
let other_thread1 = {
let one = Arc::clone(&one);
std::thread::spawn(move || {
let _ = one.write();
})
};
let other_thread2 = {
let one = Arc::clone(&one);
std::thread::spawn(move || {
let _ = one.read();
})
};
std::thread::sleep(Duration::from_millis(200));
drop(our_lock);
other_thread1.join().unwrap();
other_thread2.join().unwrap();
}
#[test]
#[should_panic]
fn rwlock_write_write_reentrancy() {
let one = RwLock::new(());
let _a1 = one.write();
let _a2 = one.write(); // panics
}
#[test]
#[should_panic]
fn rwlock_write_read_reentrancy() {
let one = RwLock::new(());
let _a1 = one.write();
let _a2 = one.read(); // panics
}
#[test]
#[should_panic]
fn rwlock_read_write_reentrancy() {
let one = RwLock::new(());
let _a1 = one.read();
let _a2 = one.write(); // panics
}
#[test]
fn rwlock_read_read_reentrancy() {
let one = RwLock::new(());
let _a1 = one.read();
// This is legal: this test suite specifically targets native, which relies
// on parking_lot's rw-locks, which are reentrant.
let _a2 = one.read();
}
#[test]
fn rwlock_short_read_foreign_read_write_reentrancy() {
use std::sync::Arc;
let lock = Arc::new(RwLock::new(()));
// Thread #0 grabs a read lock
let t0r0 = lock.read();
// Thread #1 grabs the same read lock
let other_thread = {
let lock = Arc::clone(&lock);
std::thread::spawn(move || {
let _t1r0 = lock.read();
})
};
other_thread.join().unwrap();
// Thread #0 releases its read lock
drop(t0r0);
// Thread #0 now grabs a write lock, which is legal
let _t0w0 = lock.write();
}
#[test]
#[should_panic]
fn rwlock_read_foreign_read_write_reentrancy() {
use std::sync::Arc;
let lock = Arc::new(RwLock::new(()));
// Thread #0 grabs a read lock
let _t0r0 = lock.read();
// Thread #1 grabs the same read lock
let other_thread = {
let lock = Arc::clone(&lock);
std::thread::spawn(move || {
let _t1r0 = lock.read();
})
};
other_thread.join().unwrap();
// Thread #0 now grabs a write lock, which should panic (read-write)
let _t0w0 = lock.write(); // panics
}
}
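
The new suite does not exercise `map` directly. A hypothetical extra test (an assumption, not part of this commit) could look like the following, assuming the guard types are re-exported from `crate::mutex` alongside `RwLock`:

#[test]
fn rwlock_guard_remapping() {
    use crate::mutex::{RwLock, RwLockReadGuard, RwLockWriteGuard};

    let lock = RwLock::new((1_i32, 2_i32));

    // A remapped read guard derefs to the selected field.
    let second = RwLockReadGuard::map(lock.read(), |t| &t.1);
    assert_eq!(*second, 2);
    drop(second);

    // A remapped write guard allows mutating just that field.
    let mut first = RwLockWriteGuard::map(lock.write(), |t| &mut t.0);
    *first += 10;
    drop(first);

    assert_eq!(*lock.read(), (11, 2));
}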