wgpu_core/lock/
ranked.rs

1//! Lock types that enforce well-ranked lock acquisition order.
2//!
3//! This module's [`Mutex`] and [`RwLock`] types are instrumented to check that
4//! `wgpu-core` acquires locks according to their rank, to prevent deadlocks. To
5//! use it, put `--cfg wgpu_validate_locks` in `RUSTFLAGS`.
6//!
7//! The [`LockRank`] constants in the [`lock::rank`] module describe edges in a
8//! directed graph of lock acquisitions: each lock's rank says, if this is the most
9//! recently acquired lock that you are still holding, then these are the locks you
10//! are allowed to acquire next.
11//!
12//! As long as this graph doesn't have cycles, any number of threads can acquire
13//! locks along paths through the graph without deadlock:
14//!
15//! - Assume that if a thread is holding a lock, then it will either release it,
16//!   or block trying to acquire another one. No thread just sits on its locks
17//!   forever for unrelated reasons. If it did, then that would be a source of
18//!   deadlock "outside the system" that we can't do anything about.
19//!
20//! - This module asserts that threads acquire and release locks in a stack-like
21//!   order: a lock is dropped only when it is the *most recently acquired* lock
22//!   *still held* - call this the "youngest" lock. This stack-like ordering
23//!   isn't a Rust requirement; Rust lets you drop guards in any order you like.
24//!   This is a restriction we impose.
25//!
26//! - Consider the directed graph whose nodes are locks, and whose edges go from
27//!   each lock to its permitted followers, the locks in its [`LockRank::followers`]
28//!   set. The definition of the [`lock::rank`] module's [`LockRank`] constants
29//!   ensures that this graph has no cycles, including trivial cycles from a node to
30//!   itself.
31//!
32//! - This module then asserts that each thread attempts to acquire a lock only if
33//!   it is among its youngest lock's permitted followers. Thus, as a thread
34//!   acquires locks, it must be traversing a path through the graph along its
35//!   edges.
36//!
37//! - Because there are no cycles in the graph, whenever one thread is blocked
38//!   waiting to acquire a lock, that lock must be held by a different thread: if
39//!   you were allowed to acquire a lock you already hold, that would be a cycle in
40//!   the graph.
41//!
42//! - Furthermore, because the graph has no cycles, as we work our way from each
43//!   thread to the thread it is blocked waiting for, we must eventually reach an
44//!   end point: there must be some thread that is able to acquire its next lock, or
45//!   that is about to release a lock.
46//!
47//! Thus, the system as a whole is always able to make progress: it is free of
48//! deadlocks.
49//!
50//! Note that this validation only monitors each thread's behavior in isolation:
51//! there's only thread-local state, nothing communicated between threads. So we
52//! don't detect deadlocks, per se, only the potential to cause deadlocks. This
53//! means that the validation is conservative, but more reproducible, since it's not
54//! dependent on any particular interleaving of execution.
55//!
56//! [`lock::rank`]: crate::lock::rank
57
58use core::{cell::Cell, fmt, ops, panic::Location};
59
60use super::rank::LockRank;
61
62pub use LockState as RankData;
63
64/// A `Mutex` instrumented for deadlock prevention.
65///
66/// This is just a wrapper around a [`parking_lot::Mutex`], along with
67/// its rank in the `wgpu_core` lock ordering.
68///
69/// For details, see [the module documentation][self].
pub struct Mutex<T> {
    /// The wrapped `parking_lot` mutex that actually provides mutual exclusion.
    inner: parking_lot::Mutex<T>,
    /// This lock's rank in the `wgpu-core` lock ordering graph.
    rank: LockRank,
}
74
75/// A guard produced by locking [`Mutex`].
76///
77/// This is just a wrapper around a [`parking_lot::MutexGuard`], along
78/// with the state needed to track lock acquisition.
79///
80/// For details, see [the module documentation][self].
pub struct MutexGuard<'a, T> {
    /// The underlying `parking_lot` guard that releases the lock when dropped.
    inner: parking_lot::MutexGuard<'a, T>,
    /// The per-thread lock state to restore when this guard is dropped.
    saved: LockStateGuard,
}
85
std::thread_local! {
    /// The current thread's lock acquisition state, maintained by
    /// `acquire` and `release`. Purely thread-local: nothing is shared
    /// between threads, so validation is deterministic.
    static LOCK_STATE: Cell<LockState> = const { Cell::new(LockState::INITIAL) };
}
89
90/// Per-thread state for the deadlock checker.
#[derive(Debug, Copy, Clone)]
pub struct LockState {
    /// The last lock we acquired, and where.
    ///
    /// `None` when this thread holds no locks, so any rank may be
    /// acquired next.
    last_acquired: Option<(LockRank, &'static Location<'static>)>,

    /// The number of locks currently held.
    ///
    /// This is used to enforce stack-like lock acquisition and release.
    depth: u32,
}
101
impl LockState {
    /// The state of a thread that holds no locks at all.
    const INITIAL: LockState = LockState {
        last_acquired: None,
        depth: 0,
    };
}
108
109/// A container that restores a [`LockState`] when dropped.
110///
111/// This type serves two purposes:
112///
113/// - Operations like `RwLockWriteGuard::downgrade` would like to be able to
114///   destructure lock guards and reassemble their pieces into new guards, but
115///   if the guard type itself implements `Drop`, we can't destructure it
116///   without unsafe code or pointless `Option`s whose state is almost always
117///   statically known.
118///
119/// - We can just implement `Drop` for this type once, and then use it in lock
120///   guards, rather than implementing `Drop` separately for each guard type.
struct LockStateGuard(LockState); // the saved state restored by `Drop`
122
impl Drop for LockStateGuard {
    fn drop(&mut self) {
        // Restore the thread's lock state to what it was before the
        // corresponding `acquire`, checking stack-like release order.
        release(self.0)
    }
}
128
129/// Check and record the acquisition of a lock with `new_rank`.
130///
131/// Check that acquiring a lock with `new_rank` is permitted at this point, and
132/// update the per-thread state accordingly.
133///
134/// Return the `LockState` that must be restored when this thread is released.
135fn acquire(new_rank: LockRank, location: &'static Location<'static>) -> LockState {
136    let state = LOCK_STATE.get();
137    // Initially, it's fine to acquire any lock. So we only
138    // need to check when `last_acquired` is `Some`.
139    if let Some((ref last_rank, ref last_location)) = state.last_acquired {
140        assert!(
141            last_rank.followers.contains(new_rank.bit),
142            "Attempt to acquire nested mutexes in wrong order:\n\
143             last locked {:<35} at {}\n\
144             now locking {:<35} at {}\n\
145             Locking {} after locking {} is not permitted.",
146            last_rank.bit.member_name(),
147            last_location,
148            new_rank.bit.member_name(),
149            location,
150            new_rank.bit.member_name(),
151            last_rank.bit.member_name(),
152        );
153    }
154    LOCK_STATE.set(LockState {
155        last_acquired: Some((new_rank, location)),
156        depth: state.depth + 1,
157    });
158    state
159}
160
/// Record the release of a lock whose saved state was `saved`.
///
/// Check that locks are being released in stacking order, and restore the
/// per-thread state that was in effect before the lock was acquired.
fn release(saved: LockState) {
    let prior = LOCK_STATE.replace(saved);

    // Although Rust allows mutex guards to be dropped in any
    // order, this analysis requires that locks be acquired and
    // released in stack order: the next lock to be released must be
    // the most recently acquired lock still held.
    assert_eq!(
        prior.depth,
        saved.depth + 1,
        "Lock not released in stacking order"
    );
}
178
179impl<T> Mutex<T> {
180    pub fn new(rank: LockRank, value: T) -> Mutex<T> {
181        Mutex {
182            inner: parking_lot::Mutex::new(value),
183            rank,
184        }
185    }
186
187    #[track_caller]
188    pub fn lock(&self) -> MutexGuard<'_, T> {
189        let saved = acquire(self.rank, Location::caller());
190        MutexGuard {
191            inner: self.inner.lock(),
192            saved: LockStateGuard(saved),
193        }
194    }
195
196    pub fn into_inner(self) -> T {
197        self.inner.into_inner()
198    }
199}
200
201impl<'a, T> ops::Deref for MutexGuard<'a, T> {
202    type Target = T;
203
204    fn deref(&self) -> &Self::Target {
205        self.inner.deref()
206    }
207}
208
209impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
210    fn deref_mut(&mut self) -> &mut Self::Target {
211        self.inner.deref_mut()
212    }
213}
214
215impl<T: fmt::Debug> fmt::Debug for Mutex<T> {
216    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
217        self.inner.fmt(f)
218    }
219}
220
221/// An `RwLock` instrumented for deadlock prevention.
222///
223/// This is just a wrapper around a [`parking_lot::RwLock`], along with
224/// its rank in the `wgpu_core` lock ordering.
225///
226/// For details, see [the module documentation][self].
pub struct RwLock<T> {
    /// The wrapped `parking_lot` reader-writer lock.
    inner: parking_lot::RwLock<T>,
    /// This lock's rank in the `wgpu-core` lock ordering graph.
    rank: LockRank,
}
231
232/// A read guard produced by locking [`RwLock`] for reading.
233///
234/// This is just a wrapper around a [`parking_lot::RwLockReadGuard`], along with
235/// the state needed to track lock acquisition.
236///
237/// For details, see [the module documentation][self].
pub struct RwLockReadGuard<'a, T> {
    /// The underlying `parking_lot` read guard.
    inner: parking_lot::RwLockReadGuard<'a, T>,
    /// The per-thread lock state to restore when this guard is dropped.
    saved: LockStateGuard,
}
242
243/// A write guard produced by locking [`RwLock`] for writing.
244///
245/// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`], along
246/// with the state needed to track lock acquisition.
247///
248/// For details, see [the module documentation][self].
pub struct RwLockWriteGuard<'a, T> {
    /// The underlying `parking_lot` write guard.
    inner: parking_lot::RwLockWriteGuard<'a, T>,
    /// The per-thread lock state to restore when this guard is dropped.
    saved: LockStateGuard,
}
253
254impl<T> RwLock<T> {
255    pub fn new(rank: LockRank, value: T) -> RwLock<T> {
256        RwLock {
257            inner: parking_lot::RwLock::new(value),
258            rank,
259        }
260    }
261
262    #[track_caller]
263    pub fn read(&self) -> RwLockReadGuard<'_, T> {
264        let saved = acquire(self.rank, Location::caller());
265        RwLockReadGuard {
266            inner: self.inner.read(),
267            saved: LockStateGuard(saved),
268        }
269    }
270
271    #[track_caller]
272    pub fn write(&self) -> RwLockWriteGuard<'_, T> {
273        let saved = acquire(self.rank, Location::caller());
274        RwLockWriteGuard {
275            inner: self.inner.write(),
276            saved: LockStateGuard(saved),
277        }
278    }
279
280    /// Force an read-unlock operation on this lock.
281    ///
282    /// Safety:
283    /// - A read lock must be held which is not held by a guard.
284    pub unsafe fn force_unlock_read(&self, data: RankData) {
285        release(data);
286        unsafe { self.inner.force_unlock_read() };
287    }
288}
289
290impl<'a, T> RwLockReadGuard<'a, T> {
291    // Forget the read guard, leaving the lock in a locked state with no guard.
292    //
293    // Equivalent to std::mem::forget, but preserves the information about the lock
294    // rank.
295    pub fn forget(this: Self) -> RankData {
296        core::mem::forget(this.inner);
297
298        this.saved.0
299    }
300}
301
302impl<'a, T> RwLockWriteGuard<'a, T> {
303    pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> {
304        RwLockReadGuard {
305            inner: parking_lot::RwLockWriteGuard::downgrade(this.inner),
306            saved: this.saved,
307        }
308    }
309}
310
311impl<T: fmt::Debug> fmt::Debug for RwLock<T> {
312    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
313        self.inner.fmt(f)
314    }
315}
316
317impl<'a, T> ops::Deref for RwLockReadGuard<'a, T> {
318    type Target = T;
319
320    fn deref(&self) -> &Self::Target {
321        self.inner.deref()
322    }
323}
324
325impl<'a, T> ops::Deref for RwLockWriteGuard<'a, T> {
326    type Target = T;
327
328    fn deref(&self) -> &Self::Target {
329        self.inner.deref()
330    }
331}
332
333impl<'a, T> ops::DerefMut for RwLockWriteGuard<'a, T> {
334    fn deref_mut(&mut self) -> &mut Self::Target {
335        self.inner.deref_mut()
336    }
337}
338
/// Locks can be acquired in the order indicated by their ranks.
#[test]
fn permitted() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());

    // Guards drop in reverse declaration order, so release is stack-like.
    let _pawn_guard = pawn.lock();
    let _rook_guard = rook.lock();
}
350
/// Locks can only be acquired in the order indicated by their ranks.
#[test]
#[should_panic(expected = "Locking pawn after locking rook")]
fn forbidden_unrelated() {
    use super::rank;

    let rook = Mutex::new(rank::ROOK, ());
    let pawn = Mutex::new(rank::PAWN, ());

    // Acquiring a pawn-ranked lock while holding a rook-ranked one
    // violates the ordering graph and must panic.
    let _rook_guard = rook.lock();
    let _pawn_guard = pawn.lock();
}
363
/// Lock acquisitions can't skip ranks.
///
/// These two locks *could* be acquired in this order, but only if other locks
/// are acquired in between them. Skipping ranks isn't allowed.
#[test]
#[should_panic(expected = "Locking knight after locking pawn")]
fn forbidden_skip() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let knight = Mutex::new(rank::KNIGHT, ());

    // KNIGHT is not a direct follower of PAWN, so this must panic.
    let _pawn_guard = pawn.lock();
    let _knight_guard = knight.lock();
}
379
/// Locks can be acquired and released in a stack-like order.
#[test]
fn stack_like() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());
    let bishop = Mutex::new(rank::BISHOP, ());

    let pawn_guard = pawn.lock();

    // Acquire and release a second lock while still holding the first.
    let rook_guard = rook.lock();
    drop(rook_guard);

    // With the rook released, acquiring a bishop is still stack-like.
    let bishop_guard = bishop.lock();
    drop(bishop_guard);

    drop(pawn_guard);
}
397
/// Locks can only be acquired and released in a stack-like order.
#[test]
#[should_panic(expected = "Lock not released in stacking order")]
fn non_stack_like() {
    use super::rank;

    let pawn = Mutex::new(rank::PAWN, ());
    let rook = Mutex::new(rank::ROOK, ());

    let pawn_guard = pawn.lock();
    let rook_guard = rook.lock();

    // Leak the inner guard so that dropping it while unwinding from the
    // panic we're testing for doesn't cause a double panic.
    core::mem::forget(rook_guard);

    // Releasing the outer lock while the inner one is still "held" breaks
    // stack order and must panic.
    drop(pawn_guard);
}