// wgpu_core/lock/ranked.rs

//! Lock types that enforce well-ranked lock acquisition order.
//!
//! This module's [`Mutex`] and [`RwLock`] types are instrumented to check that
//! `wgpu-core` acquires locks according to their rank, to prevent deadlocks. To
//! use it, put `--cfg wgpu_validate_locks` in `RUSTFLAGS`.
//!
//! The [`LockRank`] constants in the [`lock::rank`] module describe edges in a
//! directed graph of lock acquisitions: each lock's rank says, if this is the most
//! recently acquired lock that you are still holding, then these are the locks you
//! are allowed to acquire next.
//!
//! As long as this graph doesn't have cycles, any number of threads can acquire
//! locks along paths through the graph without deadlock:
//!
//! - Assume that if a thread is holding a lock, then it will either release it,
//!   or block trying to acquire another one. No thread just sits on its locks
//!   forever for unrelated reasons. If it did, then that would be a source of
//!   deadlock "outside the system" that we can't do anything about.
//!
//! - This module asserts that threads acquire and release locks in a stack-like
//!   order: a lock is dropped only when it is the *most recently acquired* lock
//!   *still held* - call this the "youngest" lock. This stack-like ordering
//!   isn't a Rust requirement; Rust lets you drop guards in any order you like.
//!   This is a restriction we impose.
//!
//! - Consider the directed graph whose nodes are locks, and whose edges go from
//!   each lock to its permitted followers, the locks in its [`LockRank::followers`]
//!   set. The definition of the [`lock::rank`] module's [`LockRank`] constants
//!   ensures that this graph has no cycles, including trivial cycles from a node to
//!   itself.
//!
//! - This module then asserts that each thread attempts to acquire a lock only if
//!   it is among its youngest lock's permitted followers. Thus, as a thread
//!   acquires locks, it must be traversing a path through the graph along its
//!   edges.
//!
//! - Because there are no cycles in the graph, whenever one thread is blocked
//!   waiting to acquire a lock, that lock must be held by a different thread: if
//!   you were allowed to acquire a lock you already hold, that would be a cycle in
//!   the graph.
//!
//! - Furthermore, because the graph has no cycles, as we work our way from each
//!   thread to the thread it is blocked waiting for, we must eventually reach an
//!   end point: there must be some thread that is able to acquire its next lock, or
//!   that is about to release a lock.
//!
//! Thus, the system as a whole is always able to make progress: it is free of
//! deadlocks.
//!
//! Note that this validation only monitors each thread's behavior in isolation:
//! there's only thread-local state, nothing communicated between threads. So we
//! don't detect deadlocks per se, only the potential to cause deadlocks. This
//! makes the validation conservative, but more reproducible, since it's not
//! dependent on any particular interleaving of execution.
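//!
//! As a small illustration, here is a permitted nesting using the chess-themed
//! ranks from this module's tests (a sketch; real `wgpu-core` code uses the
//! production ranks defined in [`lock::rank`]):
//!
//! ```ignore
//! let first = Mutex::new(rank::PAWN, ());
//! let second = Mutex::new(rank::ROOK, ());
//!
//! let _outer = first.lock(); // nothing held yet: any rank may be acquired
//! let _inner = second.lock(); // ROOK is a permitted follower of PAWN
//! // Guards are dropped innermost-first, satisfying the stack-like order.
//! ```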
//!
//! [`lock::rank`]: crate::lock::rank

use core::{cell::Cell, fmt, ops, panic::Location};

use super::rank::LockRank;

pub use LockState as RankData;

/// A `Mutex` instrumented for deadlock prevention.
///
/// This is just a wrapper around a [`parking_lot::Mutex`], along with
/// its rank in the `wgpu_core` lock ordering.
///
/// For details, see [the module documentation][self].
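///
/// A brief usage sketch, using one of the chess-themed ranks defined for this
/// module's tests (real callers pass a rank from the `lock::rank` module):
///
/// ```ignore
/// let counter = Mutex::new(rank::PAWN, 0u32);
/// *counter.lock() += 1; // the acquisition is checked against this thread's lock state
/// ```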
pub struct Mutex<T> {
    inner: parking_lot::Mutex<T>,
    rank: LockRank,
}

/// A guard produced by locking [`Mutex`].
///
/// This is just a wrapper around a [`parking_lot::MutexGuard`], along
/// with the state needed to track lock acquisition.
///
/// For details, see [the module documentation][self].
pub struct MutexGuard<'a, T> {
    inner: parking_lot::MutexGuard<'a, T>,
    saved: LockStateGuard,
}

std::thread_local! {
    static LOCK_STATE: Cell<LockState> = const { Cell::new(LockState::INITIAL) };
}

/// Per-thread state for the deadlock checker.
#[derive(Debug, Copy, Clone)]
pub struct LockState {
    /// The last lock we acquired, and where.
    last_acquired: Option<(LockRank, &'static Location<'static>)>,

    /// The number of locks currently held.
    ///
    /// This is used to enforce stack-like lock acquisition and release.
    depth: u32,
}

impl LockState {
    const INITIAL: LockState = LockState {
        last_acquired: None,
        depth: 0,
    };
}

/// A container that restores a [`LockState`] when dropped.
///
/// This type serves two purposes:
///
/// - Operations like `RwLockWriteGuard::downgrade` would like to be able to
///   destructure lock guards and reassemble their pieces into new guards, but
///   if the guard type itself implements `Drop`, we can't destructure it
///   without unsafe code or pointless `Option`s whose state is almost always
///   statically known.
///
/// - We can just implement `Drop` for this type once, and then use it in lock
///   guards, rather than implementing `Drop` separately for each guard type.
struct LockStateGuard(LockState);

impl Drop for LockStateGuard {
    fn drop(&mut self) {
        release(self.0)
    }
}

/// Check and record the acquisition of a lock with `new_rank`.
///
/// Check that acquiring a lock with `new_rank` is permitted at this point, and
/// update the per-thread state accordingly.
///
/// Return the `LockState` that must be restored when this lock is released.
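///
/// A sketch of how `acquire` and [`release`] pair up (`some_rank` is a
/// placeholder, not a real rank):
///
/// ```ignore
/// let saved = acquire(some_rank, Location::caller()); // depth: d -> d + 1
/// // ... the lock is held here ...
/// release(saved); // depth: d + 1 -> d, restoring the saved state
/// ```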
fn acquire(new_rank: LockRank, location: &'static Location<'static>) -> LockState {
    let state = LOCK_STATE.get();
    // Initially, it's fine to acquire any lock. So we only
    // need to check when `last_acquired` is `Some`.
    if let Some((ref last_rank, ref last_location)) = state.last_acquired {
        assert!(
            last_rank.followers.contains(new_rank.bit),
            "Attempt to acquire nested mutexes in wrong order:\n\
             last locked {:<35} at {}\n\
             now locking {:<35} at {}\n\
             Locking {} after locking {} is not permitted.",
            last_rank.bit.member_name(),
            last_location,
            new_rank.bit.member_name(),
            location,
            new_rank.bit.member_name(),
            last_rank.bit.member_name(),
        );
    }
    LOCK_STATE.set(LockState {
        last_acquired: Some((new_rank, location)),
        depth: state.depth + 1,
    });
    state
}

/// Record the release of a lock whose saved state was `saved`.
///
/// Check that locks are being released in stacking order, and update the
/// per-thread state accordingly.
fn release(saved: LockState) {
    let prior = LOCK_STATE.replace(saved);

    // Although Rust allows mutex guards to be dropped in any
    // order, this analysis requires that locks be acquired and
    // released in stack order: the next lock to be released must be
    // the most recently acquired lock still held.
    assert_eq!(
        prior.depth,
        saved.depth + 1,
        "Lock not released in stacking order"
    );
}

impl<T> Mutex<T> {
    pub fn new(rank: LockRank, value: T) -> Mutex<T> {
        Mutex {
            inner: parking_lot::Mutex::new(value),
            rank,
        }
    }

    #[track_caller]
    pub fn lock(&self) -> MutexGuard<'_, T> {
        let saved = acquire(self.rank, Location::caller());
        MutexGuard {
            inner: self.inner.lock(),
            saved: LockStateGuard(saved),
        }
    }
}

impl<'a, T> ops::Deref for MutexGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}

impl<'a, T> ops::DerefMut for MutexGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.deref_mut()
    }
}

impl<T: fmt::Debug> fmt::Debug for Mutex<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

/// An `RwLock` instrumented for deadlock prevention.
///
/// This is just a wrapper around a [`parking_lot::RwLock`], along with
/// its rank in the `wgpu_core` lock ordering.
///
/// For details, see [the module documentation][self].
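///
/// A brief usage sketch, using a test-only rank for illustration. Both `read`
/// and `write` acquisitions are checked against the lock's rank:
///
/// ```ignore
/// let table = RwLock::new(rank::PAWN, Vec::<u32>::new());
/// table.write().push(1);             // exclusive access
/// assert_eq!(table.read().len(), 1); // shared access
/// ```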
pub struct RwLock<T> {
    inner: parking_lot::RwLock<T>,
    rank: LockRank,
}

/// A read guard produced by locking [`RwLock`] for reading.
///
/// This is just a wrapper around a [`parking_lot::RwLockReadGuard`], along with
/// the state needed to track lock acquisition.
///
/// For details, see [the module documentation][self].
pub struct RwLockReadGuard<'a, T> {
    inner: parking_lot::RwLockReadGuard<'a, T>,
    saved: LockStateGuard,
}

/// A write guard produced by locking [`RwLock`] for writing.
///
/// This is just a wrapper around a [`parking_lot::RwLockWriteGuard`], along
/// with the state needed to track lock acquisition.
///
/// For details, see [the module documentation][self].
pub struct RwLockWriteGuard<'a, T> {
    inner: parking_lot::RwLockWriteGuard<'a, T>,
    saved: LockStateGuard,
}

impl<T> RwLock<T> {
    pub fn new(rank: LockRank, value: T) -> RwLock<T> {
        RwLock {
            inner: parking_lot::RwLock::new(value),
            rank,
        }
    }

    #[track_caller]
    pub fn read(&self) -> RwLockReadGuard<'_, T> {
        let saved = acquire(self.rank, Location::caller());
        RwLockReadGuard {
            inner: self.inner.read(),
            saved: LockStateGuard(saved),
        }
    }

    #[track_caller]
    pub fn write(&self) -> RwLockWriteGuard<'_, T> {
        let saved = acquire(self.rank, Location::caller());
        RwLockWriteGuard {
            inner: self.inner.write(),
            saved: LockStateGuard(saved),
        }
    }

    /// Force a read-unlock operation on this lock.
    ///
    /// Safety:
    /// - A read lock must be held on this lock that is not tracked by any
    ///   guard, such as one left behind by [`RwLockReadGuard::forget`].
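    ///
    /// A sketch of the intended pairing with [`RwLockReadGuard::forget`],
    /// using a test-only rank for illustration:
    ///
    /// ```ignore
    /// let lock = RwLock::new(rank::PAWN, ());
    /// let data = RwLockReadGuard::forget(lock.read()); // still read-locked, no guard
    /// // SAFETY: the read lock taken above is held and has no guard.
    /// unsafe { lock.force_unlock_read(data) };
    /// ```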
    pub unsafe fn force_unlock_read(&self, data: RankData) {
        release(data);
        unsafe { self.inner.force_unlock_read() };
    }
}

impl<'a, T> RwLockReadGuard<'a, T> {
    /// Forget the read guard, leaving the lock in a locked state with no guard.
    ///
    /// Equivalent to [`core::mem::forget`], but preserves the information about
    /// the lock rank so that [`RwLock::force_unlock_read`] can later release it.
    pub fn forget(this: Self) -> RankData {
        // Copy out the saved state, then forget the entire guard, including its
        // `LockStateGuard`: the per-thread lock state must continue to record
        // this acquisition until `force_unlock_read` releases it. Forgetting
        // only `this.inner` would let the `LockStateGuard` drop and release the
        // state here, making the later release in `force_unlock_read` fail the
        // stacking-order check.
        let saved = this.saved.0;
        core::mem::forget(this);
        saved
    }
}

impl<'a, T> RwLockWriteGuard<'a, T> {
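    /// Atomically downgrade this write guard to a read guard, keeping the
    /// recorded lock state. This uses `parking_lot`'s downgrade, so the lock
    /// is never released in between.
    ///
    /// A usage sketch, with a test-only rank for illustration:
    ///
    /// ```ignore
    /// let lock = RwLock::new(rank::PAWN, 0u32);
    /// let mut write = lock.write();
    /// *write += 1;
    /// let read = RwLockWriteGuard::downgrade(write); // still one held lock
    /// assert_eq!(*read, 1);
    /// ```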
    pub fn downgrade(this: Self) -> RwLockReadGuard<'a, T> {
        RwLockReadGuard {
            inner: parking_lot::RwLockWriteGuard::downgrade(this.inner),
            saved: this.saved,
        }
    }
}

impl<T: fmt::Debug> fmt::Debug for RwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        self.inner.fmt(f)
    }
}

impl<'a, T> ops::Deref for RwLockReadGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}

impl<'a, T> ops::Deref for RwLockWriteGuard<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}

impl<'a, T> ops::DerefMut for RwLockWriteGuard<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.deref_mut()
    }
}

/// Locks can be acquired in the order indicated by their ranks.
#[test]
fn permitted() {
    use super::rank;

    let lock1 = Mutex::new(rank::PAWN, ());
    let lock2 = Mutex::new(rank::ROOK, ());

    let _guard1 = lock1.lock();
    let _guard2 = lock2.lock();
}

/// Locks can only be acquired in the order indicated by their ranks.
#[test]
#[should_panic(expected = "Locking pawn after locking rook")]
fn forbidden_unrelated() {
    use super::rank;

    let lock1 = Mutex::new(rank::ROOK, ());
    let lock2 = Mutex::new(rank::PAWN, ());

    let _guard1 = lock1.lock();
    let _guard2 = lock2.lock();
}

/// Lock acquisitions can't skip ranks.
///
/// These two locks *could* be acquired in this order, but only if other locks
/// are acquired in between them. Skipping ranks isn't allowed.
#[test]
#[should_panic(expected = "Locking knight after locking pawn")]
fn forbidden_skip() {
    use super::rank;

    let lock1 = Mutex::new(rank::PAWN, ());
    let lock2 = Mutex::new(rank::KNIGHT, ());

    let _guard1 = lock1.lock();
    let _guard2 = lock2.lock();
}

/// Locks can be acquired and released in a stack-like order.
#[test]
fn stack_like() {
    use super::rank;

    let lock1 = Mutex::new(rank::PAWN, ());
    let lock2 = Mutex::new(rank::ROOK, ());
    let lock3 = Mutex::new(rank::BISHOP, ());

    let guard1 = lock1.lock();
    let guard2 = lock2.lock();
    drop(guard2);

    let guard3 = lock3.lock();
    drop(guard3);
    drop(guard1);
}

/// Locks can only be acquired and released in a stack-like order.
#[test]
#[should_panic(expected = "Lock not released in stacking order")]
fn non_stack_like() {
    use super::rank;

    let lock1 = Mutex::new(rank::PAWN, ());
    let lock2 = Mutex::new(rank::ROOK, ());

    let guard1 = lock1.lock();
    let guard2 = lock2.lock();

    // Avoid a double panic from dropping this while unwinding due to the panic
    // we're testing for.
    core::mem::forget(guard2);

    drop(guard1);
}