1use core::{cell::UnsafeCell, fmt, mem::ManuallyDrop};
23use crate::lock::{rank, RankData, RwLock, RwLockReadGuard, RwLockWriteGuard};
/// A guard that provides read access to snatchable data.
///
/// Holds a read guard on the device's snatch lock; while any `SnatchGuard`
/// is alive, the exclusive (write) guard needed to snatch cannot be taken.
pub struct SnatchGuard<'a>(RwLockReadGuard<'a, ()>);
/// A guard that allows snatching the snatchable data.
///
/// Holds the write (exclusive) guard of the device's snatch lock. The inner
/// guard is never read (`#[expect(dead_code)]`); it exists only to keep the
/// lock held for the guard's lifetime.
pub struct ExclusiveSnatchGuard<'a>(#[expect(dead_code)] RwLockWriteGuard<'a, ()>);
/// A value that is mostly immutable but can be "snatched" if we need to destroy
/// it early.
///
/// In order to safely access the underlying data, the device's global snatchable
/// lock must be taken. To guarantee it, methods take a read or write guard of that
/// special lock.
pub struct Snatchable<T> {
    // `None` once the value has been snatched (or if constructed via `empty`).
    value: UnsafeCell<Option<T>>,
}
1920impl<T> Snatchable<T> {
21pub fn new(val: T) -> Self {
22 Snatchable {
23 value: UnsafeCell::new(Some(val)),
24 }
25 }
2627#[allow(dead_code)]
28pub fn empty() -> Self {
29 Snatchable {
30 value: UnsafeCell::new(None),
31 }
32 }
3334/// Get read access to the value. Requires a the snatchable lock's read guard.
35pub fn get<'a>(&'a self, _guard: &'a SnatchGuard) -> Option<&'a T> {
36unsafe { (*self.value.get()).as_ref() }
37 }
3839/// Take the value. Requires a the snatchable lock's write guard.
40pub fn snatch(&self, _guard: &mut ExclusiveSnatchGuard) -> Option<T> {
41unsafe { (*self.value.get()).take() }
42 }
4344/// Take the value without a guard. This can only be used with exclusive access
45 /// to self, so it does not require locking.
46 ///
47 /// Typically useful in a drop implementation.
48pub fn take(&mut self) -> Option<T> {
49self.value.get_mut().take()
50 }
51}
5253// Can't safely print the contents of a snatchable object without holding
54// the lock.
55impl<T> fmt::Debug for Snatchable<T> {
56fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
57write!(f, "<snatchable>")
58 }
59}
// SAFETY: shared access to the inner value is only handed out while the
// device's snatch lock is held (`Snatchable::get` / `Snatchable::snatch`
// both demand a guard), which serializes readers against the writer that
// snatches the value.
// NOTE(review): there is no `T: Send`/`Sync` bound here — presumably every
// instantiation in this crate uses thread-safe `T`; worth confirming.
unsafe impl<T> Sync for Snatchable<T> {}
6263use trace::LockTrace;
#[cfg(all(debug_assertions, feature = "std"))]
mod trace {
    use core::{cell::Cell, fmt, panic::Location};
    use std::{backtrace::Backtrace, thread};

    /// Debug record of a snatch-lock acquisition: its purpose, the call site,
    /// and a captured backtrace.
    pub(super) struct LockTrace {
        purpose: &'static str,
        caller: &'static Location<'static>,
        backtrace: Backtrace,
    }

    impl fmt::Display for LockTrace {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(
                f,
                "a {} lock at {}\n{}",
                self.purpose, self.caller, self.backtrace
            )
        }
    }

    impl LockTrace {
        /// Note that the current thread is acquiring the snatch lock.
        ///
        /// Panics if this thread already holds it: a recursive acquisition
        /// would deadlock, so we abort loudly with both call sites instead.
        #[track_caller]
        pub(super) fn enter(purpose: &'static str) {
            let new = LockTrace {
                purpose,
                caller: Location::caller(),
                backtrace: Backtrace::capture(),
            };

            match SNATCH_LOCK_TRACE.take() {
                None => SNATCH_LOCK_TRACE.set(Some(new)),
                Some(prev) => {
                    let current = thread::current();
                    let name = current.name().unwrap_or("<unnamed>");
                    panic!(
                        "thread '{name}' attempted to acquire a snatch lock recursively.\n\
                         - Currently trying to acquire {new}\n\
                         - Previously acquired {prev}",
                    );
                }
            }
        }

        /// Note that the current thread has released the snatch lock.
        pub(super) fn exit() {
            SNATCH_LOCK_TRACE.take();
        }
    }

    // Per-thread record of the (at most one) snatch lock the thread holds.
    std::thread_local! {
        static SNATCH_LOCK_TRACE: Cell<Option<LockTrace>> = const { Cell::new(None) };
    }
}
#[cfg(not(all(debug_assertions, feature = "std")))]
mod trace {
    /// No-op stand-in for the debug lock trace in release / no-std builds.
    pub(super) struct LockTrace {
        // Private field so the type cannot be constructed outside this module.
        _private: (),
    }

    impl LockTrace {
        /// Does nothing; tracing is only compiled in debug builds with `std`.
        pub(super) fn enter(_purpose: &'static str) {}
        /// Does nothing; tracing is only compiled in debug builds with `std`.
        pub(super) fn exit() {}
    }
}
/// A Device-global lock for all snatchable data.
pub struct SnatchLock {
    // The lock protects no data of its own (`()`); it exists purely to
    // serialize access to every `Snatchable` belonging to the device.
    lock: RwLock<()>,
}
impl SnatchLock {
    /// Create the device's snatch lock.
    ///
    /// The safety of `Snatchable::get` and `Snatchable::snatch` relies on their use of the
    /// right SnatchLock (the one associated to the same device). This method is unsafe
    /// to force users to think twice about creating a SnatchLock. The only place this
    /// method should be called is when creating the device.
    pub unsafe fn new(rank: rank::LockRank) -> Self {
        SnatchLock {
            lock: RwLock::new(rank, ()),
        }
    }

    /// Request read access to snatchable resources.
    #[track_caller]
    pub fn read(&self) -> SnatchGuard<'_> {
        // Record the acquisition first so recursive locking on this thread
        // panics (debug builds) instead of deadlocking.
        LockTrace::enter("read");
        SnatchGuard(self.lock.read())
    }

    /// Request write access to snatchable resources.
    ///
    /// This should only be called when a resource needs to be snatched. This has
    /// a high risk of causing lock contention if called concurrently with other
    /// wgpu work.
    #[track_caller]
    pub fn write(&self) -> ExclusiveSnatchGuard<'_> {
        LockTrace::enter("write");
        ExclusiveSnatchGuard(self.lock.write())
    }

    /// Forcibly release a read lock whose guard was leaked with
    /// [`SnatchGuard::forget`].
    ///
    /// # Safety
    ///
    /// This can cause deadlocks or unlock a lock held elsewhere if misused.
    /// It should only be used in very specific cases, like when a resource
    /// needs to be snatched in a panic handler; `data` should come from a
    /// forgotten guard of this same lock — TODO confirm against callers.
    #[track_caller]
    pub unsafe fn force_unlock_read(&self, data: RankData) {
        // Clear the per-thread trace entry that the forgotten guard's skipped
        // `Drop` left behind.
        LockTrace::exit();
        unsafe { self.lock.force_unlock_read(data) };
    }
}
impl SnatchGuard<'_> {
    /// Forget the guard, leaving the lock in a locked state with no guard.
    ///
    /// This is equivalent to `std::mem::forget`, but preserves the information about the lock
    /// rank.
    ///
    /// Note: this skips `SnatchGuard`'s `Drop`, so the debug lock trace still
    /// records the lock as held; `SnatchLock::force_unlock_read` clears it.
    pub fn forget(this: Self) -> RankData {
        // Cancel the drop implementation of the current guard.
        let manually_drop = ManuallyDrop::new(this);

        // We are unable to destructure out of this guard due to the drop
        // implementation, so we manually read the inner value.
        // SAFETY: This is safe because we never access the original guard again:
        // `manually_drop` suppresses its destructor and is not touched after
        // this read, so the inner guard is not duplicated.
        let inner_guard = unsafe { core::ptr::read(&manually_drop.0) };

        RwLockReadGuard::forget(inner_guard)
    }
}
impl Drop for SnatchGuard<'_> {
    fn drop(&mut self) {
        // Pop this thread's lock-trace entry; the inner read guard releases
        // the lock via its own `Drop`.
        LockTrace::exit();
    }
}
impl Drop for ExclusiveSnatchGuard<'_> {
    fn drop(&mut self) {
        // Pop this thread's lock-trace entry; the inner write guard releases
        // the lock via its own `Drop`.
        LockTrace::exit();
    }
}