wgpu_core/device/life.rs
use alloc::{sync::Arc, vec::Vec};

use smallvec::SmallVec;
use thiserror::Error;

use crate::{
    device::{
        queue::{EncoderInFlight, SubmittedWorkDoneClosure, TempResource},
        DeviceError,
    },
    ray_tracing::BlasCompactReadyPendingClosure,
    resource::{Blas, Buffer, Texture, Trackable},
    snatch::SnatchGuard,
    SubmissionIndex,
};

/// A command submitted to the GPU for execution.
///
/// ## Keeping resources alive while the GPU is using them
///
/// [`wgpu_hal`] requires that, when a command is submitted to a queue, all the
/// resources it uses must remain alive until it has finished executing.
///
/// [`wgpu_hal`]: hal
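///
/// An `ActiveSubmission` is created by `LifetimeTracker::track_submission`
/// when work is submitted, and dropped by
/// `LifetimeTracker::triage_submissions` once the device's fence shows the
/// submission has finished, so its encoders (and the resources they
/// reference) stay alive for exactly that span. A rough sketch of that
/// lifecycle; the variable names are illustrative, not the crate's actual
/// call sites:
///
/// ```ignore
/// life_tracker.track_submission(submit_index, encoders_in_flight);
/// // ... GPU executes; eventually the device fence reaches `submit_index` ...
/// let work_done_closures = life_tracker.triage_submissions(submit_index);
/// // The `ActiveSubmission` and its encoders have now been dropped.
/// ```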
struct ActiveSubmission {
    /// The index of the submission we track.
    ///
    /// When `Device::fence`'s value is greater than or equal to this, our queue
    /// submission has completed.
    index: SubmissionIndex,

    /// Buffers to be mapped once this submission has completed.
    mapped: Vec<Arc<Buffer>>,

    /// BLASes to have their compacted size read back once this submission has completed.
    compact_read_back: Vec<Arc<Blas>>,

    /// Command buffers used by this submission, and the encoder that owns them.
    ///
    /// [`wgpu_hal::Queue::submit`] requires the submitted command buffers to
    /// remain alive until the submission has completed execution. Command
    /// encoders double as allocation pools for command buffers, so holding them
    /// here and cleaning them up in [`LifetimeTracker::triage_submissions`]
    /// satisfies that requirement.
    ///
    /// Once this submission has completed, the command buffers are reset and
    /// the command encoder is recycled.
    ///
    /// [`wgpu_hal::Queue::submit`]: hal::Queue::submit
    encoders: Vec<EncoderInFlight>,

    /// List of queue "on_submitted_work_done" closures to be called once this
    /// submission has completed.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}

impl ActiveSubmission {
    /// Returns true if this submission contains the given buffer.
    ///
    /// This only uses constant-time operations.
    pub fn contains_buffer(&self, buffer: &Buffer) -> bool {
        for encoder in &self.encoders {
            // The ownership location of buffers depends on where the command encoder
            // came from. If it is the staging command encoder on the queue, it is
            // in the pending buffer list. If it came from a user command encoder,
            // it is in the tracker.

            if encoder.trackers.buffers.contains(buffer) {
                return true;
            }

            if encoder
                .pending_buffers
                .contains_key(&buffer.tracker_index())
            {
                return true;
            }
        }

        false
    }

    /// Returns true if this submission contains the given texture.
    ///
    /// This only uses constant-time operations.
    pub fn contains_texture(&self, texture: &Texture) -> bool {
        for encoder in &self.encoders {
            // The ownership location of textures depends on where the command encoder
            // came from. If it is the staging command encoder on the queue, it is
            // in the pending texture list. If it came from a user command encoder,
            // it is in the tracker.

            if encoder.trackers.textures.contains(texture) {
                return true;
            }

            if encoder
                .pending_textures
                .contains_key(&texture.tracker_index())
            {
                return true;
            }
        }

        false
    }

    /// Returns true if this submission contains the given blas.
    ///
    /// This only uses constant-time operations.
    pub fn contains_blas(&self, blas: &Blas) -> bool {
        for encoder in &self.encoders {
            if encoder.trackers.blas_s.contains(blas) {
                return true;
            }

            if encoder.pending_blas_s.contains_key(&blas.tracker_index()) {
                return true;
            }
        }

        false
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum WaitIdleError {
    #[error(transparent)]
    Device(#[from] DeviceError),
    #[error("Tried to wait using a submission index ({0}) that has not been returned by a successful submission (last successful submission: {1})")]
    WrongSubmissionIndex(SubmissionIndex, SubmissionIndex),
    #[error("Timed out trying to wait for the given submission index.")]
    Timeout,
}

impl WaitIdleError {
    pub fn to_poll_error(&self) -> Option<wgt::PollError> {
        match self {
            WaitIdleError::Timeout => Some(wgt::PollError::Timeout),
            &WaitIdleError::WrongSubmissionIndex(a, b) => {
                Some(wgt::PollError::WrongSubmissionIndex(a, b))
            }
            _ => None,
        }
    }
}

/// Resource tracking for a device.
///
/// ## Host mapping buffers
///
/// A buffer cannot be mapped until all active queue submissions that use it
/// have completed. To that end:
///
/// - Each buffer's `ResourceInfo::submission_index` records the index of the
///   most recent queue submission that uses that buffer.
///
/// - When the device is polled, the following `LifetimeTracker` methods decide
///   what should happen next:
///
///   1) `triage_submissions` moves entries in `self.active[i]` for completed
///      submissions to `self.ready_to_map`. At this point, both
///      `self.active` and `self.ready_to_map` are up to date with the given
///      submission index.
///
///   2) `handle_mapping` drains `self.ready_to_map` and actually maps the
///      buffers, collecting a list of notification closures to call.
///
/// Only calling `Global::buffer_map_async` clones a new `Arc` for the
/// buffer. This new `Arc` is only dropped by `handle_mapping`.
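///
/// The sketch below shows that per-poll sequence. It is illustrative only:
/// the variable names (`life_tracker`, `last_done_index`, `snatch_guard`)
/// and the surrounding control flow are assumptions, not an excerpt of the
/// device's actual maintenance code.
///
/// ```ignore
/// // 1) Retire submissions the fence says are complete, collecting any
/// //    "on_submitted_work_done" closures that are now ready to fire.
/// let work_done_closures = life_tracker.triage_submissions(last_done_index);
///
/// // 2) Map the buffers that no in-flight submission uses any more,
/// //    collecting their mapping callbacks.
/// let mapping_callbacks = life_tracker.handle_mapping(&snatch_guard);
///
/// // Mapping callbacks are delivered before "work done" closures (see
/// // `add_work_done_closure`).
/// ```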
pub(crate) struct LifetimeTracker {
    /// Resources used by queue submissions still in flight. One entry per
    /// submission, with older submissions appearing before younger.
    ///
    /// Entries are added by `track_submission` and drained by
    /// `LifetimeTracker::triage_submissions`. Lots of methods contribute data
    /// to particular entries.
    active: Vec<ActiveSubmission>,

    /// Buffers the user has asked us to map, and which are not used by any
    /// queue submission still in flight.
    ready_to_map: Vec<Arc<Buffer>>,

    /// BLASes the user has asked us to prepare to compact, and which are not used by any
    /// queue submission still in flight.
    ready_to_compact: Vec<Arc<Blas>>,

    /// Queue "on_submitted_work_done" closures that were registered while no
    /// submissions were pending. These cannot be invoked immediately, since they
    /// must run _after_ all outstanding buffer-mapping callbacks, so we defer
    /// them here until the next time the device is maintained.
    work_done_closures: SmallVec<[SubmittedWorkDoneClosure; 1]>,
}

impl LifetimeTracker {
    pub fn new() -> Self {
        Self {
            active: Vec::new(),
            ready_to_map: Vec::new(),
            ready_to_compact: Vec::new(),
            work_done_closures: SmallVec::new(),
        }
    }

    /// Return true if there are no queue submissions still in flight.
    pub fn queue_empty(&self) -> bool {
        self.active.is_empty()
    }

    /// Start tracking resources associated with a new queue submission.
    pub fn track_submission(&mut self, index: SubmissionIndex, encoders: Vec<EncoderInFlight>) {
        self.active.push(ActiveSubmission {
            index,
            mapped: Vec::new(),
            compact_read_back: Vec::new(),
            encoders,
            work_done_closures: SmallVec::new(),
        });
    }

    /// Schedule `buffer` to be mapped once the GPU is done with it.
    ///
    /// If an active submission still uses the buffer, add it to that
    /// submission's `mapped` list and return the submission's index so the
    /// caller can wait for it; otherwise add it to `ready_to_map` and return
    /// `None`.
    pub(crate) fn map(&mut self, buffer: &Arc<Buffer>) -> Option<SubmissionIndex> {
        // Find the most recent active submission that still uses this buffer;
        // the buffer can only be mapped once that submission has completed.
        let submission = self
            .active
            .iter_mut()
            .rev()
            .find(|a| a.contains_buffer(buffer));

        let maybe_submission_index = submission.as_ref().map(|s| s.index);

        submission
            .map_or(&mut self.ready_to_map, |a| &mut a.mapped)
            .push(buffer.clone());

        maybe_submission_index
    }

    /// Schedule `blas` to have its compacted size read back once the GPU is
    /// done with it.
    ///
    /// If an active submission still uses the BLAS, add it to that
    /// submission's `compact_read_back` list and return the submission's index
    /// so the caller can wait for it; otherwise add it to `ready_to_compact`
    /// and return `None`.
    pub(crate) fn prepare_compact(&mut self, blas: &Arc<Blas>) -> Option<SubmissionIndex> {
        // Find the most recent active submission that still uses this BLAS;
        // its compacted size can only be read back once that submission has
        // completed.
        let submission = self.active.iter_mut().rev().find(|a| a.contains_blas(blas));

        let maybe_submission_index = submission.as_ref().map(|s| s.index);

        submission
            .map_or(&mut self.ready_to_compact, |a| &mut a.compact_read_back)
            .push(blas.clone());

        maybe_submission_index
    }

    /// Returns the submission index of the most recent submission that uses the
    /// given buffer.
    pub fn get_buffer_latest_submission_index(&self, buffer: &Buffer) -> Option<SubmissionIndex> {
        // We iterate in reverse order, so that we can bail out early as soon
        // as we find a hit.
        self.active.iter().rev().find_map(|submission| {
            if submission.contains_buffer(buffer) {
                Some(submission.index)
            } else {
                None
            }
        })
    }

    /// Returns the submission index of the most recent submission that uses the
    /// given texture.
    pub fn get_texture_latest_submission_index(
        &self,
        texture: &Texture,
    ) -> Option<SubmissionIndex> {
        // We iterate in reverse order, so that we can bail out early as soon
        // as we find a hit.
        self.active.iter().rev().find_map(|submission| {
            if submission.contains_texture(texture) {
                Some(submission.index)
            } else {
                None
            }
        })
    }

    /// Sort out the consequences of completed submissions.
    ///
    /// Assume that all submissions up through `last_done` have completed.
    ///
    /// - Buffers used by those submissions are now ready to map, if requested.
    ///   Add any buffers in the submission's [`mapped`] list to
    ///   [`self.ready_to_map`], where [`LifetimeTracker::handle_mapping`]
    ///   will find them.
    ///
    /// Return a list of [`SubmittedWorkDoneClosure`]s to run.
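    ///
    /// For example (a hypothetical state, with made-up indices):
    ///
    /// ```ignore
    /// // `self.active` holds submissions with indices [3, 4, 5].
    /// let closures = life_tracker.triage_submissions(4);
    /// // Submissions 3 and 4 are drained into the "ready" lists;
    /// // submission 5 remains in flight.
    /// ```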
    ///
    /// [`mapped`]: ActiveSubmission::mapped
    /// [`self.ready_to_map`]: LifetimeTracker::ready_to_map
    /// [`SubmittedWorkDoneClosure`]: crate::device::queue::SubmittedWorkDoneClosure
    #[must_use]
    pub fn triage_submissions(
        &mut self,
        last_done: SubmissionIndex,
    ) -> SmallVec<[SubmittedWorkDoneClosure; 1]> {
        profiling::scope!("triage_submissions");

        //TODO: enable when `is_sorted_by_key` is stable
        //debug_assert!(self.active.is_sorted_by_key(|a| a.index));
        let done_count = self
            .active
            .iter()
            .position(|a| a.index > last_done)
            .unwrap_or(self.active.len());

        let mut work_done_closures: SmallVec<_> = self.work_done_closures.drain(..).collect();
        for a in self.active.drain(..done_count) {
            self.ready_to_map.extend(a.mapped);
            self.ready_to_compact.extend(a.compact_read_back);
            for encoder in a.encoders {
                // This involves actually decrementing the ref count of all command buffer
                // resources, so can be _very_ expensive.
                profiling::scope!("drop command buffer trackers");
                drop(encoder);
            }
            work_done_closures.extend(a.work_done_closures);
        }
        work_done_closures
    }

    /// Keep `temp_resource` alive until the submission with index
    /// `last_submit_index` has completed, by attaching it to that submission's
    /// last encoder.
    ///
    /// If no active submission has that index, the resource is dropped
    /// immediately.
    pub fn schedule_resource_destruction(
        &mut self,
        temp_resource: TempResource,
        last_submit_index: SubmissionIndex,
    ) {
        let resources = self
            .active
            .iter_mut()
            .find(|a| a.index == last_submit_index)
            .map(|a| {
                // Because this resource's `last_submit_index` matches `a.index`,
                // we know that we must have done something with the resource,
                // so `a.encoders` should not be empty.
                &mut a.encoders.last_mut().unwrap().temp_resources
            });
        if let Some(resources) = resources {
            resources.push(temp_resource);
        }
    }

    /// Register a closure to be called once the most recent submission has
    /// completed.
    ///
    /// Returns the index of that submission, or `None` if no submission is in
    /// flight (in which case the closure is deferred until the next time the
    /// device is maintained).
    pub fn add_work_done_closure(
        &mut self,
        closure: SubmittedWorkDoneClosure,
    ) -> Option<SubmissionIndex> {
        match self.active.last_mut() {
            Some(active) => {
                active.work_done_closures.push(closure);
                Some(active.index)
            }
            // We must defer the closure until all previously occurring map_async closures
            // have fired. This is required by the spec.
            None => {
                self.work_done_closures.push(closure);
                None
            }
        }
    }

    /// Map the buffers in `self.ready_to_map`.
    ///
    /// Return a list of mapping notifications to send.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    #[must_use]
    pub(crate) fn handle_mapping(
        &mut self,
        snatch_guard: &SnatchGuard,
    ) -> Vec<super::BufferMapPendingClosure> {
        if self.ready_to_map.is_empty() {
            return Vec::new();
        }
        let mut pending_callbacks: Vec<super::BufferMapPendingClosure> =
            Vec::with_capacity(self.ready_to_map.len());

        for buffer in self.ready_to_map.drain(..) {
            match buffer.map(snatch_guard) {
                Some(cb) => pending_callbacks.push(cb),
                None => continue,
            }
        }
        pending_callbacks
    }

    /// Read back compact sizes from the BLASes in `self.ready_to_compact`.
    ///
    /// Return a list of compaction-ready notifications to send.
    ///
    /// See the documentation for [`LifetimeTracker`] for details.
    #[must_use]
    pub(crate) fn handle_compact_read_back(&mut self) -> Vec<BlasCompactReadyPendingClosure> {
        if self.ready_to_compact.is_empty() {
            return Vec::new();
        }
        let mut pending_callbacks: Vec<BlasCompactReadyPendingClosure> =
            Vec::with_capacity(self.ready_to_compact.len());

        for blas in self.ready_to_compact.drain(..) {
            match blas.read_back_compact_size() {
                Some(cb) => pending_callbacks.push(cb),
                None => continue,
            }
        }
        pending_callbacks
    }
}