wgpu_core/device/queue.rs

use alloc::{boxed::Box, string::ToString, sync::Arc, vec, vec::Vec};
use core::{
    iter,
    mem::{self, ManuallyDrop},
    num::NonZeroU64,
    ptr::NonNull,
    sync::atomic::Ordering,
};
use smallvec::SmallVec;
use thiserror::Error;
use wgt::{
    error::{ErrorType, WebGpuError},
    AccelerationStructureFlags,
};

use super::{life::LifetimeTracker, Device};
#[cfg(feature = "trace")]
use crate::device::trace::{Action, IntoTrace};
use crate::{
    api_log,
    command::{
        extract_texture_selector, validate_linear_texture_data, validate_texture_buffer_copy,
        validate_texture_copy_dst_format, validate_texture_copy_range, ClearError,
        CommandAllocator, CommandBuffer, CommandEncoder, CommandEncoderError, CopySide,
        TransferError,
    },
    device::{DeviceError, FenceReadGuard, FenceWriteGuard, WaitIdleError},
    get_lowest_common_denom,
    global::Global,
    hal_label,
    id::{self, BlasId, QueueId},
    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
    lock::{rank, Mutex, MutexGuard, RwLock, RwLockWriteGuard},
    ray_tracing::{BlasCompactReadyPendingClosure, CompactBlasError},
    resource::{
        Blas, BlasCompactState, Buffer, BufferAccessError, BufferMapState, DestroyedBuffer,
        DestroyedResourceError, DestroyedTexture, Fallible, FlushedStagingBuffer,
        InvalidResourceError, Labeled, ParentDevice, ResourceErrorIdent, StagingBuffer, Texture,
        TextureInner, Trackable, TrackingData,
    },
    resource_log,
    scratch::ScratchBuffer,
    snatch::{SnatchGuard, Snatchable},
    track::{self, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};
use crate::{device::resource::CommandIndices, resource::RawResourceAccess};

pub struct Queue {
    raw: Box<dyn hal::DynQueue>,
    pub(crate) pending_writes: Mutex<PendingWrites>,
    life_tracker: Mutex<LifetimeTracker>,
    // The device needs to be dropped last (`Device.zero_buffer` might be referenced by the encoder in pending writes).
    pub(crate) device: Arc<Device>,
}

impl Queue {
    pub(crate) fn new(
        device: Arc<Device>,
        raw: Box<dyn hal::DynQueue>,
        instance_flags: wgt::InstanceFlags,
    ) -> Result<Self, DeviceError> {
        let pending_encoder = device
            .command_allocator
            .acquire_encoder(device.raw(), raw.as_ref())
            .map_err(DeviceError::from_hal)?;

        let mut pending_writes = PendingWrites::new(pending_encoder, instance_flags);

        let zero_buffer = device.zero_buffer.as_ref();
        pending_writes.activate();
        unsafe {
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::empty(),
                        to: wgt::BufferUses::COPY_DST,
                    },
                }]);
            pending_writes
                .command_encoder
                .clear_buffer(zero_buffer, 0..super::ZERO_BUFFER_SIZE);
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::COPY_DST,
                        to: wgt::BufferUses::COPY_SRC,
                    },
                }]);
        }

        Ok(Queue {
            raw,
            device,
            pending_writes: Mutex::new(rank::QUEUE_PENDING_WRITES, pending_writes),
            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
        })
    }

    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
        self.raw.as_ref()
    }

    #[track_caller]
    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
        self.life_tracker.lock()
    }

    /// Ensures the surface texture is in the PRESENT state, clearing it if it was never rendered to.
    /// Submits any necessary work to the GPU before the HAL present call.
    ///
    /// See <https://github.com/gfx-rs/wgpu/issues/6748>
    pub(crate) fn prepare_surface_texture_for_present(
        &self,
        texture: &Arc<Texture>,
    ) -> Result<(), DeviceError> {
        let snatch_guard = self.device.snatchable_lock.read();
        let (mut submission, _index) = self.allocate_submission(snatch_guard);
        let device = &self.device;

        // If the texture is uninitialized, it needs to be cleared before presenting.
        let needs_clear = {
            let status = texture.initialization_status.read();
            status
                .mips
                .first()
                .is_some_and(|mip| mip.check(0..1).is_some())
        };

        let mut pending_writes = self.pending_writes.lock();

        if needs_clear {
            let encoder = pending_writes.activate();
            let mut trackers = device.trackers.lock();
            crate::command::clear_texture(
                texture,
                TextureInitRange {
                    mip_range: 0..1,
                    layer_range: 0..1,
                },
                encoder,
                &mut trackers.textures,
                &device.alignments,
                device.zero_buffer.as_ref(),
                &submission.snatch_guard,
                device.instance_flags,
            )
            .map_err(|e| match e {
                ClearError::Device(e) => e,
                _ => DeviceError::Lost,
            })?;
            texture.initialization_status.write().mips[0].drain(0..1);
        }

        // Transition the texture to PRESENT in the device tracker.
        // If it's already in PRESENT, this produces no barriers and we can skip the submission.
        //
        // This has to be after any clear_texture call because clear_texture modifies the tracker state internally.
        // Computing transitions afterward ensures they reflect the actual current state.
        let pending = {
            let mut trackers = device.trackers.lock();
            let pending: Vec<track::PendingTransition<wgt::TextureUses>> = trackers
                .textures
                .set_single(
                    texture,
                    texture.full_range.clone(),
                    wgt::TextureUses::PRESENT,
                )
                .collect();
            pending
        };

        if pending.is_empty() {
            return Ok(());
        }

        // Emit the transition barriers to PRESENT.
        {
            let raw_texture = texture
                .try_raw(&submission.snatch_guard)
                .map_err(|_| DeviceError::Lost)?;
            let barriers: Vec<hal::TextureBarrier<'_, dyn hal::DynTexture>> = pending
                .into_iter()
                .map(|pt| pt.into_hal(raw_texture))
                .collect();

            let encoder = pending_writes.activate();
            // SAFETY:
            // - The encoder is in the recording state after `activate()`
            // - The texture is kept alive by the Arc from `acquired_texture`
            unsafe {
                encoder.transition_textures(&barriers);
            }
        }

        // Keep the texture alive in the submission so its clear_view isn't
        // destroyed before the GPU finishes the submitted commands.
        pending_writes.insert_texture(texture);

        // Flush pending writes through the standard submission path.
        submission
            .surface_textures
            .insert(Arc::as_ptr(texture), texture.clone());

        submission.submit(pending_writes)?;

        Ok(())
    }

    pub(crate) fn maintain(
        &self,
        submission_index: u64,
        snatch_guard: &SnatchGuard,
    ) -> (
        SmallVec<[SubmittedWorkDoneClosure; 1]>,
        Vec<super::BufferMapPendingClosure>,
        Vec<BlasCompactReadyPendingClosure>,
        bool,
    ) {
        let mut life_tracker = self.lock_life();
        let submission_closures = life_tracker.triage_submissions(submission_index);

        let mapping_closures = life_tracker.handle_mapping(snatch_guard);
        let blas_closures = life_tracker.handle_compact_read_back();

        let queue_empty = life_tracker.queue_empty();

        (
            submission_closures,
            mapping_closures,
            blas_closures,
            queue_empty,
        )
    }
}

crate::impl_resource_type!(Queue);
// TODO: https://github.com/gfx-rs/wgpu/issues/4014
impl Labeled for Queue {
    fn label(&self) -> &str {
        ""
    }
}
crate::impl_parent_device!(Queue);
crate::impl_storage_item!(Queue);

impl Drop for Queue {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        // On Vulkan, pending presents are not tracked by fences.
        // wait_for_idle covers both fence-tracked submissions and pending presents.
        match unsafe { self.raw.wait_for_idle() } {
            Ok(()) => {}
            Err(hal::DeviceError::Lost) => {
                self.device.handle_hal_error(hal::DeviceError::Lost);
            }
            Err(e) => {
                panic!("Unexpected error while waiting for queue idle on drop: {e:?}");
            }
        }

        let last_successful_submission_index = self
            .device
            .last_successful_submission_index
            .load(Ordering::Acquire);

        let snatch_guard = self.device.snatchable_lock.read();
        let (submission_closures, mapping_closures, blas_compact_ready_closures, queue_empty) =
            self.maintain(last_successful_submission_index, &snatch_guard);
        drop(snatch_guard);

        assert!(queue_empty);

        let closures = crate::device::UserClosures {
            mappings: mapping_closures,
            blas_compact_ready: blas_compact_ready_closures,
            submissions: submission_closures,
            device_lost_invocations: SmallVec::new(),
        };

        closures.fire();
    }
}

#[cfg(send_sync)]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + 'static>;

/// A texture or buffer to be freed soon.
///
/// This is just a tagged raw texture or buffer, generally about to be added to
/// some other more specific container like:
///
/// - `PendingWrites::temp_resources`: resources used by queue writes and
///   unmaps, waiting to be folded in with the next queue submission
///
/// - `ActiveSubmission::temp_resources`: temporary resources used by a queue
///   submission, to be freed when it completes
#[derive(Debug)]
pub enum TempResource {
    StagingBuffer(FlushedStagingBuffer),
    ScratchBuffer(ScratchBuffer),
    DestroyedBuffer(DestroyedBuffer),
    DestroyedTexture(DestroyedTexture),
}

/// A series of raw [`CommandBuffer`]s that have been submitted to a
/// queue, and the [`wgpu_hal::CommandEncoder`] that built them.
///
/// [`CommandBuffer`]: hal::Api::CommandBuffer
/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
pub(crate) struct EncoderInFlight {
    inner: crate::command::InnerCommandEncoder,
    pub(crate) trackers: Tracker,
    pub(crate) temp_resources: Vec<TempResource>,
    /// We only need to keep these resources alive.
    _indirect_draw_validation_resources: crate::indirect_validation::DrawResources,

    /// These are the buffers that have been tracked by `PendingWrites`.
    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    /// These are the textures that have been tracked by `PendingWrites`.
    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
    /// These are the BLASes that have been tracked by `PendingWrites`.
    pub(crate) pending_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
}

/// A private command encoder for writes made directly on the device
/// or queue.
///
/// Operations like `buffer_unmap`, `queue_write_buffer`, and
/// `queue_write_texture` need to copy data to the GPU. At the hal
/// level, this must be done by encoding and submitting commands, but
/// these operations are not associated with any specific wgpu command
/// buffer.
///
/// Instead, `Device::pending_writes` owns one of these values, which
/// has its own hal command encoder and resource lists. The commands
/// accumulated here are automatically submitted to the queue at the
/// sooner of:
///
/// 1. The user's next wgpu command buffer submission. (Pending writes
///    are inserted ahead of the user's commands.)
/// 2. The next `mapAsync` request for a buffer that has pending
///    writes.
///
/// Important: when locking `pending_writes`, make sure the trackers are
/// not already locked, and hold any tracker lock for the shortest span
/// possible.
///
/// All uses of [`StagingBuffer`]s end up here.
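///
/// A rough sketch of the internal flow (illustrative only; the variable
/// names are placeholders, not literal calling code):
///
/// ```ignore
/// let mut pending_writes = queue.pending_writes.lock();
/// // Begin recording on the shared encoder if it isn't already recording.
/// let encoder = pending_writes.activate();
/// unsafe { encoder.copy_buffer_to_buffer(staging.raw(), dst_raw, &[region]) };
/// // Keep the destination alive and schedule the staging buffer for cleanup.
/// pending_writes.insert_buffer(&dst_buffer);
/// pending_writes.consume(flushed_staging_buffer);
/// // Everything recorded here is flushed ahead of the next queue submission.
/// ```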
#[derive(Debug)]
pub(crate) struct PendingWrites {
    // The command encoder needs to be destroyed before any other resource in pending writes.
    pub command_encoder: Box<dyn hal::DynCommandEncoder>,

    /// True if `command_encoder` is in the "recording" state, as
    /// described in the docs for the [`wgpu_hal::CommandEncoder`]
    /// trait.
    ///
    /// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
    pub is_recording: bool,

    temp_resources: Vec<TempResource>,
    dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
    copied_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
    instance_flags: wgt::InstanceFlags,
}

impl PendingWrites {
    pub fn new(
        command_encoder: Box<dyn hal::DynCommandEncoder>,
        instance_flags: wgt::InstanceFlags,
    ) -> Self {
        Self {
            command_encoder,
            is_recording: false,
            temp_resources: Vec::new(),
            dst_buffers: FastHashMap::default(),
            dst_textures: FastHashMap::default(),
            copied_blas_s: FastHashMap::default(),
            instance_flags,
        }
    }

    pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
        self.dst_buffers
            .insert(buffer.tracker_index(), buffer.clone());
    }

    pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
        self.dst_textures
            .insert(texture.tracker_index(), texture.clone());
    }

    pub fn insert_blas(&mut self, blas: &Arc<Blas>) {
        self.copied_blas_s
            .insert(blas.tracker_index(), blas.clone());
    }

    pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
        self.dst_buffers.contains_key(&buffer.tracker_index())
    }

    pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
        self.dst_textures.contains_key(&texture.tracker_index())
    }

    pub fn consume_temp(&mut self, resource: TempResource) {
        self.temp_resources.push(resource);
    }

    pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
        self.temp_resources
            .push(TempResource::StagingBuffer(buffer));
    }

    fn pre_submit(
        &mut self,
        command_allocator: &CommandAllocator,
        device: &Arc<Device>,
        queue: &Queue,
    ) -> Result<Option<EncoderInFlight>, DeviceError> {
        if self.is_recording {
            let pending_buffers = mem::take(&mut self.dst_buffers);
            let pending_textures = mem::take(&mut self.dst_textures);
            let pending_blas_s = mem::take(&mut self.copied_blas_s);

            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
                .map_err(|e| device.handle_hal_error(e))?;
            self.is_recording = false;

            let new_encoder = command_allocator
                .acquire_encoder(device.raw(), queue.raw())
                .map_err(|e| device.handle_hal_error(e))?;

            let encoder = EncoderInFlight {
                inner: crate::command::InnerCommandEncoder {
                    raw: ManuallyDrop::new(mem::replace(&mut self.command_encoder, new_encoder)),
                    list: vec![cmd_buf],
                    device: device.clone(),
                    is_open: false,
                    api: crate::command::EncodingApi::InternalUse,
                    label: "(wgpu internal) PendingWrites command encoder".into(),
                },
                trackers: Tracker::new(device.ordered_buffer_usages, device.ordered_texture_usages),
                temp_resources: mem::take(&mut self.temp_resources),
                _indirect_draw_validation_resources: crate::indirect_validation::DrawResources::new(
                    device.clone(),
                ),
                pending_buffers,
                pending_textures,
                pending_blas_s,
            };
            Ok(Some(encoder))
        } else {
            self.dst_buffers.clear();
            self.dst_textures.clear();
            self.copied_blas_s.clear();
            Ok(None)
        }
    }

    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
        if !self.is_recording {
            unsafe {
                self.command_encoder
                    .begin_encoding(hal_label(
                        Some("(wgpu internal) PendingWrites"),
                        self.instance_flags,
                    ))
                    .unwrap();
            }
            self.is_recording = true;
        }
        self.command_encoder.as_mut()
    }
}

impl Drop for PendingWrites {
    fn drop(&mut self) {
        unsafe {
            if self.is_recording {
                self.command_encoder.discard_encoding();
            }
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
    #[error(transparent)]
    MemoryInitFailure(#[from] ClearError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
}

impl WebGpuError for QueueWriteError {
    fn webgpu_error_type(&self) -> ErrorType {
        match self {
            Self::Queue(e) => e.webgpu_error_type(),
            Self::Transfer(e) => e.webgpu_error_type(),
            Self::MemoryInitFailure(e) => e.webgpu_error_type(),
            Self::DestroyedResource(e) => e.webgpu_error_type(),
            Self::InvalidResource(e) => e.webgpu_error_type(),
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error("{0} is still mapped")]
    BufferStillMapped(ResourceErrorIdent),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
    #[error(transparent)]
    CommandEncoder(#[from] CommandEncoderError),
    #[error(transparent)]
    ValidateAsActionsError(#[from] crate::ray_tracing::ValidateAsActionsError),
}

impl WebGpuError for QueueSubmitError {
    fn webgpu_error_type(&self) -> ErrorType {
        match self {
            Self::Queue(e) => e.webgpu_error_type(),
            Self::CommandEncoder(e) => e.webgpu_error_type(),
            Self::ValidateAsActionsError(e) => e.webgpu_error_type(),
            Self::InvalidResource(e) => e.webgpu_error_type(),
            Self::DestroyedResource(_) | Self::BufferStillMapped(_) => ErrorType::Validation,
        }
    }
}

/// A partially-assembled submission.
///
/// Returned from [`Queue::allocate_submission`] and consumed by [`submit`].
/// These are internal APIs used in `Queue::submit` and other places within
/// `wgpu-core` that need to submit work.
///
/// [`submit`]: `PendingSubmission::submit`
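///
/// A minimal sketch of the intended flow, inferred from its uses in this
/// module (illustrative, not literal calling code):
///
/// ```ignore
/// let snatch_guard = device.snatchable_lock.read();
/// let (submission, index) = queue.allocate_submission(snatch_guard);
/// // ... push `EncoderInFlight` values onto `submission.executions` ...
/// let pending_writes = queue.pending_writes.lock();
/// submission.submit(pending_writes)?;
/// ```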
pub(crate) struct PendingSubmission<'a> {
    queue: &'a Queue,
    snatch_guard: SnatchGuard<'a>,
    fence: FenceWriteGuard<'a>,
    command_index_guard: RwLockWriteGuard<'a, CommandIndices>,
    // Command buffers to be executed, along with trackers for the resources they use.
    pub executions: Vec<EncoderInFlight>,
    // Surface textures referenced by command buffers in this submission. These need to be
    // passed to the HAL `submit` call. Deduplicated via a hashmap to avoid Vulkan
    // deadlocks when the same surface texture is submitted multiple times.
    pub surface_textures: FastHashMap<*const Texture, Arc<Texture>>,
    pub index: SubmissionIndex,
}

pub(crate) struct SubmissionResult<'a> {
    pub fence: FenceReadGuard<'a>,
    pub snatch_guard: SnatchGuard<'a>,
}

impl<'a> PendingSubmission<'a> {
    fn submit(
        self,
        pending_writes: MutexGuard<'a, PendingWrites>,
    ) -> Result<SubmissionResult<'a>, DeviceError> {
        self.queue.submit_pending_submission(pending_writes, self)
    }
}

// TODO: factor out the common parts of the write_* functions.

impl Queue {
    pub fn write_buffer(
        &self,
        buffer: Arc<Buffer>,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_buffer");
        api_log!("Queue::write_buffer");

        self.device.check_is_valid()?;

        let data_size = data.len() as wgt::BufferAddress;

        self.same_device_as(buffer.as_ref())?;

        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
            data_size
        } else {
            // Even though a zero-length write is a no-op and no copy will occur,
            // we must still validate the operation. This ensures that invalid
            // API calls (such as writing to a mapped buffer or using an
            // out-of-bounds offset) are caught consistently, even when no data
            // is actually moved.
            self.validate_write_buffer_impl(buffer.as_ref(), buffer_offset, 0)?;

            log::trace!("Ignoring write_buffer of size 0");
            return Ok(());
        };

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;

        let staging_buffer = {
            profiling::scope!("copy");
            staging_buffer.write(data);
            staging_buffer.flush()
        };

        let snatch_guard = self.device.snatchable_lock.read();
        let mut pending_writes = self.pending_writes.lock();

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        drop(snatch_guard);

        pending_writes.consume(staging_buffer);

        drop(pending_writes);

        result
    }

    pub fn create_staging_buffer(
        &self,
        buffer_size: wgt::BufferSize,
    ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
        profiling::scope!("Queue::create_staging_buffer");
        resource_log!("Queue::create_staging_buffer");

        self.device.check_is_valid()?;

        let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
        let ptr = unsafe { staging_buffer.ptr() };

        Ok((staging_buffer, ptr))
    }

    pub fn write_staging_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        staging_buffer: StagingBuffer,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_staging_buffer");

        self.device.check_is_valid()?;

        let buffer = buffer.get()?;

        // At this point, we have taken ownership of the staging_buffer from the
        // user. Platform validation requires that the staging buffer always
        // be freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = staging_buffer.flush();

        let snatch_guard = self.device.snatchable_lock.read();
        let mut pending_writes = self.pending_writes.lock();

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        drop(snatch_guard);

        pending_writes.consume(staging_buffer);

        drop(pending_writes);

        result
    }

    pub fn validate_write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::validate_write_buffer");

        self.device.check_is_valid()?;

        let buffer = buffer.get()?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size.into())?;

        Ok(())
    }

    fn validate_write_buffer_impl(
        &self,
        buffer: &Buffer,
        buffer_offset: u64,
        buffer_size: u64,
    ) -> Result<(), TransferError> {
        if !matches!(&*buffer.map_state.lock(), BufferMapState::Idle) {
            return Err(TransferError::BufferNotAvailable);
        }
        buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
        if !buffer_size.is_multiple_of(wgt::COPY_BUFFER_ALIGNMENT) {
            return Err(TransferError::UnalignedCopySize(buffer_size));
        }
        if !buffer_offset.is_multiple_of(wgt::COPY_BUFFER_ALIGNMENT) {
            return Err(TransferError::UnalignedBufferOffset(buffer_offset));
        }

        if buffer_offset > buffer.size {
            return Err(TransferError::BufferStartOffsetOverrun {
                start_offset: buffer_offset,
                buffer_size: buffer.size,
                side: CopySide::Destination,
            });
        }
        if buffer_size > buffer.size - buffer_offset {
            return Err(TransferError::BufferEndOffsetOverrun {
                start_offset: buffer_offset,
                size: buffer_size,
                buffer_size: buffer.size,
                side: CopySide::Destination,
            });
        }

        Ok(())
    }

    fn write_staging_buffer_impl(
        &self,
        snatch_guard: &SnatchGuard,
        pending_writes: &mut PendingWrites,
        staging_buffer: &FlushedStagingBuffer,
        buffer: Arc<Buffer>,
        buffer_offset: u64,
    ) -> Result<(), QueueWriteError> {
        self.device.check_is_valid()?;

        let transition = {
            let mut trackers = self.device.trackers.lock();
            trackers
                .buffers
                .set_single(&buffer, wgt::BufferUses::COPY_DST)
        };

        let dst_raw = buffer.try_raw(snatch_guard)?;

        self.same_device_as(buffer.as_ref())?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size.into())?;

        let region = hal::BufferCopy {
            src_offset: 0,
            dst_offset: buffer_offset,
            size: staging_buffer.size,
        };
        let barriers = iter::once(hal::BufferBarrier {
            buffer: staging_buffer.raw(),
            usage: hal::StateTransition {
                from: wgt::BufferUses::MAP_WRITE,
                to: wgt::BufferUses::COPY_SRC,
            },
        })
        .chain(transition.map(|pending| pending.into_hal(&buffer, snatch_guard)))
        .collect::<Vec<_>>();
        let encoder = pending_writes.activate();
        unsafe {
            encoder.transition_buffers(&barriers);
            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
        }

        pending_writes.insert_buffer(&buffer);

        // Ensure the overwritten bytes are marked as initialized so
        // they don't need to be nulled prior to mapping or binding.
        {
            buffer
                .initialization_status
                .write()
                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
        }

        Ok(())
    }

    pub fn write_texture(
        &self,
        destination: wgt::TexelCopyTextureInfo<Arc<Texture>>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_texture");
        api_log!("Queue::write_texture");

        self.device.check_is_valid()?;

        let dst = destination.texture;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        self.same_device_as(dst.as_ref())?;

        dst.check_usage(wgt::TextureUsages::COPY_DST)
            .map_err(TransferError::MissingTextureUsage)?;

        // Note: doing the copy range validation early is important because it
        // ensures that the dimensions will not cause overflow in other parts of the validation.
        let (hal_copy_size, array_layer_count) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;

        validate_texture_copy_dst_format(dst.desc.format, destination.aspect)?;

        validate_texture_buffer_copy(
            &destination,
            dst_base.aspect,
            &dst.desc,
            data_layout,
            false, // alignment not required for buffer offset or bytes per row
        )?;

        // Note: `_source_bytes_per_array_layer` is ignored because we use a
        // staging copy, whose layout can differ.
        let (required_bytes_in_copy, _source_bytes_per_array_layer, _) =
            validate_linear_texture_data(
                data_layout,
                dst.desc.format,
                destination.aspect,
                data.len() as wgt::BufferAddress,
                CopySide::Source,
                size,
            )?;

        if dst.desc.format.is_depth_stencil_format() {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
                .map_err(TransferError::from)?;
        }

        let snatch_guard = self.device.snatchable_lock.read();

        let dst_raw = dst.try_raw(&snatch_guard)?;

        // This must happen after parameter validation (so that errors are reported
        // as required by the spec), but before any side effects.
        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first*, since we don't track partial texture layer initialization.
        //
        // Strictly speaking we only need to clear the areas of a layer the copy
        // leaves untouched, but that would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // Volume textures don't have a layer range, as array volumes aren't supported.
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &snatch_guard,
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let (block_width, block_height) = dst.desc.format.block_dimensions();
        let width_in_blocks = size.width / block_width;
        let height_in_blocks = size.height / block_height;

        let block_size = dst
            .desc
            .format
            .block_copy_size(Some(destination.aspect))
            .unwrap();
        let bytes_in_last_row = width_in_blocks * block_size;

        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);

        let bytes_per_row_alignment = get_lowest_common_denom(
            self.device.alignments.buffer_copy_pitch.get() as u32,
            block_size,
        );
        assert!(u32::MAX - bytes_in_last_row >= bytes_per_row_alignment);
        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);
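        // Illustrative arithmetic (an added example, not from the original
        // source): for a 100-texel-wide Rgba8Unorm copy (block_size = 4) with
        // a 256-byte buffer_copy_pitch, bytes_in_last_row is 400, the
        // alignment is lcm(256, 4) = 256, and the staged row stride becomes
        // align_to(400, 256) = 512 bytes per row.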

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
            profiling::scope!("copy aligned");
            // Fast path if the data is already optimally aligned.
            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            staging_buffer.write(&data[data_layout.offset as usize..]);
            staging_buffer
        } else {
            profiling::scope!("copy chunked");
            // Copy row by row into the optimal alignment.
            let block_rows_in_copy = u64::from(size.depth_or_array_layers - 1)
                * u64::from(rows_per_image)
                + u64::from(height_in_blocks);
            // The copy size was validated against the source buffer; however,
            // `stage_bytes_per_row` can differ, so be paranoid.
            let stage_size = u64::from(stage_bytes_per_row)
                .checked_mul(block_rows_in_copy)
                .and_then(wgt::BufferSize::new)
                .unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            for layer in 0..u64::from(size.depth_or_array_layers) {
                let rows_offset = layer * u64::from(rows_per_image);
                for row in rows_offset..rows_offset + u64::from(height_in_blocks) {
                    let src_offset = data_layout.offset + row * u64::from(bytes_per_row);
                    let dst_offset = row * u64::from(stage_bytes_per_row);
                    unsafe {
                        staging_buffer.write_with_offset(
                            data,
                            src_offset as isize,
                            dst_offset as isize,
                            bytes_in_last_row as usize,
                        )
                    }
                }
            }
            staging_buffer
        };

        let staging_buffer = staging_buffer.flush();

        let regions = (0..array_layer_count)
            .map(|array_layer_offset| {
                let mut texture_base = dst_base.clone();
                texture_base.array_layer += array_layer_offset;
                hal::BufferTextureCopy {
                    buffer_layout: wgt::TexelCopyBufferLayout {
                        offset: array_layer_offset as u64
                            * rows_per_image as u64
                            * stage_bytes_per_row as u64,
                        bytes_per_row: Some(stage_bytes_per_row),
                        rows_per_image: Some(rows_per_image),
                    },
                    texture_base,
                    size: hal_copy_size,
                }
            })
            .collect::<Vec<_>>();

        {
            let buffer_barrier = hal::BufferBarrier {
                buffer: staging_buffer.raw(),
                usage: hal::StateTransition {
                    from: wgt::BufferUses::MAP_WRITE,
                    to: wgt::BufferUses::COPY_SRC,
                },
            };

            let mut trackers = self.device.trackers.lock();
            let transition =
                trackers
                    .textures
                    .set_single(&dst, selector, wgt::TextureUses::COPY_DST);
            let texture_barriers = transition
                .map(|pending| pending.into_hal(dst_raw))
                .collect::<Vec<_>>();

            unsafe {
                encoder.transition_textures(&texture_barriers);
                encoder.transition_buffers(&[buffer_barrier]);
                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
            }
        }

        pending_writes.consume(staging_buffer);
        pending_writes.insert_texture(&dst);

        Ok(())
    }

    #[cfg(webgl)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: wgt::CopyExternalImageDestInfo<Fallible<Texture>>,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        use crate::conv;

        profiling::scope!("Queue::copy_external_image_to_texture");

        self.device.check_is_valid()?;

        let mut needs_flag = false;
        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
        needs_flag |= source.origin != wgt::Origin2d::ZERO;
        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
        #[allow(clippy::bool_comparison)]
        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
            needs_flag |= source.flip_y != false;
            needs_flag |= destination.premultiplied_alpha != false;
        }

        if needs_flag {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
                .map_err(TransferError::from)?;
        }

        let src_width = source.source.width();
        let src_height = source.source.height();

        let dst = destination.texture.get()?;
        let premultiplied_alpha = destination.premultiplied_alpha;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
            return Err(
                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
            );
        }
        if dst.desc.dimension != wgt::TextureDimension::D2 {
            return Err(TransferError::InvalidDimensionExternal.into());
        }
        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
            .map_err(TransferError::MissingTextureUsage)?;
        if dst.desc.sample_count != 1 {
            return Err(TransferError::InvalidSampleCount {
                sample_count: dst.desc.sample_count,
            }
            .into());
        }

        if source.origin.x > src_width || src_width - source.origin.x < size.width {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.x,
                end_offset: source.origin.x.saturating_add(size.width),
                texture_size: src_width,
                dimension: crate::resource::TextureErrorDimension::X,
                side: CopySide::Source,
            }
            .into());
        }
        if source.origin.y > src_height || src_height - source.origin.y < size.height {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.y,
                end_offset: source.origin.y.saturating_add(size.height),
                texture_size: src_height,
                dimension: crate::resource::TextureErrorDimension::Y,
                side: CopySide::Source,
            }
            .into());
        }
        if size.depth_or_array_layers != 1 {
            return Err(TransferError::TextureOverrun {
                start_offset: 0,
                end_offset: size.depth_or_array_layers,
                texture_size: 1,
                dimension: crate::resource::TextureErrorDimension::Z,
                side: CopySide::Source,
            }
            .into());
        }

        // Note: doing the copy range validation early is important because it
        // ensures that the dimensions will not cause overflow in other parts of the validation.
        let (hal_copy_size, _) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;

        // This must happen after parameter validation (so that errors are reported
        // as required by the spec), but before any side effects.
        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring copy_external_image_to_texture of size 0");
            return Ok(());
        }

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first*, since we don't track partial texture layer initialization.
        //
        // Strictly speaking we only need to clear the areas of a layer the copy
        // leaves untouched, but that would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // Volume textures don't have a layer range, as array volumes aren't supported.
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = dst.try_raw(&snatch_guard)?;

        let regions = hal::TextureCopy {
            src_base: hal::TextureCopyBase {
                mip_level: 0,
                array_layer: 0,
                origin: source.origin.to_3d(0),
                aspect: hal::FormatAspects::COLOR,
            },
            dst_base,
            size: hal_copy_size,
        };

        let mut trackers = self.device.trackers.lock();
        let transitions = trackers
            .textures
            .set_single(&dst, selector, wgt::TextureUses::COPY_DST);

        // `copy_external_image_to_texture` is exclusive to the WebGL backend.
        // Rather than going through the `DynCommandEncoder` abstraction, downcast and talk to the WebGL backend directly.
        let encoder_webgl = encoder
            .as_any_mut()
            .downcast_mut::<hal::gles::CommandEncoder>()
            .unwrap();
        let dst_raw_webgl = dst_raw
            .as_any()
            .downcast_ref::<hal::gles::Texture>()
            .unwrap();
        let transitions_webgl = transitions.map(|pending| {
            let dyn_transition = pending.into_hal(dst_raw);
            hal::TextureBarrier {
                texture: dst_raw_webgl,
                range: dyn_transition.range,
                usage: dyn_transition.usage,
            }
        });

        use hal::CommandEncoder as _;
        unsafe {
            encoder_webgl.transition_textures(transitions_webgl);
            encoder_webgl.copy_external_image_to_texture(
                source,
                dst_raw_webgl,
                premultiplied_alpha,
                iter::once(regions),
            );
        }

        Ok(())
    }

    /// Flushes `PendingWrites` if it contains a write to `buffer`.
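    ///
    /// A hedged sketch of how this is intended to be called (assumed from the
    /// buffer-mapping path; not literal calling code):
    ///
    /// ```ignore
    /// // Before fulfilling a map request, make sure any queued write to this
    /// // buffer has actually been submitted to the GPU.
    /// let snatch_guard = device.snatchable_lock.read();
    /// queue.flush_writes_for_buffer(&buffer, snatch_guard)?;
    /// ```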
    pub fn flush_writes_for_buffer(
        &self,
        buffer: &Arc<Buffer>,
        snatch_guard: SnatchGuard,
    ) -> Result<(), BufferAccessError> {
        let (submission, _index) = self.allocate_submission(snatch_guard);

        let pending_writes = self.pending_writes.lock();
        if !pending_writes.contains_buffer(buffer) {
            return Ok(());
        }

        submission.submit(pending_writes)?;

        Ok(())
    }

    fn flush_pending_writes(&self) -> Result<Option<SubmissionIndex>, DeviceError> {
        let snatch_guard = self.device.snatchable_lock.read();
        let (submission, submit_index) = self.allocate_submission(snatch_guard);
        let pending_writes = self.pending_writes.lock();
        if pending_writes.is_recording {
            submission.submit(pending_writes)?;
            Ok(Some(submit_index))
        } else {
            Ok(None)
        }
    }

    #[cfg(feature = "trace")]
    fn trace_submission(
        &self,
        submit_index: SubmissionIndex,
        commands: Vec<crate::command::Command<crate::command::PointerReferences>>,
    ) {
        if let Some(ref mut trace) = *self.device.trace.lock() {
            trace.add(Action::Submit(submit_index, commands));
        }
    }

    #[cfg(feature = "trace")]
    fn trace_failed_submission(
        &self,
        submit_index: SubmissionIndex,
        commands: Option<Vec<crate::command::Command<crate::command::PointerReferences>>>,
        error: alloc::string::String,
    ) {
        if let Some(ref mut trace) = *self.device.trace.lock() {
            trace.add(Action::FailedCommands {
                commands,
                failed_at_submit: Some(submit_index),
                error,
            });
        }
    }

    pub fn submit(
        &self,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        profiling::scope!("Queue::submit");
        api_log!("Queue::submit");

        let snatch_guard = self.device.snatchable_lock.read();
        let (mut submission, submit_index) = self.allocate_submission(snatch_guard);

        let res = 'error: {
            if let Err(e) = self.device.check_is_valid() {
                break 'error Err(e.into());
            }

            let mut used_surface_textures = track::TextureUsageScope::default();

            {
                if !command_buffers.is_empty() {
                    profiling::scope!("prepare");

                    let mut first_error = None;

                    // TODO: if multiple command buffers are submitted, we can re-use the last
                    // native command buffer of the previous chain instead of always creating
                    // a temporary one, since the chains are not finished.

                    // finish all the command buffers first
                    for command_buffer in command_buffers {
                        profiling::scope!("process command buffer");

                        // We reset `used_surface_textures` every time we use
                        // it, so make sure to call `set_size` on it first.
                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());

                        // Note that we are required to invalidate all command buffers in both the success and failure paths.
                        // This is why we `continue` rather than returning early via `?`.
                        #[allow(unused_mut)]
                        let mut cmd_buf_data = command_buffer.take_finished();

                        if first_error.is_some() {
                            continue;
                        }

                        #[cfg(feature = "trace")]
                        let trace_commands = cmd_buf_data
                            .as_mut()
                            .ok()
                            .and_then(|data| mem::take(&mut data.trace_commands));

                        let mut baked = match cmd_buf_data {
                            Ok(cmd_buf_data) => {
                                let res = validate_command_buffer(
                                    command_buffer,
                                    self,
                                    &cmd_buf_data,
                                    &submission.snatch_guard,
                                    &mut submission.surface_textures,
                                    &mut used_surface_textures,
                                    &mut submission.command_index_guard,
                                );
                                if let Err(err) = res {
                                    #[cfg(feature = "trace")]
                                    self.trace_failed_submission(
                                        submit_index,
                                        trace_commands,
                                        err.to_string(),
                                    );
                                    first_error.get_or_insert(err);
                                    continue;
                                }

                                #[cfg(feature = "trace")]
                                if let Some(commands) = trace_commands {
                                    self.trace_submission(submit_index, commands);
                                }

                                cmd_buf_data.set_acceleration_structure_dependencies(
                                    &submission.snatch_guard,
                                );
                                cmd_buf_data.into_baked_commands()
                            }
                            Err(err) => {
                                #[cfg(feature = "trace")]
                                self.trace_failed_submission(
                                    submit_index,
                                    trace_commands,
                                    err.to_string(),
                                );
                                first_error.get_or_insert(err.into());
                                continue;
                            }
                        };

                        // execute resource transitions
                        if let Err(e) = baked.encoder.open_pass(hal_label(
                            Some("(wgpu internal) Transit"),
                            self.device.instance_flags,
                        )) {
                            break 'error Err(e.into());
                        }

1409                        //Note: locking the trackers has to be done after the storages
1410                        let mut trackers = self.device.trackers.lock();
1411                        if let Err(e) =
1412                            baked.initialize_buffer_memory(&mut trackers, &submission.snatch_guard)
1413                        {
1414                            break 'error Err(e.into());
1415                        }
1416                        if let Err(e) = baked.initialize_texture_memory(
1417                            &mut trackers,
1418                            &self.device,
1419                            &submission.snatch_guard,
1420                        ) {
1421                            break 'error Err(e.into());
1422                        }
1423
1424                        //Note: stateless trackers are not merged:
1425                        // device already knows these resources exist.
1426                        CommandEncoder::insert_barriers_from_device_tracker(
1427                            baked.encoder.raw.as_mut(),
1428                            &mut trackers,
1429                            &baked.trackers,
1430                            &submission.snatch_guard,
1431                        );
1432
1433                        if let Err(e) = baked.encoder.close_and_push_front() {
1434                            break 'error Err(e.into());
1435                        }
1436
                        // Transition surface textures into `Present` state.
                        // Note: we could technically do this after all of the command buffers,
                        // but we have a command encoder at hand here, so it's easier to use it.
                        if !used_surface_textures.is_empty() {
                            if let Err(e) = baked.encoder.open_pass(hal_label(
                                Some("(wgpu internal) Present"),
                                self.device.instance_flags,
                            )) {
                                break 'error Err(e.into());
                            }
                            let texture_barriers = trackers
                                .textures
                                .set_from_usage_scope_and_drain_transitions(
                                    &used_surface_textures,
                                    &submission.snatch_guard,
                                )
                                .collect::<Vec<_>>();
                            unsafe {
                                baked.encoder.raw.transition_textures(&texture_barriers);
                            };
                            if let Err(e) = baked.encoder.close() {
                                break 'error Err(e.into());
                            }
                            used_surface_textures = track::TextureUsageScope::default();
                        }

                        // done
                        submission.executions.push(EncoderInFlight {
                            inner: baked.encoder,
                            trackers: baked.trackers,
                            temp_resources: baked.temp_resources,
                            _indirect_draw_validation_resources: baked
                                .indirect_draw_validation_resources,
                            pending_buffers: FastHashMap::default(),
                            pending_textures: FastHashMap::default(),
                            pending_blas_s: FastHashMap::default(),
                        });
                    }

                    if let Some(first_error) = first_error {
                        break 'error Err(first_error);
                    }
                }
            }

            let pending_writes = self.pending_writes.lock();

            let SubmissionResult {
                fence,
                snatch_guard,
            } = match submission.submit(pending_writes) {
                Ok(result) => result,
                Err(e) => break 'error Err(e.into()),
            };

            profiling::scope!("cleanup");

            // This will schedule destruction of all resources that are no longer needed
            // by the user but used in the command stream, among other things.
            // `device.maintain` consumes and will release the snatch guard.
            let (closures, result) = self
                .device
                .maintain(fence, wgt::PollType::Poll, snatch_guard);
            match result {
                Ok(status) => {
                    debug_assert!(matches!(
                        status,
                        wgt::PollStatus::QueueEmpty | wgt::PollStatus::Poll
                    ));
                }
                Err(WaitIdleError::Device(err)) => break 'error Err(QueueSubmitError::Queue(err)),
                Err(WaitIdleError::WrongSubmissionIndex(..)) => {
                    unreachable!("Cannot get WrongSubmissionIndex from Poll")
                }
                Err(WaitIdleError::Timeout) => unreachable!("Cannot get Timeout from Poll"),
            };

            Ok(closures)
        };

        let callbacks = match res {
            Ok(ok) => ok,
            Err(e) => return Err((submit_index, e)),
        };

        // the closures should execute with nothing locked!
        callbacks.fire();

        self.device.lose_if_oom();

        api_log!("Queue::submit returned submit index {submit_index}");

        Ok(submit_index)
    }

    /// Allocate a submission index and prepare for a submission.
    ///
    /// This is an internal API used in [`Queue::submit`] and other places within
    /// `wgpu-core` that need to submit work.
    ///
    /// Returns the index and a [`PendingSubmission`].
    ///
    /// The caller passes in the already-acquired [`SnatchGuard`]. This function acquires
    /// the fence lock and the command index lock.
    ///
    /// The caller should update the [`PendingSubmission`] members `executions` and
    /// `surface_textures` with details of the submission.
    ///
    /// To finalize and submit the submission, call [`PendingSubmission::submit`] (which is
    /// a convenience wrapper around [`Queue::submit_pending_submission`]).
    ///
    /// After calling this function and before submitting, the caller must acquire the
    /// pending writes lock, and pass it to `submit`.
    ///
    /// It is also acceptable to drop the `PendingSubmission` without submitting. This may
    /// be necessary when locks are required to access the state that determines whether a
    /// submission is needed.
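    ///
    /// A minimal usage sketch (illustrative only, not compiled as a doc test;
    /// `queue`, `device`, and `encoder_in_flight` are placeholder bindings, the
    /// last standing in for an already-prepared [`EncoderInFlight`]):
    ///
    /// ```ignore
    /// let snatch_guard = device.snatchable_lock.read();
    /// let (mut submission, index) = queue.allocate_submission(snatch_guard);
    /// submission.executions.push(encoder_in_flight);
    /// // Acquire the pending writes lock, then finalize via the convenience wrapper.
    /// let pending_writes = queue.pending_writes.lock();
    /// let result = submission.submit(pending_writes)?;
    /// ```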
    fn allocate_submission<'a>(
        &'a self,
        snatch_guard: SnatchGuard<'a>,
    ) -> (PendingSubmission<'a>, SubmissionIndex) {
        // Lock ordering requires that the fence lock be acquired after the snatch lock and
        // before the command index lock.
        let fence = self.device.fence.write();

        let mut command_index_guard = self.device.command_indices.write();
        command_index_guard.active_submission_index += 1;
        let index = command_index_guard.active_submission_index;

        let submission = PendingSubmission {
            queue: self,
            snatch_guard,
            fence,
            command_index_guard,
            executions: Vec::new(),
            surface_textures: FastHashMap::default(),
            index,
        };

        (submission, index)
    }

    /// Finalize and submit a [`PendingSubmission`] that was returned by
    /// [`Queue::allocate_submission`].
    ///
    /// This is an internal API used in `Queue::submit` and other places within
    /// `wgpu-core` that need to submit work. See [`Queue::allocate_submission`]
    /// for more details.
    ///
    /// This function:
    ///
    /// - Performs a HAL submission of the pending writes command
    ///   encoder and any other command encoders that were added to the
    ///   [`PendingSubmission`].
    /// - Advances `last_successful_submission_index` and registers the
    ///   submission with the lifetime tracker.
    /// - Returns a [`SubmissionResult`], which contains the snatch guard
    ///   and a downgraded [`FenceReadGuard`].
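    ///
    /// A sketch of consuming the returned [`SubmissionResult`] (illustrative
    /// only, not compiled as a doc test; `queue`, `device`, `pending_writes`,
    /// and `prepared` are placeholder bindings, and the cleanup mirrors what
    /// [`Queue::submit`] does):
    ///
    /// ```ignore
    /// let SubmissionResult { fence, snatch_guard } =
    ///     queue.submit_pending_submission(pending_writes, prepared)?;
    /// // `maintain` consumes the fence and snatch guards and schedules
    /// // destruction of resources that are no longer needed.
    /// let (closures, _result) = device.maintain(fence, wgt::PollType::Poll, snatch_guard);
    /// ```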
    fn submit_pending_submission<'a>(
        &self,
        mut pending_writes: MutexGuard<'_, PendingWrites>,
        prepared: PendingSubmission<'a>,
    ) -> Result<SubmissionResult<'a>, DeviceError> {
        let PendingSubmission {
            queue: _,
            snatch_guard,
            mut fence,
            command_index_guard,
            mut executions,
            mut surface_textures,
            index: submit_index,
        } = prepared;

        let mut used_surface_textures = track::TextureUsageScope::default();
        used_surface_textures.set_size(self.device.tracker_indices.textures.size());
        for texture in pending_writes.dst_textures.values() {
            match texture.try_inner(&snatch_guard) {
                Ok(TextureInner::Native { .. }) => {}
                Ok(TextureInner::Surface { .. }) => {
                    // Compare the Arcs by pointer as Textures don't implement Eq
                    surface_textures.insert(Arc::as_ptr(texture), texture.clone());

                    unsafe {
                        used_surface_textures
                            .merge_single(texture, None, wgt::TextureUses::PRESENT)
                            .unwrap()
                    };
                }
                // The texture must not have been destroyed when its usage here was
                // encoded. If it was destroyed after that, then it was transferred
                // to `pending_writes.temp_resources` at the time of destruction, so
                // we are still okay to use it.
                Err(DestroyedResourceError(_)) => {}
            }
        }

        if !used_surface_textures.is_empty() {
            let mut trackers = self.device.trackers.lock();

            let texture_barriers = trackers
                .textures
                .set_from_usage_scope_and_drain_transitions(&used_surface_textures, &snatch_guard)
                .collect::<Vec<_>>();
            unsafe {
                pending_writes
                    .command_encoder
                    .transition_textures(&texture_barriers);
            };
        }

        match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
            Ok(Some(pending_execution)) => {
                executions.insert(0, pending_execution);
            }
            Ok(None) => {}
            Err(e) => return Err(e),
        }
        let hal_command_buffers = executions
            .iter()
            .flat_map(|e| e.inner.list.iter().map(|b| b.as_ref()))
            .collect::<Vec<_>>();

        {
            let mut submit_surface_textures =
                SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(surface_textures.len());
            for texture in surface_textures.values() {
                let raw = match texture.inner.get(&snatch_guard) {
                    Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
                    _ => unreachable!(),
                };
                submit_surface_textures.push(raw);
            }

            unsafe {
                self.raw().submit(
                    &hal_command_buffers,
                    &submit_surface_textures,
                    (fence.as_mut(), submit_index),
                )
            }
            .map_err(|e| self.device.handle_hal_error(e))?;

            // Submissions must have strictly increasing indices, so we must hold the
            // command index guard until we have submitted, to prevent another submission
            // from claiming the next index and reaching `submit` before we do.
            drop(pending_writes);
            drop(command_index_guard);

            // Advance the successful submission index.
            self.device
                .last_successful_submission_index
                .fetch_max(submit_index, Ordering::SeqCst);
        }

        // This will register the new submission with the lifetime tracker.
        self.lock_life().track_submission(submit_index, executions);

        Ok(SubmissionResult {
            fence: RwLockWriteGuard::downgrade(fence),
            snatch_guard,
        })
    }

    pub fn get_timestamp_period(&self) -> f32 {
        unsafe { self.raw().get_timestamp_period() }
    }

    /// `closure` is guaranteed to be called.
    pub fn on_submitted_work_done(
        &self,
        closure: SubmittedWorkDoneClosure,
    ) -> Option<SubmissionIndex> {
        api_log!("Queue::on_submitted_work_done");

        // A `DeviceError` means we're losing the device anyway, so we can ignore it here
        // (mostly to avoid a breaking change to the `on_submitted_work_done` signature
        // for an error case that the caller is unlikely to be able to handle).
        let _: Result<_, DeviceError> = self.flush_pending_writes();

        self.lock_life().add_work_done_closure(closure)
    }

    pub fn compact_blas(&self, blas: &Arc<Blas>) -> Result<Arc<Blas>, CompactBlasError> {
        profiling::scope!("Queue::compact_blas");
        api_log!("Queue::compact_blas");

        let new_label = blas.label.clone() + " (compacted)";

        self.device.check_is_valid()?;
        self.same_device_as(blas.as_ref())?;

        let device = blas.device.clone();

        let snatch_guard = device.snatchable_lock.read();

        let BlasCompactState::Ready { size } = *blas.compacted_state.lock() else {
            return Err(CompactBlasError::BlasNotReady);
        };

        let mut size_info = blas.size_info;
        size_info.acceleration_structure_size = size;

        let mut pending_writes = self.pending_writes.lock();
        let cmd_buf_raw = pending_writes.activate();

        // Create the destination acceleration structure at the compacted size.
        let raw = unsafe {
            device
                .raw()
                .create_acceleration_structure(&hal::AccelerationStructureDescriptor {
                    label: hal_label(Some(&new_label), device.instance_flags),
                    size: size_info.acceleration_structure_size,
                    format: hal::AccelerationStructureFormat::BottomLevel,
                    allow_compaction: false,
                })
        }
        .map_err(DeviceError::from_hal)?;

        let src_raw = blas.try_raw(&snatch_guard)?;

        // Record the compacting copy into the pending-writes encoder; it will be
        // executed as part of the next queue submission.
        unsafe {
            cmd_buf_raw.copy_acceleration_structure_to_acceleration_structure(
                src_raw,
                raw.as_ref(),
                wgt::AccelerationStructureCopy::Compact,
            )
        };

        let handle = unsafe {
            device
                .raw()
                .get_acceleration_structure_device_address(raw.as_ref())
        };

        drop(snatch_guard);

        let mut command_indices_lock = device.command_indices.write();
        command_indices_lock.next_acceleration_structure_build_command_index += 1;
        let built_index =
            NonZeroU64::new(command_indices_lock.next_acceleration_structure_build_command_index)
                .unwrap();

        let new_blas = Arc::new(Blas {
            raw: Snatchable::new(raw),
            device: device.clone(),
            size_info,
            sizes: blas.sizes.clone(),
            flags: blas.flags & !AccelerationStructureFlags::ALLOW_COMPACTION,
            update_mode: blas.update_mode,
            // Bypass the submit checks that would normally update this, since this
            // compacted BLAS isn't built through a normal submission.
            built_index: RwLock::new(rank::BLAS_BUILT_INDEX, Some(built_index)),
            handle,
            label: new_label,
            tracking_data: TrackingData::new(blas.device.tracker_indices.blas_s.clone()),
            compaction_buffer: None,
            compacted_state: Mutex::new(rank::BLAS_COMPACTION_STATE, BlasCompactState::Compacted),
        });

        // Register both the source and the compacted BLAS with the pending writes
        // so they stay alive until this work has been submitted.
        pending_writes.insert_blas(blas);
        pending_writes.insert_blas(&new_blas);

        Ok(new_blas)
    }
}

impl Global {
    pub fn queue_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id).get()?;

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            use crate::device::trace::DataKind;
            let size = data.len() as u64;
            let data = trace.make_binary(DataKind::Bin, data);
            trace.add(Action::WriteBuffer {
                id: buffer.to_trace(),
                data,
                offset: buffer_offset,
                size,
                queued: true,
            });
        }

        queue.write_buffer(buffer, buffer_offset, data)
    }

    pub fn queue_create_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_size: wgt::BufferSize,
        id_in: Option<id::StagingBufferId>,
    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;

        let fid = self.hub.staging_buffers.prepare(id_in);
        let id = fid.assign(staging_buffer);

        Ok((id, ptr))
    }

    pub fn queue_write_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        staging_buffer_id: id::StagingBufferId,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
    }

    pub fn queue_validate_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
    }

    pub fn queue_write_texture(
        &self,
        queue_id: QueueId,
        destination: &wgt::TexelCopyTextureInfo<id::TextureId>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let texture = self.hub.textures.get(destination.texture).get()?;
        let destination = wgt::TexelCopyTextureInfo {
            texture,
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            use crate::device::trace::DataKind;
            let data = trace.make_binary(DataKind::Bin, data);
            trace.add(Action::WriteTexture {
                to: destination.to_trace(),
                data,
                layout: *data_layout,
                size: *size,
            });
        }

        queue.write_texture(destination, data, data_layout, size)
    }

    #[cfg(webgl)]
    pub fn queue_copy_external_image_to_texture(
        &self,
        queue_id: QueueId,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: crate::command::CopyExternalImageDestInfo,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let destination = wgt::CopyExternalImageDestInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
            color_space: destination.color_space,
            premultiplied_alpha: destination.premultiplied_alpha,
        };
        queue.copy_external_image_to_texture(source, destination, size)
    }

    pub fn queue_submit(
        &self,
        queue_id: QueueId,
        command_buffer_ids: &[id::CommandBufferId],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        let queue = self.hub.queues.get(queue_id);
        let command_buffer_guard = self.hub.command_buffers.read();
        let command_buffers = command_buffer_ids
            .iter()
            .map(|id| command_buffer_guard.get(*id))
            .collect::<Vec<_>>();
        drop(command_buffer_guard);
        queue.submit(&command_buffers)
    }

    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
        let queue = self.hub.queues.get(queue_id);

        if queue.device.timestamp_normalizer.get().unwrap().enabled() {
            return 1.0;
        }

        queue.get_timestamp_period()
    }

    pub fn queue_on_submitted_work_done(
        &self,
        queue_id: QueueId,
        closure: SubmittedWorkDoneClosure,
    ) -> SubmissionIndex {
        api_log!("Queue::on_submitted_work_done {queue_id:?}");

        let queue = self.hub.queues.get(queue_id);
        let result = queue.on_submitted_work_done(closure);
        result.unwrap_or(0) // '0' means no wait is necessary
    }

    pub fn queue_compact_blas(
        &self,
        queue_id: QueueId,
        blas_id: BlasId,
        id_in: Option<BlasId>,
    ) -> (BlasId, Option<u64>, Option<CompactBlasError>) {
        api_log!("Queue::compact_blas {queue_id:?}, {blas_id:?}");

        let fid = self.hub.blas_s.prepare(id_in);

        let queue = self.hub.queues.get(queue_id);
        let blas = self.hub.blas_s.get(blas_id);
        let device = &queue.device;

        // TODO: Tracing

        let error = 'error: {
            match device.require_features(wgpu_types::Features::EXPERIMENTAL_RAY_QUERY) {
                Ok(_) => {}
                Err(err) => break 'error err.into(),
            }

            let blas = match blas.get() {
                Ok(blas) => blas,
                Err(err) => break 'error err.into(),
            };

            let new_blas = match queue.compact_blas(&blas) {
                Ok(blas) => blas,
                Err(err) => break 'error err,
            };

            // We should have no more errors after this because we have marked the command encoder as successful.
            let old_blas_size = blas.size_info.acceleration_structure_size;
            let new_blas_size = new_blas.size_info.acceleration_structure_size;
            let handle = new_blas.handle;

            let id = fid.assign(Fallible::Valid(new_blas));

            api_log!("CommandEncoder::compact_blas {blas_id:?} (size: {old_blas_size}) -> {id:?} (size: {new_blas_size})");

            return (id, Some(handle), None);
        };

        let id = fid.assign(Fallible::Invalid(Arc::new(error.to_string())));

        (id, None, Some(error))
    }
}

fn validate_command_buffer(
    command_buffer: &CommandBuffer,
    queue: &Queue,
    cmd_buf_data: &crate::command::CommandBufferMutable,
    snatch_guard: &SnatchGuard,
    surface_textures: &mut FastHashMap<*const Texture, Arc<Texture>>,
    used_surface_textures: &mut track::TextureUsageScope,
    command_index_guard: &mut RwLockWriteGuard<CommandIndices>,
) -> Result<(), QueueSubmitError> {
    command_buffer.same_device_as(queue)?;

    {
        profiling::scope!("check resource state");

        {
            profiling::scope!("buffers");
            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
                buffer.check_destroyed(snatch_guard)?;

                match *buffer.map_state.lock() {
                    BufferMapState::Idle => (),
                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
                }
            }
        }
        {
            profiling::scope!("textures");
            for texture in cmd_buf_data.trackers.textures.used_resources() {
                let should_extend = match texture.try_inner(snatch_guard)? {
                    TextureInner::Native { .. } => false,
                    TextureInner::Surface { .. } => {
                        // Compare the Arcs by pointer as Textures don't implement Eq.
                        surface_textures.insert(Arc::as_ptr(texture), texture.clone());

                        true
                    }
                };
                if should_extend {
                    unsafe {
                        used_surface_textures
                            .merge_single(texture, None, wgt::TextureUses::PRESENT)
                            .unwrap();
                    };
                }
            }
        }
        // WebGPU requires that we check every bind group referenced during
        // encoding, even ones that may have been replaced before being used.
        // TODO(<https://github.com/gfx-rs/wgpu/issues/8510>): Optimize this.
        {
            profiling::scope!("bind groups");
            for bind_group in &cmd_buf_data.trackers.bind_groups {
                // This checks the bind group and all resources it references.
                bind_group.try_raw(snatch_guard)?;
            }
        }

        if let Err(e) =
            cmd_buf_data.validate_acceleration_structure_actions(snatch_guard, command_index_guard)
        {
            return Err(e.into());
        }
    }
    Ok(())
}