wgpu_core/device/queue.rs

use alloc::{boxed::Box, string::ToString, sync::Arc, vec, vec::Vec};
use core::{
    iter,
    mem::{self, ManuallyDrop},
    num::NonZeroU64,
    ptr::NonNull,
    sync::atomic::Ordering,
};
use smallvec::SmallVec;
use thiserror::Error;
use wgt::{
    error::{ErrorType, WebGpuError},
    AccelerationStructureFlags,
};

use super::{life::LifetimeTracker, Device};
#[cfg(feature = "trace")]
use crate::device::trace::{Action, IntoTrace};
use crate::{
    api_log,
    command::{
        extract_texture_selector, validate_linear_texture_data, validate_texture_buffer_copy,
        validate_texture_copy_dst_format, validate_texture_copy_range, ClearError,
        CommandAllocator, CommandBuffer, CommandEncoder, CommandEncoderError, CopySide,
        TransferError,
    },
    device::{DeviceError, WaitIdleError},
    get_lowest_common_denom,
    global::Global,
    hal_label,
    id::{self, BlasId, QueueId},
    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
    lock::{rank, Mutex, MutexGuard, RwLock, RwLockWriteGuard},
    ray_tracing::{BlasCompactReadyPendingClosure, CompactBlasError},
    resource::{
        Blas, BlasCompactState, Buffer, BufferAccessError, BufferMapState, DestroyedBuffer,
        DestroyedResourceError, DestroyedTexture, Fallible, FlushedStagingBuffer,
        InvalidResourceError, Labeled, ParentDevice, ResourceErrorIdent, StagingBuffer, Texture,
        TextureInner, Trackable, TrackingData,
    },
    resource_log,
    scratch::ScratchBuffer,
    snatch::{SnatchGuard, Snatchable},
    track::{self, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};
use crate::{device::resource::CommandIndices, resource::RawResourceAccess};

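/// A device's queue: wraps the raw hal queue and owns the pending-writes
/// encoder and the lifetime tracker for in-flight submissions.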
pub struct Queue {
    raw: Box<dyn hal::DynQueue>,
    pub(crate) pending_writes: Mutex<PendingWrites>,
    life_tracker: Mutex<LifetimeTracker>,
    // The device needs to be dropped last (`Device.zero_buffer` might be referenced by the encoder in pending writes).
    pub(crate) device: Arc<Device>,
}

impl Queue {
    pub(crate) fn new(
        device: Arc<Device>,
        raw: Box<dyn hal::DynQueue>,
        instance_flags: wgt::InstanceFlags,
    ) -> Result<Self, DeviceError> {
        let pending_encoder = device
            .command_allocator
            .acquire_encoder(device.raw(), raw.as_ref())
            .map_err(DeviceError::from_hal)?;

        let mut pending_writes = PendingWrites::new(pending_encoder, instance_flags);

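        // Record one-time initialization commands for the device's zero buffer:
        // clear it and leave it in `COPY_SRC` state so later zero-initialization
        // copies can read from it.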
        let zero_buffer = device.zero_buffer.as_ref();
        pending_writes.activate();
        unsafe {
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::empty(),
                        to: wgt::BufferUses::COPY_DST,
                    },
                }]);
            pending_writes
                .command_encoder
                .clear_buffer(zero_buffer, 0..super::ZERO_BUFFER_SIZE);
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::COPY_DST,
                        to: wgt::BufferUses::COPY_SRC,
                    },
                }]);
        }

        Ok(Queue {
            raw,
            device,
            pending_writes: Mutex::new(rank::QUEUE_PENDING_WRITES, pending_writes),
            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
        })
    }

    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
        self.raw.as_ref()
    }

    #[track_caller]
    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
        self.life_tracker.lock()
    }

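    /// Triage work that has completed up to `submission_index`: collect the
    /// submitted-work-done, buffer-mapping, and BLAS-compaction closures that
    /// are now ready to fire, plus a flag indicating whether the queue is empty.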
    pub(crate) fn maintain(
        &self,
        submission_index: u64,
        snatch_guard: &SnatchGuard,
    ) -> (
        SmallVec<[SubmittedWorkDoneClosure; 1]>,
        Vec<super::BufferMapPendingClosure>,
        Vec<BlasCompactReadyPendingClosure>,
        bool,
    ) {
        let mut life_tracker = self.lock_life();
        let submission_closures = life_tracker.triage_submissions(submission_index);

        let mapping_closures = life_tracker.handle_mapping(snatch_guard);
        let blas_closures = life_tracker.handle_compact_read_back();

        let queue_empty = life_tracker.queue_empty();

        (
            submission_closures,
            mapping_closures,
            blas_closures,
            queue_empty,
        )
    }
}

crate::impl_resource_type!(Queue);
// TODO: https://github.com/gfx-rs/wgpu/issues/4014
impl Labeled for Queue {
    fn label(&self) -> &str {
        ""
    }
}
crate::impl_parent_device!(Queue);
crate::impl_storage_item!(Queue);

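// On drop, the queue waits for its last successful submission to complete so
// that resources still referenced by in-flight GPU work are not destroyed
// while the GPU may still be using them.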
impl Drop for Queue {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        let last_successful_submission_index = self
            .device
            .last_successful_submission_index
            .load(Ordering::Acquire);

        let fence = self.device.fence.read();

        // Try waiting on the last submission using the following sequence of timeouts
        let timeouts_in_ms = [100, 200, 400, 800, 1600, 3200];

        for (i, timeout_ms) in timeouts_in_ms.into_iter().enumerate() {
            let is_last_iter = i == timeouts_in_ms.len() - 1;

            api_log!(
                "Waiting on last submission. try: {}/{}. timeout: {}ms",
                i + 1,
                timeouts_in_ms.len(),
                timeout_ms
            );

            let wait_res = unsafe {
                self.device.raw().wait(
                    fence.as_ref(),
                    last_successful_submission_index,
                    #[cfg(not(target_arch = "wasm32"))]
                    Some(core::time::Duration::from_millis(timeout_ms)),
                    #[cfg(target_arch = "wasm32")]
                    Some(core::time::Duration::ZERO), // WebKit and Chromium don't support a non-0 timeout
                )
            };
            // Note: If we don't panic below we are in UB land (destroying resources while they are still in use by the GPU).
            match wait_res {
                Ok(true) => break,
                Ok(false) => {
                    // It's fine that we timed out on WebGL; GL objects can be deleted early as they
                    // will be kept around by the driver if GPU work hasn't finished.
                    // Moreover, the way we emulate read mappings on WebGL allows us to execute map_buffer earlier than on other
                    // backends since getBufferSubData is synchronous with respect to the other previously enqueued GL commands.
                    // Relying on this behavior breaks the clean abstraction wgpu-hal tries to maintain and
                    // we should find ways to improve this. See https://github.com/gfx-rs/wgpu/issues/6538.
                    #[cfg(target_arch = "wasm32")]
                    {
                        break;
                    }
                    #[cfg(not(target_arch = "wasm32"))]
                    {
                        if is_last_iter {
                            panic!(
                                "We timed out while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                }
                Err(e) => match e {
                    hal::DeviceError::OutOfMemory => {
                        if is_last_iter {
                            panic!(
                                "We ran into an OOM error while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                    hal::DeviceError::Lost => {
                        self.device.handle_hal_error(e); // will lose the device
                        break;
                    }
                    hal::DeviceError::Unexpected => {
                        panic!(
                            "We ran into an unexpected error while waiting on the last successful submission to complete!"
                        );
                    }
                },
            }
        }
        drop(fence);

        let snatch_guard = self.device.snatchable_lock.read();
        let (submission_closures, mapping_closures, blas_compact_ready_closures, queue_empty) =
            self.maintain(last_successful_submission_index, &snatch_guard);
        drop(snatch_guard);

        assert!(queue_empty);

        let closures = crate::device::UserClosures {
            mappings: mapping_closures,
            blas_compact_ready: blas_compact_ready_closures,
            submissions: submission_closures,
            device_lost_invocations: SmallVec::new(),
        };

        closures.fire();
    }
}

#[cfg(send_sync)]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + 'static>;

/// A texture or buffer to be freed soon.
///
/// This is just a tagged raw texture or buffer, generally about to be added to
/// some other more specific container like:
///
/// - `PendingWrites::temp_resources`: resources used by queue writes and
///   unmaps, waiting to be folded in with the next queue submission
///
/// - `ActiveSubmission::temp_resources`: temporary resources used by a queue
///   submission, to be freed when it completes
#[derive(Debug)]
pub enum TempResource {
    StagingBuffer(FlushedStagingBuffer),
    ScratchBuffer(ScratchBuffer),
    DestroyedBuffer(DestroyedBuffer),
    DestroyedTexture(DestroyedTexture),
}

/// A series of raw [`CommandBuffer`]s that have been submitted to a
/// queue, and the [`wgpu_hal::CommandEncoder`] that built them.
///
/// [`CommandBuffer`]: hal::Api::CommandBuffer
/// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
pub(crate) struct EncoderInFlight {
    inner: crate::command::InnerCommandEncoder,
    pub(crate) trackers: Tracker,
    pub(crate) temp_resources: Vec<TempResource>,
    /// We only need to keep these resources alive.
    _indirect_draw_validation_resources: crate::indirect_validation::DrawResources,

    /// These are the buffers that have been tracked by `PendingWrites`.
    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    /// These are the textures that have been tracked by `PendingWrites`.
    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
    /// These are the BLASes that have been tracked by `PendingWrites`.
    pub(crate) pending_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
}

/// A private command encoder for writes made directly on the device
/// or queue.
///
/// Operations like `buffer_unmap`, `queue_write_buffer`, and
/// `queue_write_texture` need to copy data to the GPU. At the hal
/// level, this must be done by encoding and submitting commands, but
/// these operations are not associated with any specific wgpu command
/// buffer.
///
/// Instead, `Queue::pending_writes` owns one of these values, which
/// has its own hal command encoder and resource lists. The commands
/// accumulated here are automatically submitted to the queue the next
/// time the user submits a wgpu command buffer, ahead of the user's
/// commands.
///
/// Important: when locking `pending_writes`, make sure the device's trackers
/// are not already locked, and hold any tracker lock for the shortest possible
/// time.
///
/// All uses of [`StagingBuffer`]s end up here.
#[derive(Debug)]
pub(crate) struct PendingWrites {
    // The command encoder needs to be destroyed before any other resource in pending writes.
    pub command_encoder: Box<dyn hal::DynCommandEncoder>,

    /// True if `command_encoder` is in the "recording" state, as
    /// described in the docs for the [`wgpu_hal::CommandEncoder`]
    /// trait.
    ///
    /// [`wgpu_hal::CommandEncoder`]: hal::CommandEncoder
    pub is_recording: bool,

    temp_resources: Vec<TempResource>,
    dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
    copied_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
    instance_flags: wgt::InstanceFlags,
}

impl PendingWrites {
    pub fn new(
        command_encoder: Box<dyn hal::DynCommandEncoder>,
        instance_flags: wgt::InstanceFlags,
    ) -> Self {
        Self {
            command_encoder,
            is_recording: false,
            temp_resources: Vec::new(),
            dst_buffers: FastHashMap::default(),
            dst_textures: FastHashMap::default(),
            copied_blas_s: FastHashMap::default(),
            instance_flags,
        }
    }

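    // The following methods register the destination resources of pending
    // writes, both to keep them alive until the pending commands are submitted
    // and to let later operations detect that a resource has a write in flight.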
    pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
        self.dst_buffers
            .insert(buffer.tracker_index(), buffer.clone());
    }

    pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
        self.dst_textures
            .insert(texture.tracker_index(), texture.clone());
    }

    pub fn insert_blas(&mut self, blas: &Arc<Blas>) {
        self.copied_blas_s
            .insert(blas.tracker_index(), blas.clone());
    }

    pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
        self.dst_buffers.contains_key(&buffer.tracker_index())
    }

    pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
        self.dst_textures.contains_key(&texture.tracker_index())
    }

    pub fn consume_temp(&mut self, resource: TempResource) {
        self.temp_resources.push(resource);
    }

    pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
        self.temp_resources
            .push(TempResource::StagingBuffer(buffer));
    }

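    /// If anything was recorded, close the pending-writes encoder, swap in a
    /// freshly acquired one, and package the finished commands (plus their
    /// tracked resources) as an [`EncoderInFlight`] to be submitted ahead of
    /// the user's command buffers. Otherwise just clear the tracked destinations.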
    fn pre_submit(
        &mut self,
        command_allocator: &CommandAllocator,
        device: &Arc<Device>,
        queue: &Queue,
    ) -> Result<Option<EncoderInFlight>, DeviceError> {
        if self.is_recording {
            let pending_buffers = mem::take(&mut self.dst_buffers);
            let pending_textures = mem::take(&mut self.dst_textures);
            let pending_blas_s = mem::take(&mut self.copied_blas_s);

            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
                .map_err(|e| device.handle_hal_error(e))?;
            self.is_recording = false;

            let new_encoder = command_allocator
                .acquire_encoder(device.raw(), queue.raw())
                .map_err(|e| device.handle_hal_error(e))?;

            let encoder = EncoderInFlight {
                inner: crate::command::InnerCommandEncoder {
                    raw: ManuallyDrop::new(mem::replace(&mut self.command_encoder, new_encoder)),
                    list: vec![cmd_buf],
                    device: device.clone(),
                    is_open: false,
                    api: crate::command::EncodingApi::InternalUse,
                    label: "(wgpu internal) PendingWrites command encoder".into(),
                },
                trackers: Tracker::new(),
                temp_resources: mem::take(&mut self.temp_resources),
                _indirect_draw_validation_resources: crate::indirect_validation::DrawResources::new(
                    device.clone(),
                ),
                pending_buffers,
                pending_textures,
                pending_blas_s,
            };
            Ok(Some(encoder))
        } else {
            self.dst_buffers.clear();
            self.dst_textures.clear();
            self.copied_blas_s.clear();
            Ok(None)
        }
    }

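    /// Make sure the pending-writes encoder is recording and return it, so
    /// callers can record copy commands onto it.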
    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
        if !self.is_recording {
            unsafe {
                self.command_encoder
                    .begin_encoding(hal_label(
                        Some("(wgpu internal) PendingWrites"),
                        self.instance_flags,
                    ))
                    .unwrap();
            }
            self.is_recording = true;
        }
        self.command_encoder.as_mut()
    }
}

impl Drop for PendingWrites {
    fn drop(&mut self) {
        unsafe {
            if self.is_recording {
                self.command_encoder.discard_encoding();
            }
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
    #[error(transparent)]
    MemoryInitFailure(#[from] ClearError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
}

impl WebGpuError for QueueWriteError {
    fn webgpu_error_type(&self) -> ErrorType {
        let e: &dyn WebGpuError = match self {
            Self::Queue(e) => e,
            Self::Transfer(e) => e,
            Self::MemoryInitFailure(e) => e,
            Self::DestroyedResource(e) => e,
            Self::InvalidResource(e) => e,
        };
        e.webgpu_error_type()
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    Unmap(#[from] BufferAccessError),
    #[error("{0} is still mapped")]
    BufferStillMapped(ResourceErrorIdent),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
    #[error(transparent)]
    CommandEncoder(#[from] CommandEncoderError),
    #[error(transparent)]
    ValidateAsActionsError(#[from] crate::ray_tracing::ValidateAsActionsError),
}

impl WebGpuError for QueueSubmitError {
    fn webgpu_error_type(&self) -> ErrorType {
        let e: &dyn WebGpuError = match self {
            Self::Queue(e) => e,
            Self::Unmap(e) => e,
            Self::CommandEncoder(e) => e,
            Self::ValidateAsActionsError(e) => e,
            Self::InvalidResource(e) => e,
            Self::DestroyedResource(_) | Self::BufferStillMapped(_) => {
                return ErrorType::Validation
            }
        };
        e.webgpu_error_type()
    }
}

// TODO: factor out the common parts of the `write_*` methods.

impl Queue {
    pub fn write_buffer(
        &self,
        buffer: Arc<Buffer>,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_buffer");
        api_log!("Queue::write_buffer");

        self.device.check_is_valid()?;

        let data_size = data.len() as wgt::BufferAddress;

        self.same_device_as(buffer.as_ref())?;

        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
            data_size
        } else {
            // This must happen after parameter validation (so that errors are reported
            // as required by the spec), but before any side effects.
            log::trace!("Ignoring write_buffer of size 0");
            return Ok(());
        };

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;

        let staging_buffer = {
            profiling::scope!("copy");
            staging_buffer.write(data);
            staging_buffer.flush()
        };

        let snatch_guard = self.device.snatchable_lock.read();
        let mut pending_writes = self.pending_writes.lock();

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        drop(snatch_guard);

        pending_writes.consume(staging_buffer);

        drop(pending_writes);

        result
    }

    pub fn create_staging_buffer(
        &self,
        buffer_size: wgt::BufferSize,
    ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
        profiling::scope!("Queue::create_staging_buffer");
        resource_log!("Queue::create_staging_buffer");

        self.device.check_is_valid()?;

        let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
        let ptr = unsafe { staging_buffer.ptr() };

        Ok((staging_buffer, ptr))
    }

    pub fn write_staging_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        staging_buffer: StagingBuffer,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_staging_buffer");

        self.device.check_is_valid()?;

        let buffer = buffer.get()?;

        // At this point, we have taken ownership of the staging_buffer from the
        // user. Platform validation requires that the staging buffer always
        // be freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = staging_buffer.flush();

        let snatch_guard = self.device.snatchable_lock.read();
        let mut pending_writes = self.pending_writes.lock();

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        drop(snatch_guard);

        pending_writes.consume(staging_buffer);

        drop(pending_writes);

        result
    }

    pub fn validate_write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::validate_write_buffer");

        self.device.check_is_valid()?;

        let buffer = buffer.get()?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?;

        Ok(())
    }

    fn validate_write_buffer_impl(
        &self,
        buffer: &Buffer,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), TransferError> {
        if !matches!(&*buffer.map_state.lock(), BufferMapState::Idle) {
            return Err(TransferError::BufferNotAvailable);
        }
        buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
        if buffer_size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedCopySize(buffer_size.get()));
        }
        if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedBufferOffset(buffer_offset));
        }
        if buffer_offset + buffer_size.get() > buffer.size {
            return Err(TransferError::BufferOverrun {
                start_offset: buffer_offset,
                end_offset: buffer_offset + buffer_size.get(),
                buffer_size: buffer.size,
                side: CopySide::Destination,
            });
        }

        Ok(())
    }

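    /// Validate the destination buffer and record a copy from the flushed
    /// staging buffer into it on the pending-writes encoder, marking the
    /// overwritten range as initialized.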
    fn write_staging_buffer_impl(
        &self,
        snatch_guard: &SnatchGuard,
        pending_writes: &mut PendingWrites,
        staging_buffer: &FlushedStagingBuffer,
        buffer: Arc<Buffer>,
        buffer_offset: u64,
    ) -> Result<(), QueueWriteError> {
        self.device.check_is_valid()?;

        let transition = {
            let mut trackers = self.device.trackers.lock();
            trackers
                .buffers
                .set_single(&buffer, wgt::BufferUses::COPY_DST)
        };

        let dst_raw = buffer.try_raw(snatch_guard)?;

        self.same_device_as(buffer.as_ref())?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size)?;

        let region = hal::BufferCopy {
            src_offset: 0,
            dst_offset: buffer_offset,
            size: staging_buffer.size,
        };
        let barriers = iter::once(hal::BufferBarrier {
            buffer: staging_buffer.raw(),
            usage: hal::StateTransition {
                from: wgt::BufferUses::MAP_WRITE,
                to: wgt::BufferUses::COPY_SRC,
            },
        })
        .chain(transition.map(|pending| pending.into_hal(&buffer, snatch_guard)))
        .collect::<Vec<_>>();
        let encoder = pending_writes.activate();
        unsafe {
            encoder.transition_buffers(&barriers);
            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
        }

        pending_writes.insert_buffer(&buffer);

        // Ensure the overwritten bytes are marked as initialized so
        // they don't need to be nulled prior to mapping or binding.
        {
            buffer
                .initialization_status
                .write()
                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
        }

        Ok(())
    }

    pub fn write_texture(
        &self,
        destination: wgt::TexelCopyTextureInfo<Arc<Texture>>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_texture");
        api_log!("Queue::write_texture");

        self.device.check_is_valid()?;

        let dst = destination.texture;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        self.same_device_as(dst.as_ref())?;

        dst.check_usage(wgt::TextureUsages::COPY_DST)
            .map_err(TransferError::MissingTextureUsage)?;

        // Note: Doing the copy range validation early is important because it ensures that the
        // dimensions are not going to cause overflow in other parts of the validation.
        let (hal_copy_size, array_layer_count) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;

        validate_texture_copy_dst_format(dst.desc.format, destination.aspect)?;

        validate_texture_buffer_copy(
            &destination,
            dst_base.aspect,
            &dst.desc,
            data_layout,
            false, // alignment not required for buffer offset or bytes per row
        )?;

        // Note: `_source_bytes_per_array_layer` is ignored since we
        // have a staging copy, and it can have a different value.
        let (required_bytes_in_copy, _source_bytes_per_array_layer, _) =
            validate_linear_texture_data(
                data_layout,
                dst.desc.format,
                destination.aspect,
                data.len() as wgt::BufferAddress,
                CopySide::Source,
                size,
            )?;

        if dst.desc.format.is_depth_stencil_format() {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
                .map_err(TransferError::from)?;
        }

        let snatch_guard = self.device.snatchable_lock.read();

        let dst_raw = dst.try_raw(&snatch_guard)?;

        // This must happen after parameter validation (so that errors are reported
        // as required by the spec), but before any side effects.
        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first* as we don't keep track of partial texture layer inits.
        //
        // Strictly speaking we only need to clear the areas of a layer left
        // untouched by the copy, but that would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // volume textures don't have a layer range as array volumes aren't supported
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &snatch_guard,
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let (block_width, block_height) = dst.desc.format.block_dimensions();
        let width_in_blocks = size.width / block_width;
        let height_in_blocks = size.height / block_height;

        let block_size = dst
            .desc
            .format
            .block_copy_size(Some(destination.aspect))
            .unwrap();
        let bytes_in_last_row = width_in_blocks * block_size;

        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);

        let bytes_per_row_alignment = get_lowest_common_denom(
            self.device.alignments.buffer_copy_pitch.get() as u32,
            block_size,
        );
        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);

        // Platform validation requires that the staging buffer always be
        // freed, even if an error occurs. All paths from here must call
        // `device.pending_writes.consume`.
        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
            profiling::scope!("copy aligned");
            // Fast path: the data is already optimally aligned.
            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            staging_buffer.write(&data[data_layout.offset as usize..]);
            staging_buffer
        } else {
            profiling::scope!("copy chunked");
            // Copy row by row into the optimal alignment.
            let block_rows_in_copy =
                (size.depth_or_array_layers - 1) * rows_per_image + height_in_blocks;
            let stage_size =
                wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64)
                    .unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            for layer in 0..size.depth_or_array_layers {
                let rows_offset = layer * rows_per_image;
                for row in rows_offset..rows_offset + height_in_blocks {
                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
                    let dst_offset = row * stage_bytes_per_row;
                    unsafe {
                        staging_buffer.write_with_offset(
                            data,
                            src_offset as isize,
                            dst_offset as isize,
                            bytes_in_last_row as usize,
                        )
                    }
                }
            }
            staging_buffer
        };

        let staging_buffer = staging_buffer.flush();

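        // Build one buffer-to-texture copy region per array layer, each sourced
        // from its own slice of the staging buffer.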
        let regions = (0..array_layer_count)
            .map(|array_layer_offset| {
                let mut texture_base = dst_base.clone();
                texture_base.array_layer += array_layer_offset;
                hal::BufferTextureCopy {
                    buffer_layout: wgt::TexelCopyBufferLayout {
                        offset: array_layer_offset as u64
                            * rows_per_image as u64
                            * stage_bytes_per_row as u64,
                        bytes_per_row: Some(stage_bytes_per_row),
                        rows_per_image: Some(rows_per_image),
                    },
                    texture_base,
                    size: hal_copy_size,
                }
            })
            .collect::<Vec<_>>();

        {
            let buffer_barrier = hal::BufferBarrier {
                buffer: staging_buffer.raw(),
                usage: hal::StateTransition {
                    from: wgt::BufferUses::MAP_WRITE,
                    to: wgt::BufferUses::COPY_SRC,
                },
            };

            let mut trackers = self.device.trackers.lock();
            let transition =
                trackers
                    .textures
                    .set_single(&dst, selector, wgt::TextureUses::COPY_DST);
            let texture_barriers = transition
                .map(|pending| pending.into_hal(dst_raw))
                .collect::<Vec<_>>();

            unsafe {
                encoder.transition_textures(&texture_barriers);
                encoder.transition_buffers(&[buffer_barrier]);
                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
            }
        }

        pending_writes.consume(staging_buffer);
        pending_writes.insert_texture(&dst);

        Ok(())
    }

    #[cfg(webgl)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: wgt::CopyExternalImageDestInfo<Fallible<Texture>>,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        use crate::conv;

        profiling::scope!("Queue::copy_external_image_to_texture");

        self.device.check_is_valid()?;

        let mut needs_flag = false;
        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
        needs_flag |= source.origin != wgt::Origin2d::ZERO;
        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
        #[allow(clippy::bool_comparison)]
        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
            needs_flag |= source.flip_y != false;
            needs_flag |= destination.premultiplied_alpha != false;
        }

        if needs_flag {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
                .map_err(TransferError::from)?;
        }

        let src_width = source.source.width();
        let src_height = source.source.height();

        let dst = destination.texture.get()?;
        let premultiplied_alpha = destination.premultiplied_alpha;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
            return Err(
                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
            );
        }
        if dst.desc.dimension != wgt::TextureDimension::D2 {
            return Err(TransferError::InvalidDimensionExternal.into());
        }
        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
            .map_err(TransferError::MissingTextureUsage)?;
        if dst.desc.sample_count != 1 {
            return Err(TransferError::InvalidSampleCount {
                sample_count: dst.desc.sample_count,
            }
            .into());
        }

        if source.origin.x + size.width > src_width {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.x,
                end_offset: source.origin.x + size.width,
                texture_size: src_width,
                dimension: crate::resource::TextureErrorDimension::X,
                side: CopySide::Source,
            }
            .into());
        }
        if source.origin.y + size.height > src_height {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.y,
                end_offset: source.origin.y + size.height,
                texture_size: src_height,
                dimension: crate::resource::TextureErrorDimension::Y,
                side: CopySide::Source,
            }
            .into());
        }
        if size.depth_or_array_layers != 1 {
            return Err(TransferError::TextureOverrun {
                start_offset: 0,
                end_offset: size.depth_or_array_layers,
                texture_size: 1,
                dimension: crate::resource::TextureErrorDimension::Z,
                side: CopySide::Source,
            }
            .into());
        }

        // Note: Doing the copy range validation early is important because it ensures that the
        // dimensions are not going to cause overflow in other parts of the validation.
        let (hal_copy_size, _) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;

        // This must happen after parameter validation (so that errors are reported
        // as required by the spec), but before any side effects.
        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring copy_external_image_to_texture of size 0");
            return Ok(());
        }

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        // If the copy does not fully cover the layers, we need to initialize to
        // zero *first* as we don't keep track of partial texture layer inits.
        //
        // Strictly speaking we only need to clear the areas of a layer left
        // untouched by the copy, but that would get increasingly messy.
        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            // volume textures don't have a layer range as array volumes aren't supported
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = dst.try_raw(&snatch_guard)?;

        let regions = hal::TextureCopy {
            src_base: hal::TextureCopyBase {
                mip_level: 0,
                array_layer: 0,
                origin: source.origin.to_3d(0),
                aspect: hal::FormatAspects::COLOR,
            },
            dst_base,
            size: hal_copy_size,
        };

        let mut trackers = self.device.trackers.lock();
        let transitions = trackers
            .textures
            .set_single(&dst, selector, wgt::TextureUses::COPY_DST);

        // `copy_external_image_to_texture` is exclusive to the WebGL backend.
        // Skip the `DynCommandEncoder` abstraction and go directly to the WebGL backend.
        let encoder_webgl = encoder
            .as_any_mut()
            .downcast_mut::<hal::gles::CommandEncoder>()
            .unwrap();
        let dst_raw_webgl = dst_raw
            .as_any()
            .downcast_ref::<hal::gles::Texture>()
            .unwrap();
        let transitions_webgl = transitions.map(|pending| {
            let dyn_transition = pending.into_hal(dst_raw);
            hal::TextureBarrier {
                texture: dst_raw_webgl,
                range: dyn_transition.range,
                usage: dyn_transition.usage,
            }
        });

        use hal::CommandEncoder as _;
        unsafe {
            encoder_webgl.transition_textures(transitions_webgl);
            encoder_webgl.copy_external_image_to_texture(
                source,
                dst_raw_webgl,
                premultiplied_alpha,
                iter::once(regions),
            );
        }

        Ok(())
    }

    #[cfg(feature = "trace")]
    fn trace_submission(
        &self,
        submit_index: SubmissionIndex,
        commands: Vec<crate::command::Command<crate::command::PointerReferences>>,
    ) {
        if let Some(ref mut trace) = *self.device.trace.lock() {
            trace.add(Action::Submit(submit_index, commands));
        }
    }

    #[cfg(feature = "trace")]
    fn trace_failed_submission(
        &self,
        submit_index: SubmissionIndex,
        commands: Option<Vec<crate::command::Command<crate::command::PointerReferences>>>,
        error: alloc::string::String,
    ) {
        if let Some(ref mut trace) = *self.device.trace.lock() {
            trace.add(Action::FailedCommands {
                commands,
                failed_at_submit: Some(submit_index),
                error,
            });
        }
    }

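    /// Submit the given command buffers, preceded by whatever commands have
    /// accumulated in the pending-writes encoder, and return the new
    /// submission index.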
    pub fn submit(
        &self,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        profiling::scope!("Queue::submit");
        api_log!("Queue::submit");

        let submit_index;

        let res = 'error: {
            let snatch_guard = self.device.snatchable_lock.read();

            // Fence lock must be acquired after the snatch lock everywhere to avoid deadlocks.
            let mut fence = self.device.fence.write();

            let mut command_index_guard = self.device.command_indices.write();
            command_index_guard.active_submission_index += 1;
            submit_index = command_index_guard.active_submission_index;

            if let Err(e) = self.device.check_is_valid() {
                break 'error Err(e.into());
            }

            let mut active_executions = Vec::new();

            let mut used_surface_textures = track::TextureUsageScope::default();

            // Use a hashmap here to deduplicate the surface textures that are used in the command buffers.
            // This avoids Vulkan deadlocking when the same surface texture is submitted multiple times.
            let mut submit_surface_textures_owned = FastHashMap::default();

            {
                if !command_buffers.is_empty() {
                    profiling::scope!("prepare");

                    let mut first_error = None;

                    // TODO: if multiple command buffers are submitted, we can re-use the last
                    // native command buffer of the previous chain instead of always creating
                    // a temporary one, since the chains are not finished.

                    // finish all the command buffers first
                    for command_buffer in command_buffers {
                        profiling::scope!("process command buffer");

                        // We reset `used_surface_textures` every time we use
                        // it, so make sure to call `set_size` on it.
                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());

                        // Note that we are required to invalidate all command buffers in both the success and failure paths.
                        // This is why we `continue` and don't early return via `?`.
                        #[allow(unused_mut)]
                        let mut cmd_buf_data = command_buffer.take_finished();

                        if first_error.is_some() {
                            continue;
                        }

                        #[cfg(feature = "trace")]
                        let trace_commands = cmd_buf_data
                            .as_mut()
                            .ok()
                            .and_then(|data| mem::take(&mut data.trace_commands));

                        let mut baked = match cmd_buf_data {
                            Ok(cmd_buf_data) => {
                                let res = validate_command_buffer(
                                    command_buffer,
                                    self,
                                    &cmd_buf_data,
                                    &snatch_guard,
                                    &mut submit_surface_textures_owned,
                                    &mut used_surface_textures,
                                    &mut command_index_guard,
                                );
                                if let Err(err) = res {
                                    #[cfg(feature = "trace")]
                                    self.trace_failed_submission(
                                        submit_index,
                                        trace_commands,
                                        err.to_string(),
                                    );
                                    first_error.get_or_insert(err);
                                    continue;
                                }

                                #[cfg(feature = "trace")]
                                if let Some(commands) = trace_commands {
                                    self.trace_submission(submit_index, commands);
                                }

                                cmd_buf_data.into_baked_commands()
                            }
                            Err(err) => {
                                #[cfg(feature = "trace")]
                                self.trace_failed_submission(
                                    submit_index,
                                    trace_commands,
                                    err.to_string(),
                                );
                                first_error.get_or_insert(err.into());
                                continue;
                            }
                        };

                        // execute resource transitions
                        if let Err(e) = baked.encoder.open_pass(hal_label(
                            Some("(wgpu internal) Transit"),
                            self.device.instance_flags,
                        )) {
                            break 'error Err(e.into());
                        }

                        // Note: locking the trackers has to be done after the storages
                        let mut trackers = self.device.trackers.lock();
                        if let Err(e) = baked.initialize_buffer_memory(&mut trackers, &snatch_guard)
                        {
                            break 'error Err(e.into());
                        }
                        if let Err(e) = baked.initialize_texture_memory(
                            &mut trackers,
                            &self.device,
                            &snatch_guard,
                        ) {
                            break 'error Err(e.into());
                        }

                        // Note: stateless trackers are not merged:
                        // device already knows these resources exist.
                        CommandEncoder::insert_barriers_from_device_tracker(
                            baked.encoder.raw.as_mut(),
                            &mut trackers,
                            &baked.trackers,
                            &snatch_guard,
                        );

                        if let Err(e) = baked.encoder.close_and_push_front() {
                            break 'error Err(e.into());
                        }

                        // Transition surface textures into `Present` state.
                        // Note: we could technically do it after all of the command buffers,
                        // but here we have a command encoder by hand, so it's easier to use it.
                        if !used_surface_textures.is_empty() {
                            if let Err(e) = baked.encoder.open_pass(hal_label(
                                Some("(wgpu internal) Present"),
                                self.device.instance_flags,
                            )) {
                                break 'error Err(e.into());
                            }
                            let texture_barriers = trackers
                                .textures
                                .set_from_usage_scope_and_drain_transitions(
                                    &used_surface_textures,
                                    &snatch_guard,
                                )
                                .collect::<Vec<_>>();
                            unsafe {
                                baked.encoder.raw.transition_textures(&texture_barriers);
                            };
                            if let Err(e) = baked.encoder.close() {
                                break 'error Err(e.into());
                            }
                            used_surface_textures = track::TextureUsageScope::default();
                        }

                        // done
                        active_executions.push(EncoderInFlight {
                            inner: baked.encoder,
                            trackers: baked.trackers,
                            temp_resources: baked.temp_resources,
                            _indirect_draw_validation_resources: baked
                                .indirect_draw_validation_resources,
                            pending_buffers: FastHashMap::default(),
                            pending_textures: FastHashMap::default(),
                            pending_blas_s: FastHashMap::default(),
                        });
                    }

                    if let Some(first_error) = first_error {
                        break 'error Err(first_error);
                    }
                }
            }

            let mut pending_writes = self.pending_writes.lock();

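            // Surface textures written to by pending writes (e.g. `write_texture`)
            // also need to be presented: register them with this submission and
            // transition them towards `PRESENT` below.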
1372            {
1373                used_surface_textures.set_size(self.device.tracker_indices.textures.size());
1374                for texture in pending_writes.dst_textures.values() {
1375                    match texture.try_inner(&snatch_guard) {
1376                        Ok(TextureInner::Native { .. }) => {}
1377                        Ok(TextureInner::Surface { .. }) => {
1378                            // Compare the Arcs by pointer as Textures don't implement Eq
1379                            submit_surface_textures_owned
1380                                .insert(Arc::as_ptr(texture), texture.clone());
1381
1382                            unsafe {
1383                                used_surface_textures
1384                                    .merge_single(texture, None, wgt::TextureUses::PRESENT)
1385                                    .unwrap()
1386                            };
1387                        }
1388                        // The texture must not have been destroyed when its usage here was
1389                        // encoded. If it was destroyed after that, then it was transferred
1390                        // to `pending_writes.temp_resources` at the time of destruction, so
1391                        // we are still okay to use it.
1392                        Err(DestroyedResourceError(_)) => {}
1393                    }
1394                }
1395
1396                if !used_surface_textures.is_empty() {
1397                    let mut trackers = self.device.trackers.lock();
1398
1399                    let texture_barriers = trackers
1400                        .textures
1401                        .set_from_usage_scope_and_drain_transitions(
1402                            &used_surface_textures,
1403                            &snatch_guard,
1404                        )
1405                        .collect::<Vec<_>>();
1406                    unsafe {
1407                        pending_writes
1408                            .command_encoder
1409                            .transition_textures(&texture_barriers);
1410                    };
1411                }
1412            }
1413
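            // If the pending-writes encoder recorded any work, schedule it to run
            // ahead of the user's command buffers.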
1414            match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
1415                Ok(Some(pending_execution)) => {
1416                    active_executions.insert(0, pending_execution);
1417                }
1418                Ok(None) => {}
1419                Err(e) => break 'error Err(e.into()),
1420            }
1421            let hal_command_buffers = active_executions
1422                .iter()
1423                .flat_map(|e| e.inner.list.iter().map(|b| b.as_ref()))
1424                .collect::<Vec<_>>();
1425
1426            {
1427                let mut submit_surface_textures =
1428                    SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(
1429                        submit_surface_textures_owned.len(),
1430                    );
1431
1432                for texture in submit_surface_textures_owned.values() {
1433                    let raw = match texture.inner.get(&snatch_guard) {
1434                        Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
1435                        _ => unreachable!(),
1436                    };
1437                    submit_surface_textures.push(raw);
1438                }
1439
1440                if let Err(e) = unsafe {
1441                    self.raw().submit(
1442                        &hal_command_buffers,
1443                        &submit_surface_textures,
1444                        (fence.as_mut(), submit_index),
1445                    )
1446                }
1447                .map_err(|e| self.device.handle_hal_error(e))
1448                {
1449                    break 'error Err(e.into());
1450                }
1451
1452                drop(command_index_guard);
1453
1454                // Advance the successful submission index.
1455                self.device
1456                    .last_successful_submission_index
1457                    .fetch_max(submit_index, Ordering::SeqCst);
1458            }
1459
1460            profiling::scope!("cleanup");
1461
1462            // This will register the new submission with the lifetime tracker.
1463            self.lock_life()
1464                .track_submission(submit_index, active_executions);
1465            drop(pending_writes);
1466
1467            // Among other things, this schedules destruction of resources that the user
1468            // no longer needs but that are still referenced by the command stream.
1469            let fence_guard = RwLockWriteGuard::downgrade(fence);
1470            let (closures, result) =
1471                self.device
1472                    .maintain(fence_guard, wgt::PollType::Poll, snatch_guard);
1473            match result {
1474                Ok(status) => {
1475                    debug_assert!(matches!(
1476                        status,
1477                        wgt::PollStatus::QueueEmpty | wgt::PollStatus::Poll
1478                    ));
1479                }
1480                Err(WaitIdleError::Device(err)) => break 'error Err(QueueSubmitError::Queue(err)),
1481                Err(WaitIdleError::WrongSubmissionIndex(..)) => {
1482                    unreachable!("Cannot get WrongSubmissionIndex from Poll")
1483                }
1484                Err(WaitIdleError::Timeout) => unreachable!("Cannot get Timeout from Poll"),
1485            };
1486
1487            Ok(closures)
1488        };
1489
1490        let callbacks = match res {
1491            Ok(ok) => ok,
1492            Err(e) => return Err((submit_index, e)),
1493        };
1494
1495        // The closures should execute with nothing locked!
1496        callbacks.fire();
1497
1498        self.device.lose_if_oom();
1499
1500        api_log!("Queue::submit returned submit index {submit_index}");
1501
1502        Ok(submit_index)
1503    }
1504
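    /// Returns the timestamp period reported by the backend queue: the duration,
    /// in nanoseconds, represented by a single timestamp-query tick.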
1505    pub fn get_timestamp_period(&self) -> f32 {
1506        unsafe { self.raw().get_timestamp_period() }
1507    }
1508
1509    /// `closure` is guaranteed to be called.
1510    pub fn on_submitted_work_done(
1511        &self,
1512        closure: SubmittedWorkDoneClosure,
1513    ) -> Option<SubmissionIndex> {
1514        api_log!("Queue::on_submitted_work_done");
1515        //TODO: flush pending writes
1516        self.lock_life().add_work_done_closure(closure)
1517    }
1518
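    /// Builds a compacted copy of `blas`.
    ///
    /// The source BLAS must have completed its compaction-size readback
    /// (`BlasCompactState::Ready`). A new, smaller acceleration structure is
    /// created, and a compacting copy into it is recorded on the queue's
    /// pending-writes encoder, so the copy executes with the next flush of
    /// pending writes.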
1519    pub fn compact_blas(&self, blas: &Arc<Blas>) -> Result<Arc<Blas>, CompactBlasError> {
1520        profiling::scope!("Queue::compact_blas");
1521        api_log!("Queue::compact_blas");
1522
1523        let new_label = blas.label.clone() + " (compacted)";
1524
1525        self.device.check_is_valid()?;
1526        self.same_device_as(blas.as_ref())?;
1527
1528        let device = blas.device.clone();
1529
1530        let snatch_guard = device.snatchable_lock.read();
1531
1532        let BlasCompactState::Ready { size } = *blas.compacted_state.lock() else {
1533            return Err(CompactBlasError::BlasNotReady);
1534        };
1535
1536        let mut size_info = blas.size_info;
1537        size_info.acceleration_structure_size = size;
1538
1539        let mut pending_writes = self.pending_writes.lock();
1540        let cmd_buf_raw = pending_writes.activate();
1541
1542        let raw = unsafe {
1543            device
1544                .raw()
1545                .create_acceleration_structure(&hal::AccelerationStructureDescriptor {
1546                    label: hal_label(Some(&new_label), device.instance_flags),
1547                    size: size_info.acceleration_structure_size,
1548                    format: hal::AccelerationStructureFormat::BottomLevel,
1549                    allow_compaction: false,
1550                })
1551        }
1552        .map_err(DeviceError::from_hal)?;
1553
1554        let src_raw = blas.try_raw(&snatch_guard)?;
1555
1556        unsafe {
1557            cmd_buf_raw.copy_acceleration_structure_to_acceleration_structure(
1558                src_raw,
1559                raw.as_ref(),
1560                wgt::AccelerationStructureCopy::Compact,
1561            )
1562        };
1563
1564        let handle = unsafe {
1565            device
1566                .raw()
1567                .get_acceleration_structure_device_address(raw.as_ref())
1568        };
1569
1570        drop(snatch_guard);
1571
1572        let mut command_indices_lock = device.command_indices.write();
1573        command_indices_lock.next_acceleration_structure_build_command_index += 1;
1574        let built_index =
1575            NonZeroU64::new(command_indices_lock.next_acceleration_structure_build_command_index)
1576                .unwrap();
1577
1578        let new_blas = Arc::new(Blas {
1579            raw: Snatchable::new(raw),
1580            device: device.clone(),
1581            size_info,
1582            sizes: blas.sizes.clone(),
1583            flags: blas.flags & !AccelerationStructureFlags::ALLOW_COMPACTION,
1584            update_mode: blas.update_mode,
1585            // Bypass the submit checks that normally update this, since this compaction isn't submitted through the usual path.
1586            built_index: RwLock::new(rank::BLAS_BUILT_INDEX, Some(built_index)),
1587            handle,
1588            label: new_label,
1589            tracking_data: TrackingData::new(blas.device.tracker_indices.blas_s.clone()),
1590            compaction_buffer: None,
1591            compacted_state: Mutex::new(rank::BLAS_COMPACTION_STATE, BlasCompactState::Compacted),
1592        });
1593
1594        pending_writes.insert_blas(blas);
1595        pending_writes.insert_blas(&new_blas);
1596
1597        Ok(new_blas)
1598    }
1599}
1600
1601impl Global {
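    /// Schedules a write of `data` into the buffer identified by `buffer_id`,
    /// starting at `buffer_offset`, on the given queue.
    ///
    /// This backs the user-facing `wgpu` call, roughly (the handles here are
    /// hypothetical):
    ///
    /// ```ignore
    /// // queue: wgpu::Queue, buffer: wgpu::Buffer, bytes: &[u8]
    /// queue.write_buffer(&buffer, 256, bytes);
    /// ```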
1602    pub fn queue_write_buffer(
1603        &self,
1604        queue_id: QueueId,
1605        buffer_id: id::BufferId,
1606        buffer_offset: wgt::BufferAddress,
1607        data: &[u8],
1608    ) -> Result<(), QueueWriteError> {
1609        let queue = self.hub.queues.get(queue_id);
1610        let buffer = self.hub.buffers.get(buffer_id).get()?;
1611
1612        #[cfg(feature = "trace")]
1613        if let Some(ref mut trace) = *queue.device.trace.lock() {
1614            use crate::device::trace::DataKind;
1615            let range = buffer_offset..buffer_offset + data.len() as u64;
1616            let data = trace.make_binary(DataKind::Bin, data);
1617            trace.add(Action::WriteBuffer {
1618                id: buffer.to_trace(),
1619                data,
1620                range,
1621                queued: true,
1622            });
1623        }
1624
1625        queue.write_buffer(buffer, buffer_offset, data)
1626    }
1627
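    /// Creates a mapped staging buffer of `buffer_size` bytes, returning its id
    /// and a pointer to its mapped memory. The buffer is meant to be filled by
    /// the caller and then handed to `queue_write_staging_buffer`.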
1628    pub fn queue_create_staging_buffer(
1629        &self,
1630        queue_id: QueueId,
1631        buffer_size: wgt::BufferSize,
1632        id_in: Option<id::StagingBufferId>,
1633    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
1634        let queue = self.hub.queues.get(queue_id);
1635        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;
1636
1637        let fid = self.hub.staging_buffers.prepare(id_in);
1638        let id = fid.assign(staging_buffer);
1639
1640        Ok((id, ptr))
1641    }
1642
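    /// Consumes the staging buffer created by `queue_create_staging_buffer` and
    /// schedules a copy of its contents into `buffer_id` at `buffer_offset`.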
1643    pub fn queue_write_staging_buffer(
1644        &self,
1645        queue_id: QueueId,
1646        buffer_id: id::BufferId,
1647        buffer_offset: wgt::BufferAddress,
1648        staging_buffer_id: id::StagingBufferId,
1649    ) -> Result<(), QueueWriteError> {
1650        let queue = self.hub.queues.get(queue_id);
1651        let buffer = self.hub.buffers.get(buffer_id);
1652        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
1653        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
1654    }
1655
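    /// Validates a prospective write of `buffer_size` bytes to `buffer_id` at
    /// `buffer_offset` without performing it.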
1656    pub fn queue_validate_write_buffer(
1657        &self,
1658        queue_id: QueueId,
1659        buffer_id: id::BufferId,
1660        buffer_offset: u64,
1661        buffer_size: wgt::BufferSize,
1662    ) -> Result<(), QueueWriteError> {
1663        let queue = self.hub.queues.get(queue_id);
1664        let buffer = self.hub.buffers.get(buffer_id);
1665        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
1666    }
1667
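    /// Schedules a write of `data`, laid out according to `data_layout`, into
    /// the region of `destination` described by `size`.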
1668    pub fn queue_write_texture(
1669        &self,
1670        queue_id: QueueId,
1671        destination: &wgt::TexelCopyTextureInfo<id::TextureId>,
1672        data: &[u8],
1673        data_layout: &wgt::TexelCopyBufferLayout,
1674        size: &wgt::Extent3d,
1675    ) -> Result<(), QueueWriteError> {
1676        let queue = self.hub.queues.get(queue_id);
1677        let texture = self.hub.textures.get(destination.texture).get()?;
1678        let destination = wgt::TexelCopyTextureInfo {
1679            texture,
1680            mip_level: destination.mip_level,
1681            origin: destination.origin,
1682            aspect: destination.aspect,
1683        };
1684
1685        #[cfg(feature = "trace")]
1686        if let Some(ref mut trace) = *queue.device.trace.lock() {
1687            use crate::device::trace::DataKind;
1688            let data = trace.make_binary(DataKind::Bin, data);
1689            trace.add(Action::WriteTexture {
1690                to: destination.to_trace(),
1691                data,
1692                layout: *data_layout,
1693                size: *size,
1694            });
1695        }
1696
1697        queue.write_texture(destination, data, data_layout, size)
1698    }
1699
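    /// WebGL only: copies an external image source (for example a canvas or an
    /// `ImageBitmap`) into the destination texture.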
1700    #[cfg(webgl)]
1701    pub fn queue_copy_external_image_to_texture(
1702        &self,
1703        queue_id: QueueId,
1704        source: &wgt::CopyExternalImageSourceInfo,
1705        destination: crate::command::CopyExternalImageDestInfo,
1706        size: wgt::Extent3d,
1707    ) -> Result<(), QueueWriteError> {
1708        let queue = self.hub.queues.get(queue_id);
1709        let destination = wgt::CopyExternalImageDestInfo {
1710            texture: self.hub.textures.get(destination.texture),
1711            mip_level: destination.mip_level,
1712            origin: destination.origin,
1713            aspect: destination.aspect,
1714            color_space: destination.color_space,
1715            premultiplied_alpha: destination.premultiplied_alpha,
1716        };
1717        queue.copy_external_image_to_texture(source, destination, size)
1718    }
1719
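    /// Resolves the given command buffer ids and submits them to the queue,
    /// returning the index of the submission on success.
    ///
    /// This backs the user-facing `wgpu` call, roughly (the `encoder` handle is
    /// hypothetical):
    ///
    /// ```ignore
    /// queue.submit(std::iter::once(encoder.finish()));
    /// ```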
1720    pub fn queue_submit(
1721        &self,
1722        queue_id: QueueId,
1723        command_buffer_ids: &[id::CommandBufferId],
1724    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
1725        let queue = self.hub.queues.get(queue_id);
1726        let command_buffer_guard = self.hub.command_buffers.read();
1727        let command_buffers = command_buffer_ids
1728            .iter()
1729            .map(|id| command_buffer_guard.get(*id))
1730            .collect::<Vec<_>>();
1731        drop(command_buffer_guard);
1732        queue.submit(&command_buffers)
1733    }
1734
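    /// Returns the timestamp period for the queue, or `1.0` when the device's
    /// timestamp normalizer is enabled.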
1735    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
1736        let queue = self.hub.queues.get(queue_id);
1737
1738        if queue.device.timestamp_normalizer.get().unwrap().enabled() {
1739            return 1.0;
1740        }
1741
1742        queue.get_timestamp_period()
1743    }
1744
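    /// Registers `closure` to fire once all work submitted to the queue so far
    /// has completed. Returns the submission index to wait for, or `0` if no
    /// wait is necessary.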
1745    pub fn queue_on_submitted_work_done(
1746        &self,
1747        queue_id: QueueId,
1748        closure: SubmittedWorkDoneClosure,
1749    ) -> SubmissionIndex {
1750        api_log!("Queue::on_submitted_work_done {queue_id:?}");
1751
1752        //TODO: flush pending writes
1753        let queue = self.hub.queues.get(queue_id);
1754        let result = queue.on_submitted_work_done(closure);
1755        result.unwrap_or(0) // '0' means no wait is necessary
1756    }
1757
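    /// Compacts the BLAS identified by `blas_id` on the given queue, registering
    /// the result under a new id. On success the new id and the raw
    /// acceleration-structure handle are returned; on failure the new id is
    /// registered as invalid and the error is returned alongside it.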
1758    pub fn queue_compact_blas(
1759        &self,
1760        queue_id: QueueId,
1761        blas_id: BlasId,
1762        id_in: Option<BlasId>,
1763    ) -> (BlasId, Option<u64>, Option<CompactBlasError>) {
1764        api_log!("Queue::compact_blas {queue_id:?}, {blas_id:?}");
1765
1766        let fid = self.hub.blas_s.prepare(id_in);
1767
1768        let queue = self.hub.queues.get(queue_id);
1769        let blas = self.hub.blas_s.get(blas_id);
1770        let device = &queue.device;
1771
1772        // TODO: Tracing
1773
1774        let error = 'error: {
1775            match device.require_features(wgpu_types::Features::EXPERIMENTAL_RAY_QUERY) {
1776                Ok(_) => {}
1777                Err(err) => break 'error err.into(),
1778            }
1779
1780            let blas = match blas.get() {
1781                Ok(blas) => blas,
1782                Err(err) => break 'error err.into(),
1783            };
1784
1785            let new_blas = match queue.compact_blas(&blas) {
1786                Ok(blas) => blas,
1787                Err(err) => break 'error err,
1788            };
1789
1790            // We should have no more errors after this point, because the compaction work has already been recorded.
1791            let old_blas_size = blas.size_info.acceleration_structure_size;
1792            let new_blas_size = new_blas.size_info.acceleration_structure_size;
1793            let handle = new_blas.handle;
1794
1795            let id = fid.assign(Fallible::Valid(new_blas));
1796
1797            api_log!("Queue::compact_blas {blas_id:?} (size: {old_blas_size}) -> {id:?} (size: {new_blas_size})");
1798
1799            return (id, Some(handle), None);
1800        };
1801
1802        let id = fid.assign(Fallible::Invalid(Arc::new(error.to_string())));
1803
1804        (id, None, Some(error))
1805    }
1806}
1807
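/// Per-command-buffer validation performed as part of `Queue::submit`.
///
/// Checks that the command buffer belongs to the queue's device, that every
/// tracked buffer is alive and not mapped, that every tracked texture is alive
/// (collecting used surface textures so they can later be transitioned to
/// `Present`), that all bind groups referenced during encoding are still valid,
/// and that recorded acceleration-structure actions are consistent.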
1808fn validate_command_buffer(
1809    command_buffer: &CommandBuffer,
1810    queue: &Queue,
1811    cmd_buf_data: &crate::command::CommandBufferMutable,
1812    snatch_guard: &SnatchGuard,
1813    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
1814    used_surface_textures: &mut track::TextureUsageScope,
1815    command_index_guard: &mut RwLockWriteGuard<CommandIndices>,
1816) -> Result<(), QueueSubmitError> {
1817    command_buffer.same_device_as(queue)?;
1818
1819    {
1820        profiling::scope!("check resource state");
1821
1822        {
1823            profiling::scope!("buffers");
1824            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
1825                buffer.check_destroyed(snatch_guard)?;
1826
1827                match *buffer.map_state.lock() {
1828                    BufferMapState::Idle => (),
1829                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
1830                }
1831            }
1832        }
1833        {
1834            profiling::scope!("textures");
1835            for texture in cmd_buf_data.trackers.textures.used_resources() {
1836                let should_extend = match texture.try_inner(snatch_guard)? {
1837                    TextureInner::Native { .. } => false,
1838                    TextureInner::Surface { .. } => {
1839                        // Compare the Arcs by pointer as Textures don't implement Eq.
1840                        submit_surface_textures_owned.insert(Arc::as_ptr(texture), texture.clone());
1841
1842                        true
1843                    }
1844                };
1845                if should_extend {
1846                    unsafe {
1847                        used_surface_textures
1848                            .merge_single(texture, None, wgt::TextureUses::PRESENT)
1849                            .unwrap();
1850                    };
1851                }
1852            }
1853        }
1854        // WebGPU requires that we check every bind group referenced during
1855        // encoding, even ones that may have been replaced before being used.
1856        // TODO(<https://github.com/gfx-rs/wgpu/issues/8510>): Optimize this.
1857        {
1858            profiling::scope!("bind groups");
1859            for bind_group in &cmd_buf_data.trackers.bind_groups {
1860                // This checks the bind group and all resources it references.
1861                bind_group.try_raw(snatch_guard)?;
1862            }
1863        }
1864
1865        if let Err(e) =
1866            cmd_buf_data.validate_acceleration_structure_actions(snatch_guard, command_index_guard)
1867        {
1868            return Err(e.into());
1869        }
1870    }
1871    Ok(())
1872}