use alloc::{boxed::Box, string::ToString, sync::Arc, vec, vec::Vec};
use core::{
    iter,
    mem::{self, ManuallyDrop},
    num::NonZeroU64,
    ptr::NonNull,
    sync::atomic::Ordering,
};
use smallvec::SmallVec;
use thiserror::Error;
use wgt::{
    error::{ErrorType, WebGpuError},
    AccelerationStructureFlags,
};

use super::{life::LifetimeTracker, Device};
#[cfg(feature = "trace")]
use crate::device::trace::Action;
use crate::{
    api_log,
    command::{
        extract_texture_selector, validate_linear_texture_data, validate_texture_buffer_copy,
        validate_texture_copy_range, ClearError, CommandAllocator, CommandBuffer, CommandEncoder,
        CommandEncoderError, CopySide, TexelCopyTextureInfo, TransferError,
    },
    conv,
    device::{DeviceError, WaitIdleError},
    get_lowest_common_denom,
    global::Global,
    hal_label,
    id::{self, BlasId, QueueId},
    init_tracker::{has_copy_partial_init_tracker_coverage, TextureInitRange},
    lock::{rank, Mutex, MutexGuard, RwLock, RwLockWriteGuard},
    ray_tracing::{BlasCompactReadyPendingClosure, CompactBlasError},
    resource::{
        Blas, BlasCompactState, Buffer, BufferAccessError, BufferMapState, DestroyedBuffer,
        DestroyedResourceError, DestroyedTexture, Fallible, FlushedStagingBuffer,
        InvalidResourceError, Labeled, ParentDevice, ResourceErrorIdent, StagingBuffer, Texture,
        TextureInner, Trackable, TrackingData,
    },
    resource_log,
    scratch::ScratchBuffer,
    snatch::{SnatchGuard, Snatchable},
    track::{self, Tracker, TrackerIndex},
    FastHashMap, SubmissionIndex,
};
use crate::{device::resource::CommandIndices, resource::RawResourceAccess};

pub struct Queue {
    raw: Box<dyn hal::DynQueue>,
    pub(crate) pending_writes: Mutex<PendingWrites>,
    life_tracker: Mutex<LifetimeTracker>,
    pub(crate) device: Arc<Device>,
}

impl Queue {
    pub(crate) fn new(
        device: Arc<Device>,
        raw: Box<dyn hal::DynQueue>,
        instance_flags: wgt::InstanceFlags,
    ) -> Result<Self, DeviceError> {
        let pending_encoder = device
            .command_allocator
            .acquire_encoder(device.raw(), raw.as_ref())
            .map_err(DeviceError::from_hal);

        let pending_encoder = match pending_encoder {
            Ok(pending_encoder) => pending_encoder,
            Err(e) => {
                return Err(e);
            }
        };

        let mut pending_writes = PendingWrites::new(pending_encoder, instance_flags);

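        // Record the commands that initialize the device's zero buffer: transition it
        // to COPY_DST, clear it, then transition it to COPY_SRC so it can later serve
        // as a source of zeroes (e.g. for lazy texture clears).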
        let zero_buffer = device.zero_buffer.as_ref();
        pending_writes.activate();
        unsafe {
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::empty(),
                        to: wgt::BufferUses::COPY_DST,
                    },
                }]);
            pending_writes
                .command_encoder
                .clear_buffer(zero_buffer, 0..super::ZERO_BUFFER_SIZE);
            pending_writes
                .command_encoder
                .transition_buffers(&[hal::BufferBarrier {
                    buffer: zero_buffer,
                    usage: hal::StateTransition {
                        from: wgt::BufferUses::COPY_DST,
                        to: wgt::BufferUses::COPY_SRC,
                    },
                }]);
        }

        Ok(Queue {
            raw,
            device,
            pending_writes: Mutex::new(rank::QUEUE_PENDING_WRITES, pending_writes),
            life_tracker: Mutex::new(rank::QUEUE_LIFE_TRACKER, LifetimeTracker::new()),
        })
    }

    pub(crate) fn raw(&self) -> &dyn hal::DynQueue {
        self.raw.as_ref()
    }

    #[track_caller]
    pub(crate) fn lock_life<'a>(&'a self) -> MutexGuard<'a, LifetimeTracker> {
        self.life_tracker.lock()
    }

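    /// Check for completed work up to `submission_index` and collect the callbacks that
    /// are now ready to fire: submitted-work-done closures, buffer-map closures, and
    /// BLAS-compaction read-back closures, plus a flag indicating whether the queue is
    /// now empty.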
    pub(crate) fn maintain(
        &self,
        submission_index: u64,
        snatch_guard: &SnatchGuard,
    ) -> (
        SmallVec<[SubmittedWorkDoneClosure; 1]>,
        Vec<super::BufferMapPendingClosure>,
        Vec<BlasCompactReadyPendingClosure>,
        bool,
    ) {
        let mut life_tracker = self.lock_life();
        let submission_closures = life_tracker.triage_submissions(submission_index);

        let mapping_closures = life_tracker.handle_mapping(snatch_guard);
        let blas_closures = life_tracker.handle_compact_read_back();

        let queue_empty = life_tracker.queue_empty();

        (
            submission_closures,
            mapping_closures,
            blas_closures,
            queue_empty,
        )
    }
}

crate::impl_resource_type!(Queue);
impl Labeled for Queue {
    fn label(&self) -> &str {
        ""
    }
}
crate::impl_parent_device!(Queue);
crate::impl_storage_item!(Queue);

impl Drop for Queue {
    fn drop(&mut self) {
        resource_log!("Drop {}", self.error_ident());

        let last_successful_submission_index = self
            .device
            .last_successful_submission_index
            .load(Ordering::Acquire);

        let fence = self.device.fence.read();

        let timeouts_in_ms = [100, 200, 400, 800, 1600, 3200];
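        // Wait for the last successful submission to finish, retrying with exponentially
        // increasing timeouts; if the final attempt still times out or hits an
        // unrecoverable error, panic.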
        for (i, timeout_ms) in timeouts_in_ms.into_iter().enumerate() {
            let is_last_iter = i == timeouts_in_ms.len() - 1;

            api_log!(
                "Waiting on last submission. try: {}/{}. timeout: {}ms",
                i + 1,
                timeouts_in_ms.len(),
                timeout_ms
            );

            let wait_res = unsafe {
                self.device.raw().wait(
                    fence.as_ref(),
                    last_successful_submission_index,
                    #[cfg(not(target_arch = "wasm32"))]
                    timeout_ms,
                    #[cfg(target_arch = "wasm32")]
                    0,
                )
            };
            match wait_res {
                Ok(true) => break,
                Ok(false) => {
                    #[cfg(target_arch = "wasm32")]
                    {
                        break;
                    }
                    #[cfg(not(target_arch = "wasm32"))]
                    {
                        if is_last_iter {
                            panic!(
                                "We timed out while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                }
                Err(e) => match e {
                    hal::DeviceError::OutOfMemory => {
                        if is_last_iter {
                            panic!(
                                "We ran into an OOM error while waiting on the last successful submission to complete!"
                            );
                        }
                    }
                    hal::DeviceError::Lost => {
                        self.device.handle_hal_error(e);
                        break;
                    }
                    hal::DeviceError::Unexpected => {
                        panic!(
                            "We ran into an unexpected error while waiting on the last successful submission to complete!"
                        );
                    }
                },
            }
        }
        drop(fence);

        let snatch_guard = self.device.snatchable_lock.read();
        let (submission_closures, mapping_closures, blas_compact_ready_closures, queue_empty) =
            self.maintain(last_successful_submission_index, &snatch_guard);
        drop(snatch_guard);

        assert!(queue_empty);

        let closures = crate::device::UserClosures {
            mappings: mapping_closures,
            blas_compact_ready: blas_compact_ready_closures,
            submissions: submission_closures,
            device_lost_invocations: SmallVec::new(),
        };

        closures.fire();
    }
}

#[cfg(send_sync)]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + Send + 'static>;
#[cfg(not(send_sync))]
pub type SubmittedWorkDoneClosure = Box<dyn FnOnce() + 'static>;

#[derive(Debug)]
pub enum TempResource {
    StagingBuffer(FlushedStagingBuffer),
    ScratchBuffer(ScratchBuffer),
    DestroyedBuffer(DestroyedBuffer),
    DestroyedTexture(DestroyedTexture),
}

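/// A command encoder whose commands have already been submitted to the queue.
///
/// It is kept alive, along with the trackers and temporary resources its commands use,
/// until the corresponding submission has finished executing on the GPU.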
pub(crate) struct EncoderInFlight {
    inner: crate::command::InnerCommandEncoder,
    pub(crate) trackers: Tracker,
    pub(crate) temp_resources: Vec<TempResource>,
    _indirect_draw_validation_resources: crate::indirect_validation::DrawResources,

    pub(crate) pending_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
    pub(crate) pending_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
    pub(crate) pending_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
}

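/// Command encoder that accumulates the implicit GPU work issued directly on the queue
/// (`write_buffer`, `write_texture`, lazy texture initialization, BLAS compaction) so it
/// can be submitted ahead of the next batch of user command buffers.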
297#[derive(Debug)]
318pub(crate) struct PendingWrites {
319 pub command_encoder: Box<dyn hal::DynCommandEncoder>,
321
322 pub is_recording: bool,
328
329 temp_resources: Vec<TempResource>,
330 dst_buffers: FastHashMap<TrackerIndex, Arc<Buffer>>,
331 dst_textures: FastHashMap<TrackerIndex, Arc<Texture>>,
332 copied_blas_s: FastHashMap<TrackerIndex, Arc<Blas>>,
333 instance_flags: wgt::InstanceFlags,
334}
335
336impl PendingWrites {
337 pub fn new(
338 command_encoder: Box<dyn hal::DynCommandEncoder>,
339 instance_flags: wgt::InstanceFlags,
340 ) -> Self {
341 Self {
342 command_encoder,
343 is_recording: false,
344 temp_resources: Vec::new(),
345 dst_buffers: FastHashMap::default(),
346 dst_textures: FastHashMap::default(),
347 copied_blas_s: FastHashMap::default(),
348 instance_flags,
349 }
350 }
351
352 pub fn insert_buffer(&mut self, buffer: &Arc<Buffer>) {
353 self.dst_buffers
354 .insert(buffer.tracker_index(), buffer.clone());
355 }
356
357 pub fn insert_texture(&mut self, texture: &Arc<Texture>) {
358 self.dst_textures
359 .insert(texture.tracker_index(), texture.clone());
360 }
361
362 pub fn insert_blas(&mut self, blas: &Arc<Blas>) {
363 self.copied_blas_s
364 .insert(blas.tracker_index(), blas.clone());
365 }
366
367 pub fn contains_buffer(&self, buffer: &Arc<Buffer>) -> bool {
368 self.dst_buffers.contains_key(&buffer.tracker_index())
369 }
370
371 pub fn contains_texture(&self, texture: &Arc<Texture>) -> bool {
372 self.dst_textures.contains_key(&texture.tracker_index())
373 }
374
375 pub fn consume_temp(&mut self, resource: TempResource) {
376 self.temp_resources.push(resource);
377 }
378
379 pub fn consume(&mut self, buffer: FlushedStagingBuffer) {
380 self.temp_resources
381 .push(TempResource::StagingBuffer(buffer));
382 }
383
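    // Finish the current recording (if any) and package it as an `EncoderInFlight`,
    // swapping a freshly acquired encoder into `self` for future pending writes.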
    fn pre_submit(
        &mut self,
        command_allocator: &CommandAllocator,
        device: &Arc<Device>,
        queue: &Queue,
    ) -> Result<Option<EncoderInFlight>, DeviceError> {
        if self.is_recording {
            let pending_buffers = mem::take(&mut self.dst_buffers);
            let pending_textures = mem::take(&mut self.dst_textures);
            let pending_blas_s = mem::take(&mut self.copied_blas_s);

            let cmd_buf = unsafe { self.command_encoder.end_encoding() }
                .map_err(|e| device.handle_hal_error(e))?;
            self.is_recording = false;

            let new_encoder = command_allocator
                .acquire_encoder(device.raw(), queue.raw())
                .map_err(|e| device.handle_hal_error(e))?;

            let encoder = EncoderInFlight {
                inner: crate::command::InnerCommandEncoder {
                    raw: ManuallyDrop::new(mem::replace(&mut self.command_encoder, new_encoder)),
                    list: vec![cmd_buf],
                    device: device.clone(),
                    is_open: false,
                    label: "(wgpu internal) PendingWrites command encoder".into(),
                },
                trackers: Tracker::new(),
                temp_resources: mem::take(&mut self.temp_resources),
                _indirect_draw_validation_resources: crate::indirect_validation::DrawResources::new(
                    device.clone(),
                ),
                pending_buffers,
                pending_textures,
                pending_blas_s,
            };
            Ok(Some(encoder))
        } else {
            self.dst_buffers.clear();
            self.dst_textures.clear();
            self.copied_blas_s.clear();
            Ok(None)
        }
    }

    pub fn activate(&mut self) -> &mut dyn hal::DynCommandEncoder {
        if !self.is_recording {
            unsafe {
                self.command_encoder
                    .begin_encoding(hal_label(
                        Some("(wgpu internal) PendingWrites"),
                        self.instance_flags,
                    ))
                    .unwrap();
            }
            self.is_recording = true;
        }
        self.command_encoder.as_mut()
    }
}

impl Drop for PendingWrites {
    fn drop(&mut self) {
        unsafe {
            if self.is_recording {
                self.command_encoder.discard_encoding();
            }
        }
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueWriteError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    Transfer(#[from] TransferError),
    #[error(transparent)]
    MemoryInitFailure(#[from] ClearError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
}

impl WebGpuError for QueueWriteError {
    fn webgpu_error_type(&self) -> ErrorType {
        let e: &dyn WebGpuError = match self {
            Self::Queue(e) => e,
            Self::Transfer(e) => e,
            Self::MemoryInitFailure(e) => e,
            Self::DestroyedResource(e) => e,
            Self::InvalidResource(e) => e,
        };
        e.webgpu_error_type()
    }
}

#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum QueueSubmitError {
    #[error(transparent)]
    Queue(#[from] DeviceError),
    #[error(transparent)]
    DestroyedResource(#[from] DestroyedResourceError),
    #[error(transparent)]
    Unmap(#[from] BufferAccessError),
    #[error("{0} is still mapped")]
    BufferStillMapped(ResourceErrorIdent),
    #[error(transparent)]
    InvalidResource(#[from] InvalidResourceError),
    #[error(transparent)]
    CommandEncoder(#[from] CommandEncoderError),
    #[error(transparent)]
    ValidateAsActionsError(#[from] crate::ray_tracing::ValidateAsActionsError),
}

impl WebGpuError for QueueSubmitError {
    fn webgpu_error_type(&self) -> ErrorType {
        let e: &dyn WebGpuError = match self {
            Self::Queue(e) => e,
            Self::Unmap(e) => e,
            Self::CommandEncoder(e) => e,
            Self::ValidateAsActionsError(e) => e,
            Self::InvalidResource(e) => e,
            Self::DestroyedResource(_) | Self::BufferStillMapped(_) => {
                return ErrorType::Validation
            }
        };
        e.webgpu_error_type()
    }
}

impl Queue {
    pub fn write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_buffer");
        api_log!("Queue::write_buffer");

        self.device.check_is_valid()?;

        let buffer = buffer.get()?;

        let data_size = data.len() as wgt::BufferAddress;

        self.same_device_as(buffer.as_ref())?;

        let data_size = if let Some(data_size) = wgt::BufferSize::new(data_size) {
            data_size
        } else {
            log::trace!("Ignoring write_buffer of size 0");
            return Ok(());
        };

        let snatch_guard = self.device.snatchable_lock.read();

        let mut staging_buffer = StagingBuffer::new(&self.device, data_size)?;
        let mut pending_writes = self.pending_writes.lock();

        let staging_buffer = {
            profiling::scope!("copy");
            staging_buffer.write(data);
            staging_buffer.flush()
        };

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn create_staging_buffer(
        &self,
        buffer_size: wgt::BufferSize,
    ) -> Result<(StagingBuffer, NonNull<u8>), QueueWriteError> {
        profiling::scope!("Queue::create_staging_buffer");
        resource_log!("Queue::create_staging_buffer");

        self.device.check_is_valid()?;

        let staging_buffer = StagingBuffer::new(&self.device, buffer_size)?;
        let ptr = unsafe { staging_buffer.ptr() };

        Ok((staging_buffer, ptr))
    }

    pub fn write_staging_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: wgt::BufferAddress,
        staging_buffer: StagingBuffer,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_staging_buffer");

        self.device.check_is_valid()?;

        let buffer = buffer.get()?;

        let snatch_guard = self.device.snatchable_lock.read();
        let mut pending_writes = self.pending_writes.lock();

        let staging_buffer = staging_buffer.flush();

        let result = self.write_staging_buffer_impl(
            &snatch_guard,
            &mut pending_writes,
            &staging_buffer,
            buffer,
            buffer_offset,
        );

        pending_writes.consume(staging_buffer);
        result
    }

    pub fn validate_write_buffer(
        &self,
        buffer: Fallible<Buffer>,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::validate_write_buffer");

        self.device.check_is_valid()?;

        let buffer = buffer.get()?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, buffer_size)?;

        Ok(())
    }

    fn validate_write_buffer_impl(
        &self,
        buffer: &Buffer,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), TransferError> {
        buffer.check_usage(wgt::BufferUsages::COPY_DST)?;
        if buffer_size.get() % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedCopySize(buffer_size.get()));
        }
        if buffer_offset % wgt::COPY_BUFFER_ALIGNMENT != 0 {
            return Err(TransferError::UnalignedBufferOffset(buffer_offset));
        }
        if buffer_offset + buffer_size.get() > buffer.size {
            return Err(TransferError::BufferOverrun {
                start_offset: buffer_offset,
                end_offset: buffer_offset + buffer_size.get(),
                buffer_size: buffer.size,
                side: CopySide::Destination,
            });
        }

        Ok(())
    }

    fn write_staging_buffer_impl(
        &self,
        snatch_guard: &SnatchGuard,
        pending_writes: &mut PendingWrites,
        staging_buffer: &FlushedStagingBuffer,
        buffer: Arc<Buffer>,
        buffer_offset: u64,
    ) -> Result<(), QueueWriteError> {
        self.device.check_is_valid()?;

        let transition = {
            let mut trackers = self.device.trackers.lock();
            trackers
                .buffers
                .set_single(&buffer, wgt::BufferUses::COPY_DST)
        };

        let dst_raw = buffer.try_raw(snatch_guard)?;

        self.same_device_as(buffer.as_ref())?;

        self.validate_write_buffer_impl(&buffer, buffer_offset, staging_buffer.size)?;

        let region = hal::BufferCopy {
            src_offset: 0,
            dst_offset: buffer_offset,
            size: staging_buffer.size,
        };
        let barriers = iter::once(hal::BufferBarrier {
            buffer: staging_buffer.raw(),
            usage: hal::StateTransition {
                from: wgt::BufferUses::MAP_WRITE,
                to: wgt::BufferUses::COPY_SRC,
            },
        })
        .chain(transition.map(|pending| pending.into_hal(&buffer, snatch_guard)))
        .collect::<Vec<_>>();
        let encoder = pending_writes.activate();
        unsafe {
            encoder.transition_buffers(&barriers);
            encoder.copy_buffer_to_buffer(staging_buffer.raw(), dst_raw, &[region]);
        }

        pending_writes.insert_buffer(&buffer);

        {
            buffer
                .initialization_status
                .write()
                .drain(buffer_offset..(buffer_offset + staging_buffer.size.get()));
        }

        Ok(())
    }

    pub fn write_texture(
        &self,
        destination: wgt::TexelCopyTextureInfo<Fallible<Texture>>,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::write_texture");
        api_log!("Queue::write_texture");

        self.device.check_is_valid()?;

        let dst = destination.texture.get()?;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        self.same_device_as(dst.as_ref())?;

        dst.check_usage(wgt::TextureUsages::COPY_DST)
            .map_err(TransferError::MissingTextureUsage)?;

        let (hal_copy_size, array_layer_count) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, size, &dst)?;

        if !conv::is_valid_copy_dst_texture_format(dst.desc.format, destination.aspect) {
            return Err(TransferError::CopyToForbiddenTextureFormat {
                format: dst.desc.format,
                aspect: destination.aspect,
            }
            .into());
        }

        validate_texture_buffer_copy(
            &destination,
            dst_base.aspect,
            &dst.desc,
            data_layout.offset,
            false,
        )?;

        let (required_bytes_in_copy, _source_bytes_per_array_layer) = validate_linear_texture_data(
            data_layout,
            dst.desc.format,
            destination.aspect,
            data.len() as wgt::BufferAddress,
            CopySide::Source,
            size,
            false,
        )?;

        if dst.desc.format.is_depth_stencil_format() {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::DEPTH_TEXTURE_AND_BUFFER_COPIES)
                .map_err(TransferError::from)?;
        }

        let snatch_guard = self.device.snatchable_lock.read();

        let dst_raw = dst.try_raw(&snatch_guard)?;

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &snatch_guard,
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

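        // Compute the staging layout: rows are tightly packed block rows, but the staging
        // stride must be aligned to the device's buffer-copy pitch requirement.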
        let (block_width, block_height) = dst.desc.format.block_dimensions();
        let width_in_blocks = size.width / block_width;
        let height_in_blocks = size.height / block_height;

        let block_size = dst
            .desc
            .format
            .block_copy_size(Some(destination.aspect))
            .unwrap();
        let bytes_in_last_row = width_in_blocks * block_size;

        let bytes_per_row = data_layout.bytes_per_row.unwrap_or(bytes_in_last_row);
        let rows_per_image = data_layout.rows_per_image.unwrap_or(height_in_blocks);

        let bytes_per_row_alignment = get_lowest_common_denom(
            self.device.alignments.buffer_copy_pitch.get() as u32,
            block_size,
        );
        let stage_bytes_per_row = wgt::math::align_to(bytes_in_last_row, bytes_per_row_alignment);

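        // If the caller's stride already matches the required staging stride, the data can
        // be copied into the staging buffer in one go; otherwise copy it row by row into
        // the padded layout.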
        let staging_buffer = if stage_bytes_per_row == bytes_per_row {
            profiling::scope!("copy aligned");
            let stage_size = wgt::BufferSize::new(required_bytes_in_copy).unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            staging_buffer.write(&data[data_layout.offset as usize..]);
            staging_buffer
        } else {
            profiling::scope!("copy chunked");
            let block_rows_in_copy =
                (size.depth_or_array_layers - 1) * rows_per_image + height_in_blocks;
            let stage_size =
                wgt::BufferSize::new(stage_bytes_per_row as u64 * block_rows_in_copy as u64)
                    .unwrap();
            let mut staging_buffer = StagingBuffer::new(&self.device, stage_size)?;
            for layer in 0..size.depth_or_array_layers {
                let rows_offset = layer * rows_per_image;
                for row in rows_offset..rows_offset + height_in_blocks {
                    let src_offset = data_layout.offset as u32 + row * bytes_per_row;
                    let dst_offset = row * stage_bytes_per_row;
                    unsafe {
                        staging_buffer.write_with_offset(
                            data,
                            src_offset as isize,
                            dst_offset as isize,
                            bytes_in_last_row as usize,
                        )
                    }
                }
            }
            staging_buffer
        };

        let staging_buffer = staging_buffer.flush();

        let regions = (0..array_layer_count)
            .map(|array_layer_offset| {
                let mut texture_base = dst_base.clone();
                texture_base.array_layer += array_layer_offset;
                hal::BufferTextureCopy {
                    buffer_layout: wgt::TexelCopyBufferLayout {
                        offset: array_layer_offset as u64
                            * rows_per_image as u64
                            * stage_bytes_per_row as u64,
                        bytes_per_row: Some(stage_bytes_per_row),
                        rows_per_image: Some(rows_per_image),
                    },
                    texture_base,
                    size: hal_copy_size,
                }
            })
            .collect::<Vec<_>>();

        {
            let buffer_barrier = hal::BufferBarrier {
                buffer: staging_buffer.raw(),
                usage: hal::StateTransition {
                    from: wgt::BufferUses::MAP_WRITE,
                    to: wgt::BufferUses::COPY_SRC,
                },
            };

            let mut trackers = self.device.trackers.lock();
            let transition =
                trackers
                    .textures
                    .set_single(&dst, selector, wgt::TextureUses::COPY_DST);
            let texture_barriers = transition
                .map(|pending| pending.into_hal(dst_raw))
                .collect::<Vec<_>>();

            unsafe {
                encoder.transition_textures(&texture_barriers);
                encoder.transition_buffers(&[buffer_barrier]);
                encoder.copy_buffer_to_texture(staging_buffer.raw(), dst_raw, &regions);
            }
        }

        pending_writes.consume(staging_buffer);
        pending_writes.insert_texture(&dst);

        Ok(())
    }

    #[cfg(webgl)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: wgt::CopyExternalImageDestInfo<Fallible<Texture>>,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        profiling::scope!("Queue::copy_external_image_to_texture");

        self.device.check_is_valid()?;

        if size.width == 0 || size.height == 0 || size.depth_or_array_layers == 0 {
            log::trace!("Ignoring write_texture of size 0");
            return Ok(());
        }

        let mut needs_flag = false;
        needs_flag |= matches!(source.source, wgt::ExternalImageSource::OffscreenCanvas(_));
        needs_flag |= source.origin != wgt::Origin2d::ZERO;
        needs_flag |= destination.color_space != wgt::PredefinedColorSpace::Srgb;
        #[allow(clippy::bool_comparison)]
        if matches!(source.source, wgt::ExternalImageSource::ImageBitmap(_)) {
            needs_flag |= source.flip_y != false;
            needs_flag |= destination.premultiplied_alpha != false;
        }

        if needs_flag {
            self.device
                .require_downlevel_flags(wgt::DownlevelFlags::UNRESTRICTED_EXTERNAL_TEXTURE_COPIES)
                .map_err(TransferError::from)?;
        }

        let src_width = source.source.width();
        let src_height = source.source.height();

        let dst = destination.texture.get()?;
        let premultiplied_alpha = destination.premultiplied_alpha;
        let destination = wgt::TexelCopyTextureInfo {
            texture: (),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };

        if !conv::is_valid_external_image_copy_dst_texture_format(dst.desc.format) {
            return Err(
                TransferError::ExternalCopyToForbiddenTextureFormat(dst.desc.format).into(),
            );
        }
        if dst.desc.dimension != wgt::TextureDimension::D2 {
            return Err(TransferError::InvalidDimensionExternal.into());
        }
        dst.check_usage(wgt::TextureUsages::COPY_DST | wgt::TextureUsages::RENDER_ATTACHMENT)
            .map_err(TransferError::MissingTextureUsage)?;
        if dst.desc.sample_count != 1 {
            return Err(TransferError::InvalidSampleCount {
                sample_count: dst.desc.sample_count,
            }
            .into());
        }

        if source.origin.x + size.width > src_width {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.x,
                end_offset: source.origin.x + size.width,
                texture_size: src_width,
                dimension: crate::resource::TextureErrorDimension::X,
                side: CopySide::Source,
            }
            .into());
        }
        if source.origin.y + size.height > src_height {
            return Err(TransferError::TextureOverrun {
                start_offset: source.origin.y,
                end_offset: source.origin.y + size.height,
                texture_size: src_height,
                dimension: crate::resource::TextureErrorDimension::Y,
                side: CopySide::Source,
            }
            .into());
        }
        if size.depth_or_array_layers != 1 {
            return Err(TransferError::TextureOverrun {
                start_offset: 0,
                end_offset: size.depth_or_array_layers,
                texture_size: 1,
                dimension: crate::resource::TextureErrorDimension::Z,
                side: CopySide::Source,
            }
            .into());
        }

        let (hal_copy_size, _) =
            validate_texture_copy_range(&destination, &dst.desc, CopySide::Destination, &size)?;

        let (selector, dst_base) = extract_texture_selector(&destination, &size, &dst)?;

        let mut pending_writes = self.pending_writes.lock();
        let encoder = pending_writes.activate();

        let init_layer_range = if dst.desc.dimension == wgt::TextureDimension::D3 {
            0..1
        } else {
            destination.origin.z..destination.origin.z + size.depth_or_array_layers
        };
        let mut dst_initialization_status = dst.initialization_status.write();
        if dst_initialization_status.mips[destination.mip_level as usize]
            .check(init_layer_range.clone())
            .is_some()
        {
            if has_copy_partial_init_tracker_coverage(&size, destination.mip_level, &dst.desc) {
                for layer_range in dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range)
                    .collect::<Vec<core::ops::Range<u32>>>()
                {
                    let mut trackers = self.device.trackers.lock();
                    crate::command::clear_texture(
                        &dst,
                        TextureInitRange {
                            mip_range: destination.mip_level..(destination.mip_level + 1),
                            layer_range,
                        },
                        encoder,
                        &mut trackers.textures,
                        &self.device.alignments,
                        self.device.zero_buffer.as_ref(),
                        &self.device.snatchable_lock.read(),
                        self.device.instance_flags,
                    )
                    .map_err(QueueWriteError::from)?;
                }
            } else {
                dst_initialization_status.mips[destination.mip_level as usize]
                    .drain(init_layer_range);
            }
        }

        let snatch_guard = self.device.snatchable_lock.read();
        let dst_raw = dst.try_raw(&snatch_guard)?;

        let regions = hal::TextureCopy {
            src_base: hal::TextureCopyBase {
                mip_level: 0,
                array_layer: 0,
                origin: source.origin.to_3d(0),
                aspect: hal::FormatAspects::COLOR,
            },
            dst_base,
            size: hal_copy_size,
        };

        let mut trackers = self.device.trackers.lock();
        let transitions = trackers
            .textures
            .set_single(&dst, selector, wgt::TextureUses::COPY_DST);

        let encoder_webgl = encoder
            .as_any_mut()
            .downcast_mut::<hal::gles::CommandEncoder>()
            .unwrap();
        let dst_raw_webgl = dst_raw
            .as_any()
            .downcast_ref::<hal::gles::Texture>()
            .unwrap();
        let transitions_webgl = transitions.map(|pending| {
            let dyn_transition = pending.into_hal(dst_raw);
            hal::TextureBarrier {
                texture: dst_raw_webgl,
                range: dyn_transition.range,
                usage: dyn_transition.usage,
            }
        });

        use hal::CommandEncoder as _;
        unsafe {
            encoder_webgl.transition_textures(transitions_webgl);
            encoder_webgl.copy_external_image_to_texture(
                source,
                dst_raw_webgl,
                premultiplied_alpha,
                iter::once(regions),
            );
        }

        Ok(())
    }

    pub fn submit(
        &self,
        command_buffers: &[Arc<CommandBuffer>],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        profiling::scope!("Queue::submit");
        api_log!("Queue::submit");

        let submit_index;

        let res = 'error: {
            let snatch_guard = self.device.snatchable_lock.read();

            let mut fence = self.device.fence.write();

            let mut command_index_guard = self.device.command_indices.write();
            command_index_guard.active_submission_index += 1;
            submit_index = command_index_guard.active_submission_index;

            if let Err(e) = self.device.check_is_valid() {
                break 'error Err(e.into());
            }

            let mut active_executions = Vec::new();

            let mut used_surface_textures = track::TextureUsageScope::default();

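            // Surface textures touched by this submission, keyed by pointer so each one is
            // kept alive (and later transitioned to PRESENT) exactly once.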
            let mut submit_surface_textures_owned = FastHashMap::default();

            {
                if !command_buffers.is_empty() {
                    profiling::scope!("prepare");

                    let mut first_error = None;

                    for command_buffer in command_buffers {
                        profiling::scope!("process command buffer");

                        used_surface_textures.set_size(self.device.tracker_indices.textures.size());

                        #[allow(unused_mut)]
                        let mut cmd_buf_data = command_buffer.take_finished();

                        #[cfg(feature = "trace")]
                        if let Some(ref mut trace) = *self.device.trace.lock() {
                            if let Ok(ref mut cmd_buf_data) = cmd_buf_data {
                                trace.add(Action::Submit(
                                    submit_index,
                                    cmd_buf_data.commands.take().unwrap(),
                                ));
                            }
                        }

                        if first_error.is_some() {
                            continue;
                        }

                        let mut baked = match cmd_buf_data {
                            Ok(cmd_buf_data) => {
                                let res = validate_command_buffer(
                                    command_buffer,
                                    self,
                                    &cmd_buf_data,
                                    &snatch_guard,
                                    &mut submit_surface_textures_owned,
                                    &mut used_surface_textures,
                                    &mut command_index_guard,
                                );
                                if let Err(err) = res {
                                    first_error.get_or_insert(err);
                                    continue;
                                }
                                cmd_buf_data.into_baked_commands()
                            }
                            Err(err) => {
                                first_error.get_or_insert(err.into());
                                continue;
                            }
                        };

                        if let Err(e) = baked.encoder.open_pass(hal_label(
                            Some("(wgpu internal) Transit"),
                            self.device.instance_flags,
                        )) {
                            break 'error Err(e.into());
                        }

                        let mut trackers = self.device.trackers.lock();
                        if let Err(e) = baked.initialize_buffer_memory(&mut trackers, &snatch_guard)
                        {
                            break 'error Err(e.into());
                        }
                        if let Err(e) = baked.initialize_texture_memory(
                            &mut trackers,
                            &self.device,
                            &snatch_guard,
                        ) {
                            break 'error Err(e.into());
                        }

                        CommandEncoder::insert_barriers_from_device_tracker(
                            baked.encoder.raw.as_mut(),
                            &mut trackers,
                            &baked.trackers,
                            &snatch_guard,
                        );

                        if let Err(e) = baked.encoder.close_and_push_front() {
                            break 'error Err(e.into());
                        }

                        if !used_surface_textures.is_empty() {
                            if let Err(e) = baked.encoder.open_pass(hal_label(
                                Some("(wgpu internal) Present"),
                                self.device.instance_flags,
                            )) {
                                break 'error Err(e.into());
                            }
                            let texture_barriers = trackers
                                .textures
                                .set_from_usage_scope_and_drain_transitions(
                                    &used_surface_textures,
                                    &snatch_guard,
                                )
                                .collect::<Vec<_>>();
                            unsafe {
                                baked.encoder.raw.transition_textures(&texture_barriers);
                            };
                            if let Err(e) = baked.encoder.close() {
                                break 'error Err(e.into());
                            }
                            used_surface_textures = track::TextureUsageScope::default();
                        }

                        active_executions.push(EncoderInFlight {
                            inner: baked.encoder,
                            trackers: baked.trackers,
                            temp_resources: baked.temp_resources,
                            _indirect_draw_validation_resources: baked
                                .indirect_draw_validation_resources,
                            pending_buffers: FastHashMap::default(),
                            pending_textures: FastHashMap::default(),
                            pending_blas_s: FastHashMap::default(),
                        });
                    }

                    if let Some(first_error) = first_error {
                        break 'error Err(first_error);
                    }
                }
            }

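            // The pending-writes encoder (implicit queue writes) may also reference surface
            // textures, e.g. via `write_texture`; collect them so they receive the same
            // PRESENT transition treatment as textures used by the submitted command buffers.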
            let mut pending_writes = self.pending_writes.lock();

            {
                used_surface_textures.set_size(self.device.tracker_indices.textures.size());
                for texture in pending_writes.dst_textures.values() {
                    match texture.try_inner(&snatch_guard) {
                        Ok(TextureInner::Native { .. }) => {}
                        Ok(TextureInner::Surface { .. }) => {
                            submit_surface_textures_owned
                                .insert(Arc::as_ptr(texture), texture.clone());

                            unsafe {
                                used_surface_textures
                                    .merge_single(texture, None, wgt::TextureUses::PRESENT)
                                    .unwrap()
                            };
                        }
                        Err(DestroyedResourceError(_)) => {}
                    }
                }

                if !used_surface_textures.is_empty() {
                    let mut trackers = self.device.trackers.lock();

                    let texture_barriers = trackers
                        .textures
                        .set_from_usage_scope_and_drain_transitions(
                            &used_surface_textures,
                            &snatch_guard,
                        )
                        .collect::<Vec<_>>();
                    unsafe {
                        pending_writes
                            .command_encoder
                            .transition_textures(&texture_barriers);
                    };
                }
            }

            match pending_writes.pre_submit(&self.device.command_allocator, &self.device, self) {
                Ok(Some(pending_execution)) => {
                    active_executions.insert(0, pending_execution);
                }
                Ok(None) => {}
                Err(e) => break 'error Err(e.into()),
            }
            let hal_command_buffers = active_executions
                .iter()
                .flat_map(|e| e.inner.list.iter().map(|b| b.as_ref()))
                .collect::<Vec<_>>();

            {
                let mut submit_surface_textures =
                    SmallVec::<[&dyn hal::DynSurfaceTexture; 2]>::with_capacity(
                        submit_surface_textures_owned.len(),
                    );

                for texture in submit_surface_textures_owned.values() {
                    let raw = match texture.inner.get(&snatch_guard) {
                        Some(TextureInner::Surface { raw, .. }) => raw.as_ref(),
                        _ => unreachable!(),
                    };
                    submit_surface_textures.push(raw);
                }

                if let Err(e) = unsafe {
                    self.raw().submit(
                        &hal_command_buffers,
                        &submit_surface_textures,
                        (fence.as_mut(), submit_index),
                    )
                }
                .map_err(|e| self.device.handle_hal_error(e))
                {
                    break 'error Err(e.into());
                }

                drop(command_index_guard);

                self.device
                    .last_successful_submission_index
                    .fetch_max(submit_index, Ordering::SeqCst);
            }

            profiling::scope!("cleanup");

            self.lock_life()
                .track_submission(submit_index, active_executions);
            drop(pending_writes);

            let fence_guard = RwLockWriteGuard::downgrade(fence);
            let (closures, result) =
                self.device
                    .maintain(fence_guard, wgt::PollType::Poll, snatch_guard);
            match result {
                Ok(status) => {
                    debug_assert!(matches!(
                        status,
                        wgt::PollStatus::QueueEmpty | wgt::PollStatus::Poll
                    ));
                }
                Err(WaitIdleError::Device(err)) => break 'error Err(QueueSubmitError::Queue(err)),
                Err(WaitIdleError::WrongSubmissionIndex(..)) => {
                    unreachable!("Cannot get WrongSubmissionIndex from Poll")
                }
                Err(WaitIdleError::Timeout) => unreachable!("Cannot get Timeout from Poll"),
            };

            Ok(closures)
        };

        let callbacks = match res {
            Ok(ok) => ok,
            Err(e) => return Err((submit_index, e)),
        };

        callbacks.fire();

        self.device.lose_if_oom();

        api_log!("Queue::submit returned submit index {submit_index}");

        Ok(submit_index)
    }

    pub fn get_timestamp_period(&self) -> f32 {
        unsafe { self.raw().get_timestamp_period() }
    }

    pub fn on_submitted_work_done(
        &self,
        closure: SubmittedWorkDoneClosure,
    ) -> Option<SubmissionIndex> {
        api_log!("Queue::on_submitted_work_done");
        self.lock_life().add_work_done_closure(closure)
    }

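    /// Create a compacted copy of `blas`, using the compacted size reported by the
    /// compaction read-back. The copy is recorded on the pending-writes encoder, and the
    /// new BLAS is returned with `ALLOW_COMPACTION` cleared.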
    pub fn compact_blas(&self, blas: &Arc<Blas>) -> Result<Arc<Blas>, CompactBlasError> {
        profiling::scope!("Queue::compact_blas");
        api_log!("Queue::compact_blas");

        self.device.check_is_valid()?;
        self.same_device_as(blas.as_ref())?;

        let device = blas.device.clone();

        let snatch_guard = device.snatchable_lock.read();

        let BlasCompactState::Ready { size } = *blas.compacted_state.lock() else {
            return Err(CompactBlasError::BlasNotReady);
        };

        let mut size_info = blas.size_info;
        size_info.acceleration_structure_size = size;

        let mut pending_writes = self.pending_writes.lock();
        let cmd_buf_raw = pending_writes.activate();

        let raw = unsafe {
            device
                .raw()
                .create_acceleration_structure(&hal::AccelerationStructureDescriptor {
                    label: None,
                    size: size_info.acceleration_structure_size,
                    format: hal::AccelerationStructureFormat::BottomLevel,
                    allow_compaction: false,
                })
        }
        .map_err(DeviceError::from_hal)?;

        let src_raw = blas.try_raw(&snatch_guard)?;

        unsafe {
            cmd_buf_raw.copy_acceleration_structure_to_acceleration_structure(
                src_raw,
                raw.as_ref(),
                wgt::AccelerationStructureCopy::Compact,
            )
        };

        let handle = unsafe {
            device
                .raw()
                .get_acceleration_structure_device_address(raw.as_ref())
        };

        drop(snatch_guard);

        let mut command_indices_lock = device.command_indices.write();
        command_indices_lock.next_acceleration_structure_build_command_index += 1;
        let built_index =
            NonZeroU64::new(command_indices_lock.next_acceleration_structure_build_command_index)
                .unwrap();

        let new_blas = Arc::new(Blas {
            raw: Snatchable::new(raw),
            device: device.clone(),
            size_info,
            sizes: blas.sizes.clone(),
            flags: blas.flags & !AccelerationStructureFlags::ALLOW_COMPACTION,
            update_mode: blas.update_mode,
            built_index: RwLock::new(rank::BLAS_BUILT_INDEX, Some(built_index)),
            handle,
            label: blas.label.clone() + " compacted",
            tracking_data: TrackingData::new(blas.device.tracker_indices.blas_s.clone()),
            compaction_buffer: None,
            compacted_state: Mutex::new(rank::BLAS_COMPACTION_STATE, BlasCompactState::Compacted),
        });

        pending_writes.insert_blas(blas);
        pending_writes.insert_blas(&new_blas);

        Ok(new_blas)
    }
}

impl Global {
    pub fn queue_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        data: &[u8],
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteBuffer {
                id: buffer_id,
                data: data_path,
                range: buffer_offset..buffer_offset + data.len() as u64,
                queued: true,
            });
        }

        let buffer = self.hub.buffers.get(buffer_id);
        queue.write_buffer(buffer, buffer_offset, data)
    }

    pub fn queue_create_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_size: wgt::BufferSize,
        id_in: Option<id::StagingBufferId>,
    ) -> Result<(id::StagingBufferId, NonNull<u8>), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let (staging_buffer, ptr) = queue.create_staging_buffer(buffer_size)?;

        let fid = self.hub.staging_buffers.prepare(id_in);
        let id = fid.assign(staging_buffer);

        Ok((id, ptr))
    }

    pub fn queue_write_staging_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: wgt::BufferAddress,
        staging_buffer_id: id::StagingBufferId,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        let staging_buffer = self.hub.staging_buffers.remove(staging_buffer_id);
        queue.write_staging_buffer(buffer, buffer_offset, staging_buffer)
    }

    pub fn queue_validate_write_buffer(
        &self,
        queue_id: QueueId,
        buffer_id: id::BufferId,
        buffer_offset: u64,
        buffer_size: wgt::BufferSize,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let buffer = self.hub.buffers.get(buffer_id);
        queue.validate_write_buffer(buffer, buffer_offset, buffer_size)
    }

    pub fn queue_write_texture(
        &self,
        queue_id: QueueId,
        destination: &TexelCopyTextureInfo,
        data: &[u8],
        data_layout: &wgt::TexelCopyBufferLayout,
        size: &wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);

        #[cfg(feature = "trace")]
        if let Some(ref mut trace) = *queue.device.trace.lock() {
            let data_path = trace.make_binary("bin", data);
            trace.add(Action::WriteTexture {
                to: *destination,
                data: data_path,
                layout: *data_layout,
                size: *size,
            });
        }

        let destination = wgt::TexelCopyTextureInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
        };
        queue.write_texture(destination, data, data_layout, size)
    }

    #[cfg(webgl)]
    pub fn queue_copy_external_image_to_texture(
        &self,
        queue_id: QueueId,
        source: &wgt::CopyExternalImageSourceInfo,
        destination: crate::command::CopyExternalImageDestInfo,
        size: wgt::Extent3d,
    ) -> Result<(), QueueWriteError> {
        let queue = self.hub.queues.get(queue_id);
        let destination = wgt::CopyExternalImageDestInfo {
            texture: self.hub.textures.get(destination.texture),
            mip_level: destination.mip_level,
            origin: destination.origin,
            aspect: destination.aspect,
            color_space: destination.color_space,
            premultiplied_alpha: destination.premultiplied_alpha,
        };
        queue.copy_external_image_to_texture(source, destination, size)
    }

    pub fn queue_submit(
        &self,
        queue_id: QueueId,
        command_buffer_ids: &[id::CommandBufferId],
    ) -> Result<SubmissionIndex, (SubmissionIndex, QueueSubmitError)> {
        let queue = self.hub.queues.get(queue_id);
        let command_buffer_guard = self.hub.command_buffers.read();
        let command_buffers = command_buffer_ids
            .iter()
            .map(|id| command_buffer_guard.get(*id))
            .collect::<Vec<_>>();
        drop(command_buffer_guard);
        queue.submit(&command_buffers)
    }

    pub fn queue_get_timestamp_period(&self, queue_id: QueueId) -> f32 {
        let queue = self.hub.queues.get(queue_id);

        if queue.device.timestamp_normalizer.get().unwrap().enabled() {
            return 1.0;
        }

        queue.get_timestamp_period()
    }

    pub fn queue_on_submitted_work_done(
        &self,
        queue_id: QueueId,
        closure: SubmittedWorkDoneClosure,
    ) -> SubmissionIndex {
        api_log!("Queue::on_submitted_work_done {queue_id:?}");

        let queue = self.hub.queues.get(queue_id);
        let result = queue.on_submitted_work_done(closure);
        result.unwrap_or(0)
    }

    pub fn queue_compact_blas(
        &self,
        queue_id: QueueId,
        blas_id: BlasId,
        id_in: Option<BlasId>,
    ) -> (BlasId, Option<u64>, Option<CompactBlasError>) {
        api_log!("Queue::compact_blas {queue_id:?}, {blas_id:?}");

        let fid = self.hub.blas_s.prepare(id_in);

        let queue = self.hub.queues.get(queue_id);
        let blas = self.hub.blas_s.get(blas_id);
        let device = &queue.device;

        let error = 'error: {
            match device.require_features(wgpu_types::Features::EXPERIMENTAL_RAY_QUERY) {
                Ok(_) => {}
                Err(err) => break 'error err.into(),
            }

            let blas = match blas.get() {
                Ok(blas) => blas,
                Err(err) => break 'error err.into(),
            };

            let new_blas = match queue.compact_blas(&blas) {
                Ok(blas) => blas,
                Err(err) => break 'error err,
            };

            let old_blas_size = blas.size_info.acceleration_structure_size;
            let new_blas_size = new_blas.size_info.acceleration_structure_size;
            let handle = new_blas.handle;

            let id = fid.assign(Fallible::Valid(new_blas));

            api_log!("CommandEncoder::compact_blas {blas_id:?} (size: {old_blas_size}) -> {id:?} (size: {new_blas_size})");

            return (id, Some(handle), None);
        };

        let id = fid.assign(Fallible::Invalid(Arc::new(error.to_string())));

        (id, None, Some(error))
    }
}

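/// Validate a command buffer prior to submission: it must belong to the queue's device,
/// every buffer it uses must be unmapped and not destroyed, any surface textures it uses
/// are collected for presentation transitions, and its acceleration-structure actions
/// are validated.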
fn validate_command_buffer(
    command_buffer: &CommandBuffer,
    queue: &Queue,
    cmd_buf_data: &crate::command::CommandBufferMutable,
    snatch_guard: &SnatchGuard,
    submit_surface_textures_owned: &mut FastHashMap<*const Texture, Arc<Texture>>,
    used_surface_textures: &mut track::TextureUsageScope,
    command_index_guard: &mut RwLockWriteGuard<CommandIndices>,
) -> Result<(), QueueSubmitError> {
    command_buffer.same_device_as(queue)?;

    {
        profiling::scope!("check resource state");

        {
            profiling::scope!("buffers");
            for buffer in cmd_buf_data.trackers.buffers.used_resources() {
                buffer.check_destroyed(snatch_guard)?;

                match *buffer.map_state.lock() {
                    BufferMapState::Idle => (),
                    _ => return Err(QueueSubmitError::BufferStillMapped(buffer.error_ident())),
                }
            }
        }
        {
            profiling::scope!("textures");
            for texture in cmd_buf_data.trackers.textures.used_resources() {
                let should_extend = match texture.try_inner(snatch_guard)? {
                    TextureInner::Native { .. } => false,
                    TextureInner::Surface { .. } => {
                        submit_surface_textures_owned.insert(Arc::as_ptr(texture), texture.clone());

                        true
                    }
                };
                if should_extend {
                    unsafe {
                        used_surface_textures
                            .merge_single(texture, None, wgt::TextureUses::PRESENT)
                            .unwrap();
                    };
                }
            }
        }

        if let Err(e) =
            cmd_buf_data.validate_acceleration_structure_actions(snatch_guard, command_index_guard)
        {
            return Err(e.into());
        }
    }
    Ok(())
}