wgpu/api/queue.rs

use alloc::boxed::Box;
use core::ops::{Deref, DerefMut};

use crate::{api::DeferredCommandBufferActions, *};

/// Handle to a command queue on a device.
///
/// A `Queue` executes recorded [`CommandBuffer`] objects and provides convenience methods
/// for writing to [buffers](Queue::write_buffer) and [textures](Queue::write_texture).
/// It can be created along with a [`Device`] by calling [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUQueue`](https://gpuweb.github.io/gpuweb/#gpu-queue).
#[derive(Debug, Clone)]
pub struct Queue {
    pub(crate) inner: dispatch::DispatchQueue,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Queue: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Queue => .inner);

impl Queue {
    #[cfg(custom)]
    /// Returns the custom implementation of `Queue` (if a custom backend is in use and it is internally `T`)
    pub fn as_custom<T: custom::QueueInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }

    #[cfg(custom)]
    /// Creates a `Queue` from a custom implementation.
    pub fn from_custom<T: custom::QueueInterface>(queue: T) -> Self {
        Self {
            inner: dispatch::DispatchQueue::custom(queue),
        }
    }
}

/// Identifier for a particular call to [`Queue::submit`]. Can be used
/// as part of an argument to [`Device::poll`] to block for a particular
/// submission to finish.
///
/// This type is unique to the Rust API of `wgpu`.
/// There is no analogue in the WebGPU specification.
#[derive(Debug, Clone)]
pub struct SubmissionIndex {
    pub(crate) index: u64,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(SubmissionIndex: Send, Sync);

/// Passed to [`Device::poll`] to control how and if it should block.
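///
/// For example, a [`SubmissionIndex`] returned by [`Queue::submit`] can be passed back to
/// [`Device::poll`] to block until that particular submission has finished. A minimal sketch,
/// assuming the `WaitForSubmissionIndex` variant; the device and queue here are placeholders:
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// # let queue: wgpu::Queue = todo!();
/// let index = queue.submit([]);
/// // Block until everything up to and including that submission has finished.
/// device.poll(wgpu::PollType::WaitForSubmissionIndex(index)).unwrap();
/// ```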
pub type PollType = wgt::PollType<SubmissionIndex>;
#[cfg(send_sync)]
static_assertions::assert_impl_all!(PollType: Send, Sync);

/// A write-only view into a staging buffer.
///
/// Reading from this view won't yield the contents of the destination buffer on the
/// GPU and is likely to be slow. Because of this, although [`AsMut`] is
/// implemented for this type, [`AsRef`] is not.
pub struct QueueWriteBufferView {
    queue: Queue,
    buffer: Buffer,
    offset: BufferAddress,
    inner: dispatch::DispatchQueueWriteBuffer,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(QueueWriteBufferView: Send, Sync);

impl QueueWriteBufferView {
    #[cfg(custom)]
    /// Returns the custom implementation of `QueueWriteBufferView` (if a custom backend is in use and it is internally `T`)
    pub fn as_custom<T: custom::QueueWriteBufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}

impl Deref for QueueWriteBufferView {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.inner.slice()
    }
}

impl DerefMut for QueueWriteBufferView {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl AsMut<[u8]> for QueueWriteBufferView {
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Drop for QueueWriteBufferView {
    fn drop(&mut self) {
        self.queue
            .inner
            .write_staging_buffer(&self.buffer.inner, self.offset, &self.inner);
    }
}

impl Queue {
    /// Copies the bytes of `data` into `buffer` starting at `offset`.
    ///
    /// The data must be written fully in-bounds, that is, `offset + data.len() <= buffer.len()`.
    ///
    /// # Performance considerations
    ///
    /// * Calls to `write_buffer()` do *not* submit the transfer to the GPU
    ///   immediately. They begin GPU execution only on the next call to
    ///   [`Queue::submit()`], just before the explicitly submitted commands.
    ///   To get a set of scheduled transfers started immediately,
    ///   it's fine to call `submit` with no command buffers at all:
    ///
    ///   ```no_run
    ///   # let queue: wgpu::Queue = todo!();
    ///   # let buffer: wgpu::Buffer = todo!();
    ///   # let data = [0u8];
    ///   queue.write_buffer(&buffer, 0, &data);
    ///   queue.submit([]);
    ///   ```
    ///
    ///   However, `data` will be immediately copied into staging memory, so the
    ///   caller may discard it any time after this call completes.
    ///
    /// * Consider using [`Queue::write_buffer_with()`] instead.
    ///   That method allows you to prepare your data directly within the staging
    ///   memory, rather than first placing it in a separate `[u8]` to be copied.
    ///   That is, `queue.write_buffer(b, offset, data)` is approximately equivalent
    ///   to `queue.write_buffer_with(b, offset, data.len()).copy_from_slice(data)`,
    ///   so use `write_buffer_with()` if you can do something smarter than that
    ///   [`copy_from_slice()`](slice::copy_from_slice). However, for small values
    ///   (e.g. a typical uniform buffer whose contents come from a `struct`),
    ///   there will likely be no difference, since the compiler will be able to
    ///   optimize out unnecessary copies regardless.
    ///
    /// * Currently on native platforms, for both of these methods, the staging
    ///   memory will be a new allocation. This will then be released after the
    ///   next submission finishes. To entirely avoid short-lived allocations, you might
    ///   be able to use [`StagingBelt`](crate::util::StagingBelt),
    ///   or buffers you explicitly create, map, and unmap yourself.
    pub fn write_buffer(&self, buffer: &Buffer, offset: BufferAddress, data: &[u8]) {
        self.inner.write_buffer(&buffer.inner, offset, data);
    }

    /// Prepares to write data to a buffer via a mapped staging buffer.
    ///
    /// This operation allocates a temporary buffer and then returns a
    /// [`QueueWriteBufferView`], which
    ///
    /// * dereferences to a `[u8]` of length `size`, and
    /// * when dropped, schedules a copy of its contents into `buffer` at `offset`.
    ///
    /// Therefore, this obtains the same result as [`Queue::write_buffer()`], but may
    /// allow you to skip one allocation and one copy of your data, if you are able to
    /// assemble your data directly into the returned [`QueueWriteBufferView`] instead of
    /// into a separate allocation like a [`Vec`](alloc::vec::Vec) first.
    ///
    /// The data must be written fully in-bounds, that is, `offset + size <= buffer.len()`.
    ///
    /// # Performance considerations
    ///
    /// * For small data not separately heap-allocated, there is no advantage of this
    ///   over [`Queue::write_buffer()`].
    ///
    /// * Reading from the returned view may be slow, and will not yield the current
    ///   contents of `buffer`. You should treat it as “write-only”.
    ///
    /// * Dropping the [`QueueWriteBufferView`] does *not* submit the
    ///   transfer to the GPU immediately. The transfer begins only on the next
    ///   call to [`Queue::submit()`] after the view is dropped, just before the
    ///   explicitly submitted commands. To get a set of scheduled transfers started
    ///   immediately, it's fine to call `queue.submit([])` with no command buffers at all.
    ///
    /// * Currently on native platforms, the staging memory will be a new allocation, which will
    ///   then be released after the next submission finishes. To entirely avoid short-lived
    ///   allocations, you might be able to use [`StagingBelt`](crate::util::StagingBelt),
    ///   or buffers you explicitly create, map, and unmap yourself.
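    ///
    /// # Example
    ///
    /// A minimal sketch of typical usage; the queue, buffer, and payload here are placeholders:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let buffer: wgpu::Buffer = todo!();
    /// let data = [1u8, 2, 3, 4];
    /// let size = wgpu::BufferSize::new(data.len() as u64).unwrap();
    /// if let Some(mut view) = queue.write_buffer_with(&buffer, 0, size) {
    ///     // Assemble the payload directly in staging memory.
    ///     view.copy_from_slice(&data);
    /// } // Dropping the view schedules the copy into `buffer`.
    /// queue.submit([]);
    /// ```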
    #[must_use]
    pub fn write_buffer_with(
        &self,
        buffer: &Buffer,
        offset: BufferAddress,
        size: BufferSize,
    ) -> Option<QueueWriteBufferView> {
        profiling::scope!("Queue::write_buffer_with");
        self.inner
            .validate_write_buffer(&buffer.inner, offset, size)?;
        let staging_buffer = self.inner.create_staging_buffer(size)?;
        Some(QueueWriteBufferView {
            queue: self.clone(),
            buffer: buffer.clone(),
            offset,
            inner: staging_buffer,
        })
    }

    /// Copies the bytes of `data` into a texture.
    ///
    /// * `data` contains the texels to be written, which must be in
    ///   [the same format as the texture](TextureFormat).
    /// * `data_layout` describes the memory layout of `data`, which does not necessarily
    ///   have to have tightly packed rows.
    /// * `texture` specifies the texture to write into, and the location within the
    ///   texture (coordinate offset, mip level) that will be overwritten.
    /// * `size` is the size, in texels, of the region to be written.
    ///
    /// This method fails if `size` overruns the size of `texture`, or if `data` is too short.
    ///
    /// # Performance considerations
    ///
    /// This operation has the same performance considerations as [`Queue::write_buffer()`];
    /// see its documentation for details.
    ///
    /// However, since there is no “mapped texture” like a mapped buffer,
    /// alternate techniques for writing to textures will generally consist of first copying
    /// the data to a buffer, then using [`CommandEncoder::copy_buffer_to_texture()`], or in
    /// some cases a compute shader, to copy texels from that buffer to the texture.
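    ///
    /// # Example
    ///
    /// A minimal sketch of writing one tightly packed 256×256 RGBA8 mip level; the texture and
    /// pixel data here are placeholders:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let texture: wgpu::Texture = todo!();
    /// # let pixels = vec![0u8; 256 * 256 * 4];
    /// queue.write_texture(
    ///     wgpu::TexelCopyTextureInfo {
    ///         texture: &texture,
    ///         mip_level: 0,
    ///         origin: wgpu::Origin3d::ZERO,
    ///         aspect: wgpu::TextureAspect::All,
    ///     },
    ///     &pixels,
    ///     wgpu::TexelCopyBufferLayout {
    ///         offset: 0,
    ///         bytes_per_row: Some(256 * 4), // 4 bytes per RGBA8 texel, rows tightly packed
    ///         rows_per_image: Some(256),
    ///     },
    ///     wgpu::Extent3d {
    ///         width: 256,
    ///         height: 256,
    ///         depth_or_array_layers: 1,
    ///     },
    /// );
    /// ```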
    pub fn write_texture(
        &self,
        texture: TexelCopyTextureInfo<'_>,
        data: &[u8],
        data_layout: TexelCopyBufferLayout,
        size: Extent3d,
    ) {
        self.inner.write_texture(texture, data, data_layout, size);
    }

    /// Schedules a copy of data from the external image `source` into the texture `dest`.
    #[cfg(web)]
    pub fn copy_external_image_to_texture(
        &self,
        source: &wgt::CopyExternalImageSourceInfo,
        dest: wgt::CopyExternalImageDestInfo<&api::Texture>,
        size: Extent3d,
    ) {
        self.inner
            .copy_external_image_to_texture(source, dest, size);
    }

    /// Submits a series of finished command buffers for execution.
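    ///
    /// A minimal sketch of recording and submitting one command buffer; the device and queue
    /// here are placeholders:
    ///
    /// ```no_run
    /// # let device: wgpu::Device = todo!();
    /// # let queue: wgpu::Queue = todo!();
    /// let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
    /// // ... record passes and copies on `encoder` here ...
    /// let submission_index = queue.submit([encoder.finish()]);
    /// ```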
    pub fn submit<I: IntoIterator<Item = CommandBuffer>>(
        &self,
        command_buffers: I,
    ) -> SubmissionIndex {
        // As submit drains the iterator (even on error), collect deferred actions
        // from each CommandBuffer along the way.
        let mut actions = DeferredCommandBufferActions::default();

        let mut command_buffers = command_buffers.into_iter().map(|comb| {
            actions.append(&mut comb.actions.lock());
            comb.buffer
        });
        let index = self.inner.submit(&mut command_buffers);

        // Execute all deferred actions after submit.
        actions.execute(&self.inner);

        SubmissionIndex { index }
    }

    /// Gets the number of nanoseconds each tick of a timestamp query represents.
    ///
    /// Returns zero if timestamp queries are unsupported.
    ///
    /// Timestamps are expressed in nanoseconds on WebGPU (see <https://gpuweb.github.io/gpuweb/#timestamp>),
    /// so this is always 1.0 on the web; on wgpu-core, a manual conversion is required.
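    ///
    /// For example, the difference between two resolved timestamp-query ticks can be converted
    /// to nanoseconds like this (a sketch; the tick values are placeholders):
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// # let (start_ticks, end_ticks): (u64, u64) = (0, 100);
    /// let elapsed_ns = (end_ticks - start_ticks) as f64 * queue.get_timestamp_period() as f64;
    /// ```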
    pub fn get_timestamp_period(&self) -> f32 {
        self.inner.get_timestamp_period()
    }

    /// Registers a callback that is invoked when the previous [`Queue::submit`] finishes executing
    /// on the GPU. When this callback runs, all mapped-buffer callbacks registered for the same
    /// submission are guaranteed to have been called.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
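    ///
    /// A minimal sketch of signalling completion through a channel; the queue here is a
    /// placeholder, and the device must still be polled elsewhere for the callback to fire:
    ///
    /// ```no_run
    /// # let queue: wgpu::Queue = todo!();
    /// let (sender, receiver) = std::sync::mpsc::channel();
    /// queue.on_submitted_work_done(move || {
    ///     // Keep the callback short: just signal the waiting thread.
    ///     let _ = sender.send(());
    /// });
    /// queue.submit([]);
    /// // ... once the device has been polled and the GPU work is done:
    /// // receiver.recv().unwrap();
    /// ```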
    pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
        self.inner.on_submitted_work_done(Box::new(callback));
    }

    /// Get the [`wgpu_hal`] queue from this `Queue`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct as the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the hal backend's queue type,
    /// [`A::Queue`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Queue")]
    #[doc = crate::hal_type_metal!("Queue")]
    #[doc = crate::hal_type_dx12!("Queue")]
    #[doc = crate::hal_type_gles!("Queue")]
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The queue is not from the backend specified by `A`.
    /// - The queue is from the `webgpu` or `custom` backend.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   However, the guard and handle may be dropped at any time.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Queue`]: hal::Api::Queue
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Queue> + WasmNotSendSync> {
        let queue = self.inner.as_core_opt()?;
        unsafe { queue.context.queue_as_hal::<A>(queue) }
    }

    /// Compacts a BLAS. [`Blas::prepare_compaction_async`] must have been called on it first, and
    /// the callback it was given must have run.
    ///
    /// The returned BLAS is more restricted than a normal BLAS because it may not be rebuilt or
    /// compacted.
    pub fn compact_blas(&self, blas: &Blas) -> Blas {
        let (handle, dispatch) = self.inner.compact_blas(&blas.inner);
        Blas {
            handle,
            inner: dispatch,
        }
    }
}