wgpu/api/compute_pass.rs

use crate::{
    api::{impl_deferred_command_buffer_actions, SharedDeferredCommandBufferActions},
    *,
};

/// In-progress recording of a compute pass.
///
/// It can be created with [`CommandEncoder::begin_compute_pass`].
///
/// Corresponds to [WebGPU `GPUComputePassEncoder`](
/// https://gpuweb.github.io/gpuweb/#compute-pass-encoder).
#[derive(Debug)]
pub struct ComputePass<'encoder> {
    pub(crate) inner: dispatch::DispatchComputePass,

    /// Shared with CommandEncoder to enqueue deferred actions from within a pass.
    pub(crate) actions: SharedDeferredCommandBufferActions,

    /// This lifetime is used to protect the [`CommandEncoder`] from being used
    /// while the pass is alive. This needs to be PhantomDrop to prevent the lifetime
    /// from being shortened.
    pub(crate) _encoder_guard: crate::api::PhantomDrop<&'encoder ()>,
}

#[cfg(send_sync)]
static_assertions::assert_impl_all!(ComputePass<'_>: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(ComputePass<'_> => .inner);

impl ComputePass<'_> {
    /// Drops the lifetime relationship to the parent command encoder, making usage of
    /// the encoder while this pass is recorded a run-time error instead.
    ///
    /// Attention: As long as the compute pass has not been ended, any mutating operation on the parent
    /// command encoder will cause a run-time error and invalidate it!
    /// By default, the lifetime constraint prevents this, but it can be useful
    /// to handle this at run time, such as when storing the pass and encoder in the same
    /// data structure.
    ///
    /// This operation has no effect on pass recording.
    /// It's a safe operation, since [`CommandEncoder`] is in a locked state as long as the pass is active
    /// regardless of the lifetime constraint or its absence.
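    ///
    /// A minimal sketch of the "store pass and encoder together" case (assumes an
    /// `encoder` and a `pipeline` already exist; the names are illustrative only):
    ///
    /// ```ignore
    /// let mut pass = encoder
    ///     .begin_compute_pass(&wgpu::ComputePassDescriptor::default())
    ///     .forget_lifetime();
    /// // `pass` is now a `ComputePass<'static>`, so it can be stored next to `encoder`;
    /// // misuse of the encoder is reported at run time instead of compile time.
    /// pass.set_pipeline(&pipeline);
    /// ```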
    pub fn forget_lifetime(self) -> ComputePass<'static> {
        ComputePass {
            inner: self.inner,
            actions: self.actions,
            _encoder_guard: crate::api::PhantomDrop::default(),
        }
    }

    /// Sets the active bind group for a given bind group index. The bind group layout
    /// in the active pipeline when the `dispatch()` function is called must match the layout of this bind group.
    ///
    /// If the bind group has dynamic offsets, provide them in binding order.
    /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`]
    /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
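    ///
    /// A minimal sketch, assuming `bind_group` was created from a layout whose only
    /// dynamic binding is a uniform buffer and `offset` respects the alignment limits
    /// above (names are illustrative only):
    ///
    /// ```ignore
    /// pass.set_bind_group(0, &bind_group, &[offset]);
    /// // A bind group without dynamic bindings takes an empty offset slice.
    /// pass.set_bind_group(1, &other_bind_group, &[]);
    /// ```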
    pub fn set_bind_group<'a, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset])
    where
        Option<&'a BindGroup>: From<BG>,
    {
        let bg: Option<&BindGroup> = bind_group.into();
        let bg = bg.map(|bg| &bg.inner);
        self.inner.set_bind_group(index, bg, offsets);
    }

    /// Sets the active compute pipeline.
    pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) {
        self.inner.set_pipeline(&pipeline.inner);
    }

    /// Inserts a debug marker.
    pub fn insert_debug_marker(&mut self, label: &str) {
        self.inner.insert_debug_marker(label);
    }

    /// Starts recording commands into a new debug marker group.
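    ///
    /// A small sketch of pairing it with [`Self::pop_debug_group`] (the dispatch
    /// arguments are placeholders):
    ///
    /// ```ignore
    /// pass.push_debug_group("prefix sum");
    /// pass.dispatch_workgroups(groups, 1, 1);
    /// pass.pop_debug_group();
    /// ```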
    pub fn push_debug_group(&mut self, label: &str) {
        self.inner.push_debug_group(label);
    }

    /// Ends the innermost debug marker group started with [`Self::push_debug_group`].
    pub fn pop_debug_group(&mut self) {
        self.inner.pop_debug_group();
    }

    /// Dispatches compute work operations.
    ///
    /// `x`, `y` and `z` denote the number of work groups to dispatch in each dimension.
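    ///
    /// A minimal sketch of covering `total_items` items with a shader whose
    /// `@workgroup_size` is 64 in x (an assumption; adjust to the actual shader):
    ///
    /// ```ignore
    /// let total_items: u32 = 10_000;
    /// // Round up so every item is covered by at least one invocation.
    /// let groups = total_items.div_ceil(64);
    /// pass.dispatch_workgroups(groups, 1, 1);
    /// ```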
    pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
        self.inner.dispatch_workgroups(x, y, z);
    }

    /// Dispatches compute work operations, based on the contents of the `indirect_buffer`.
    ///
    /// The structure expected in `indirect_buffer` must conform to [`DispatchIndirectArgs`](crate::util::DispatchIndirectArgs).
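    ///
    /// A minimal sketch of filling such a buffer on the CPU (assumes a `device` is in
    /// scope and uses the `wgpu::util` helpers; in practice the arguments are often
    /// written by another compute shader instead):
    ///
    /// ```ignore
    /// use wgpu::util::DeviceExt as _;
    ///
    /// let args = wgpu::util::DispatchIndirectArgs { x: 64, y: 1, z: 1 };
    /// let indirect_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
    ///     label: Some("dispatch args"),
    ///     contents: args.as_bytes(),
    ///     usage: wgpu::BufferUsages::INDIRECT,
    /// });
    /// pass.dispatch_workgroups_indirect(&indirect_buffer, 0);
    /// ```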
    pub fn dispatch_workgroups_indirect(
        &mut self,
        indirect_buffer: &Buffer,
        indirect_offset: BufferAddress,
    ) {
        self.inner
            .dispatch_workgroups_indirect(&indirect_buffer.inner, indirect_offset);
    }

    impl_deferred_command_buffer_actions!();

    #[cfg(custom)]
    /// Returns the custom backend's implementation of `ComputePass`, if a custom backend
    /// is in use and its concrete type is `T`.
    pub fn as_custom<T: custom::ComputePassInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}

/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
impl ComputePass<'_> {
    /// Set push constant data for subsequent dispatch calls.
    ///
    /// Write the bytes in `data` at offset `offset` within push constant
    /// storage. Both `offset` and the length of `data` must be
    /// multiples of [`PUSH_CONSTANT_ALIGNMENT`], which is always 4.
    ///
    /// For example, if `offset` is `4` and `data` is eight bytes long, this
    /// call will write `data` to bytes `4..12` of push constant storage.
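    ///
    /// A minimal sketch of that example (assumes the pipeline layout declares a compute
    /// push constant range covering at least bytes `0..12`):
    ///
    /// ```ignore
    /// // Eight bytes written at offset 4, i.e. to bytes 4..12 of push constant storage.
    /// let data = 42u64.to_le_bytes();
    /// pass.set_push_constants(4, &data);
    /// ```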
    pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
        self.inner.set_push_constants(offset, data);
    }
}

/// [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`] must be enabled on the device in order to call these functions.
impl ComputePass<'_> {
    /// Issue a timestamp command at this point in the queue. The timestamp will be written to the specified query set, at the specified index.
    ///
    /// The written value must be multiplied by [`Queue::get_timestamp_period`] to get
    /// the value in nanoseconds. Absolute values have no meaning,
    /// but timestamps can be subtracted to get the time it takes
    /// for a string of operations to complete.
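    ///
    /// A minimal sketch of that conversion (hypothetical `queue`, `start_ticks`, and
    /// `end_ticks`, the latter read back after resolving the query set):
    ///
    /// ```ignore
    /// let period = queue.get_timestamp_period(); // nanoseconds per timestamp tick
    /// let elapsed_ns = (end_ticks - start_ticks) as f32 * period;
    /// ```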
    pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
        self.inner.write_timestamp(&query_set.inner, query_index);
    }
}

/// [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
impl ComputePass<'_> {
    /// Start a pipeline statistics query on this compute pass. It can be ended with
    /// `end_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
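    ///
    /// A minimal sketch, assuming `query_set` was created with
    /// `QueryType::PipelineStatistics(..)` (the dispatch arguments are placeholders):
    ///
    /// ```ignore
    /// pass.begin_pipeline_statistics_query(&query_set, 0);
    /// pass.dispatch_workgroups(groups, 1, 1);
    /// pass.end_pipeline_statistics_query();
    /// ```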
    pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
        self.inner
            .begin_pipeline_statistics_query(&query_set.inner, query_index);
    }

    /// End the pipeline statistics query on this compute pass. It can be started with
    /// `begin_pipeline_statistics_query`. Pipeline statistics queries may not be nested.
    pub fn end_pipeline_statistics_query(&mut self) {
        self.inner.end_pipeline_statistics_query();
    }
}

/// Describes the timestamp writes of a compute pass.
///
/// For use with [`ComputePassDescriptor`].
/// At least one of `beginning_of_pass_write_index` and `end_of_pass_write_index` must be `Some`.
///
/// Corresponds to [WebGPU `GPUComputePassTimestampWrites`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepasstimestampwrites).
#[derive(Clone, Debug)]
pub struct ComputePassTimestampWrites<'a> {
    /// The query set to write to.
    pub query_set: &'a QuerySet,
    /// The index of the query set at which a start timestamp of this pass is written, if any.
    pub beginning_of_pass_write_index: Option<u32>,
    /// The index of the query set at which an end timestamp of this pass is written, if any.
    pub end_of_pass_write_index: Option<u32>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(ComputePassTimestampWrites<'_>: Send, Sync);

/// Describes a compute pass.
///
/// For use with [`CommandEncoder::begin_compute_pass`].
///
/// Corresponds to [WebGPU `GPUComputePassDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepassdescriptor).
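///
/// A minimal sketch of beginning a timed pass (assumes `Features::TIMESTAMP_QUERY` is
/// enabled and `query_set` was created with `QueryType::Timestamp`):
///
/// ```ignore
/// let pass = encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
///     label: Some("timed pass"),
///     timestamp_writes: Some(wgpu::ComputePassTimestampWrites {
///         query_set: &query_set,
///         beginning_of_pass_write_index: Some(0),
///         end_of_pass_write_index: Some(1),
///     }),
/// });
/// ```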
#[derive(Clone, Default, Debug)]
pub struct ComputePassDescriptor<'a> {
    /// Debug label of the compute pass. This will show up in graphics debuggers for easy identification.
    pub label: Label<'a>,
    /// Defines which timestamp values will be written for this pass, and where to write them to.
    ///
    /// Requires [`Features::TIMESTAMP_QUERY`] to be enabled.
    pub timestamp_writes: Option<ComputePassTimestampWrites<'a>>,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(ComputePassDescriptor<'_>: Send, Sync);