wgpu/api/compute_pass.rs
1use crate::*;
2
/// In-progress recording of a compute pass.
///
/// It can be created with [`CommandEncoder::begin_compute_pass`].
///
/// Corresponds to [WebGPU `GPUComputePassEncoder`](
/// https://gpuweb.github.io/gpuweb/#compute-pass-encoder).
#[derive(Debug)]
pub struct ComputePass<'encoder> {
    /// Backend-dispatched handle that performs the actual command recording.
    pub(crate) inner: dispatch::DispatchComputePass,

    /// This lifetime is used to protect the [`CommandEncoder`] from being used
    /// while the pass is alive. This needs to be PhantomDrop to prevent the lifetime
    /// from being shortened.
    pub(crate) _encoder_guard: crate::api::PhantomDrop<&'encoder ()>,
}
18
19#[cfg(send_sync)]
20static_assertions::assert_impl_all!(ComputePass<'_>: Send, Sync);
21
22crate::cmp::impl_eq_ord_hash_proxy!(ComputePass<'_> => .inner);
23
impl ComputePass<'_> {
    /// Drops the lifetime relationship to the parent command encoder, making usage of
    /// the encoder while this pass is recorded a run-time error instead.
    ///
    /// Attention: As long as the compute pass has not been ended, any mutating operation on the parent
    /// command encoder will cause a run-time error and invalidate it!
    /// By default, the lifetime constraint prevents this, but it can be useful
    /// to handle this at run time, such as when storing the pass and encoder in the same
    /// data structure.
    ///
    /// This operation has no effect on pass recording.
    /// It's a safe operation, since [`CommandEncoder`] is in a locked state as long as the pass is active
    /// regardless of the lifetime constraint or its absence.
    pub fn forget_lifetime(self) -> ComputePass<'static> {
        ComputePass {
            inner: self.inner,
            // Fresh guard with a 'static lifetime; from here on the encoder's
            // run-time lock (not the borrow checker) prevents misuse.
            _encoder_guard: crate::api::PhantomDrop::default(),
        }
    }

    /// Sets the active bind group for a given bind group index. The bind group layout
    /// in the active pipeline when a dispatch call is recorded must match the layout of this bind group.
    ///
    /// If the bind group has dynamic offsets, provide them in the binding order.
    /// These offsets have to be aligned to [`Limits::min_uniform_buffer_offset_alignment`]
    /// or [`Limits::min_storage_buffer_offset_alignment`] appropriately.
    ///
    /// `bind_group` may be anything convertible into `Option<&BindGroup>`, per the
    /// `where` bound below.
    pub fn set_bind_group<'a, BG>(&mut self, index: u32, bind_group: BG, offsets: &[DynamicOffset])
    where
        Option<&'a BindGroup>: From<BG>,
    {
        let bg: Option<&BindGroup> = bind_group.into();
        // Only the dispatcher-level handle is forwarded to the backend.
        let bg = bg.map(|bg| &bg.inner);
        self.inner.set_bind_group(index, bg, offsets);
    }

    /// Sets the active compute pipeline.
    pub fn set_pipeline(&mut self, pipeline: &ComputePipeline) {
        self.inner.set_pipeline(&pipeline.inner);
    }

    /// Inserts a single debug marker label at this point in the pass.
    pub fn insert_debug_marker(&mut self, label: &str) {
        self.inner.insert_debug_marker(label);
    }

    /// Opens a debug marker group; subsequently recorded commands are nested
    /// inside it until a matching [`Self::pop_debug_group`] call.
    pub fn push_debug_group(&mut self, label: &str) {
        self.inner.push_debug_group(label);
    }

    /// Closes the innermost debug marker group opened by [`Self::push_debug_group`].
    pub fn pop_debug_group(&mut self) {
        self.inner.pop_debug_group();
    }

    /// Dispatches compute work operations.
    ///
    /// `x`, `y` and `z` denote the number of work groups to dispatch in each dimension.
    pub fn dispatch_workgroups(&mut self, x: u32, y: u32, z: u32) {
        self.inner.dispatch_workgroups(x, y, z);
    }

    /// Dispatches compute work operations, based on the contents of the `indirect_buffer`.
    ///
    /// The structure expected in `indirect_buffer` at byte offset `indirect_offset` must
    /// conform to [`DispatchIndirectArgs`](crate::util::DispatchIndirectArgs).
    pub fn dispatch_workgroups_indirect(
        &mut self,
        indirect_buffer: &Buffer,
        indirect_offset: BufferAddress,
    ) {
        self.inner
            .dispatch_workgroups_indirect(&indirect_buffer.inner, indirect_offset);
    }

    #[cfg(custom)]
    /// Returns custom implementation of ComputePass (if custom backend and is internally T)
    pub fn as_custom<T: custom::ComputePassInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}
104
105/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
/// [`Features::PUSH_CONSTANTS`] must be enabled on the device in order to call these functions.
impl ComputePass<'_> {
    /// Set push constant data for subsequent dispatch calls.
    ///
    /// Write the bytes in `data` at offset `offset` within push constant
    /// storage. Both `offset` and the length of `data` must be
    /// multiples of [`PUSH_CONSTANT_ALIGNMENT`], which is always 4.
    ///
    /// For example, if `offset` is `4` and `data` is eight bytes long, this
    /// call will write `data` to bytes `4..12` of push constant storage.
    pub fn set_push_constants(&mut self, offset: u32, data: &[u8]) {
        self.inner.set_push_constants(offset, data);
    }
}
119
120/// [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`] must be enabled on the device in order to call these functions.
/// [`Features::TIMESTAMP_QUERY_INSIDE_PASSES`] must be enabled on the device in order to call these functions.
impl ComputePass<'_> {
    /// Issue a timestamp command at this point in the queue. The timestamp will be written to the specified query set, at the specified index.
    ///
    /// The written timestamp value must be multiplied by [`Queue::get_timestamp_period`]
    /// to get the value in nanoseconds. Absolute values have no meaning,
    /// but timestamps can be subtracted to get the time it takes
    /// for a string of operations to complete.
    pub fn write_timestamp(&mut self, query_set: &QuerySet, query_index: u32) {
        self.inner.write_timestamp(&query_set.inner, query_index);
    }
}
132
133/// [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
/// [`Features::PIPELINE_STATISTICS_QUERY`] must be enabled on the device in order to call these functions.
impl ComputePass<'_> {
    /// Start a pipeline statistics query on this compute pass. It can be ended with
    /// [`Self::end_pipeline_statistics_query`]. Pipeline statistics queries may not be nested.
    pub fn begin_pipeline_statistics_query(&mut self, query_set: &QuerySet, query_index: u32) {
        self.inner
            .begin_pipeline_statistics_query(&query_set.inner, query_index);
    }

    /// End the pipeline statistics query on this compute pass. It can be started with
    /// [`Self::begin_pipeline_statistics_query`]. Pipeline statistics queries may not be nested.
    pub fn end_pipeline_statistics_query(&mut self) {
        self.inner.end_pipeline_statistics_query();
    }
}
148
/// Describes the timestamp writes of a compute pass.
///
/// For use with [`ComputePassDescriptor`].
/// At least one of `beginning_of_pass_write_index` and `end_of_pass_write_index` must be `Some`.
///
/// Corresponds to [WebGPU `GPUComputePassTimestampWrites`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepasstimestampwrites).
#[derive(Clone, Debug)]
pub struct ComputePassTimestampWrites<'a> {
    /// The query set to write timestamps to.
    pub query_set: &'a QuerySet,
    /// The index of the query set at which a start timestamp of this pass is written, if any.
    pub beginning_of_pass_write_index: Option<u32>,
    /// The index of the query set at which an end timestamp of this pass is written, if any.
    pub end_of_pass_write_index: Option<u32>,
}
165#[cfg(send_sync)]
166static_assertions::assert_impl_all!(ComputePassTimestampWrites<'_>: Send, Sync);
167
/// Describes a compute pass.
///
/// For use with [`CommandEncoder::begin_compute_pass`].
///
/// Corresponds to [WebGPU `GPUComputePassDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpucomputepassdescriptor).
#[derive(Clone, Default, Debug)]
pub struct ComputePassDescriptor<'a> {
    /// Debug label of the compute pass. This will show up in graphics debuggers for easy identification.
    pub label: Label<'a>,
    /// Defines which timestamp values will be written for this pass, and where to write them to.
    ///
    /// Requires [`Features::TIMESTAMP_QUERY`] to be enabled.
    pub timestamp_writes: Option<ComputePassTimestampWrites<'a>>,
}
183#[cfg(send_sync)]
184static_assertions::assert_impl_all!(ComputePassDescriptor<'_>: Send, Sync);