// wgpu/api/command_buffer_actions.rs
1use alloc::{sync::Arc, vec::Vec};
2
3use crate::{util::Mutex, *};
4
/// A deferred buffer mapping request captured during encoding (or a pass)
/// and executed later when the command buffer is submitted.
pub(crate) struct DeferredBufferMapping {
    // Buffer to map once the owning command buffer is submitted.
    pub buffer: api::Buffer,
    // Whether the range is mapped for reading or writing.
    pub mode: MapMode,
    // Start of the mapped range, in bytes from the beginning of the buffer.
    pub offset: BufferAddress,
    // Length of the mapped range, in bytes.
    pub size: BufferAddress,
    // Invoked when the deferred `map_async` call resolves.
    pub callback: dispatch::BufferMapCallback,
}
14
/// Deferred actions shared (via `Arc<Mutex<_>>`) between an encoder and the
/// passes recorded on it, so either can push actions for the same submission.
pub(super) type SharedDeferredCommandBufferActions = Arc<Mutex<DeferredCommandBufferActions>>;
16
/// Set of actions to take when the command buffer is submitted.
#[derive(Default)]
pub(crate) struct DeferredCommandBufferActions {
    // Buffer mappings to issue via `map_async` at submit time.
    pub buffer_mappings: Vec<DeferredBufferMapping>,
    // Callbacks to register with the queue's submitted-work-done mechanism.
    pub on_submitted_work_done_callbacks: Vec<dispatch::BoxSubmittedWorkDoneCallback>,
}
23
24impl DeferredCommandBufferActions {
25 pub fn append(&mut self, other: &mut Self) {
26 self.buffer_mappings.append(&mut other.buffer_mappings);
27 self.on_submitted_work_done_callbacks
28 .append(&mut other.on_submitted_work_done_callbacks);
29 }
30
31 pub fn execute(self, queue: &dispatch::DispatchQueue) {
32 for mapping in self.buffer_mappings {
33 mapping.buffer.map_async(
34 mapping.mode,
35 mapping.offset..mapping.offset + mapping.size,
36 mapping.callback,
37 );
38 }
39 for callback in self.on_submitted_work_done_callbacks {
40 queue.on_submitted_work_done(callback);
41 }
42 }
43}
44
45impl core::fmt::Debug for DeferredCommandBufferActions {
46 fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
47 f.debug_struct("DeferredCommandBufferActions")
48 .field("buffer_mappings.len()", &self.buffer_mappings.len())
49 .field(
50 "on_submitted_work_done_callbacks.len()",
51 &self.on_submitted_work_done_callbacks.len(),
52 )
53 .finish()
54 }
55}
56
// We can't just implement this on CommandEncoders as by default passes make it so that
// you can't call any commands on the encoder while this is happening. As such, we need
// to implement these methods on the passes too. Use a macro to avoid massive code duplication
macro_rules! impl_deferred_command_buffer_actions {
    () => {
        /// On submission, maps the buffer to host (CPU) memory, making it available
        /// for reading or writing via [`get_mapped_range()`](Buffer::get_mapped_range).
        /// The buffer becomes accessible once the `callback` is invoked with [`Ok`].
        ///
        /// Use this when you need to submit work that uses the buffer before mapping it.
        /// Because that submission must happen before calling `map_async`, this method
        /// schedules the mapping for after submission, avoiding extra calls to
        /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and letting you start
        /// the mapping from a more convenient place.
        ///
        /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
        /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated
        /// into an event loop or run on a separate thread.
        ///
        /// The callback runs on the thread that first calls one of the above functions
        /// after the GPU work completes. There are no restrictions on the code you can run
        /// in the callback; however, on native the polling call will not return until the
        /// callback finishes, so keep callbacks short (set flags, send messages, etc.).
        ///
        /// While a buffer is mapped, it cannot be used by other commands; at any time,
        /// either the GPU or the CPU has exclusive access to the buffer’s contents.
        ///
        /// # Panics
        ///
        /// - If `bounds` is outside the bounds of `buffer`.
        ///
        /// # Panics During Submit
        ///
        /// - If the buffer is already mapped.
        /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
        /// - If `bounds` is outside of the bounds of `buffer`.
        /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
        /// - If `bounds` has a length that is not a multiple of 4.
        ///
        /// [q::s]: Queue::submit
        /// [i::p_a]: Instance::poll_all
        /// [d::p]: Device::poll
        /// [CEmbos]: CommandEncoder::map_buffer_on_submit
        /// [CBmbos]: CommandBuffer::map_buffer_on_submit
        /// [RPmbos]: RenderPass::map_buffer_on_submit
        /// [CPmbos]: ComputePass::map_buffer_on_submit
        pub fn map_buffer_on_submit<S: core::ops::RangeBounds<BufferAddress>>(
            &self,
            buffer: &api::Buffer,
            mode: MapMode,
            bounds: S,
            callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
        ) {
            // Resolve the user-supplied range against the buffer's full size now
            // (this is where the eager out-of-bounds panic documented above occurs).
            let (offset, size) = range_to_offset_size(bounds, buffer.size);
            // Record the request; the actual `map_async` call is issued when the
            // command buffer's deferred actions are executed at submit time.
            self.actions.lock().buffer_mappings.push(
                crate::api::command_buffer_actions::DeferredBufferMapping {
                    buffer: buffer.clone(),
                    mode,
                    offset,
                    size,
                    callback: alloc::boxed::Box::new(callback),
                },
            );
        }

        /// Registers a callback that is invoked when this command buffer’s work finishes
        /// executing on the GPU. When this callback runs, all mapped-buffer callbacks
        /// registered for the same submission are guaranteed to have been called.
        ///
        /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
        /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated
        /// into an event loop or run on a separate thread.
        ///
        /// The callback runs on the thread that first calls one of the above functions
        /// after the GPU work completes. There are no restrictions on the code you can run
        /// in the callback; however, on native the polling call will not return until the
        /// callback finishes, so keep callbacks short (set flags, send messages, etc.).
        ///
        /// [q::s]: Queue::submit
        /// [i::p_a]: Instance::poll_all
        /// [d::p]: Device::poll
        pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
            // Box the callback and queue it; it is handed to the queue's
            // submitted-work-done mechanism when the deferred actions execute.
            self.actions
                .lock()
                .on_submitted_work_done_callbacks
                .push(alloc::boxed::Box::new(callback));
        }
    };
}
146
147pub(crate) use impl_deferred_command_buffer_actions;