wgpu/api/command_buffer_actions.rs

use alloc::{sync::Arc, vec::Vec};
use core::num::NonZeroU64;

use crate::{util::Mutex, *};

/// A deferred buffer mapping request captured during encoding (or a pass)
/// and executed later when the command buffer is submitted.
pub(crate) struct DeferredBufferMapping {
    pub buffer: api::Buffer,
    pub mode: MapMode,
    pub offset: u64,
    pub size: NonZeroU64,
    pub callback: dispatch::BufferMapCallback,
}

pub(super) type SharedDeferredCommandBufferActions = Arc<Mutex<DeferredCommandBufferActions>>;

/// Set of actions to take when the command buffer is submitted.
#[derive(Default)]
pub(crate) struct DeferredCommandBufferActions {
    pub buffer_mappings: Vec<DeferredBufferMapping>,
    pub on_submitted_work_done_callbacks: Vec<dispatch::BoxSubmittedWorkDoneCallback>,
}

impl DeferredCommandBufferActions {
    pub fn append(&mut self, other: &mut Self) {
        self.buffer_mappings.append(&mut other.buffer_mappings);
        self.on_submitted_work_done_callbacks
            .append(&mut other.on_submitted_work_done_callbacks);
    }

    pub fn execute(self, queue: &dispatch::DispatchQueue) {
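        // Issue the deferred mappings before registering the work-done callbacks.
        // `map_async` requests issued before an `on_submitted_work_done` callback
        // complete before it fires, which is what lets the docs below promise that
        // all mapped-buffer callbacks for a submission have run by the time its
        // work-done callback runs.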
        for mapping in self.buffer_mappings {
            mapping.buffer.map_async(
                mapping.mode,
                mapping.offset..mapping.offset + mapping.size.get(),
                mapping.callback,
            );
        }
        for callback in self.on_submitted_work_done_callbacks {
            queue.on_submitted_work_done(callback);
        }
    }
}

impl core::fmt::Debug for DeferredCommandBufferActions {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("DeferredCommandBufferActions")
            .field("buffer_mappings.len()", &self.buffer_mappings.len())
            .field(
                "on_submitted_work_done_callbacks.len()",
                &self.on_submitted_work_done_callbacks.len(),
            )
            .finish()
    }
}

// We can't implement these methods solely on `CommandEncoder`: while a pass is
// recording, it borrows the encoder, so no other commands can be issued on the
// encoder until the pass ends. We therefore implement the same methods on the
// passes as well, and use a macro to avoid duplicating the code.
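// The macro body uses `self.actions`, so each invoking type must have an
// `actions: SharedDeferredCommandBufferActions` field. Usage sketch (assumed,
// not shown in this file):
// `impl CommandEncoder { impl_deferred_command_buffer_actions!(); }`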
macro_rules! impl_deferred_command_buffer_actions {
    () => {
        /// On submission, maps the buffer to host (CPU) memory, making it available
        /// for reading or writing via [`get_mapped_range()`](Buffer::get_mapped_range).
        /// The buffer becomes accessible once the `callback` is invoked with [`Ok`].
        ///
        /// Use this when you need to submit work that uses the buffer before mapping it.
        /// Because that submission must happen before calling `map_async`, this method
        /// schedules the mapping for after submission, avoiding extra calls to
        /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and letting you start
        /// the mapping from a more convenient place.
        ///
        /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
        /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated
        /// into an event loop or run on a separate thread.
        ///
        /// The callback runs on the thread that first calls one of the above functions
        /// after the GPU work completes. There are no restrictions on the code you can run
        /// in the callback; however, on native the polling call will not return until the
        /// callback finishes, so keep callbacks short (set flags, send messages, etc.).
        ///
        /// While a buffer is mapped, it cannot be used by other commands; at any time,
        /// either the GPU or the CPU has exclusive access to the buffer’s contents.
        ///
        /// # Panics
        ///
        /// - If `bounds` is outside the bounds of `buffer`.
        /// - If `bounds` has a length less than 1.
        ///
        /// # Panics During Submit
        ///
        /// - If the buffer is already mapped.
        /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
        /// - If the endpoints of `bounds` are not aligned to [`MAP_ALIGNMENT`] within the buffer.
        ///
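        /// # Examples
        ///
        /// A minimal sketch, assuming a `device`, a `queue`, and a `buffer` whose
        /// [`BufferUsages`] include `MAP_READ` are already in scope:
        ///
        /// ```no_run
        /// # let (device, queue, buffer): (wgpu::Device, wgpu::Queue, wgpu::Buffer) = unimplemented!();
        /// let encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
        /// // ... record work that writes `buffer` ...
        ///
        /// // Schedule the mapping; it is issued when the command buffer is submitted.
        /// encoder.map_buffer_on_submit(&buffer, wgpu::MapMode::Read, .., |result| {
        ///     if result.is_ok() {
        ///         // Once a poll delivers this callback, the data can be read
        ///         // with `buffer.get_mapped_range(..)`.
        ///     }
        /// });
        ///
        /// queue.submit([encoder.finish()]);
        /// ```
        ///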
        /// [q::s]: Queue::submit
        /// [i::p_a]: Instance::poll_all
        /// [d::p]: Device::poll
        pub fn map_buffer_on_submit<S: core::ops::RangeBounds<BufferAddress>>(
            &self,
            buffer: &api::Buffer,
            mode: MapMode,
            bounds: S,
            callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
        ) {
            let (offset, size) = range_to_offset_size(bounds, buffer.size);
            self.actions.lock().buffer_mappings.push(
                crate::api::command_buffer_actions::DeferredBufferMapping {
                    buffer: buffer.clone(),
                    mode,
                    offset,
                    size,
                    callback: alloc::boxed::Box::new(callback),
                },
            );
        }

        /// Registers a callback that is invoked when this command buffer’s work finishes
        /// executing on the GPU. When this callback runs, all mapped-buffer callbacks
        /// registered for the same submission are guaranteed to have been called.
        ///
        /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
        /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated
        /// into an event loop or run on a separate thread.
        ///
        /// The callback runs on the thread that first calls one of the above functions
        /// after the GPU work completes. There are no restrictions on the code you can run
        /// in the callback; however, on native the polling call will not return until the
        /// callback finishes, so keep callbacks short (set flags, send messages, etc.).
        ///
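        /// # Examples
        ///
        /// A minimal sketch, assuming `device` and `queue` are in scope and that
        /// [`Device::poll`] takes a `PollType` (true of recent `wgpu` versions):
        ///
        /// ```no_run
        /// # let (device, queue): (wgpu::Device, wgpu::Queue) = unimplemented!();
        /// let (sender, receiver) = std::sync::mpsc::channel();
        ///
        /// let encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
        /// encoder.on_submitted_work_done(move || {
        ///     // Keep the callback short: just notify the waiting thread.
        ///     let _ = sender.send(());
        /// });
        /// queue.submit([encoder.finish()]);
        ///
        /// // Drive callbacks; `Wait` blocks until the submitted work completes.
        /// let _ = device.poll(wgpu::PollType::Wait);
        /// receiver.recv().unwrap();
        /// ```
        ///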
        /// [q::s]: Queue::submit
        /// [i::p_a]: Instance::poll_all
        /// [d::p]: Device::poll
        pub fn on_submitted_work_done(&self, callback: impl FnOnce() + Send + 'static) {
            self.actions
                .lock()
                .on_submitted_work_done_callbacks
                .push(alloc::boxed::Box::new(callback));
        }
    };
}

pub(crate) use impl_deferred_command_buffer_actions;