// wgpu/api/buffer.rs

use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::{
    error, fmt,
    ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};

use crate::util::Mutex;
use crate::*;

/// Handle to a GPU-accessible buffer.
///
/// A `Buffer` is a memory allocation for use by the GPU, somewhat analogous to
/// <code>[Box]&lt;[\[u8\]][primitive@slice]&gt;</code> in Rust.
/// The contents of buffers are untyped bytes; it is up to the application to
/// specify the interpretation of the bytes when the buffer is used, in ways
/// such as [`VertexBufferLayout`].
/// A single buffer can be used to hold multiple independent pieces of data at
/// different offsets (e.g. both vertices and indices for one or more meshes).
///
/// A `Buffer`'s bytes have "interior mutability": functions like
/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
/// prevents simultaneous reads and writes of buffer contents using run-time
/// checks.
///
/// Created with [`Device::create_buffer()`] or
/// [`DeviceExt::create_buffer_init()`].
///
/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
///
/// [mapping]: Buffer#mapping-buffers
///
/// # How to get your data into a buffer
///
/// Every `Buffer` starts with all bytes zeroed.
/// There are many ways to load data into a `Buffer`:
///
/// - When creating a buffer, you may set the [`mapped_at_creation`][mac] flag,
///   then write to its [`get_mapped_range_mut()`][Buffer::get_mapped_range_mut].
///   This only works when the buffer is created and has not yet been used by
///   the GPU, but it is all you need for buffers whose contents do not change
///   after creation.
///   - You may use [`DeviceExt::create_buffer_init()`] as a convenient way to
///     do that and copy data from a `&[u8]` you provide.
/// - After creation, you may use [`Buffer::map_async()`] to map it again;
///   however, you then need to wait until the GPU is no longer using the buffer
///   before you begin writing.
/// - You may use [`CommandEncoder::copy_buffer_to_buffer()`] to copy data into
///   this buffer from another buffer.
/// - You may use [`Queue::write_buffer()`] to copy data into the buffer from a
///   `&[u8]`. This uses a temporary “staging” buffer managed by `wgpu` to hold
///   the data.
///   - [`Queue::write_buffer_with()`] allows you to write directly into temporary
///     storage instead of providing a slice you already prepared, which may
///     allow *your* code to save the allocation of a [`Vec`] or such.
/// - You may use [`util::StagingBelt`] to manage a set of temporary buffers.
///   This may be more efficient than [`Queue::write_buffer_with()`] when you
///   have many small copies to perform, but requires more steps to use, and
///   tuning of the belt buffer size.
/// - You may write your own staging buffer management customized to your
///   application, based on mapped buffers and
///   [`CommandEncoder::copy_buffer_to_buffer()`].
/// - A GPU computation’s results can be stored in a buffer:
///   - A [compute shader][ComputePipeline] may write to a buffer bound as a
///     [storage buffer][BufferBindingType::Storage].
///   - A render pass may render to a texture which is then copied to a buffer
///     using [`CommandEncoder::copy_texture_to_buffer()`].
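///
/// For example, a minimal sketch of the [`Queue::write_buffer()`] approach
/// described above, assuming a `device` and `queue` are already in scope:
///
/// ```no_run
/// # let (device, queue): (wgpu::Device, wgpu::Queue) = todo!();
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: Some("example data"),
///     size: 16,
///     usage: wgpu::BufferUsages::VERTEX | wgpu::BufferUsages::COPY_DST,
///     mapped_at_creation: false,
/// });
/// let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
/// queue.write_buffer(&buffer, 0, bytemuck::cast_slice(&data));
/// ```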
///
/// # Mapping buffers
///
/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
/// `&mut [u8]` slice of bytes. Buffers created with the
/// [`mapped_at_creation`][mac] flag set are also mapped initially.
///
/// Depending on the hardware, the buffer could be memory shared between CPU and
/// GPU, so that the CPU has direct access to the same bytes the GPU will
/// consult; or it may be ordinary CPU memory, whose contents the system must
/// copy to/from the GPU as needed. This crate's API is designed to work the
/// same way in either case: at any given time, a buffer is either mapped and
/// available to the CPU, or unmapped and ready for use by the GPU, but never
/// both. This makes it impossible for either side to observe changes by the
/// other immediately, and any necessary transfers can be carried out when the
/// buffer transitions from one state to the other.
///
/// There are two ways to map a buffer:
///
/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
///   buffer is mapped when it is created. This is the easiest way to initialize
///   a new buffer. You can set `mapped_at_creation` on any kind of buffer,
///   regardless of its [`usage`] flags.
///
/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
///   flags, then you can call `buffer.slice(range).map_async(mode, callback)`
///   to map the portion of `buffer` given by `range`. This waits for the GPU to
///   finish using the buffer, and invokes `callback` as soon as the buffer is
///   safe for the CPU to access.
///
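/// For instance, a short sketch of the `mapped_at_creation` path (the size and
/// usage here are illustrative):
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: None,
///     size: 256,
///     usage: wgpu::BufferUsages::VERTEX,
///     mapped_at_creation: true,
/// });
/// buffer.get_mapped_range_mut(..).fill(0xFF); // write the initial contents
/// buffer.unmap(); // make the buffer available to the GPU
/// ```
///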
/// Once a buffer is mapped:
///
/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
///   [`BufferView`], which dereferences to a `&[u8]` that you can use to read
///   the buffer's contents.
///
/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
///   read and write the buffer's contents.
///
/// The given `range` must fall within the mapped portion of the buffer. These
/// methods panic if you attempt to create views that overlap and at least one
/// of them is mutable; read-only views may overlap each other freely.
///
/// While a buffer is mapped, you may not submit any commands to the GPU that
/// access it. You may record command buffers that use the buffer, but if you
/// submit them while the buffer is mapped, submission will panic.
///
/// When you are done using the buffer on the CPU, you must call
/// [`Buffer::unmap`] to make it available for use by the GPU again. All
/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
///
/// # Example
///
/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
/// with `f32` values like this:
///
/// ```
/// # #[cfg(feature = "noop")]
/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
/// # #[cfg(not(feature = "noop"))]
/// # let device: wgpu::Device = { return; };
/// #
/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
/// #     label: None,
/// #     size: 400,
/// #     usage: wgpu::BufferUsages::MAP_WRITE,
/// #     mapped_at_creation: false,
/// # });
/// let capturable = buffer.clone();
/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
///     if result.is_ok() {
///         let mut view = capturable.get_mapped_range_mut(..);
///         let floats: &mut [f32] = bytemuck::cast_slice_mut(&mut view);
///         floats.fill(42.0);
///         drop(view);
///         capturable.unmap();
///     }
/// });
/// ```
///
/// This code takes the following steps:
///
/// - First, it makes a cloned handle to the buffer for capture by
///   the callback passed to [`map_async`]. Since a [`map_async`] callback may be
///   invoked from another thread, interaction between the callback and the
///   thread calling [`map_async`] generally requires some sort of shared heap
///   data like this. In real code, there might be an [`Arc`] to some larger
///   structure that itself owns `buffer`.
///
/// - Then, it calls [`Buffer::map_async`] to request that the buffer's entire
///   contents be made accessible to the CPU ("mapped"). This may entail
///   waiting for previously enqueued operations on `buffer` to finish.
///   Although [`map_async`] itself always returns immediately, it saves the
///   callback function to be invoked later.
///
/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
///   shown in this example) determines that the buffer is mapped and ready for
///   the CPU to use, it invokes the callback function.
///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` slice referring to
///   the buffer's bytes.
///
/// - It then uses the [`bytemuck`] crate to turn the `&mut [u8]` into a `&mut
///   [f32]`, and calls the slice [`fill`] method to fill the buffer with a
///   useful value.
///
/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
///   the buffer. In real code, the callback would also need to do some sort of
///   synchronization to let the rest of the program know that it has completed
///   its work.
///
/// If using [`map_async`] directly is awkward, you may find it more convenient to
/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
/// However, those each have their own tradeoffs; the asynchronous nature of GPU
/// execution makes it hard to avoid friction altogether.
///
/// [`Arc`]: std::sync::Arc
/// [`map_async`]: Buffer::map_async
/// [`bytemuck`]: https://crates.io/crates/bytemuck
/// [`fill`]: slice::fill
///
/// ## Mapping buffers on the web
///
/// When compiled to WebAssembly and running in a browser content process,
/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
/// In this context, `wgpu` is further isolated from the GPU:
///
/// - Depending on the browser's WebGPU implementation, mapping and unmapping
///   buffers probably entails copies between WebAssembly linear memory and the
///   graphics driver's buffers.
///
/// - All modern web browsers isolate web content in its own sandboxed process,
///   which can only interact with the GPU via interprocess communication (IPC).
///   Although most browsers' IPC systems use shared memory for large data
///   transfers, there will still probably need to be copies into and out of the
///   shared memory buffers.
///
/// All of these copies contribute to the cost of buffer mapping in this
/// configuration.
///
/// [`usage`]: BufferDescriptor::usage
/// [mac]: BufferDescriptor::mapped_at_creation
/// [`MAP_READ`]: BufferUsages::MAP_READ
/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
/// [`DeviceExt::create_buffer_init()`]: util::DeviceExt::create_buffer_init
#[derive(Debug, Clone)]
pub struct Buffer {
    pub(crate) inner: dispatch::DispatchBuffer,
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    pub(crate) size: wgt::BufferAddress,
    pub(crate) usage: BufferUsages,
    // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);

impl Buffer {
    /// Return the binding view of the entire buffer.
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Return the binding view of the entire buffer.
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Buffer`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   However, the guard and handle may be dropped at any time.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        let buffer = self.inner.as_core_opt()?;
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
    /// the start and end of `bounds` are given in bytes.
    ///
    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
    /// buffer contents for access from the CPU. See the [`BufferSlice`]
    /// documentation for details.
    ///
    /// The `bounds` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
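    /// For example, a brief sketch (assuming a 400-byte buffer):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let first_half = buffer.slice(..200);
    /// let rest = buffer.slice(200..);
    /// assert_eq!(first_half.size().get() + rest.size().get(), buffer.size());
    /// ```
    ///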
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
    /// makes the buffer available for use by the GPU again.
    pub fn unmap(&self) {
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroy the associated native resources as soon as possible.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    ///
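    /// For example, a hedged sketch of the common native read-back pattern,
    /// assuming a recent `wgpu` in which [`Device::poll`] accepts
    /// `wgpu::PollType::Wait`, and a buffer created with
    /// [`BufferUsages::MAP_READ`]:
    ///
    /// ```no_run
    /// # let (device, buffer): (wgpu::Device, wgpu::Buffer) = todo!();
    /// buffer.map_async(wgpu::MapMode::Read, .., |result| {
    ///     result.expect("failed to map buffer");
    /// });
    /// // Block until the map has completed and the callback has run.
    /// device.poll(wgpu::PollType::Wait).unwrap();
    /// let view = buffer.get_mapped_range(..);
    /// println!("first byte: {}", view[0]);
    /// drop(view);
    /// buffer.unmap();
    /// ```
    ///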
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        self.slice(bounds).map_async(mode, callback)
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferView {
        self.slice(bounds).get_mapped_range()
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferViewMut {
        self.slice(bounds).get_mapped_range_mut()
    }

    #[cfg(custom)]
    /// Returns the custom backend implementation of this `Buffer`, if the
    /// buffer comes from a custom backend whose implementation type is `T`.
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}

/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
///
/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let slice = buffer.slice(10..20);
/// ```
///
/// This returns a slice referring to the second ten bytes of `buffer`. To get a
/// slice of the entire `Buffer`:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let whole_buffer_slice = buffer.slice(..);
/// ```
///
/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
/// a draw call should consult. You can also convert it to a [`BufferBinding`]
/// with `.into()`.
///
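/// For instance, a short sketch of the conversion (the slice bounds are
/// illustrative):
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let binding: wgpu::BufferBinding<'_> = buffer.slice(..256).into();
/// assert_eq!(binding.offset, 0);
/// ```
///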
/// To access the slice's contents on the CPU, you must first [map] the buffer,
/// and then call [`BufferSlice::get_mapped_range`] or
/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
/// contents. See the documentation on [mapping][map] for more details,
/// including example code.
///
/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
/// nobody else is modifying the `T` values to which it refers, a
/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
/// changing. You can still record and submit commands operating on the
/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
/// represents a certain range of the buffer's bytes.
///
/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
/// specification, an offset and size are specified as arguments to each call
/// working with the [`Buffer`], instead.
///
/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    pub(crate) buffer: &'a Buffer,
    pub(crate) offset: BufferAddress,
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);

impl<'a> BufferSlice<'a> {
    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`.
    ///
    /// The `bounds` argument can be half or fully unbounded: for example,
    /// `slice.slice(..)` refers to the entire slice, and `slice.slice(n..)`
    /// refers to the portion starting at the `n`th byte of the slice and
    /// extending to its end.
    ///
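    /// For example, a brief sketch showing that offsets are relative to the
    /// parent slice, not to the underlying buffer:
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let slice = buffer.slice(100..200);
    /// let sub = slice.slice(10..20); // covers bytes 110..120 of `buffer`
    /// assert_eq!(sub.offset(), 110);
    /// ```
    ///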
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        let (offset, size) = range_to_offset_size(bounds, self.size.get());
        check_buffer_bounds(self.size.get(), offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
            size,                         // check_buffer_bounds ensures this fits within the parent slice
        }
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        assert_eq!(mc.mapped_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        mc.mapped_range = self.offset..end;

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range(&self) -> BufferView {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Immutable);
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferView {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        }
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut(&self) -> BufferViewMut {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Mutable);
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferViewMut {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
        }
    }

    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it’s possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] this slice starts at.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice.
    pub fn size(&self) -> BufferSize {
        self.size
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
    /// provided that it will be used without a dynamic offset.
    fn from(value: BufferSlice<'a>) -> Self {
        BufferBinding {
            buffer: value.buffer,
            offset: value.offset,
            size: Some(value.size),
        }
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
    /// provided that it will be used without a dynamic offset.
    fn from(value: BufferSlice<'a>) -> Self {
        crate::BindingResource::Buffer(crate::BufferBinding::from(value))
    }
}

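/// Returns `true` if the half-open ranges `a` and `b` share at least one byte.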
fn range_overlaps(a: &Range<BufferAddress>, b: &Range<BufferAddress>) -> bool {
    a.start < b.end && b.start < a.end
}

#[derive(Debug, Copy, Clone)]
enum RangeMappingKind {
    Mutable,
    Immutable,
}

impl RangeMappingKind {
    /// Returns true if a range of this kind can touch the same bytes as a range of the other kind.
    ///
    /// This is Rust's Mutable XOR Shared rule.
    fn allowed_concurrently_with(self, other: Self) -> bool {
        matches!(
            (self, other),
            (RangeMappingKind::Immutable, RangeMappingKind::Immutable)
        )
    }
}

#[derive(Debug, Clone)]
struct Subrange {
    index: Range<BufferAddress>,
    kind: RangeMappingKind,
}

impl Subrange {
    fn new(offset: BufferAddress, size: BufferSize, kind: RangeMappingKind) -> Self {
        Self {
            index: offset..(offset + size.get()),
            kind,
        }
    }
}

impl fmt::Display for Subrange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}..{} ({:?})",
            self.index.start, self.index.end, self.kind
        )
    }
}

/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and obey Rust's
/// aliasing rules: no view may overlap a mutable view.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
    /// the buffer is mapped at creation time, and when you call `map_async` on
    /// some [`BufferSlice`] (so technically, it indicates the portion that is
    /// *or has been requested to be* mapped.)
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    mapped_range: Range<BufferAddress>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. These overlap only where all overlapping views are
    /// read-only, and are all contained within `mapped_range`.
    sub_ranges: Vec<Subrange>,
}

impl MapContext {
    /// Creates a new `MapContext`.
    ///
    /// For [`mapped_at_creation`] buffers, pass the full buffer range in the
    /// `mapped_range` argument. For other buffers, pass `None`.
    ///
    /// [`mapped_at_creation`]: BufferDescriptor::mapped_at_creation
    pub(crate) fn new(mapped_range: Option<Range<BufferAddress>>) -> Self {
        Self {
            mapped_range: mapped_range.unwrap_or(0..0),
            sub_ranges: Vec::new(),
        }
    }

    /// Record that the buffer is no longer mapped.
    fn reset(&mut self) {
        self.mapped_range = 0..0;

        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Record that the bytes of the buffer covered by `new_sub` are now viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range is invalid.
    #[track_caller]
    fn validate_and_add(&mut self, new_sub: Subrange) {
        if self.mapped_range.is_empty() {
            panic!("tried to call get_mapped_range(_mut) on an unmapped buffer");
        }
        // The new range must be entirely contained within the mapped range;
        // merely overlapping it is not enough.
        if new_sub.index.start < self.mapped_range.start
            || new_sub.index.end > self.mapped_range.end
        {
            panic!(
                "tried to call get_mapped_range(_mut) on a range that is not entirely mapped. \
                 Attempted to get range {}, but the mapped range is {}..{}",
                new_sub, self.mapped_range.start, self.mapped_range.end
            );
        }

        // This check is essential for avoiding undefined behavior: it is the
        // only thing that ensures that `&mut` references to the buffer's
        // contents don't alias anything else.
        for sub in self.sub_ranges.iter() {
            if range_overlaps(&sub.index, &new_sub.index)
                && !sub.kind.allowed_concurrently_with(new_sub.kind)
            {
                panic!(
                    "tried to call get_mapped_range(_mut) on a range that has already \
                     been mapped and would break Rust memory aliasing rules. Attempted \
                     to get range {}, and the conflicting range is {}",
                    new_sub, sub
                );
            }
        }
        self.sub_ranges.push(new_sub);
    }

    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range does not exactly match one previously
    /// passed to [`MapContext::validate_and_add`].
    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
        let end = offset + size.get();

        let index = self
            .sub_ranges
            .iter()
            .position(|r| r.index == (offset..end))
            .expect("unable to remove range from map context");
        self.sub_ranges.swap_remove(index);
    }
}

/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);

/// Error occurred when trying to async map a buffer.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);

impl fmt::Display for BufferAsyncError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Error occurred when trying to async map a buffer")
    }
}

impl error::Error for BufferAsyncError {}

/// Type of buffer mapping.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading
    Read,
    /// Map only for writing
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);

/// A read-only view of a mapped buffer's bytes.
///
/// To get a `BufferView`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range()`.
///
/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
/// slice methods to access the buffer's contents. It also implements
/// `AsRef<[u8]>`, if that's more convenient.
///
/// Before the buffer can be unmapped, all `BufferView`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
/// [`map_async`]: BufferSlice::map_async
#[derive(Debug)]
pub struct BufferView {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
}

#[cfg(webgpu)]
impl BufferView {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in JS.
    /// This can be MUCH faster than dereferencing the view, which copies the data into
    /// the Rust / Wasm heap.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        self.inner.as_uint8array()
    }
}

impl core::ops::Deref for BufferView {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.inner.slice()
    }
}

impl AsRef<[u8]> for BufferView {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.slice()
    }
}

/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// `BufferViewMut` dereferences to `&mut [u8]`, so you can use all the usual
/// Rust slice methods to access the buffer's contents. It also implements
/// `AsMut<[u8]>`, if that's more convenient.
///
/// It is possible to read the buffer using this view, but doing so is not
/// recommended, as it is likely to be slow.
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferViewMut {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
    readable: bool,
}

impl AsMut<[u8]> for BufferViewMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Deref for BufferViewMut {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        if !self.readable {
            log::warn!("Reading from a BufferViewMut is slow and not recommended.");
        }

        self.inner.slice()
    }
}

impl DerefMut for BufferViewMut {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl Drop for BufferView {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

impl Drop for BufferViewMut {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

#[track_caller]
fn check_buffer_bounds(
    buffer_size: BufferAddress,
    slice_offset: BufferAddress,
    slice_size: BufferSize,
) {
    // A slice of length 0 is invalid, so the offset must not be equal to or greater than the buffer size.
    if slice_offset >= buffer_size {
        panic!(
            "slice offset {} is out of range for buffer of size {}",
            slice_offset, buffer_size
        );
    }

    // Detect integer overflow.
    let end = slice_offset.checked_add(slice_size.get());
    if end.is_none_or(|end| end > buffer_size) {
        panic!(
            "slice offset {} size {} is out of range for buffer of size {}",
            slice_offset, slice_size, buffer_size
        );
    }
}

#[track_caller]
pub(crate) fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
    bounds: S,
    whole_size: BufferAddress,
) -> (BufferAddress, BufferSize) {
    let offset = match bounds.start_bound() {
        Bound::Included(&bound) => bound,
        Bound::Excluded(&bound) => bound + 1,
        Bound::Unbounded => 0,
    };
    let size = BufferSize::new(match bounds.end_bound() {
        Bound::Included(&bound) => bound + 1 - offset,
        Bound::Excluded(&bound) => bound - offset,
        Bound::Unbounded => whole_size - offset,
    })
    .expect("buffer slices can not be empty");

    (offset, size)
}

#[cfg(test)]
mod tests {
    use super::{
        check_buffer_bounds, range_overlaps, range_to_offset_size, BufferAddress, BufferSize,
    };

    fn bs(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, bs(2)));
        assert_eq!(range_to_offset_size(2..5, whole), (2, bs(3)));
        assert_eq!(range_to_offset_size(.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(21.., whole), (21, bs(whole - 21)));
        assert_eq!(range_to_offset_size(0.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(..21, whole), (0, bs(21)));
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, bs(50));
        check_buffer_bounds(200, 100, bs(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, bs(100));
        check_buffer_bounds(u64::MAX, 0, bs(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX - 1));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, bs(101));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX));
    }

    #[test]
    fn range_overlapping() {
        // First range entirely to the left
        assert_eq!(range_overlaps(&(0..1), &(1..3)), false);
        // First range overlaps left edge
        assert_eq!(range_overlaps(&(0..2), &(1..3)), true);
        // First range completely inside second
        assert_eq!(range_overlaps(&(1..2), &(0..3)), true);
        // First range completely surrounds second
        assert_eq!(range_overlaps(&(0..3), &(1..2)), true);
        // First range overlaps right edge
        assert_eq!(range_overlaps(&(1..3), &(0..2)), true);
        // First range entirely to the right
        assert_eq!(range_overlaps(&(2..3), &(0..2)), false);
    }
}