wgpu/api/
buffer.rs

1use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
2use core::{
3    error, fmt,
4    num::NonZero,
5    ops::{Bound, Deref, Range, RangeBounds},
6};
7
8use crate::util::Mutex;
9use crate::*;
10
11/// Handle to a GPU-accessible buffer.
12///
13/// A `Buffer` is a memory allocation for use by the GPU, somewhat analogous to
14/// <code>[Box]&lt;[\[u8\]][primitive@slice]&gt;</code> in Rust.
15/// The contents of buffers are untyped bytes; it is up to the application to
16/// specify the interpretation of the bytes when the buffer is used, in ways
17/// such as [`VertexBufferLayout`].
18/// A single buffer can be used to hold multiple independent pieces of data at
19/// different offsets (e.g. both vertices and indices for one or more meshes).
20///
21/// A `Buffer`'s bytes have "interior mutability": functions like
22/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
23/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
24/// prevents simultaneous reads and writes of buffer contents using run-time
25/// checks.
26///
27/// Created with [`Device::create_buffer()`] or
28/// [`DeviceExt::create_buffer_init()`].
29///
30/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
31///
32/// [mapping]: Buffer#mapping-buffers
33///
34/// # How to get your data into a buffer
35///
36/// Every `Buffer` starts with all bytes zeroed.
37/// There are many ways to load data into a `Buffer`:
38///
39/// - When creating a buffer, you may set the [`mapped_at_creation`][mac] flag,
40///   then write to its [`get_mapped_range_mut()`][Buffer::get_mapped_range_mut].
41///   This only works when the buffer is created and has not yet been used by
42///   the GPU, but it is all you need for buffers whose contents do not change
43///   after creation.
44///   - You may use [`DeviceExt::create_buffer_init()`] as a convenient way to
45///     do that and copy data from a `&[u8]` you provide.
46/// - After creation, you may use [`Buffer::map_async()`] to map it again;
47///   however, you then need to wait until the GPU is no longer using the buffer
48///   before you begin writing.
49/// - You may use [`CommandEncoder::copy_buffer_to_buffer()`] to copy data into
50///   this buffer from another buffer.
51/// - You may use [`Queue::write_buffer()`] to copy data into the buffer from a
52///   `&[u8]`. This uses a temporary “staging” buffer managed by `wgpu` to hold
53///   the data.
54///   - [`Queue::write_buffer_with()`] allows you to write directly into temporary
55///     storage instead of providing a slice you already prepared, which may
56///     allow *your* code to save the allocation of a [`Vec`] or such.
57/// - You may use [`util::StagingBelt`] to manage a set of temporary buffers.
58///   This may be more efficient than [`Queue::write_buffer_with()`] when you
59///   have many small copies to perform, but requires more steps to use, and
60///   tuning of the belt buffer size.
61/// - You may write your own staging buffer management customized to your
62///   application, based on mapped buffers and
63///   [`CommandEncoder::copy_buffer_to_buffer()`].
64/// - A GPU computation’s results can be stored in a buffer:
65///   - A [compute shader][ComputePipeline] may write to a buffer bound as a
66///     [storage buffer][BufferBindingType::Storage].
67///   - A render pass may render to a texture which is then copied to a buffer
68///     using [`CommandEncoder::copy_texture_to_buffer()`].
69///
70/// # Mapping buffers
71///
72/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
73/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
74/// `&mut [u8]` slice of bytes. Buffers created with the
75/// [`mapped_at_creation`][mac] flag set are also mapped initially.
76///
77/// Depending on the hardware, the buffer could be memory shared between CPU and
78/// GPU, so that the CPU has direct access to the same bytes the GPU will
79/// consult; or it may be ordinary CPU memory, whose contents the system must
80/// copy to/from the GPU as needed. This crate's API is designed to work the
81/// same way in either case: at any given time, a buffer is either mapped and
82/// available to the CPU, or unmapped and ready for use by the GPU, but never
83/// both. This makes it impossible for either side to observe changes by the
84/// other immediately, and any necessary transfers can be carried out when the
85/// buffer transitions from one state to the other.
86///
87/// There are two ways to map a buffer:
88///
89/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
90///   buffer is mapped when it is created. This is the easiest way to initialize
91///   a new buffer. You can set `mapped_at_creation` on any kind of buffer,
92///   regardless of its [`usage`] flags.
93///
94/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
95///   flags, then you can call `buffer.slice(range).map_async(mode, callback)`
96///   to map the portion of `buffer` given by `range`. This waits for the GPU to
97///   finish using the buffer, and invokes `callback` as soon as the buffer is
98///   safe for the CPU to access.
99///
100/// Once a buffer is mapped:
101///
102/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
103///   [`BufferView`], which dereferences to a `&[u8]` that you can use to read
104///   the buffer's contents.
105///
106/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
107///   [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
108///   read and write the buffer's contents.
109///
110/// The given `range` must fall within the mapped portion of the buffer. If you
111/// attempt to access overlapping ranges, even for shared access only, these
112/// methods panic.
113///
114/// While a buffer is mapped, you may not submit any commands to the GPU that
115/// access it. You may record command buffers that use the buffer, but if you
116/// submit them while the buffer is mapped, submission will panic.
117///
118/// When you are done using the buffer on the CPU, you must call
119/// [`Buffer::unmap`] to make it available for use by the GPU again. All
120/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
121/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
122///
123/// # Example
124///
125/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
126/// with `f32` values like this:
127///
128/// ```
129/// # #[cfg(feature = "noop")]
130/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
131/// # #[cfg(not(feature = "noop"))]
132/// # let device: wgpu::Device = { return; };
133/// #
134/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
135/// #     label: None,
136/// #     size: 400,
137/// #     usage: wgpu::BufferUsages::MAP_WRITE,
138/// #     mapped_at_creation: false,
139/// # });
140/// let capturable = buffer.clone();
141/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
142///     if result.is_ok() {
143///         let mut view = capturable.get_mapped_range_mut(..).unwrap();
144///         let mut floats: wgpu::WriteOnly<[[u8; 4]]> = view.slice(..).into_chunks::<4>().0;
145///         floats.fill(42.0f32.to_ne_bytes());
146///         drop(view);
147///         capturable.unmap();
148///     }
149/// });
150/// ```
151///
152/// This code takes the following steps:
153///
154/// - First, it makes a cloned handle to the buffer for capture by
155///   the callback passed to [`map_async`]. Since a [`map_async`] callback may be
156///   invoked from another thread, interaction between the callback and the
157///   thread calling [`map_async`] generally requires some sort of shared heap
158///   data like this. In real code, there might be an [`Arc`] to some larger
159///   structure that itself owns `buffer`.
160///
/// - Then, it calls [`Buffer::map_async`] with an unbounded range (`..`) to
///   request that the buffer's entire contents be made accessible to the CPU
///   ("mapped"). This may entail waiting for previously enqueued operations on
///   `buffer` to finish. Although [`map_async`] itself always returns
///   immediately, it saves the callback function to be invoked later.
169///
170/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
171///   shown in this example) determines that the buffer is mapped and ready for
172///   the CPU to use, it invokes the callback function.
173///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` slice referring to
///   the buffer's bytes.
177///
/// - It then calls `view.slice(..).into_chunks::<4>()` to reinterpret the
///   mapped bytes as a write-only slice of 4-byte arrays, and uses its
///   [`fill`] method to fill the buffer with the bytes of a useful `f32`
///   value. (In real code, the [`bytemuck`] crate is another way to turn a
///   `&mut [u8]` into a `&mut [f32]`.)
181///
182/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
183///   the buffer. In real code, the callback would also need to do some sort of
184///   synchronization to let the rest of the program know that it has completed
185///   its work.
186///
187/// If using [`map_async`] directly is awkward, you may find it more convenient to
188/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
189/// However, those each have their own tradeoffs; the asynchronous nature of GPU
190/// execution makes it hard to avoid friction altogether.
191///
192/// [`Arc`]: std::sync::Arc
193/// [`map_async`]: BufferSlice::map_async
194/// [`bytemuck`]: https://crates.io/crates/bytemuck
195/// [`fill`]: slice::fill
196///
197/// ## Mapping buffers on the web
198///
199/// When compiled to WebAssembly and running in a browser content process,
200/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
201/// In this context, `wgpu` is further isolated from the GPU:
202///
203/// - Depending on the browser's WebGPU implementation, mapping and unmapping
204///   buffers probably entails copies between WebAssembly linear memory and the
205///   graphics driver's buffers.
206///
207/// - All modern web browsers isolate web content in its own sandboxed process,
208///   which can only interact with the GPU via interprocess communication (IPC).
209///   Although most browsers' IPC systems use shared memory for large data
210///   transfers, there will still probably need to be copies into and out of the
211///   shared memory buffers.
212///
213/// All of these copies contribute to the cost of buffer mapping in this
214/// configuration.
215///
216/// [`usage`]: BufferDescriptor::usage
217/// [mac]: BufferDescriptor::mapped_at_creation
218/// [`MAP_READ`]: BufferUsages::MAP_READ
219/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
220/// [`DeviceExt::create_buffer_init()`]: util::DeviceExt::create_buffer_init
#[derive(Debug, Clone)]
pub struct Buffer {
    // Backend-dispatched handle to the underlying buffer object.
    pub(crate) inner: dispatch::DispatchBuffer,
    // Tracks the currently mapped range and the views handed out over it.
    // Shared (via `Arc`) between all clones of this handle.
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    // Length of the allocation in bytes, as specified at creation.
    pub(crate) size: wgt::BufferAddress,
    // Allowed usages, as specified at creation.
    pub(crate) usage: BufferUsages,
    // TODO: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

// Equality/ordering/hashing delegate to the backend handle, so clones of the
// same buffer compare equal.
crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);
233
impl Buffer {
    /// Return the binding view of the entire buffer.
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Return the binding view of the entire buffer.
    ///
    /// Uses offset 0 and no explicit size.
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Buffer`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::macros::hal_type_vulkan!("Buffer")]
    #[doc = crate::macros::hal_type_metal!("Buffer")]
    #[doc = crate::macros::hal_type_dx12!("Buffer")]
    #[doc = crate::macros::hal_type_gles!("Buffer")]
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return None if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        // `as_core_opt` returns None for non-wgpu-core (webgpu/custom)
        // buffers, covering the first two cases listed under "Errors".
        let buffer = self.inner.as_core_opt()?;
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
    /// `bounds` start and end are given in bytes.
    ///
    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
    /// buffer contents for access from the CPU. See the [`BufferSlice`]
    /// documentation for details.
    ///
    /// The `range` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        // Resolve the possibly-unbounded `bounds` against the buffer's full
        // size, then panic (inside check_buffer_bounds) if out of range.
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
    /// makes the buffer available for use by the GPU again.
    pub fn unmap(&self) {
        // Clear the CPU-side mapping bookkeeping first, then unmap the
        // backend buffer.
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroy the associated native resources as soon as possible.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// If the buffer is already mapped, the `callback` is immediately invoked
    /// with an error instead of panicking.
    ///
    /// # Panics
    ///
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a multiple of 4 greater than 0.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        self.slice(bounds).map_async(mode, callback)
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may be less than the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
    ///
    /// # Errors
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a multiple of 4 greater than 0.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(
        &self,
        bounds: S,
    ) -> Result<BufferView, MapRangeError> {
        self.slice(bounds).get_mapped_range()
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may be less than the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Errors
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a multiple of 4 greater than 0.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(
        &self,
        bounds: S,
    ) -> Result<BufferViewMut, MapRangeError> {
        self.slice(bounds).get_mapped_range_mut()
    }

    #[cfg(custom)]
    /// Returns custom implementation of Buffer (if custom backend and is internally T)
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}
459
460/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
461///
462/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
463///
464/// ```no_run
465/// # let buffer: wgpu::Buffer = todo!();
466/// let slice = buffer.slice(10..20);
467/// ```
468///
469/// This returns a slice referring to the second ten bytes of `buffer`. To get a
470/// slice of the entire `Buffer`:
471///
472/// ```no_run
473/// # let buffer: wgpu::Buffer = todo!();
474/// let whole_buffer_slice = buffer.slice(..);
475/// ```
476///
477/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
478/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
479/// a draw call should consult. You can also convert it to a [`BufferBinding`]
480/// with `.try_into()`, which fails if the slice length is 0.
481///
482/// To access the slice's contents on the CPU, you must first [map] the buffer,
483/// and then call [`BufferSlice::get_mapped_range`] or
484/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
485/// contents. See the documentation on [mapping][map] for more details,
486/// including example code.
487///
488/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
489/// nobody else is modifying the `T` values to which it refers, a
490/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
491/// changing. You can still record and submit commands operating on the
492/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
493/// represents a certain range of the buffer's bytes.
494///
495/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
496/// specification, an offset and size are specified as arguments to each call
497/// working with the [`Buffer`], instead.
498///
499/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    // The buffer this slice refers to.
    pub(crate) buffer: &'a Buffer,
    // Start of the slice, in bytes from the beginning of the buffer.
    pub(crate) offset: BufferAddress,
    // Length of the slice, in bytes.
    pub(crate) size: BufferAddress,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);
508
impl<'a> BufferSlice<'a> {
    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`.
    ///
    /// The `range` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        // `bounds` is relative to this slice, so resolve and check it against
        // `self.size` rather than the whole buffer.
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
            size,                         // check_buffer_bounds ensures this fits within self.size
        }
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        if mc.mapped_range.is_some() {
            // Buffer is already mapped (or a mapping is pending): report the
            // failure through the callback. Release the lock first, since the
            // callback may use this buffer and re-lock `map_context`.
            drop(mc);
            callback(Err(BufferAsyncError));
            return;
        }

        let end = self.offset + self.size;
        mc.mapped_range = Some(self.offset..end);
        // Release the `map_context` lock before calling into the backend:
        // the callback may run synchronously and lock it again.
        drop(mc);

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range()`].
    ///
    /// # Errors
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range(&self) -> Result<BufferView, MapRangeError> {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Immutable);
        // Obtain the backend view first; only record the subrange in
        // `map_context` (for overlap checking) once that call has succeeded.
        let range = self.buffer.inner.get_mapped_range(subrange.index.clone())?;
        self.buffer.map_context.lock().validate_and_add(subrange)?;
        Ok(BufferView {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        })
    }

    /// Gain write-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
    ///
    /// # Errors
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut(&self) -> Result<BufferViewMut, MapRangeError> {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Mutable);
        // Same ordering as `get_mapped_range`: backend call first, then
        // register the (mutable) subrange for overlap checking.
        let range = self.buffer.inner.get_mapped_range(subrange.index.clone())?;
        self.buffer.map_context.lock().validate_and_add(subrange)?;
        Ok(BufferViewMut {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        })
    }

    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it’s possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] this slice starts at.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns this slice's size as a non-zero [`BufferSize`].
    ///
    /// Panics if the slice is empty; callers must ensure `size > 0`.
    pub(crate) fn size_expect_nonzero(&self) -> BufferSize {
        BufferSize::new(self.size).expect("buffer slice can not be empty")
    }
}
677
678impl<'a> TryFrom<BufferSlice<'a>> for crate::BufferBinding<'a> {
679    type Error = ();
680
681    /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
682    /// provided that it will be used without a dynamic offset.
683    fn try_from(value: BufferSlice<'a>) -> Result<Self, Self::Error> {
684        Ok(BufferBinding {
685            buffer: value.buffer,
686            offset: value.offset,
687            size: Some(NonZero::new(value.size()).ok_or(())?),
688        })
689    }
690}
691
692impl<'a> TryFrom<BufferSlice<'a>> for crate::BindingResource<'a> {
693    type Error = ();
694
695    /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
696    /// provided that it will be used without a dynamic offset.
697    fn try_from(value: BufferSlice<'a>) -> Result<Self, Self::Error> {
698        Ok(crate::BindingResource::Buffer(
699            crate::BufferBinding::try_from(value)?,
700        ))
701    }
702}
703
/// Returns `true` if the half-open ranges `a` and `b` share at least one point.
///
/// Generalized over any `PartialOrd` element type; existing callers pass
/// `Range<BufferAddress>` unchanged. An empty range (`start >= end`) never
/// overlaps anything.
fn range_overlaps<T: PartialOrd>(a: &Range<T>, b: &Range<T>) -> bool {
    a.start < b.end && b.start < a.end
}
707
/// Returns `true` if the half-open range `a` contains every point of `b`.
///
/// Generalized over any `PartialOrd` element type; existing callers pass
/// `Range<BufferAddress>` unchanged.
fn range_contains<T: PartialOrd>(a: &Range<T>, b: &Range<T>) -> bool {
    a.start <= b.start && a.end >= b.end
}
711
#[derive(Debug, Copy, Clone)]
enum RangeMappingKind {
    /// An exclusive (`&mut`-like) view; may not coexist with any other view
    /// of the same bytes. Used by `get_mapped_range_mut`.
    Mutable,
    /// A shared (`&`-like) view; may coexist with other shared views of the
    /// same bytes.
    Immutable,
}
717
718impl RangeMappingKind {
719    /// Returns true if a range of this kind can touch the same bytes as a range of the other kind.
720    ///
721    /// This is Rust's Mutable XOR Shared rule.
722    fn allowed_concurrently_with(self, other: Self) -> bool {
723        matches!(
724            (self, other),
725            (RangeMappingKind::Immutable, RangeMappingKind::Immutable)
726        )
727    }
728}
729
#[derive(Debug, Clone)]
struct Subrange {
    /// The byte range of the buffer covered by this view.
    index: Range<BufferAddress>,
    /// Whether the view is shared (read) or exclusive (write).
    kind: RangeMappingKind,
}
735
736impl Subrange {
737    fn new(offset: BufferAddress, size: BufferAddress, kind: RangeMappingKind) -> Self {
738        Self {
739            index: offset..(offset + size),
740            kind,
741        }
742    }
743}
744
745impl fmt::Display for Subrange {
746    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
747        write!(
748            f,
749            "{}..{} ({:?})",
750            self.index.start, self.index.end, self.kind
751        )
752    }
753}
754
/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and don't overlap.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This becomes Some(...) when the buffer is mapped at creation time, and
    /// when you call `map_async` on some [`BufferSlice`] (so technically, it
    /// indicates the portion that is *or has been requested to be* mapped.)
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    mapped_range: Option<Range<BufferAddress>>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. These are non-overlapping, and are all contained
    /// within `mapped_range`.
    ///
    /// Entries are added by [`MapContext::validate_and_add`] and removed by
    /// [`MapContext::remove`].
    sub_ranges: Vec<Subrange>,
}
774
775impl MapContext {
776    /// Creates a new `MapContext`.
777    ///
778    /// For [`mapped_at_creation`] buffers, pass the full buffer range in the
779    /// `mapped_range` argument. For other buffers, pass `None`.
780    ///
781    /// [`mapped_at_creation`]: BufferDescriptor::mapped_at_creation
782    pub(crate) fn new(mapped_range: Option<Range<BufferAddress>>) -> Self {
783        Self {
784            mapped_range,
785            sub_ranges: Vec::new(),
786        }
787    }
788
789    /// Record that the buffer is no longer mapped.
790    fn reset(&mut self) {
791        self.mapped_range = None;
792
793        assert!(
794            self.sub_ranges.is_empty(),
795            "You cannot unmap a buffer that still has accessible mapped views"
796        );
797    }
798
799    /// Record that the `size` bytes of the buffer at `offset` are now viewed.
800    ///
801    /// # Errors
802    ///
803    /// This returns an error if the given range is invalid.
804    fn validate_and_add(&mut self, new_sub: Subrange) -> Result<(), MapRangeError> {
805        if self.mapped_range.is_none() {
806            return Err(MapRangeError(
807                "tried to call get_mapped_range(_mut) on an unmapped buffer".into(),
808            ));
809        }
810        let mapped_range = self.mapped_range.as_ref().unwrap();
811        if !range_contains(mapped_range, &new_sub.index) {
812            return Err(MapRangeError(alloc::format!(
813                "tried to call get_mapped_range(_mut) on a range that is not entirely mapped. \
814                 Attempted to get range {}, but the mapped range is {}..{}",
815                new_sub,
816                mapped_range.start,
817                mapped_range.end
818            )));
819        }
820        // This check is essential for avoiding undefined behavior: it is the
821        // only thing that ensures that `&mut` references to the buffer's
822        // contents don't alias anything else.
823        for sub in self.sub_ranges.iter() {
824            if range_overlaps(&sub.index, &new_sub.index)
825                && !sub.kind.allowed_concurrently_with(new_sub.kind)
826            {
827                return Err(MapRangeError(alloc::format!(
828                    "tried to call get_mapped_range(_mut) on a range that has already \
829                     been mapped and would break Rust memory aliasing rules. Attempted \
830                     to get range {}, and the conflicting range is {}",
831                    new_sub,
832                    sub
833                )));
834            }
835        }
836        self.sub_ranges.push(new_sub);
837        Ok(())
838    }
839
840    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
841    ///
842    /// # Panics
843    ///
844    /// This panics if the given range does not exactly match one previously
845    /// passed to [`MapContext::validate_and_add`].
846    pub(crate) fn remove(&mut self, offset: BufferAddress, size: BufferAddress) {
847        let end = offset + size;
848
849        let index = self
850            .sub_ranges
851            .iter()
852            .position(|r| r.index == (offset..end))
853            .expect("unable to remove range from map context");
854        self.sub_ranges.swap_remove(index);
855    }
856}
857
/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
// Compile-time guarantee that descriptors can be shared between threads.
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);
866
/// Error occurred when trying to async map a buffer.
///
/// This is a unit type: it carries no further detail about the cause.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);
871
872impl fmt::Display for BufferAsyncError {
873    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
874        write!(f, "Error occurred when trying to async map a buffer")
875    }
876}
877
// No nested source error to expose; the default `Error` methods suffice.
impl error::Error for BufferAsyncError {}
879
/// Error returned by [`BufferSlice::get_mapped_range`] and [`BufferSlice::get_mapped_range_mut`].
///
/// Corresponds to the `OperationError` thrown by
/// [`getMappedRange()`](https://gpuweb.github.io/gpuweb/#dom-gpubuffer-getmappedrange)
/// in the WebGPU spec.
#[derive(Clone, Debug)]
pub struct MapRangeError(pub(crate) String); // field 0: human-readable description, shown via `Display`
static_assertions::assert_impl_all!(MapRangeError: Send, Sync);
888
889impl fmt::Display for MapRangeError {
890    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
891        write!(f, "Buffer view error: {}", self.0)
892    }
893}
894
// The message is surfaced through `Display`; there is no nested source error.
impl error::Error for MapRangeError {}
896
/// Type of buffer mapping.
///
/// Determines whether the mapped bytes may be read or written.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading
    Read,
    /// Map only for writing
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);
906
/// A read-only view of a mapped buffer's bytes.
///
/// To get a `BufferView`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range()`.
///
/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
/// slice methods to access the buffer's contents. It also implements
/// `AsRef<[u8]>`, if that's more convenient.
///
/// Before the buffer can be unmapped, all `BufferView`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
/// [`map_async`]: BufferSlice::map_async
#[derive(Debug)]
pub struct BufferView {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    // Owning `buffer` also lets `Drop` release this view's range from the
    // buffer's `MapContext`.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferAddress,
    // Backend handle to the mapped bytes; exposed read-only through `Deref`.
    inner: dispatch::DispatchBufferMappedRange,
}
931
/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// Because Rust has no write-only reference type
/// (`&[u8]` is read-only and `&mut [u8]` is read-write),
/// this type does not dereference to a slice in the way that [`BufferView`] does.
/// Instead, [`.slice()`][BufferViewMut::slice] returns a special [`WriteOnly`] pointer type,
/// and there are also a few convenience methods such as [`BufferViewMut::copy_from_slice()`].
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferViewMut {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    // Owning `buffer` also lets `Drop` release this view's range from the
    // buffer's `MapContext`.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferAddress,
    // Backend handle to the mapped bytes; exposed write-only via `slice()`.
    inner: dispatch::DispatchBufferMappedRange,
}
957
958// `BufferView` simply dereferences. `BufferViewMut` cannot, because mapped memory may be
959// write-combining memory <https://en.wikipedia.org/wiki/Write_combining>,
960// and not support the expected behavior of atomic accesses.
961// Further context: <https://github.com/gfx-rs/wgpu/issues/8897>
962
impl core::ops::Deref for BufferView {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        // SAFETY: this is a read mapping, and `MapContext::validate_and_add`
        // guarantees no mutable view overlaps these bytes while `self` exists.
        unsafe { self.inner.read_slice() }
    }
}
972
973impl AsRef<[u8]> for BufferView {
974    #[inline]
975    fn as_ref(&self) -> &[u8] {
976        self
977    }
978}
979
980impl Drop for BufferView {
981    fn drop(&mut self) {
982        self.buffer
983            .map_context
984            .lock()
985            .remove(self.offset, self.size);
986    }
987}
988
989impl Drop for BufferViewMut {
990    fn drop(&mut self) {
991        self.buffer
992            .map_context
993            .lock()
994            .remove(self.offset, self.size);
995    }
996}
997
#[cfg(webgpu)]
impl BufferView {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in js.
    /// This can be MUCH faster than dereferencing the view which copies the data into
    /// the Rust / wasm heap.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        // Hands back the backend's JS-side array directly, avoiding the copy
        // into wasm linear memory that `Deref` performs.
        self.inner.as_uint8array()
    }
}
1007
/// These methods are equivalent to the methods of the same names on [`WriteOnly`].
impl BufferViewMut {
    /// Returns the length of this view; the number of bytes to be written.
    pub fn len(&self) -> usize {
        // cannot fail because we can't actually map more than isize::MAX bytes
        usize::try_from(self.size).unwrap()
    }

    /// Returns `true` if the view has a length of 0.
    ///
    /// Note that this is currently impossible.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }

    /// Returns a [`WriteOnly`] reference to a portion of this.
    ///
    /// `.slice(..)` can be used to access the whole data.
    pub fn slice<'a, S: RangeBounds<usize>>(&'a mut self, bounds: S) -> WriteOnly<'a, [u8]> {
        // SAFETY: this is a write mapping
        // (`WriteOnly` then keeps callers from *reading* the bytes, which the
        // file comment above notes may be write-combining memory.)
        unsafe { self.inner.write_slice() }.into_slice(bounds)
    }

    /// Copies all elements from src into `self`.
    ///
    /// The length of `src` must be the same as `self`.
    ///
    /// This method is equivalent to
    /// [`self.slice(..).copy_from_slice(src)`][WriteOnly::copy_from_slice].
    pub fn copy_from_slice(&mut self, src: &[u8]) {
        self.slice(..).copy_from_slice(src)
    }
}
1041
1042#[track_caller]
1043fn check_buffer_bounds(
1044    whole_size: BufferAddress,
1045    slice_offset: BufferAddress,
1046    slice_size: BufferAddress,
1047) {
1048    if slice_offset > whole_size {
1049        panic!(
1050            "slice offset {} is out of range for buffer of size {}",
1051            slice_offset, whole_size
1052        );
1053    }
1054
1055    // Detect integer overflow.
1056    let end = slice_offset.checked_add(slice_size);
1057    if end.is_none_or(|end| end > whole_size) {
1058        panic!(
1059            "slice offset {} size {} is out of range for buffer of size {}",
1060            slice_offset, slice_size, whole_size
1061        );
1062    }
1063}
1064
1065#[track_caller]
1066pub(crate) fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
1067    bounds: S,
1068    whole_size: BufferAddress,
1069) -> (BufferAddress, BufferAddress) {
1070    let offset = match bounds.start_bound() {
1071        Bound::Included(&bound) => bound,
1072        Bound::Excluded(&bound) => bound + 1,
1073        Bound::Unbounded => 0,
1074    };
1075    let size = match bounds.end_bound() {
1076        Bound::Included(&bound) => bound + 1 - offset,
1077        Bound::Excluded(&bound) => bound - offset,
1078        Bound::Unbounded => whole_size - offset,
1079    };
1080
1081    (offset, size)
1082}
1083
#[cfg(test)]
mod tests {
    use super::{check_buffer_bounds, range_contains, range_overlaps, range_to_offset_size};

    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, 2));
        assert_eq!(range_to_offset_size(2..5, whole), (2, 3));
        assert_eq!(range_to_offset_size(.., whole), (0, whole));
        assert_eq!(range_to_offset_size(21.., whole), (21, whole - 21));
        assert_eq!(range_to_offset_size(0.., whole), (0, whole));
        assert_eq!(range_to_offset_size(..21, whole), (0, 21));
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, 50);
        check_buffer_bounds(200, 100, 100);
        check_buffer_bounds(u64::MAX, u64::MAX - 100, 100);
        check_buffer_bounds(u64::MAX, 0, u64::MAX);
        check_buffer_bounds(u64::MAX, 1, u64::MAX - 1);
        // Test empty buffer slices
        check_buffer_bounds(0, 0, 0);
        check_buffer_bounds(u64::MAX, u64::MAX, 0);
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, 101);
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, u64::MAX);
    }

    #[test]
    fn range_overlapping() {
        // First range to the left
        assert_eq!(range_overlaps(&(0..1), &(1..3)), false);
        // First range overlaps left edge
        assert_eq!(range_overlaps(&(0..2), &(1..3)), true);
        // First range completely inside second
        assert_eq!(range_overlaps(&(1..2), &(0..3)), true);
        // First range completely surrounds second
        assert_eq!(range_overlaps(&(0..3), &(1..2)), true);
        // First range overlaps right edge
        assert_eq!(range_overlaps(&(1..3), &(0..2)), true);
        // First range entirely to the right
        assert_eq!(range_overlaps(&(2..3), &(0..2)), false);
    }

    #[test]
    fn range_containment() {
        // A range contains itself.
        assert!(range_contains(&(0..10), &(0..10)));
        // Proper subrange.
        assert!(range_contains(&(0..10), &(2..5)));
        // Subrange sharing the left edge.
        assert!(range_contains(&(0..10), &(0..3)));
        // Partial overlap is not containment.
        assert!(!range_contains(&(0..5), &(3..6)));
        // Disjoint ranges.
        assert!(!range_contains(&(2..5), &(0..1)));
    }
}