wgpu/api/buffer.rs

use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::{
    error, fmt,
    ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};

use crate::util::Mutex;
use crate::*;

/// Handle to a GPU-accessible buffer.
///
/// A `Buffer` is a memory allocation for use by the GPU, somewhat analogous to
/// <code>[Box]&lt;[\[u8\]][primitive@slice]&gt;</code> in Rust.
/// The contents of buffers are untyped bytes; it is up to the application to
/// specify the interpretation of the bytes when the buffer is used, in ways
/// such as [`VertexBufferLayout`].
/// A single buffer can be used to hold multiple independent pieces of data at
/// different offsets (e.g. both vertices and indices for one or more meshes).
///
/// A `Buffer`'s bytes have "interior mutability": functions like
/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
/// prevents simultaneous reads and writes of buffer contents using run-time
/// checks.
///
/// Created with [`Device::create_buffer()`] or
/// [`DeviceExt::create_buffer_init()`].
///
/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
///
/// [mapping]: Buffer#mapping-buffers
///
/// # How to get your data into a buffer
///
/// Every `Buffer` starts with all bytes zeroed.
/// There are many ways to load data into a `Buffer`:
///
/// - When creating a buffer, you may set the [`mapped_at_creation`][mac] flag,
///   then write to its [`get_mapped_range_mut()`][Buffer::get_mapped_range_mut].
///   This only works when the buffer is created and has not yet been used by
///   the GPU, but it is all you need for buffers whose contents do not change
///   after creation.
///   - You may use [`DeviceExt::create_buffer_init()`] as a convenient way to
///     do that and copy data from a `&[u8]` you provide.
/// - After creation, you may use [`Buffer::map_async()`] to map it again;
///   however, you then need to wait until the GPU is no longer using the buffer
///   before you begin writing.
/// - You may use [`CommandEncoder::copy_buffer_to_buffer()`] to copy data into
///   this buffer from another buffer.
/// - You may use [`Queue::write_buffer()`] to copy data into the buffer from a
///   `&[u8]`. This uses a temporary “staging” buffer managed by `wgpu` to hold
///   the data.
///   - [`Queue::write_buffer_with()`] allows you to write directly into temporary
///     storage instead of providing a slice you already prepared, which may
///     allow *your* code to save the allocation of a [`Vec`] or such.
/// - You may use [`util::StagingBelt`] to manage a set of temporary buffers.
///   This may be more efficient than [`Queue::write_buffer_with()`] when you
///   have many small copies to perform, but requires more steps to use, and
///   tuning of the belt buffer size.
/// - You may write your own staging buffer management customized to your
///   application, based on mapped buffers and
///   [`CommandEncoder::copy_buffer_to_buffer()`].
/// - A GPU computation’s results can be stored in a buffer:
///   - A [compute shader][ComputePipeline] may write to a buffer bound as a
///     [storage buffer][BufferBindingType::Storage].
///   - A render pass may render to a texture which is then copied to a buffer
///     using [`CommandEncoder::copy_texture_to_buffer()`].
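///
/// As a hedged sketch of the simplest of these paths, [`Queue::write_buffer()`]
/// (assuming `queue` and `buffer` already exist, and that `buffer` was created
/// with [`BufferUsages::COPY_DST`]):
///
/// ```no_run
/// # let queue: wgpu::Queue = todo!();
/// # let buffer: wgpu::Buffer = todo!();
/// let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
/// // Copy the bytes of `data` into `buffer`, starting at offset 0.
/// queue.write_buffer(&buffer, 0, bytemuck::cast_slice(&data));
/// ```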
///
/// # Mapping buffers
///
/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
/// `&mut [u8]` slice of bytes. Buffers created with the
/// [`mapped_at_creation`][mac] flag set are also mapped initially.
///
/// Depending on the hardware, the buffer could be memory shared between CPU and
/// GPU, so that the CPU has direct access to the same bytes the GPU will
/// consult; or it may be ordinary CPU memory, whose contents the system must
/// copy to/from the GPU as needed. This crate's API is designed to work the
/// same way in either case: at any given time, a buffer is either mapped and
/// available to the CPU, or unmapped and ready for use by the GPU, but never
/// both. This makes it impossible for either side to observe changes by the
/// other immediately, and any necessary transfers can be carried out when the
/// buffer transitions from one state to the other.
///
/// There are two ways to map a buffer:
///
/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
///   buffer is mapped when it is created. This is the easiest way to initialize
///   a new buffer. You can set `mapped_at_creation` on any kind of buffer,
///   regardless of its [`usage`] flags.
///
/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
///   flags, then you can call `buffer.slice(range).map_async(mode, callback)`
///   to map the portion of `buffer` given by `range`. This waits for the GPU to
///   finish using the buffer, and invokes `callback` as soon as the buffer is
///   safe for the CPU to access.
///
/// Once a buffer is mapped:
///
/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
///   [`BufferView`], which dereferences to a `&[u8]` that you can use to read
///   the buffer's contents.
///
/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
///   read and write the buffer's contents.
///
/// The given `range` must fall within the mapped portion of the buffer. Views
/// must obey Rust's "mutable XOR shared" rule: these methods panic if a
/// requested range overlaps an existing mutable view, or if a mutable view is
/// requested over a range that overlaps any existing view.
///
/// While a buffer is mapped, you may not submit any commands to the GPU that
/// access it. You may record command buffers that use the buffer, but if you
/// submit them while the buffer is mapped, submission will panic.
///
/// When you are done using the buffer on the CPU, you must call
/// [`Buffer::unmap`] to make it available for use by the GPU again. All
/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
///
/// # Example
///
/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
/// with `f32` values like this:
///
/// ```
/// # #[cfg(feature = "noop")]
/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
/// # #[cfg(not(feature = "noop"))]
/// # let device: wgpu::Device = { return; };
/// #
/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
/// #     label: None,
/// #     size: 400,
/// #     usage: wgpu::BufferUsages::MAP_WRITE,
/// #     mapped_at_creation: false,
/// # });
/// let capturable = buffer.clone();
/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
///     if result.is_ok() {
///         let mut view = capturable.get_mapped_range_mut(..);
///         let floats: &mut [f32] = bytemuck::cast_slice_mut(&mut view);
///         floats.fill(42.0);
///         drop(view);
///         capturable.unmap();
///     }
/// });
/// ```
///
/// This code takes the following steps:
///
/// - First, it makes a cloned handle to the buffer for capture by
///   the callback passed to [`map_async`]. Since a [`map_async`] callback may be
///   invoked from another thread, interaction between the callback and the
///   thread calling [`map_async`] generally requires some sort of shared heap
///   data like this. In real code, there might be an [`Arc`] to some larger
///   structure that itself owns `buffer`.
///
/// - Then, it calls [`Buffer::map_async`] with an unbounded range (`..`) to
///   request that the buffer's entire contents be made accessible to the CPU
///   ("mapped"). This may entail waiting for previously enqueued operations on
///   `buffer` to finish. Although [`map_async`] itself always returns
///   immediately, it saves the callback function to be invoked later.
///
/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
///   shown in this example) determines that the buffer is mapped and ready for
///   the CPU to use, it invokes the callback function.
///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` slice referring to
///   the buffer's bytes.
///
/// - It then uses the [`bytemuck`] crate to turn the `&mut [u8]` into a `&mut
///   [f32]`, and calls the slice [`fill`] method to fill the buffer with a
///   useful value.
///
/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
///   the buffer. In real code, the callback would also need to do some sort of
///   synchronization to let the rest of the program know that it has completed
///   its work.
///
/// If using [`map_async`] directly is awkward, you may find it more convenient to
/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
/// However, those each have their own tradeoffs; the asynchronous nature of GPU
/// execution makes it hard to avoid friction altogether.
///
/// [`Arc`]: std::sync::Arc
/// [`map_async`]: BufferSlice::map_async
/// [`bytemuck`]: https://crates.io/crates/bytemuck
/// [`fill`]: slice::fill
///
/// ## Mapping buffers on the web
///
/// When compiled to WebAssembly and running in a browser content process,
/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
/// In this context, `wgpu` is further isolated from the GPU:
///
/// - Depending on the browser's WebGPU implementation, mapping and unmapping
///   buffers probably entails copies between WebAssembly linear memory and the
///   graphics driver's buffers.
///
/// - All modern web browsers isolate web content in its own sandboxed process,
///   which can only interact with the GPU via interprocess communication (IPC).
///   Although most browsers' IPC systems use shared memory for large data
///   transfers, there will still probably need to be copies into and out of the
///   shared memory buffers.
///
/// All of these copies contribute to the cost of buffer mapping in this
/// configuration.
///
/// [`usage`]: BufferDescriptor::usage
/// [mac]: BufferDescriptor::mapped_at_creation
/// [`MAP_READ`]: BufferUsages::MAP_READ
/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
/// [`DeviceExt::create_buffer_init()`]: util::DeviceExt::create_buffer_init
#[derive(Debug, Clone)]
pub struct Buffer {
    pub(crate) inner: dispatch::DispatchBuffer,
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    pub(crate) size: wgt::BufferAddress,
    pub(crate) usage: BufferUsages,
    // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);

impl Buffer {
    /// Return the binding view of the entire buffer.
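    ///
    /// A hedged sketch of using this in a bind group (assuming `device`,
    /// `layout`, and `buffer` already exist):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = todo!();
    /// # let layout: wgpu::BindGroupLayout = todo!();
    /// # let buffer: wgpu::Buffer = todo!();
    /// let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
    ///     label: None,
    ///     layout: &layout,
    ///     entries: &[wgpu::BindGroupEntry {
    ///         binding: 0,
    ///         // Bind all of `buffer` at binding 0.
    ///         resource: buffer.as_entire_binding(),
    ///     }],
    /// });
    /// ```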
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Return the binding view of the entire buffer.
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the hal backend's buffer type,
    /// [`A::Buffer`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   However, the guard and handle may be dropped at any time.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        let buffer = self.inner.as_core_opt()?;
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
    /// the start and end of `bounds` are given in bytes.
    ///
    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
    /// buffer contents for access from the CPU. See the [`BufferSlice`]
    /// documentation for details.
    ///
    /// The `bounds` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
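    ///
    /// A small sketch (assuming `buffer` already exists):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let head = buffer.slice(..64); // the first 64 bytes
    /// let tail = buffer.slice(64..); // everything after them
    /// ```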
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
    /// makes the buffer available for use by the GPU again.
    pub fn unmap(&self) {
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroy the associated native resources as soon as possible.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
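    ///
    /// As a hedged sketch of the polling side (assuming this crate version's
    /// [`PollType::Wait`]; older releases spelled it `Maintain::Wait`):
    ///
    /// ```no_run
    /// # let device: wgpu::Device = todo!();
    /// // Block until outstanding GPU work completes, which also invokes any
    /// // pending `map_async` callbacks.
    /// let _ = device.poll(wgpu::PollType::Wait);
    /// ```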
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a nonzero multiple of 4.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        self.slice(bounds).map_async(mode, callback)
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
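    ///
    /// A hedged sketch, assuming `buffer` is currently mapped for reading:
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let view = buffer.get_mapped_range(..);
    /// // Reinterpret the mapped bytes as `f32` values.
    /// let floats: &[f32] = bytemuck::cast_slice(&view);
    /// ```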
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a nonzero multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferView {
        self.slice(bounds).get_mapped_range()
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a nonzero multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferViewMut {
        self.slice(bounds).get_mapped_range_mut()
    }

    #[cfg(custom)]
    /// Returns the custom backend implementation of this buffer, if the buffer
    /// comes from a custom backend whose buffer type is internally `T`.
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}

/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
///
/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let slice = buffer.slice(10..20);
/// ```
///
/// This returns a slice referring to the second ten bytes of `buffer`. To get a
/// slice of the entire `Buffer`:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let whole_buffer_slice = buffer.slice(..);
/// ```
///
/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
/// a draw call should consult. You can also convert it to a [`BufferBinding`]
/// with `.into()`.
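///
/// For example, a hedged sketch of binding vertex data (assuming a render pass
/// and a vertex buffer already exist):
///
/// ```no_run
/// # let mut render_pass: wgpu::RenderPass = todo!();
/// # let buffer: wgpu::Buffer = todo!();
/// // Use the first 1024 bytes of `buffer` as vertex buffer slot 0.
/// render_pass.set_vertex_buffer(0, buffer.slice(..1024));
/// ```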
///
/// To access the slice's contents on the CPU, you must first [map] the buffer,
/// and then call [`BufferSlice::get_mapped_range`] or
/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
/// contents. See the documentation on [mapping][map] for more details,
/// including example code.
///
/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
/// nobody else is modifying the `T` values to which it refers, a
/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
/// changing. You can still record and submit commands operating on the
/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
/// represents a certain range of the buffer's bytes.
///
/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
/// specification, an offset and size are specified as arguments to each call
/// working with the [`Buffer`], instead.
///
/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    pub(crate) buffer: &'a Buffer,
    pub(crate) offset: BufferAddress,
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);

impl<'a> BufferSlice<'a> {
    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`.
    ///
    /// The `bounds` argument can be half or fully unbounded: for example,
    /// `slice.slice(..)` refers to the entire slice, and `slice.slice(n..)`
    /// refers to the portion starting at the `n`th byte of the slice and
    /// extending to the end of the slice.
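    ///
    /// A small sketch of sub-slicing (offsets are relative to the slice, not
    /// the underlying buffer):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let slice = buffer.slice(100..200);
    /// // Refers to bytes 110..120 of the underlying buffer.
    /// let sub = slice.slice(10..20);
    /// ```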
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        let (offset, size) = range_to_offset_size(bounds, self.size.get());
        check_buffer_bounds(self.size.get(), offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
            size,                         // check_buffer_bounds ensures this fits within `self`
        }
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        assert_eq!(mc.mapped_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        mc.mapped_range = self.offset..end;

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range(&self) -> BufferView {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Immutable);
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferView {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        }
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut(&self) -> BufferViewMut {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Mutable);
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferViewMut {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        }
    }

    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it’s possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] this slice starts at.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice.
    pub fn size(&self) -> BufferSize {
        self.size
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
    /// provided that it will be used without a dynamic offset.
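    ///
    /// A hedged sketch (assuming `buffer` already exists):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// // Bind the first 256 bytes of `buffer` via the `From`/`Into` conversion.
    /// let binding: wgpu::BufferBinding = buffer.slice(..256).into();
    /// ```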
    fn from(value: BufferSlice<'a>) -> Self {
        BufferBinding {
            buffer: value.buffer,
            offset: value.offset,
            size: Some(value.size),
        }
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
    /// provided that it will be used without a dynamic offset.
    fn from(value: BufferSlice<'a>) -> Self {
        crate::BindingResource::Buffer(crate::BufferBinding::from(value))
    }
}

fn range_overlaps(a: &Range<BufferAddress>, b: &Range<BufferAddress>) -> bool {
    a.start < b.end && b.start < a.end
}

fn range_contains(a: &Range<BufferAddress>, b: &Range<BufferAddress>) -> bool {
    a.start <= b.start && a.end >= b.end
}

#[derive(Debug, Copy, Clone)]
enum RangeMappingKind {
    Mutable,
    Immutable,
}

impl RangeMappingKind {
    /// Returns true if a range of this kind can touch the same bytes as a range of the other kind.
    ///
    /// This is Rust's Mutable XOR Shared rule.
    fn allowed_concurrently_with(self, other: Self) -> bool {
        matches!(
            (self, other),
            (RangeMappingKind::Immutable, RangeMappingKind::Immutable)
        )
    }
}

#[derive(Debug, Clone)]
struct Subrange {
    index: Range<BufferAddress>,
    kind: RangeMappingKind,
}

impl Subrange {
    fn new(offset: BufferAddress, size: BufferSize, kind: RangeMappingKind) -> Self {
        Self {
            index: offset..(offset + size.get()),
            kind,
        }
    }
}

impl fmt::Display for Subrange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}..{} ({:?})",
            self.index.start, self.index.end, self.kind
        )
    }
}

/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and don't conflict.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
    /// the buffer is mapped at creation time, and when you call `map_async` on
    /// some [`BufferSlice`] (so technically, it indicates the portion that is
    /// *or has been requested to be* mapped.)
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    mapped_range: Range<BufferAddress>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. Only immutable ranges may overlap one another, and
    /// all are contained within `mapped_range`.
    sub_ranges: Vec<Subrange>,
}

impl MapContext {
    /// Creates a new `MapContext`.
    ///
    /// For [`mapped_at_creation`] buffers, pass the full buffer range in the
    /// `mapped_range` argument. For other buffers, pass `None`.
    ///
    /// [`mapped_at_creation`]: BufferDescriptor::mapped_at_creation
    pub(crate) fn new(mapped_range: Option<Range<BufferAddress>>) -> Self {
        Self {
            mapped_range: mapped_range.unwrap_or(0..0),
            sub_ranges: Vec::new(),
        }
    }

    /// Record that the buffer is no longer mapped.
    fn reset(&mut self) {
        self.mapped_range = 0..0;

        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Record that the bytes of the buffer covered by `new_sub` are now viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range is invalid.
    #[track_caller]
    fn validate_and_add(&mut self, new_sub: Subrange) {
        if self.mapped_range.is_empty() {
            panic!("tried to call get_mapped_range(_mut) on an unmapped buffer");
        }
        if !range_contains(&self.mapped_range, &new_sub.index) {
            panic!(
                "tried to call get_mapped_range(_mut) on a range that is not entirely mapped. \
                 Attempted to get range {}, but the mapped range is {}..{}",
                new_sub, self.mapped_range.start, self.mapped_range.end
            );
        }

        // This check is essential for avoiding undefined behavior: it is the
        // only thing that ensures that `&mut` references to the buffer's
        // contents don't alias anything else.
        for sub in self.sub_ranges.iter() {
            if range_overlaps(&sub.index, &new_sub.index)
                && !sub.kind.allowed_concurrently_with(new_sub.kind)
            {
                panic!(
                    "tried to call get_mapped_range(_mut) on a range that has already \
                     been mapped and would break Rust memory aliasing rules. Attempted \
                     to get range {}, and the conflicting range is {}",
                    new_sub, sub
                );
            }
        }
        self.sub_ranges.push(new_sub);
    }

    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range does not exactly match one previously
    /// passed to [`MapContext::validate_and_add`].
    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
        let end = offset + size.get();

        let index = self
            .sub_ranges
            .iter()
            .position(|r| r.index == (offset..end))
            .expect("unable to remove range from map context");
        self.sub_ranges.swap_remove(index);
    }
}

/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);

/// Error occurred when trying to async map a buffer.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);

impl fmt::Display for BufferAsyncError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Error occurred when trying to async map a buffer")
    }
}

impl error::Error for BufferAsyncError {}

/// Type of buffer mapping.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading
    Read,
    /// Map only for writing
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);

/// A read-only view of a mapped buffer's bytes.
///
/// To get a `BufferView`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range()`.
///
/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
/// slice methods to access the buffer's contents. It also implements
/// `AsRef<[u8]>`, if that's more convenient.
///
/// Before the buffer can be unmapped, all `BufferView`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
/// [`map_async`]: BufferSlice::map_async
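///
/// A hedged sketch of reading through a view, assuming the buffer is currently
/// mapped:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let view = buffer.get_mapped_range(..);
/// // Interpret the first four bytes as a little-endian u32.
/// let first_word = u32::from_le_bytes(view[..4].try_into().unwrap());
/// ```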
#[derive(Debug)]
pub struct BufferView {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
}

#[cfg(webgpu)]
impl BufferView {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in JS.
    /// This can be much faster than dereferencing the view, which copies the data into
    /// the Rust/Wasm heap.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        self.inner.as_uint8array()
    }
}

impl core::ops::Deref for BufferView {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.inner.slice()
    }
}

impl AsRef<[u8]> for BufferView {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.slice()
    }
}

/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// `BufferViewMut` dereferences to `&mut [u8]`, so you can use all the usual
/// Rust slice methods to access the buffer's contents. It also implements
/// `AsMut<[u8]>`, if that's more convenient.
///
/// It is possible to read the buffer using this view, but doing so is not
/// recommended, as it is likely to be slow.
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferViewMut {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
}

impl AsMut<[u8]> for BufferViewMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Deref for BufferViewMut {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.inner.slice()
    }
}

impl DerefMut for BufferViewMut {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl Drop for BufferView {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

impl Drop for BufferViewMut {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

#[track_caller]
fn check_buffer_bounds(
    buffer_size: BufferAddress,
    slice_offset: BufferAddress,
    slice_size: BufferSize,
) {
    // A slice of length 0 is invalid, so the offset must not be equal to or greater than the buffer size.
    if slice_offset >= buffer_size {
        panic!(
            "slice offset {} is out of range for buffer of size {}",
            slice_offset, buffer_size
        );
    }

    // Detect integer overflow.
    let end = slice_offset.checked_add(slice_size.get());
    if end.is_none_or(|end| end > buffer_size) {
        panic!(
            "slice offset {} size {} is out of range for buffer of size {}",
            slice_offset, slice_size, buffer_size
        );
    }
}

#[track_caller]
pub(crate) fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
    bounds: S,
    whole_size: BufferAddress,
) -> (BufferAddress, BufferSize) {
    let offset = match bounds.start_bound() {
        Bound::Included(&bound) => bound,
        Bound::Excluded(&bound) => bound + 1,
        Bound::Unbounded => 0,
    };
    let size = BufferSize::new(match bounds.end_bound() {
        Bound::Included(&bound) => bound + 1 - offset,
        Bound::Excluded(&bound) => bound - offset,
        Bound::Unbounded => whole_size - offset,
    })
    .expect("buffer slices can not be empty");

    (offset, size)
}

#[cfg(test)]
mod tests {
    use super::{
        check_buffer_bounds, range_overlaps, range_to_offset_size, BufferAddress, BufferSize,
    };

    fn bs(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, bs(2)));
        assert_eq!(range_to_offset_size(2..5, whole), (2, bs(3)));
        assert_eq!(range_to_offset_size(.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(21.., whole), (21, bs(whole - 21)));
        assert_eq!(range_to_offset_size(0.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(..21, whole), (0, bs(21)));
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, bs(50));
        check_buffer_bounds(200, 100, bs(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, bs(100));
        check_buffer_bounds(u64::MAX, 0, bs(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX - 1));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, bs(101));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX));
    }

    #[test]
    fn range_overlapping() {
        // First range to the left
        assert_eq!(range_overlaps(&(0..1), &(1..3)), false);
        // First range overlaps left edge
        assert_eq!(range_overlaps(&(0..2), &(1..3)), true);
        // First range completely inside second
        assert_eq!(range_overlaps(&(1..2), &(0..3)), true);
        // First range completely surrounds second
        assert_eq!(range_overlaps(&(0..3), &(1..2)), true);
        // First range overlaps right edge
        assert_eq!(range_overlaps(&(1..3), &(0..2)), true);
        // First range entirely to the right
        assert_eq!(range_overlaps(&(2..3), &(0..2)), false);
    }
}