wgpu/api/buffer.rs

use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::{
    error, fmt,
    ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};

use crate::util::Mutex;
use crate::*;

/// Handle to a GPU-accessible buffer.
///
/// A `Buffer` is a memory allocation for use by the GPU, somewhat analogous to
/// <code>[Box]&lt;[\[u8\]][primitive@slice]&gt;</code> in Rust.
/// The contents of buffers are untyped bytes; it is up to the application to
/// specify the interpretation of the bytes when the buffer is used, in ways
/// such as [`VertexBufferLayout`].
/// A single buffer can be used to hold multiple independent pieces of data at
/// different offsets (e.g. both vertices and indices for one or more meshes).
///
/// A `Buffer`'s bytes have "interior mutability": functions like
/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
/// prevents simultaneous reads and writes of buffer contents using run-time
/// checks.
///
/// Created with [`Device::create_buffer()`] or
/// [`DeviceExt::create_buffer_init()`].
///
/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
///
/// [mapping]: Buffer#mapping-buffers
///
/// # How to get your data into a buffer
///
/// Every `Buffer` starts with all bytes zeroed.
/// There are many ways to load data into a `Buffer`:
///
/// - When creating a buffer, you may set the [`mapped_at_creation`][mac] flag,
///   then write to its [`get_mapped_range_mut()`][Buffer::get_mapped_range_mut].
///   This only works when the buffer is created and has not yet been used by
///   the GPU, but it is all you need for buffers whose contents do not change
///   after creation.
///   - You may use [`DeviceExt::create_buffer_init()`] as a convenient way to
///     do that and copy data from a `&[u8]` you provide.
/// - After creation, you may use [`Buffer::map_async()`] to map it again;
///   however, you then need to wait until the GPU is no longer using the buffer
///   before you begin writing.
/// - You may use [`CommandEncoder::copy_buffer_to_buffer()`] to copy data into
///   this buffer from another buffer.
/// - You may use [`Queue::write_buffer()`] to copy data into the buffer from a
///   `&[u8]`, as sketched after this list. This uses a temporary “staging”
///   buffer managed by `wgpu` to hold the data.
///   - [`Queue::write_buffer_with()`] allows you to write directly into temporary
///     storage instead of providing a slice you already prepared, which may
///     allow *your* code to save the allocation of a [`Vec`] or such.
/// - You may use [`util::StagingBelt`] to manage a set of temporary buffers.
///   This may be more efficient than [`Queue::write_buffer_with()`] when you
///   have many small copies to perform, but requires more steps to use, and
///   tuning of the belt buffer size.
/// - You may write your own staging buffer management customized to your
///   application, based on mapped buffers and
///   [`CommandEncoder::copy_buffer_to_buffer()`].
/// - A GPU computation’s results can be stored in a buffer:
///   - A [compute shader][ComputePipeline] may write to a buffer bound as a
///     [storage buffer][BufferBindingType::Storage].
///   - A render pass may render to a texture which is then copied to a buffer
///     using [`CommandEncoder::copy_texture_to_buffer()`].
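///
/// For instance, here is a minimal sketch of the [`Queue::write_buffer()`]
/// approach. It assumes a `queue` and a 16-byte `buffer` whose usage includes
/// [`BufferUsages::COPY_DST`]; the [`bytemuck`] crate converts the `f32`s to
/// bytes:
///
/// ```no_run
/// # let queue: wgpu::Queue = todo!();
/// # let buffer: wgpu::Buffer = todo!();
/// let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
/// // Copies the bytes of `data` into the buffer, starting at offset 0.
/// queue.write_buffer(&buffer, 0, bytemuck::cast_slice(&data));
/// ```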
///
/// # Mapping buffers
///
/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
/// `&mut [u8]` slice of bytes. Buffers created with the
/// [`mapped_at_creation`][mac] flag set are also mapped initially.
///
/// Depending on the hardware, the buffer could be memory shared between CPU and
/// GPU, so that the CPU has direct access to the same bytes the GPU will
/// consult; or it may be ordinary CPU memory, whose contents the system must
/// copy to/from the GPU as needed. This crate's API is designed to work the
/// same way in either case: at any given time, a buffer is either mapped and
/// available to the CPU, or unmapped and ready for use by the GPU, but never
/// both. This makes it impossible for either side to observe changes by the
/// other immediately, and any necessary transfers can be carried out when the
/// buffer transitions from one state to the other.
///
/// There are two ways to map a buffer:
///
/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
///   buffer is mapped when it is created. This is the easiest way to initialize
///   a new buffer (see the sketch after this list). You can set
///   `mapped_at_creation` on any kind of buffer, regardless of its [`usage`]
///   flags.
///
/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
///   flags, then you can call `buffer.slice(range).map_async(mode, callback)`
///   to map the portion of `buffer` given by `range`. This waits for the GPU to
///   finish using the buffer, and invokes `callback` as soon as the buffer is
///   safe for the CPU to access.
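///
/// As a minimal sketch of the first approach (assuming a `device`), the
/// following creates a 16-byte vertex buffer and fills it while it is still
/// mapped from creation:
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: None,
///     size: 16,
///     usage: wgpu::BufferUsages::VERTEX,
///     mapped_at_creation: true,
/// });
/// // The buffer starts out mapped, so we can write to it immediately,
/// // then unmap it to hand it over to the GPU.
/// buffer.get_mapped_range_mut(..).copy_from_slice(&[0u8; 16]);
/// buffer.unmap();
/// ```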
///
/// Once a buffer is mapped:
///
/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
///   [`BufferView`], which dereferences to a `&[u8]` that you can use to read
///   the buffer's contents.
///
/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
///   read and write the buffer's contents.
///
/// The given `range` must fall within the mapped portion of the buffer. If you
/// attempt to create views of overlapping ranges, these methods panic unless
/// all of the overlapping views are read-only [`BufferView`]s; a
/// [`BufferViewMut`] must never overlap any other view.
///
/// While a buffer is mapped, you may not submit any commands to the GPU that
/// access it. You may record command buffers that use the buffer, but if you
/// submit them while the buffer is mapped, submission will panic.
///
/// When you are done using the buffer on the CPU, you must call
/// [`Buffer::unmap`] to make it available for use by the GPU again. All
/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
///
/// # Example
///
/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
/// with `f32` values like this:
///
/// ```
/// # #[cfg(feature = "noop")]
/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
/// # #[cfg(not(feature = "noop"))]
/// # let device: wgpu::Device = { return; };
/// #
/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
/// #     label: None,
/// #     size: 400,
/// #     usage: wgpu::BufferUsages::MAP_WRITE,
/// #     mapped_at_creation: false,
/// # });
/// let capturable = buffer.clone();
/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
///     if result.is_ok() {
///         let mut view = capturable.get_mapped_range_mut(..);
///         let floats: &mut [f32] = bytemuck::cast_slice_mut(&mut view);
///         floats.fill(42.0);
///         drop(view);
///         capturable.unmap();
///     }
/// });
/// ```
///
/// This code takes the following steps:
///
/// - First, it makes a cloned handle to the buffer for capture by
///   the callback passed to [`map_async`]. Since a [`map_async`] callback may be
///   invoked from another thread, interaction between the callback and the
///   thread calling [`map_async`] generally requires some sort of shared heap
///   data like this. In real code, there might be an [`Arc`] to some larger
///   structure that itself owns `buffer`.
///
/// - Then, it calls [`Buffer::map_async`] with the range `..`, covering the
///   buffer's entire contents, to request that those bytes be made accessible
///   to the CPU ("mapped"). This may entail waiting for previously enqueued
///   operations on `buffer` to finish. Although [`map_async`] itself always
///   returns immediately, it saves the callback function to be invoked later.
///
/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
///   shown in this example) determines that the buffer is mapped and ready for
///   the CPU to use, it invokes the callback function.
///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` slice referring to
///   the buffer's bytes.
///
/// - It then uses the [`bytemuck`] crate to turn the `&mut [u8]` into a `&mut
///   [f32]`, and calls the slice [`fill`] method to fill the buffer with a
///   useful value.
///
/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
///   the buffer. In real code, the callback would also need to do some sort of
///   synchronization to let the rest of the program know that it has completed
///   its work.
///
/// If using [`map_async`] directly is awkward, you may find it more convenient to
/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
/// However, those each have their own tradeoffs; the asynchronous nature of GPU
/// execution makes it hard to avoid friction altogether.
///
/// [`Arc`]: std::sync::Arc
/// [`map_async`]: Buffer::map_async
/// [`bytemuck`]: https://crates.io/crates/bytemuck
/// [`fill`]: slice::fill
///
/// ## Mapping buffers on the web
///
/// When compiled to WebAssembly and running in a browser content process,
/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
/// In this context, `wgpu` is further isolated from the GPU:
///
/// - Depending on the browser's WebGPU implementation, mapping and unmapping
///   buffers probably entails copies between WebAssembly linear memory and the
///   graphics driver's buffers.
///
/// - All modern web browsers isolate web content in its own sandboxed process,
///   which can only interact with the GPU via interprocess communication (IPC).
///   Although most browsers' IPC systems use shared memory for large data
///   transfers, there will still probably need to be copies into and out of the
///   shared memory buffers.
///
/// All of these copies contribute to the cost of buffer mapping in this
/// configuration.
///
/// [`usage`]: BufferDescriptor::usage
/// [mac]: BufferDescriptor::mapped_at_creation
/// [`MAP_READ`]: BufferUsages::MAP_READ
/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
/// [`DeviceExt::create_buffer_init()`]: util::DeviceExt::create_buffer_init
#[derive(Debug, Clone)]
pub struct Buffer {
    pub(crate) inner: dispatch::DispatchBuffer,
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    pub(crate) size: wgt::BufferAddress,
    pub(crate) usage: BufferUsages,
    // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);

impl Buffer {
    /// Return the binding view of the entire buffer.
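    ///
    /// As a hedged sketch, the result is typically used as the `resource` of a
    /// [`BindGroupEntry`]:
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let entry = wgpu::BindGroupEntry {
    ///     binding: 0,
    ///     resource: buffer.as_entire_binding(),
    /// };
    /// ```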
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Return the binding view of the entire buffer.
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the active backend's buffer type,
    /// [`A::Buffer`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   However, the guard and handle may be dropped at any time.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        let buffer = self.inner.as_core_opt()?;
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
    /// the start and end of `bounds` are given in bytes.
    ///
    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
    /// buffer contents for access from the CPU. See the [`BufferSlice`]
    /// documentation for details.
    ///
    /// The `bounds` argument can be half- or fully-unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
    /// makes the buffer available for use by the GPU again.
    pub fn unmap(&self) {
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroy the associated native resources as soon as possible.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is zero or not a multiple of 4.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
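    ///
    /// For example, a minimal sketch (assuming `buffer` was created with
    /// [`BufferUsages::MAP_READ`]):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// buffer.map_async(wgpu::MapMode::Read, .., |result| {
    ///     // This runs once a later poll or submit call notices that the GPU
    ///     // is done with the buffer; keep it short.
    ///     result.expect("failed to map buffer");
    /// });
    /// // ... elsewhere, `device.poll(..)` (or similar) must be called for the
    /// // callback to be invoked.
    /// ```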
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        self.slice(bounds).map_async(mode, callback)
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may cover a smaller range than the one passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as none
    /// of them overlaps a mutable view ([`BufferViewMut`]).
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is zero or not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
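    ///
    /// A hedged sketch, assuming `buffer` has already been mapped for reading
    /// (see the [mapping][mapped] documentation for how to get there):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let view = buffer.get_mapped_range(..);
    /// // `view` dereferences to `&[u8]`; reinterpret the bytes as `f32`s.
    /// let floats: &[f32] = bytemuck::cast_slice(&view);
    /// ```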
    #[track_caller]
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferView {
        self.slice(bounds).get_mapped_range()
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may cover a smaller range than the one passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they
    /// do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is zero or not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
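    ///
    /// A hedged sketch, assuming `buffer` has already been mapped for writing:
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let mut view = buffer.get_mapped_range_mut(..);
    /// view.fill(0); // zero out the mapped bytes
    /// drop(view);   // all views must be dropped before unmapping
    /// buffer.unmap();
    /// ```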
    #[track_caller]
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferViewMut {
        self.slice(bounds).get_mapped_range_mut()
    }

    #[cfg(custom)]
    /// Returns a custom backend implementation of this `Buffer`, if the buffer
    /// comes from the custom backend and its inner type is `T`.
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}

/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
///
/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let slice = buffer.slice(10..20);
/// ```
///
/// This returns a slice referring to the second ten bytes of `buffer`. To get a
/// slice of the entire `Buffer`:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let whole_buffer_slice = buffer.slice(..);
/// ```
///
/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
/// a draw call should consult. You can also convert it to a [`BufferBinding`]
/// with `.into()`.
///
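/// For example, a minimal sketch (assuming a `render_pass` being recorded and a
/// vertex `buffer`):
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// # let mut render_pass: wgpu::RenderPass<'_> = todo!();
/// // Bind the whole buffer to vertex buffer slot 0.
/// render_pass.set_vertex_buffer(0, buffer.slice(..));
/// ```
///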
/// To access the slice's contents on the CPU, you must first [map] the buffer,
/// and then call [`BufferSlice::get_mapped_range`] or
/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
/// contents. See the documentation on [mapping][map] for more details,
/// including example code.
///
/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
/// nobody else is modifying the `T` values to which it refers, a
/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
/// changing. You can still record and submit commands operating on the
/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
/// represents a certain range of the buffer's bytes.
///
/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
/// specification, an offset and size are specified as arguments to each call
/// working with the [`Buffer`], instead.
///
/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    pub(crate) buffer: &'a Buffer,
    pub(crate) offset: BufferAddress,
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);

impl<'a> BufferSlice<'a> {
    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`.
    ///
    /// The `bounds` argument can be half- or fully-unbounded, and is interpreted
    /// relative to this slice: for example, `slice.slice(..)` refers to the entire
    /// slice, and `slice.slice(n..)` refers to the portion starting at the `n`th
    /// byte of the slice and extending to its end.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
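    ///
    /// A small sketch of how offsets compose (the numbers here are illustrative):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let slice = buffer.slice(8..24);
    /// // `sub` covers bytes 16..20 of the underlying buffer,
    /// // because its offset is relative to the start of `slice`.
    /// let sub = slice.slice(8..12);
    /// ```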
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        let (offset, size) = range_to_offset_size(bounds, self.size.get());
        check_buffer_bounds(self.size.get(), offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
            size,                         // check_buffer_bounds ensures this is essentially min()
        }
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        assert_eq!(mc.mapped_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        mc.mapped_range = self.offset..end;

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as none
    /// of them overlaps a mutable view ([`BufferViewMut`]).
    ///
    /// This can also be performed using [`Buffer::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range(&self) -> BufferView {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Immutable);
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferView {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        }
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they do
    /// not overlap: a mutable view must not share any bytes with any other view.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut(&self) -> BufferViewMut {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Mutable);
        self.buffer
            .map_context
            .lock()
            .validate_and_add(subrange.clone());
        let range = self.buffer.inner.get_mapped_range(subrange.index);
        BufferViewMut {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
        }
    }

    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it’s possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] this slice starts at.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice.
    pub fn size(&self) -> BufferSize {
        self.size
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
    /// provided that it will be used without a dynamic offset.
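    ///
    /// A small sketch of the conversion:
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let binding: wgpu::BufferBinding<'_> = buffer.slice(..).into();
    /// ```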
    fn from(value: BufferSlice<'a>) -> Self {
        BufferBinding {
            buffer: value.buffer,
            offset: value.offset,
            size: Some(value.size),
        }
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
    /// provided that it will be used without a dynamic offset.
    fn from(value: BufferSlice<'a>) -> Self {
        crate::BindingResource::Buffer(crate::BufferBinding::from(value))
    }
}

fn range_overlaps(a: &Range<BufferAddress>, b: &Range<BufferAddress>) -> bool {
    a.start < b.end && b.start < a.end
}

fn range_contains(a: &Range<BufferAddress>, b: &Range<BufferAddress>) -> bool {
    a.start <= b.start && a.end >= b.end
}

#[derive(Debug, Copy, Clone)]
enum RangeMappingKind {
    Mutable,
    Immutable,
}

impl RangeMappingKind {
    /// Returns true if a range of this kind can touch the same bytes as a range of the other kind.
    ///
    /// This is Rust's Mutable XOR Shared rule.
    fn allowed_concurrently_with(self, other: Self) -> bool {
        matches!(
            (self, other),
            (RangeMappingKind::Immutable, RangeMappingKind::Immutable)
        )
    }
}

#[derive(Debug, Clone)]
struct Subrange {
    index: Range<BufferAddress>,
    kind: RangeMappingKind,
}

impl Subrange {
    fn new(offset: BufferAddress, size: BufferSize, kind: RangeMappingKind) -> Self {
        Self {
            index: offset..(offset + size.get()),
            kind,
        }
    }
}

impl fmt::Display for Subrange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "{}..{} ({:?})",
            self.index.start, self.index.end, self.kind
        )
    }
}

/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and don't overlap in
/// ways that would violate Rust's aliasing rules.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
    /// the buffer is mapped at creation time, and when you call `map_async` on
    /// some [`BufferSlice`] (so technically, it indicates the portion that is
    /// *or has been requested to be* mapped.)
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    mapped_range: Range<BufferAddress>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. These overlap only where all the overlapping views
    /// are immutable, and are all contained within `mapped_range`.
    sub_ranges: Vec<Subrange>,
}

impl MapContext {
    /// Creates a new `MapContext`.
    ///
    /// For [`mapped_at_creation`] buffers, pass the full buffer range in the
    /// `mapped_range` argument. For other buffers, pass `None`.
    ///
    /// [`mapped_at_creation`]: BufferDescriptor::mapped_at_creation
    pub(crate) fn new(mapped_range: Option<Range<BufferAddress>>) -> Self {
        Self {
            mapped_range: mapped_range.unwrap_or(0..0),
            sub_ranges: Vec::new(),
        }
    }

    /// Record that the buffer is no longer mapped.
    fn reset(&mut self) {
        self.mapped_range = 0..0;

        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Record that the bytes of the buffer covered by `new_sub` are now viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range is not entirely mapped, or if it overlaps
    /// an existing view in a way that would break Rust's aliasing rules.
    #[track_caller]
    fn validate_and_add(&mut self, new_sub: Subrange) {
        if self.mapped_range.is_empty() {
            panic!("tried to call get_mapped_range(_mut) on an unmapped buffer");
        }
        if !range_contains(&self.mapped_range, &new_sub.index) {
            panic!(
                "tried to call get_mapped_range(_mut) on a range that is not entirely mapped. \
                 Attempted to get range {}, but the mapped range is {}..{}",
                new_sub, self.mapped_range.start, self.mapped_range.end
            );
        }

        // This check is essential for avoiding undefined behavior: it is the
        // only thing that ensures that `&mut` references to the buffer's
        // contents don't alias anything else.
        for sub in self.sub_ranges.iter() {
            if range_overlaps(&sub.index, &new_sub.index)
                && !sub.kind.allowed_concurrently_with(new_sub.kind)
            {
                panic!(
                    "tried to call get_mapped_range(_mut) on a range that has already \
                     been mapped and would break Rust memory aliasing rules. Attempted \
                     to get range {}, and the conflicting range is {}",
                    new_sub, sub
                );
            }
        }
        self.sub_ranges.push(new_sub);
    }

    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range does not exactly match one previously
    /// passed to [`MapContext::validate_and_add`].
    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
        let end = offset + size.get();

        let index = self
            .sub_ranges
            .iter()
            .position(|r| r.index == (offset..end))
            .expect("unable to remove range from map context");
        self.sub_ranges.swap_remove(index);
    }
}

/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);

/// Error occurred when trying to asynchronously map a buffer.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);

impl fmt::Display for BufferAsyncError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Error occurred when trying to async map a buffer")
    }
}

impl error::Error for BufferAsyncError {}

/// Type of buffer mapping.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading
    Read,
    /// Map only for writing
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);

/// A read-only view of a mapped buffer's bytes.
///
/// To get a `BufferView`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range()`.
///
/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
/// slice methods to access the buffer's contents. It also implements
/// `AsRef<[u8]>`, if that's more convenient.
///
/// Before the buffer can be unmapped, all `BufferView`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferView {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
}

#[cfg(webgpu)]
impl BufferView {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in JS.
    /// This can be much faster than dereferencing the view, which copies the data
    /// into the Rust/Wasm heap.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        self.inner.as_uint8array()
    }
}

impl core::ops::Deref for BufferView {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.inner.slice()
    }
}

impl AsRef<[u8]> for BufferView {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.slice()
    }
}

/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// `BufferViewMut` dereferences to `&mut [u8]`, so you can use all the usual
/// Rust slice methods to access the buffer's contents. It also implements
/// `AsMut<[u8]>`, if that's more convenient.
///
/// It is possible to read the buffer using this view, but doing so is not
/// recommended, as it is likely to be slow.
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferViewMut {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
    readable: bool,
}

impl AsMut<[u8]> for BufferViewMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Deref for BufferViewMut {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        if !self.readable {
            log::warn!("Reading from a BufferViewMut is slow and not recommended.");
        }

        self.inner.slice()
    }
}

impl DerefMut for BufferViewMut {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl Drop for BufferView {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

impl Drop for BufferViewMut {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

#[track_caller]
fn check_buffer_bounds(
    buffer_size: BufferAddress,
    slice_offset: BufferAddress,
    slice_size: BufferSize,
) {
    // A slice of length 0 is invalid, so the offset must not be equal to or greater than the buffer size.
    if slice_offset >= buffer_size {
        panic!(
            "slice offset {} is out of range for buffer of size {}",
            slice_offset, buffer_size
        );
    }

    // Detect integer overflow.
    let end = slice_offset.checked_add(slice_size.get());
    if end.is_none_or(|end| end > buffer_size) {
        panic!(
            "slice offset {} size {} is out of range for buffer of size {}",
            slice_offset, slice_size, buffer_size
        );
    }
}

#[track_caller]
pub(crate) fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
    bounds: S,
    whole_size: BufferAddress,
) -> (BufferAddress, BufferSize) {
    let offset = match bounds.start_bound() {
        Bound::Included(&bound) => bound,
        Bound::Excluded(&bound) => bound + 1,
        Bound::Unbounded => 0,
    };
    let size = BufferSize::new(match bounds.end_bound() {
        Bound::Included(&bound) => bound + 1 - offset,
        Bound::Excluded(&bound) => bound - offset,
        Bound::Unbounded => whole_size - offset,
    })
    .expect("buffer slices can not be empty");

    (offset, size)
}

#[cfg(test)]
mod tests {
    use super::{
        check_buffer_bounds, range_overlaps, range_to_offset_size, BufferAddress, BufferSize,
    };

    fn bs(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, bs(2)));
        assert_eq!(range_to_offset_size(2..5, whole), (2, bs(3)));
        assert_eq!(range_to_offset_size(.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(21.., whole), (21, bs(whole - 21)));
        assert_eq!(range_to_offset_size(0.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(..21, whole), (0, bs(21)));
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, bs(50));
        check_buffer_bounds(200, 100, bs(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, bs(100));
        check_buffer_bounds(u64::MAX, 0, bs(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX - 1));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, bs(101));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX));
    }

    #[test]
    fn range_overlapping() {
        // First range entirely to the left
        assert_eq!(range_overlaps(&(0..1), &(1..3)), false);
        // First range overlaps left edge
        assert_eq!(range_overlaps(&(0..2), &(1..3)), true);
        // First range completely inside second
        assert_eq!(range_overlaps(&(1..2), &(0..3)), true);
        // First range completely surrounds second
        assert_eq!(range_overlaps(&(0..3), &(1..2)), true);
        // First range overlaps right edge
        assert_eq!(range_overlaps(&(1..3), &(0..2)), true);
        // First range entirely to the right
        assert_eq!(range_overlaps(&(2..3), &(0..2)), false);
    }
}