wgpu/api/
buffer.rs

1use alloc::{boxed::Box, sync::Arc, vec::Vec};
2use core::{
3    error, fmt,
4    ops::{Bound, Deref, DerefMut, Range, RangeBounds},
5};
6
7use crate::util::Mutex;
8use crate::*;
9
10/// Handle to a GPU-accessible buffer.
11///
12/// Created with [`Device::create_buffer`] or
13/// [`DeviceExt::create_buffer_init`](util::DeviceExt::create_buffer_init).
14///
15/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
16///
17/// A `Buffer`'s bytes have "interior mutability": functions like
18/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
19/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
20/// prevents simultaneous reads and writes of buffer contents using run-time
21/// checks.
22///
23/// [mapping]: Buffer#mapping-buffers
24///
25/// # Mapping buffers
26///
27/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
28/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
29/// `&mut [u8]` slice of bytes. Buffers created with the
30/// [`mapped_at_creation`][mac] flag set are also mapped initially.
31///
32/// Depending on the hardware, the buffer could be memory shared between CPU and
33/// GPU, so that the CPU has direct access to the same bytes the GPU will
34/// consult; or it may be ordinary CPU memory, whose contents the system must
35/// copy to/from the GPU as needed. This crate's API is designed to work the
36/// same way in either case: at any given time, a buffer is either mapped and
37/// available to the CPU, or unmapped and ready for use by the GPU, but never
38/// both. This makes it impossible for either side to observe changes by the
39/// other immediately, and any necessary transfers can be carried out when the
40/// buffer transitions from one state to the other.
41///
42/// There are two ways to map a buffer:
43///
44/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
45///   buffer is mapped when it is created. This is the easiest way to initialize
46///   a new buffer. You can set `mapped_at_creation` on any kind of buffer,
47///   regardless of its [`usage`] flags.
48///
49/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
50///   flags, then you can call `buffer.slice(range).map_async(mode, callback)`
51///   to map the portion of `buffer` given by `range`. This waits for the GPU to
52///   finish using the buffer, and invokes `callback` as soon as the buffer is
53///   safe for the CPU to access.
54///
55/// Once a buffer is mapped:
56///
57/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
58///   [`BufferView`], which dereferences to a `&[u8]` that you can use to read
59///   the buffer's contents.
60///
61/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
62///   [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
63///   read and write the buffer's contents.
64///
65/// The given `range` must fall within the mapped portion of the buffer. If you
66/// attempt to access overlapping ranges, even for shared access only, these
67/// methods panic.
68///
69/// While a buffer is mapped, you may not submit any commands to the GPU that
70/// access it. You may record command buffers that use the buffer, but if you
71/// submit them while the buffer is mapped, submission will panic.
72///
73/// When you are done using the buffer on the CPU, you must call
74/// [`Buffer::unmap`] to make it available for use by the GPU again. All
75/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
76/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
77///
78/// # Example
79///
80/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
81/// with `f32` values like this:
82///
83/// ```
84/// # #[cfg(feature = "noop")]
85/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
86/// # #[cfg(not(feature = "noop"))]
87/// # let device: wgpu::Device = { return; };
88/// #
89/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
90/// #     label: None,
91/// #     size: 400,
92/// #     usage: wgpu::BufferUsages::MAP_WRITE,
93/// #     mapped_at_creation: false,
94/// # });
95/// let capturable = buffer.clone();
96/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
97///     if result.is_ok() {
98///         let mut view = capturable.get_mapped_range_mut(..);
99///         let floats: &mut [f32] = bytemuck::cast_slice_mut(&mut view);
100///         floats.fill(42.0);
101///         drop(view);
102///         capturable.unmap();
103///     }
104/// });
105/// ```
106///
107/// This code takes the following steps:
108///
109/// - First, it makes a cloned handle to the buffer for capture by
110///   the callback passed to [`map_async`]. Since a [`map_async`] callback may be
111///   invoked from another thread, interaction between the callback and the
112///   thread calling [`map_async`] generally requires some sort of shared heap
113///   data like this. In real code, there might be an [`Arc`] to some larger
114///   structure that itself owns `buffer`.
115///
/// - Then, it calls [`Buffer::map_async`] with an unbounded range (`..`) to
///   request that the buffer's entire contents be made accessible to the CPU
///   ("mapped"). (This is a convenience for calling [`Buffer::slice`] and then
///   [`BufferSlice::map_async`].) This may entail waiting for previously
///   enqueued operations on `buffer` to finish. Although [`map_async`] itself
///   always returns immediately, it saves the callback function to be invoked
///   later.
124///
125/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
126///   shown in this example) determines that the buffer is mapped and ready for
127///   the CPU to use, it invokes the callback function.
128///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` slice referring to
///   the buffer's bytes.
132///
133/// - It then uses the [`bytemuck`] crate to turn the `&mut [u8]` into a `&mut
134///   [f32]`, and calls the slice [`fill`] method to fill the buffer with a
135///   useful value.
136///
137/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
138///   the buffer. In real code, the callback would also need to do some sort of
139///   synchronization to let the rest of the program know that it has completed
140///   its work.
141///
142/// If using [`map_async`] directly is awkward, you may find it more convenient to
143/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
144/// However, those each have their own tradeoffs; the asynchronous nature of GPU
145/// execution makes it hard to avoid friction altogether.
146///
147/// [`Arc`]: std::sync::Arc
148/// [`map_async`]: BufferSlice::map_async
149/// [`bytemuck`]: https://crates.io/crates/bytemuck
150/// [`fill`]: slice::fill
151///
152/// ## Mapping buffers on the web
153///
154/// When compiled to WebAssembly and running in a browser content process,
155/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
156/// In this context, `wgpu` is further isolated from the GPU:
157///
158/// - Depending on the browser's WebGPU implementation, mapping and unmapping
159///   buffers probably entails copies between WebAssembly linear memory and the
160///   graphics driver's buffers.
161///
162/// - All modern web browsers isolate web content in its own sandboxed process,
163///   which can only interact with the GPU via interprocess communication (IPC).
164///   Although most browsers' IPC systems use shared memory for large data
165///   transfers, there will still probably need to be copies into and out of the
166///   shared memory buffers.
167///
168/// All of these copies contribute to the cost of buffer mapping in this
169/// configuration.
170///
171/// [`usage`]: BufferDescriptor::usage
172/// [mac]: BufferDescriptor::mapped_at_creation
173/// [`MAP_READ`]: BufferUsages::MAP_READ
174/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
#[derive(Debug, Clone)]
pub struct Buffer {
    /// Backend-dispatched handle to the underlying buffer resource.
    pub(crate) inner: dispatch::DispatchBuffer,
    /// Tracks which portion of the buffer is mapped (or requested to be
    /// mapped), and which sub-ranges are currently exposed through
    /// [`BufferView`] / [`BufferViewMut`] views.
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    /// Length of the buffer allocation in bytes; fixed at creation.
    pub(crate) size: wgt::BufferAddress,
    /// The usages allowed for this buffer; fixed at creation.
    pub(crate) usage: BufferUsages,
    // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);
187
188impl Buffer {
189    /// Return the binding view of the entire buffer.
190    pub fn as_entire_binding(&self) -> BindingResource<'_> {
191        BindingResource::Buffer(self.as_entire_buffer_binding())
192    }
193
194    /// Return the binding view of the entire buffer.
195    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
196        BufferBinding {
197            buffer: self,
198            offset: 0,
199            size: None,
200        }
201    }
202
    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Buffer`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return None if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        // Returns `None` (via `?`) when this buffer is not backed by wgpu-core
        // (i.e. the `webgpu` or `custom` backends).
        let buffer = self.inner.as_core_opt()?;
        // SAFETY: deferred to `buffer_as_hal`; the caller upholds the
        // contract documented under `# Safety` above.
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }
248
249    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
250    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
251    /// `bounds` start and end are given in bytes.
252    ///
253    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
254    /// buffer contents for access from the CPU. See the [`BufferSlice`]
255    /// documentation for details.
256    ///
257    /// The `range` argument can be half or fully unbounded: for example,
258    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
259    /// refers to the portion starting at the `n`th byte and extending to the
260    /// end of the buffer.
261    ///
262    /// # Panics
263    ///
264    /// - If `bounds` is outside of the bounds of `self`.
265    /// - If `bounds` has a length less than 1.
266    #[track_caller]
267    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
268        let (offset, size) = range_to_offset_size(bounds, self.size);
269        check_buffer_bounds(self.size, offset, size);
270        BufferSlice {
271            buffer: self,
272            offset,
273            size,
274        }
275    }
276
277    /// Unmaps the buffer from host memory.
278    ///
279    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
280    /// makes the buffer available for use by the GPU again.
281    pub fn unmap(&self) {
282        self.map_context.lock().reset();
283        self.inner.unmap();
284    }
285
    /// Destroy the associated native resources as soon as possible.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    /// (The size is cached on this handle, so this call does not reach into the backend.)
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    /// (The usages are cached on this handle, so this call does not reach into the backend.)
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }
304
    /// Map the buffer to host (CPU) memory, making it available for reading or writing
    /// via [`get_mapped_range()`](Self::get_mapped_range).
    /// It is available once the `callback` is called with an [`Ok`] response.
    ///
    /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
    /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls the above functions after the GPU work
    /// has completed. There are no restrictions on the code you can run in the callback, however on native the
    /// call to the function will not complete until the callback returns, so prefer keeping callbacks short
    /// and used to set flags, send messages, etc.
    ///
    /// As long as a buffer is mapped, it is not available for use by any other commands;
    /// at all times, either the GPU or the CPU has exclusive access to the contents of the buffer.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        // Convenience wrapper: `slice` performs the bounds checks, and
        // `BufferSlice::map_async` records the mapped range and dispatches.
        self.slice(bounds).map_async(mode, callback)
    }
337
    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may be less than the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferView<'_> {
        // Delegates to `BufferSlice`, which validates the bounds and records
        // the view's range so that overlapping views are rejected.
        self.slice(bounds).get_mapped_range()
    }
360
    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may be less than the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(
        &self,
        bounds: S,
    ) -> BufferViewMut<'_> {
        // Delegates to `BufferSlice`, which validates the bounds and records
        // the view's range so that overlapping views are rejected.
        self.slice(bounds).get_mapped_range_mut()
    }
386
    #[cfg(custom)]
    /// Returns the custom backend implementation of this `Buffer`, if this
    /// buffer comes from a custom backend whose concrete type is `T`.
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
392}
393
394/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
395///
396/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
397///
398/// ```no_run
399/// # let buffer: wgpu::Buffer = todo!();
400/// let slice = buffer.slice(10..20);
401/// ```
402///
403/// This returns a slice referring to the second ten bytes of `buffer`. To get a
404/// slice of the entire `Buffer`:
405///
406/// ```no_run
407/// # let buffer: wgpu::Buffer = todo!();
408/// let whole_buffer_slice = buffer.slice(..);
409/// ```
410///
411/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
412/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
413/// a draw call should consult. You can also convert it to a [`BufferBinding`]
414/// with `.into()`.
415///
416/// To access the slice's contents on the CPU, you must first [map] the buffer,
417/// and then call [`BufferSlice::get_mapped_range`] or
418/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
419/// contents. See the documentation on [mapping][map] for more details,
420/// including example code.
421///
422/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
423/// nobody else is modifying the `T` values to which it refers, a
424/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
425/// changing. You can still record and submit commands operating on the
426/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
427/// represents a certain range of the buffer's bytes.
428///
429/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
430/// specification, an offset and size are specified as arguments to each call
431/// working with the [`Buffer`], instead.
432///
433/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    /// The buffer this slice refers to.
    pub(crate) buffer: &'a Buffer,
    /// Byte offset of the slice's start, measured from the start of `buffer`.
    pub(crate) offset: BufferAddress,
    /// Length of the slice in bytes (always non-zero; `slice()` panics on
    /// zero-length ranges).
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);
442
443impl<'a> BufferSlice<'a> {
444    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
445    /// indicated by `bounds`.
446    ///
447    /// The `range` argument can be half or fully unbounded: for example,
448    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
449    /// refers to the portion starting at the `n`th byte and extending to the
450    /// end of the buffer.
451    ///
452    /// # Panics
453    ///
454    /// - If `bounds` is outside of the bounds of `self`.
455    /// - If `bounds` has a length less than 1.
456    #[track_caller]
457    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
458        let (offset, size) = range_to_offset_size(bounds, self.size.get());
459        check_buffer_bounds(self.size.get(), offset, size);
460        BufferSlice {
461            buffer: self.buffer,
462            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
463            size,                         // check_buffer_bounds ensures this is essentially min()
464        }
465    }
466
    /// Map the buffer to host (CPU) memory, making it available for reading or writing
    /// via [`get_mapped_range()`](Self::get_mapped_range).
    /// It is available once the `callback` is called with an [`Ok`] response.
    ///
    /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
    /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls the above functions after the GPU work
    /// has completed. There are no restrictions on the code you can run in the callback, however on native the
    /// call to the function will not complete until the callback returns, so prefer keeping callbacks short
    /// and used to set flags, send messages, etc.
    ///
    /// As long as a buffer is mapped, it is not available for use by any other commands;
    /// at all times, either the GPU or the CPU has exclusive access to the contents of the buffer.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        // `initial_range == 0..0` means "not mapped"; requesting a second
        // mapping while one is pending or active is a usage error.
        assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        // Record the requested range before dispatching, so views created
        // after the callback fires can be validated against it.
        mc.initial_range = self.offset..end;

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }
503
504    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
505    ///
506    /// Returns a [`BufferView`] referring to the buffer range represented by
507    /// `self`. See the documentation for [`BufferView`] for details.
508    ///
509    /// Multiple views may be obtained and used simultaneously as long as they are from
510    /// non-overlapping slices.
511    ///
512    /// This can also be performed using [`Buffer::get_mapped_range()`].
513    ///
514    /// # Panics
515    ///
516    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
517    /// - If the buffer to which `self` refers is not currently [mapped].
518    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
519    ///
520    /// [mapped]: Buffer#mapping-buffers
521    pub fn get_mapped_range(&self) -> BufferView<'a> {
522        let end = self.buffer.map_context.lock().add(self.offset, self.size);
523        let range = self.buffer.inner.get_mapped_range(self.offset..end);
524        BufferView {
525            slice: *self,
526            inner: range,
527        }
528    }
529
530    /// Gain write access to the bytes of a [mapped] [`Buffer`].
531    ///
532    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
533    /// `self`. See the documentation for [`BufferViewMut`] for more details.
534    ///
535    /// Multiple views may be obtained and used simultaneously as long as they are from
536    /// non-overlapping slices.
537    ///
538    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
539    ///
540    /// # Panics
541    ///
542    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`].
543    /// - If the buffer to which `self` refers is not currently [mapped].
544    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
545    ///
546    /// [mapped]: Buffer#mapping-buffers
547    pub fn get_mapped_range_mut(&self) -> BufferViewMut<'a> {
548        let end = self.buffer.map_context.lock().add(self.offset, self.size);
549        let range = self.buffer.inner.get_mapped_range(self.offset..end);
550        BufferViewMut {
551            slice: *self,
552            inner: range,
553            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
554        }
555    }
556
    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it’s possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] this slice starts at.
    ///
    /// The offset is absolute: it is measured in bytes from the start of the
    /// underlying [`Buffer`], not from the start of any parent slice.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice, in bytes.
    pub fn size(&self) -> BufferSize {
        self.size
    }
575}
576
577impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
578    /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
579    /// provided that it will be used without a dynamic offset.
580    fn from(value: BufferSlice<'a>) -> Self {
581        BufferBinding {
582            buffer: value.buffer,
583            offset: value.offset,
584            size: Some(value.size),
585        }
586    }
587}
588
589impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
590    /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
591    /// provided that it will be used without a dynamic offset.
592    fn from(value: BufferSlice<'a>) -> Self {
593        crate::BindingResource::Buffer(crate::BufferBinding::from(value))
594    }
595}
596
/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and don't overlap.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
    /// the buffer is mapped at creation time, and when you call `map_async` on
    /// some [`BufferSlice`] (so technically, it indicates the portion that is
    /// *or has been requested to be* mapped.)
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    pub(crate) initial_range: Range<BufferAddress>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. These are non-overlapping, and are all contained
    /// within `initial_range`.
    ///
    /// The order of entries is not significant: removal swaps the last
    /// element into the vacated position rather than shifting.
    sub_ranges: Vec<Range<BufferAddress>>,
}
617
618impl MapContext {
619    pub(crate) fn new() -> Self {
620        Self {
621            initial_range: 0..0,
622            sub_ranges: Vec::new(),
623        }
624    }
625
626    /// Record that the buffer is no longer mapped.
627    fn reset(&mut self) {
628        self.initial_range = 0..0;
629
630        assert!(
631            self.sub_ranges.is_empty(),
632            "You cannot unmap a buffer that still has accessible mapped views"
633        );
634    }
635
636    /// Record that the `size` bytes of the buffer at `offset` are now viewed.
637    ///
638    /// Return the byte offset within the buffer of the end of the viewed range.
639    ///
640    /// # Panics
641    ///
642    /// This panics if the given range overlaps with any existing range.
643    fn add(&mut self, offset: BufferAddress, size: BufferSize) -> BufferAddress {
644        let end = offset + size.get();
645        assert!(self.initial_range.start <= offset && end <= self.initial_range.end);
646        // This check is essential for avoiding undefined behavior: it is the
647        // only thing that ensures that `&mut` references to the buffer's
648        // contents don't alias anything else.
649        for sub in self.sub_ranges.iter() {
650            assert!(
651                end <= sub.start || offset >= sub.end,
652                "Intersecting map range with {sub:?}"
653            );
654        }
655        self.sub_ranges.push(offset..end);
656        end
657    }
658
659    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
660    ///
661    /// # Panics
662    ///
663    /// This panics if the given range does not exactly match one previously
664    /// passed to [`add`].
665    ///
666    /// [`add]`: MapContext::add
667    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
668        let end = offset + size.get();
669
670        let index = self
671            .sub_ranges
672            .iter()
673            .position(|r| *r == (offset..end))
674            .expect("unable to remove range from map context");
675        self.sub_ranges.swap_remove(index);
676    }
677}
678
/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);

/// Error occurred when trying to async map a buffer.
///
/// This type carries no further detail about the cause of the failure; it is
/// delivered through the callback passed to
/// [`map_async`](BufferSlice::map_async).
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);
692
693impl fmt::Display for BufferAsyncError {
694    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
695        write!(f, "Error occurred when trying to async map a buffer")
696    }
697}
698
699impl error::Error for BufferAsyncError {}
700
/// Type of buffer mapping.
///
/// Passed to [`Buffer::map_async()`] and [`BufferSlice::map_async()`] to
/// select the kind of CPU access being requested; the buffer's
/// [`BufferUsages`] must allow the requested mode.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading
    Read,
    /// Map only for writing
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);
710
711/// A read-only view of a mapped buffer's bytes.
712///
713/// To get a `BufferView`, first [map] the buffer, and then
714/// call `buffer.slice(range).get_mapped_range()`.
715///
716/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
717/// slice methods to access the buffer's contents. It also implements
718/// `AsRef<[u8]>`, if that's more convenient.
719///
720/// Before the buffer can be unmapped, all `BufferView`s observing it
721/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
722///
723/// For example code, see the documentation on [mapping buffers][map].
724///
725/// [map]: Buffer#mapping-buffers
726/// [`map_async`]: BufferSlice::map_async
#[derive(Debug)]
pub struct BufferView<'a> {
    /// The slice of the parent buffer that this view covers.
    slice: BufferSlice<'a>,
    /// Backend-dispatched handle to the mapped byte range.
    inner: dispatch::DispatchBufferMappedRange,
}
732
#[cfg(webgpu)]
impl BufferView<'_> {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in js.
    /// This can be MUCH faster than dereferencing the view which copies the data into
    /// the Rust / wasm heap.
    ///
    /// Only available on the `webgpu` backend (`#[cfg(webgpu)]`).
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        self.inner.as_uint8array()
    }
}
742
743impl core::ops::Deref for BufferView<'_> {
744    type Target = [u8];
745
746    #[inline]
747    fn deref(&self) -> &[u8] {
748        self.inner.slice()
749    }
750}
751
752impl AsRef<[u8]> for BufferView<'_> {
753    #[inline]
754    fn as_ref(&self) -> &[u8] {
755        self.inner.slice()
756    }
757}
758
/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// `BufferViewMut` dereferences to `&mut [u8]`, so you can use all the usual
/// Rust slice methods to access the buffer's contents. It also implements
/// `AsMut<[u8]>`, if that's more convenient.
///
/// It is possible to read the buffer using this view, but doing so is not
/// recommended, as it is likely to be slow.
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferViewMut<'a> {
    // The slice this view covers; its buffer/offset/size are used on drop to
    // release the view's claim in the buffer's map context.
    slice: BufferSlice<'a>,
    // Backend-dispatched handle through which the mapped bytes are accessed.
    inner: dispatch::DispatchBufferMappedRange,
    // When false, reading through `Deref` logs a performance warning.
    readable: bool,
}
783
784impl AsMut<[u8]> for BufferViewMut<'_> {
785    #[inline]
786    fn as_mut(&mut self) -> &mut [u8] {
787        self.inner.slice_mut()
788    }
789}
790
791impl Deref for BufferViewMut<'_> {
792    type Target = [u8];
793
794    fn deref(&self) -> &Self::Target {
795        if !self.readable {
796            log::warn!("Reading from a BufferViewMut is slow and not recommended.");
797        }
798
799        self.inner.slice()
800    }
801}
802
803impl DerefMut for BufferViewMut<'_> {
804    fn deref_mut(&mut self) -> &mut Self::Target {
805        self.inner.slice_mut()
806    }
807}
808
809impl Drop for BufferView<'_> {
810    fn drop(&mut self) {
811        self.slice
812            .buffer
813            .map_context
814            .lock()
815            .remove(self.slice.offset, self.slice.size);
816    }
817}
818
819impl Drop for BufferViewMut<'_> {
820    fn drop(&mut self) {
821        self.slice
822            .buffer
823            .map_context
824            .lock()
825            .remove(self.slice.offset, self.slice.size);
826    }
827}
828
829#[track_caller]
830fn check_buffer_bounds(
831    buffer_size: BufferAddress,
832    slice_offset: BufferAddress,
833    slice_size: BufferSize,
834) {
835    // A slice of length 0 is invalid, so the offset must not be equal to or greater than the buffer size.
836    if slice_offset >= buffer_size {
837        panic!(
838            "slice offset {} is out of range for buffer of size {}",
839            slice_offset, buffer_size
840        );
841    }
842
843    // Detect integer overflow.
844    let end = slice_offset.checked_add(slice_size.get());
845    if end.is_none_or(|end| end > buffer_size) {
846        panic!(
847            "slice offset {} size {} is out of range for buffer of size {}",
848            slice_offset, slice_size, buffer_size
849        );
850    }
851}
852
853#[track_caller]
854fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
855    bounds: S,
856    whole_size: BufferAddress,
857) -> (BufferAddress, BufferSize) {
858    let offset = match bounds.start_bound() {
859        Bound::Included(&bound) => bound,
860        Bound::Excluded(&bound) => bound + 1,
861        Bound::Unbounded => 0,
862    };
863    let size = BufferSize::new(match bounds.end_bound() {
864        Bound::Included(&bound) => bound + 1 - offset,
865        Bound::Excluded(&bound) => bound - offset,
866        Bound::Unbounded => whole_size - offset,
867    })
868    .expect("buffer slices can not be empty");
869
870    (offset, size)
871}
872
#[cfg(test)]
mod tests {
    use super::{check_buffer_bounds, range_to_offset_size, BufferAddress, BufferSize};

    /// Shorthand: build a non-zero `BufferSize`, panicking on zero.
    fn size(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, size(2)));
        assert_eq!(range_to_offset_size(2..5, whole), (2, size(3)));
        assert_eq!(range_to_offset_size(.., whole), (0, size(whole)));
        assert_eq!(range_to_offset_size(21.., whole), (21, size(whole - 21)));
        assert_eq!(range_to_offset_size(0.., whole), (0, size(whole)));
        assert_eq!(range_to_offset_size(..21, whole), (0, size(21)));
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, size(50));
        check_buffer_bounds(200, 100, size(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, size(100));
        check_buffer_bounds(u64::MAX, 0, size(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, size(u64::MAX - 1));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, size(101));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, size(u64::MAX));
    }
}