wgpu/api/buffer.rs
1use alloc::{boxed::Box, string::String, sync::Arc, vec::Vec};
2use core::{
3 error, fmt,
4 ops::{Bound, Deref, Range, RangeBounds},
5};
6
7use crate::util::Mutex;
8use crate::*;
9
10/// Handle to a GPU-accessible buffer.
11///
12/// A `Buffer` is a memory allocation for use by the GPU, somewhat analogous to
13/// <code>[Box]<[\[u8\]][primitive@slice]></code> in Rust.
14/// The contents of buffers are untyped bytes; it is up to the application to
15/// specify the interpretation of the bytes when the buffer is used, in ways
16/// such as [`VertexBufferLayout`].
17/// A single buffer can be used to hold multiple independent pieces of data at
18/// different offsets (e.g. both vertices and indices for one or more meshes).
19///
20/// A `Buffer`'s bytes have "interior mutability": functions like
21/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
22/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
23/// prevents simultaneous reads and writes of buffer contents using run-time
24/// checks.
25///
26/// Created with [`Device::create_buffer()`] or
27/// [`DeviceExt::create_buffer_init()`].
28///
29/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
30///
31/// [mapping]: Buffer#mapping-buffers
32///
33/// # How to get your data into a buffer
34///
35/// Every `Buffer` starts with all bytes zeroed.
36/// There are many ways to load data into a `Buffer`:
37///
38/// - When creating a buffer, you may set the [`mapped_at_creation`][mac] flag,
39/// then write to its [`get_mapped_range_mut()`][Buffer::get_mapped_range_mut].
40/// This only works when the buffer is created and has not yet been used by
41/// the GPU, but it is all you need for buffers whose contents do not change
42/// after creation.
43/// - You may use [`DeviceExt::create_buffer_init()`] as a convenient way to
44/// do that and copy data from a `&[u8]` you provide.
45/// - After creation, you may use [`Buffer::map_async()`] to map it again;
46/// however, you then need to wait until the GPU is no longer using the buffer
47/// before you begin writing.
48/// - You may use [`CommandEncoder::copy_buffer_to_buffer()`] to copy data into
49/// this buffer from another buffer.
50/// - You may use [`Queue::write_buffer()`] to copy data into the buffer from a
51/// `&[u8]`. This uses a temporary “staging” buffer managed by `wgpu` to hold
52/// the data.
53/// - [`Queue::write_buffer_with()`] allows you to write directly into temporary
54/// storage instead of providing a slice you already prepared, which may
55/// allow *your* code to save the allocation of a [`Vec`] or such.
56/// - You may use [`util::StagingBelt`] to manage a set of temporary buffers.
57/// This may be more efficient than [`Queue::write_buffer_with()`] when you
58/// have many small copies to perform, but requires more steps to use, and
59/// tuning of the belt buffer size.
60/// - You may write your own staging buffer management customized to your
61/// application, based on mapped buffers and
62/// [`CommandEncoder::copy_buffer_to_buffer()`].
63/// - A GPU computation’s results can be stored in a buffer:
64/// - A [compute shader][ComputePipeline] may write to a buffer bound as a
65/// [storage buffer][BufferBindingType::Storage].
66/// - A render pass may render to a texture which is then copied to a buffer
67/// using [`CommandEncoder::copy_texture_to_buffer()`].
68///
69/// # Mapping buffers
70///
71/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
72/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
73/// `&mut [u8]` slice of bytes. Buffers created with the
74/// [`mapped_at_creation`][mac] flag set are also mapped initially.
75///
76/// Depending on the hardware, the buffer could be memory shared between CPU and
77/// GPU, so that the CPU has direct access to the same bytes the GPU will
78/// consult; or it may be ordinary CPU memory, whose contents the system must
79/// copy to/from the GPU as needed. This crate's API is designed to work the
80/// same way in either case: at any given time, a buffer is either mapped and
81/// available to the CPU, or unmapped and ready for use by the GPU, but never
82/// both. This makes it impossible for either side to observe changes by the
83/// other immediately, and any necessary transfers can be carried out when the
84/// buffer transitions from one state to the other.
85///
86/// There are two ways to map a buffer:
87///
88/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
89/// buffer is mapped when it is created. This is the easiest way to initialize
90/// a new buffer. You can set `mapped_at_creation` on any kind of buffer,
91/// regardless of its [`usage`] flags.
92///
93/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
94/// flags, then you can call `buffer.slice(range).map_async(mode, callback)`
95/// to map the portion of `buffer` given by `range`. This waits for the GPU to
96/// finish using the buffer, and invokes `callback` as soon as the buffer is
97/// safe for the CPU to access.
98///
99/// Once a buffer is mapped:
100///
101/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
102/// [`BufferView`], which dereferences to a `&[u8]` that you can use to read
103/// the buffer's contents.
104///
105/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
106/// [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
107/// read and write the buffer's contents.
108///
/// The given `range` must fall within the mapped portion of the buffer.
/// Multiple views may coexist, but a [`BufferViewMut`] may not overlap any
/// other view, and no view may overlap an existing [`BufferViewMut`];
/// attempting to create such a view makes these methods return an error.
112///
113/// While a buffer is mapped, you may not submit any commands to the GPU that
114/// access it. You may record command buffers that use the buffer, but if you
115/// submit them while the buffer is mapped, submission will panic.
116///
117/// When you are done using the buffer on the CPU, you must call
118/// [`Buffer::unmap`] to make it available for use by the GPU again. All
119/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
120/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
121///
122/// # Example
123///
124/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
125/// with `f32` values like this:
126///
127/// ```
128/// # #[cfg(feature = "noop")]
129/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
130/// # #[cfg(not(feature = "noop"))]
131/// # let device: wgpu::Device = { return; };
132/// #
133/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
134/// # label: None,
135/// # size: 400,
136/// # usage: wgpu::BufferUsages::MAP_WRITE,
137/// # mapped_at_creation: false,
138/// # });
139/// let capturable = buffer.clone();
140/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
141/// if result.is_ok() {
142/// let mut view = capturable.get_mapped_range_mut(..).unwrap();
143/// let mut floats: wgpu::WriteOnly<[[u8; 4]]> = view.slice(..).into_chunks::<4>().0;
144/// floats.fill(42.0f32.to_ne_bytes());
145/// drop(view);
146/// capturable.unmap();
147/// }
148/// });
149/// ```
150///
151/// This code takes the following steps:
152///
153/// - First, it makes a cloned handle to the buffer for capture by
154/// the callback passed to [`map_async`]. Since a [`map_async`] callback may be
155/// invoked from another thread, interaction between the callback and the
156/// thread calling [`map_async`] generally requires some sort of shared heap
157/// data like this. In real code, there might be an [`Arc`] to some larger
158/// structure that itself owns `buffer`.
159///
/// - Then, it calls [`Buffer::map_async`] with the unbounded range `..`,
///   requesting that the buffer's entire contents be made accessible to the
///   CPU ("mapped"). This may entail waiting for previously enqueued
///   operations on `buffer` to finish. Although [`map_async`] itself always
///   returns immediately, it saves the callback function to be invoked later.
///
/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
///   shown in this example) determines that the buffer is mapped and ready for
///   the CPU to use, it invokes the callback function.
///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which gives access to the buffer's bytes.
///
/// - It then re-views those bytes as 4-byte chunks and calls the [`fill`]
///   method to set every chunk to the native-endian byte representation of
///   an `f32` value.
180///
181/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
182/// the buffer. In real code, the callback would also need to do some sort of
183/// synchronization to let the rest of the program know that it has completed
184/// its work.
185///
186/// If using [`map_async`] directly is awkward, you may find it more convenient to
187/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
188/// However, those each have their own tradeoffs; the asynchronous nature of GPU
189/// execution makes it hard to avoid friction altogether.
190///
191/// [`Arc`]: std::sync::Arc
192/// [`map_async`]: BufferSlice::map_async
193/// [`bytemuck`]: https://crates.io/crates/bytemuck
194/// [`fill`]: slice::fill
195///
196/// ## Mapping buffers on the web
197///
198/// When compiled to WebAssembly and running in a browser content process,
199/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
200/// In this context, `wgpu` is further isolated from the GPU:
201///
202/// - Depending on the browser's WebGPU implementation, mapping and unmapping
203/// buffers probably entails copies between WebAssembly linear memory and the
204/// graphics driver's buffers.
205///
206/// - All modern web browsers isolate web content in its own sandboxed process,
207/// which can only interact with the GPU via interprocess communication (IPC).
208/// Although most browsers' IPC systems use shared memory for large data
209/// transfers, there will still probably need to be copies into and out of the
210/// shared memory buffers.
211///
212/// All of these copies contribute to the cost of buffer mapping in this
213/// configuration.
214///
215/// [`usage`]: BufferDescriptor::usage
216/// [mac]: BufferDescriptor::mapped_at_creation
217/// [`MAP_READ`]: BufferUsages::MAP_READ
218/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
219/// [`DeviceExt::create_buffer_init()`]: util::DeviceExt::create_buffer_init
#[derive(Debug, Clone)]
pub struct Buffer {
    /// Backend-dispatched handle to the underlying buffer resource.
    pub(crate) inner: dispatch::DispatchBuffer,
    /// CPU-side tracking of the mapped range and outstanding views.
    /// Shared via `Arc` among clones of this handle, so every clone agrees
    /// on the buffer's mapping state.
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    /// Size of the buffer in bytes, fixed at creation.
    pub(crate) size: wgt::BufferAddress,
    /// Allowed usages for this buffer, fixed at creation.
    pub(crate) usage: BufferUsages,
    // Todo: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

// Equality/ordering/hashing of `Buffer` handles is identity-based,
// delegated to the underlying `inner` resource handle.
crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);
232
impl Buffer {
    /// Return the binding view of the entire buffer.
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Return the binding view of the entire buffer.
    ///
    /// The binding covers the whole buffer: offset 0 with no explicit size.
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Buffer`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::macros::hal_type_vulkan!("Buffer")]
    #[doc = crate::macros::hal_type_metal!("Buffer")]
    #[doc = crate::macros::hal_type_dx12!("Buffer")]
    #[doc = crate::macros::hal_type_gles!("Buffer")]
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return None if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        // `as_core_opt` yields `None` for non-core (webgpu/custom) backends.
        let buffer = self.inner.as_core_opt()?;
        // SAFETY: the caller upholds the contract documented above.
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
    /// `bounds` start and end are given in bytes.
    ///
    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
    /// buffer contents for access from the CPU. See the [`BufferSlice`]
    /// documentation for details.
    ///
    /// The `range` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        // Resolve half-/fully-unbounded ranges against the buffer's full
        // size, then enforce the panics documented above.
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
    /// makes the buffer available for use by the GPU again.
    ///
    /// # Panics
    ///
    /// Panics if any [`BufferView`] or [`BufferViewMut`] referring to this
    /// buffer has not yet been dropped.
    pub fn unmap(&self) {
        // Clear the CPU-side view tracking first: `reset` panics if views
        // are still alive, before the backend mapping is torn down.
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroy the associated native resources as soon as possible.
    ///
    /// After this call, [`Self::as_hal()`] will return `None`.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a multiple of 4 greater than 0.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        // Delegate to the slice form; `slice` performs the bounds checks.
        self.slice(bounds).map_async(mode, callback)
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may be less than the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
    ///
    /// # Errors
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a multiple of 4 greater than 0.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(
        &self,
        bounds: S,
    ) -> Result<BufferView, MapRangeError> {
        self.slice(bounds).get_mapped_range()
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may be less than the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Errors
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` does not start at a multiple of [`MAP_ALIGNMENT`].
    /// - If `bounds` has a length that is not a multiple of 4 greater than 0.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(
        &self,
        bounds: S,
    ) -> Result<BufferViewMut, MapRangeError> {
        self.slice(bounds).get_mapped_range_mut()
    }

    #[cfg(custom)]
    /// Returns custom implementation of Buffer (if custom backend and is internally T)
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}
459
460/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
461///
462/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
463///
464/// ```no_run
465/// # let buffer: wgpu::Buffer = todo!();
466/// let slice = buffer.slice(10..20);
467/// ```
468///
469/// This returns a slice referring to the second ten bytes of `buffer`. To get a
470/// slice of the entire `Buffer`:
471///
472/// ```no_run
473/// # let buffer: wgpu::Buffer = todo!();
474/// let whole_buffer_slice = buffer.slice(..);
475/// ```
476///
477/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
478/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
479/// a draw call should consult. You can also convert it to a [`BufferBinding`]
480/// with `.into()`.
481///
482/// To access the slice's contents on the CPU, you must first [map] the buffer,
483/// and then call [`BufferSlice::get_mapped_range`] or
484/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
485/// contents. See the documentation on [mapping][map] for more details,
486/// including example code.
487///
488/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
489/// nobody else is modifying the `T` values to which it refers, a
490/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
491/// changing. You can still record and submit commands operating on the
492/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
493/// represents a certain range of the buffer's bytes.
494///
495/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
496/// specification, an offset and size are specified as arguments to each call
497/// working with the [`Buffer`], instead.
498///
499/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    /// The buffer this slice refers to.
    pub(crate) buffer: &'a Buffer,
    /// Starting byte offset of the slice within `buffer`.
    pub(crate) offset: BufferAddress,
    /// Length of the slice in bytes; [`Buffer::slice`] rejects empty ranges,
    /// so this is always nonzero.
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);
508
impl<'a> BufferSlice<'a> {
    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`.
    ///
    /// The `range` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        // `bounds` is relative to this slice, so resolve it against this
        // slice's own length rather than the whole buffer's size.
        let (offset, size) = range_to_offset_size(bounds, self.size.get());
        check_buffer_bounds(self.size.get(), offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
            size, // check_buffer_bounds ensures this is essentially min()
        }
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing via
    /// [`get_mapped_range()`](Self::get_mapped_range). The buffer becomes accessible once the
    /// `callback` is invoked with [`Ok`].
    ///
    /// Use this when you want to map the buffer immediately. If you need to submit GPU work that
    /// uses the buffer before mapping it, use `map_buffer_on_submit` on
    /// [`CommandEncoder`][CEmbos], [`CommandBuffer`][CBmbos], [`RenderPass`][RPmbos], or
    /// [`ComputePass`][CPmbos] to schedule the mapping after submission. This avoids extra calls to
    /// [`Buffer::map_async()`] or [`BufferSlice::map_async()`] and lets you initiate mapping from a
    /// more convenient place.
    ///
    /// For the callback to run, either [`queue.submit(..)`][q::s], [`instance.poll_all(..)`][i::p_a],
    /// or [`device.poll(..)`][d::p] must be called elsewhere in the runtime, possibly integrated into
    /// an event loop or run on a separate thread.
    ///
    /// The callback runs on the thread that first calls one of the above functions after the GPU work
    /// completes. There are no restrictions on the code you can run in the callback; however, on native
    /// the polling call will not return until the callback finishes, so keep callbacks short (set flags,
    /// send messages, etc.).
    ///
    /// While a buffer is mapped, it cannot be used by other commands; at any time, either the GPU or
    /// the CPU has exclusive access to the buffer’s contents.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    ///
    /// [CEmbos]: CommandEncoder::map_buffer_on_submit
    /// [CBmbos]: CommandBuffer::map_buffer_on_submit
    /// [RPmbos]: RenderPass::map_buffer_on_submit
    /// [CPmbos]: ComputePass::map_buffer_on_submit
    /// [q::s]: Queue::submit
    /// [i::p_a]: Instance::poll_all
    /// [d::p]: Device::poll
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        // An empty `mapped_range` (0..0) is the "not mapped" sentinel.
        assert_eq!(mc.mapped_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        mc.mapped_range = self.offset..end;
        // Release the map_context lock before calling into the backend,
        // since the callback may run re-entrantly and lock it again.
        drop(mc);

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range()`].
    ///
    /// # Errors
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range(&self) -> Result<BufferView, MapRangeError> {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Immutable);
        // Ask the backend for the mapped memory first; if the bookkeeping
        // below rejects the view, `range` is simply dropped.
        let range = self.buffer.inner.get_mapped_range(subrange.index.clone())?;
        self.buffer.map_context.lock().validate_and_add(subrange)?;
        Ok(BufferView {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        })
    }

    /// Gain write-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
    ///
    /// # Errors
    ///
    /// - If the beginning of this slice is not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the length of this slice is not a multiple of 4.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create a view which overlaps an existing [`BufferView`] or [`BufferViewMut`].
    ///
    /// [mapped]: Buffer#mapping-buffers
    #[track_caller]
    pub fn get_mapped_range_mut(&self) -> Result<BufferViewMut, MapRangeError> {
        let subrange = Subrange::new(self.offset, self.size, RangeMappingKind::Mutable);
        // Same ordering as `get_mapped_range`: backend first, then record
        // the (mutable) view in the map context.
        let range = self.buffer.inner.get_mapped_range(subrange.index.clone())?;
        self.buffer.map_context.lock().validate_and_add(subrange)?;
        Ok(BufferViewMut {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        })
    }

    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it’s possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] this slice starts at.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice.
    pub fn size(&self) -> BufferSize {
        self.size
    }
}
669
670impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
671 /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
672 /// provided that it will be used without a dynamic offset.
673 fn from(value: BufferSlice<'a>) -> Self {
674 BufferBinding {
675 buffer: value.buffer,
676 offset: value.offset,
677 size: Some(value.size),
678 }
679 }
680}
681
682impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
683 /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
684 /// provided that it will be used without a dynamic offset.
685 fn from(value: BufferSlice<'a>) -> Self {
686 crate::BindingResource::Buffer(crate::BufferBinding::from(value))
687 }
688}
689
/// Returns true if the half-open ranges `a` and `b` intersect.
///
/// Generic over the element type (previously hard-coded to
/// `BufferAddress`) so it works for, and can be tested with, any ordered
/// type; callers in this file still infer `BufferAddress`.
fn range_overlaps<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    // Two half-open ranges intersect iff each one starts before the other ends.
    a.start < b.end && b.start < a.end
}
693
/// Returns true if the half-open range `a` contains every element of `b`.
///
/// Generic over the element type (previously hard-coded to
/// `BufferAddress`) so it works for, and can be tested with, any ordered
/// type; callers in this file still infer `BufferAddress`.
fn range_contains<T: Ord>(a: &Range<T>, b: &Range<T>) -> bool {
    a.start <= b.start && a.end >= b.end
}
697
/// Whether a tracked view of a mapped range is mutable ([`BufferViewMut`])
/// or immutable ([`BufferView`]).
#[derive(Debug, Copy, Clone)]
enum RangeMappingKind {
    Mutable,
    Immutable,
}

impl RangeMappingKind {
    /// Returns true if a range of this kind can touch the same bytes as a range of the other kind.
    ///
    /// This is Rust's Mutable XOR Shared rule: only two immutable (shared)
    /// ranges may coexist over the same bytes.
    fn allowed_concurrently_with(self, other: Self) -> bool {
        matches!(self, Self::Immutable) && matches!(other, Self::Immutable)
    }
}
715
/// A single tracked view of a mapped buffer: the byte range it covers plus
/// whether it grants mutable or shared access.
#[derive(Debug, Clone)]
struct Subrange {
    /// Byte range of the view within the buffer.
    index: Range<BufferAddress>,
    /// Whether the view is mutable or immutable.
    kind: RangeMappingKind,
}
721
722impl Subrange {
723 fn new(offset: BufferAddress, size: BufferSize, kind: RangeMappingKind) -> Self {
724 Self {
725 index: offset..(offset + size.get()),
726 kind,
727 }
728 }
729}
730
731impl fmt::Display for Subrange {
732 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
733 write!(
734 f,
735 "{}..{} ({:?})",
736 self.index.start, self.index.end, self.kind
737 )
738 }
739}
740
/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and don't overlap.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
    /// the buffer is mapped at creation time, and when you call `map_async` on
    /// some [`BufferSlice`] (so technically, it indicates the portion that is
    /// *or has been requested to be* mapped.)
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    mapped_range: Range<BufferAddress>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. These are non-overlapping, and are all contained
    /// within `mapped_range`.
    ///
    /// NOTE(review): `RangeMappingKind::allowed_concurrently_with` permits two
    /// immutable views over the same bytes, so "non-overlapping" appears to
    /// apply only where a mutable view is involved — confirm against
    /// `validate_and_add`.
    sub_ranges: Vec<Subrange>,
}
761
impl MapContext {
    /// Creates a new `MapContext`.
    ///
    /// For [`mapped_at_creation`] buffers, pass the full buffer range in the
    /// `mapped_range` argument. For other buffers, pass `None`.
    ///
    /// [`mapped_at_creation`]: BufferDescriptor::mapped_at_creation
    pub(crate) fn new(mapped_range: Option<Range<BufferAddress>>) -> Self {
        Self {
            // `0..0` is the sentinel for "not mapped"; see `mapped_range`'s docs.
            mapped_range: mapped_range.unwrap_or(0..0),
            sub_ranges: Vec::new(),
        }
    }

    /// Record that the buffer is no longer mapped.
    ///
    /// # Panics
    ///
    /// Panics if any view into the mapped range is still outstanding, since
    /// unmapping while views exist would leave them dangling.
    fn reset(&mut self) {
        self.mapped_range = 0..0;

        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Record that the bytes covered by `new_sub` are now viewed.
    ///
    /// # Errors
    ///
    /// This returns an error if the buffer is not mapped, if `new_sub` is not
    /// entirely within the mapped range, or if `new_sub` overlaps an existing
    /// view in a way that would break aliasing rules.
    fn validate_and_add(&mut self, new_sub: Subrange) -> Result<(), MapRangeError> {
        // An empty `mapped_range` is the sentinel for "not mapped at all".
        if self.mapped_range.is_empty() {
            return Err(MapRangeError(
                "tried to call get_mapped_range(_mut) on an unmapped buffer".into(),
            ));
        }
        if !range_contains(&self.mapped_range, &new_sub.index) {
            return Err(MapRangeError(alloc::format!(
                "tried to call get_mapped_range(_mut) on a range that is not entirely mapped. \
                 Attempted to get range {}, but the mapped range is {}..{}",
                new_sub,
                self.mapped_range.start,
                self.mapped_range.end
            )));
        }
        // This check is essential for avoiding undefined behavior: it is the
        // only thing that ensures that `&mut` references to the buffer's
        // contents don't alias anything else.
        for sub in self.sub_ranges.iter() {
            if range_overlaps(&sub.index, &new_sub.index)
                && !sub.kind.allowed_concurrently_with(new_sub.kind)
            {
                return Err(MapRangeError(alloc::format!(
                    "tried to call get_mapped_range(_mut) on a range that has already \
                     been mapped and would break Rust memory aliasing rules. Attempted \
                     to get range {}, and the conflicting range is {}",
                    new_sub,
                    sub
                )));
            }
        }
        self.sub_ranges.push(new_sub);
        Ok(())
    }

    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range does not exactly match one previously
    /// passed to [`MapContext::validate_and_add`].
    pub(crate) fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
        let end = offset + size.get();

        let index = self
            .sub_ranges
            .iter()
            .position(|r| r.index == (offset..end))
            .expect("unable to remove range from map context");
        // Order of `sub_ranges` doesn't matter, so O(1) `swap_remove` is fine.
        self.sub_ranges.swap_remove(index);
    }
}
843
/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
// Compile-time check that descriptors can be shared across threads.
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);
852
/// Error occurred when trying to async map a buffer.
///
/// Carries no further detail; the failure is reported through the map
/// callback.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
// Compile-time check that the error can be shared across threads.
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);
857
858impl fmt::Display for BufferAsyncError {
859 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
860 write!(f, "Error occurred when trying to async map a buffer")
861 }
862}
863
864impl error::Error for BufferAsyncError {}
865
/// Error returned by [`BufferSlice::get_mapped_range`] and [`BufferSlice::get_mapped_range_mut`].
///
/// Corresponds to the `OperationError` thrown by
/// [`getMappedRange()`](https://gpuweb.github.io/gpuweb/#dom-gpubuffer-getmappedrange)
/// in the WebGPU spec.
#[derive(Clone, Debug)]
pub struct MapRangeError(/* human-readable description of the failure */ pub(crate) String);
// Compile-time check that the error can be shared across threads.
static_assertions::assert_impl_all!(MapRangeError: Send, Sync);
874
875impl fmt::Display for MapRangeError {
876 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
877 write!(f, "Buffer view error: {}", self.0)
878 }
879}
880
881impl error::Error for MapRangeError {}
882
/// Type of buffer mapping.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading
    Read,
    /// Map only for writing
    Write,
}
// Compile-time check that the mode can be shared across threads.
static_assertions::assert_impl_all!(MapMode: Send, Sync);
892
/// A read-only view of a mapped buffer's bytes.
///
/// To get a `BufferView`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range()`.
///
/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
/// slice methods to access the buffer's contents. It also implements
/// `AsRef<[u8]>`, if that's more convenient.
///
/// Before the buffer can be unmapped, all `BufferView`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
/// [`map_async`]: BufferSlice::map_async
#[derive(Debug)]
pub struct BufferView {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    // On drop, `offset..offset+size` is released from the buffer's `MapContext`.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    // Backend dispatch handle through which the mapped bytes are read.
    inner: dispatch::DispatchBufferMappedRange,
}
917
/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// Because Rust has no write-only reference type
/// (`&[u8]` is read-only and `&mut [u8]` is read-write),
/// this type does not dereference to a slice in the way that [`BufferView`] does.
/// Instead, [`.slice()`][BufferViewMut::slice] returns a special [`WriteOnly`] pointer type,
/// and there are also a few convenience methods such as [`BufferViewMut::copy_from_slice()`].
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
#[derive(Debug)]
pub struct BufferViewMut {
    // `buffer, offset, size` are similar to `BufferSlice`, except that they own the buffer.
    // On drop, `offset..offset+size` is released from the buffer's `MapContext`.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    // Backend dispatch handle through which the mapped bytes are written.
    inner: dispatch::DispatchBufferMappedRange,
}
943
944// `BufferView` simply dereferences. `BufferViewMut` cannot, because mapped memory may be
945// write-combining memory <https://en.wikipedia.org/wiki/Write_combining>,
946// and not support the expected behavior of atomic accesses.
947// Further context: <https://github.com/gfx-rs/wgpu/issues/8897>
948
impl core::ops::Deref for BufferView {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        // SAFETY: this is a read mapping, so handing out shared `&[u8]`
        // access to the mapped bytes is sound.
        unsafe { self.inner.read_slice() }
    }
}
958
959impl AsRef<[u8]> for BufferView {
960 #[inline]
961 fn as_ref(&self) -> &[u8] {
962 self
963 }
964}
965
966impl Drop for BufferView {
967 fn drop(&mut self) {
968 self.buffer
969 .map_context
970 .lock()
971 .remove(self.offset, self.size);
972 }
973}
974
975impl Drop for BufferViewMut {
976 fn drop(&mut self) {
977 self.buffer
978 .map_context
979 .lock()
980 .remove(self.offset, self.size);
981 }
982}
983
#[cfg(webgpu)]
impl BufferView {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in js.
    /// This can be MUCH faster than dereferencing the view which copies the data into
    /// the Rust / wasm heap.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        // Delegates directly to the backend's mapped-range handle.
        self.inner.as_uint8array()
    }
}
993
994/// These methods are equivalent to the methods of the same names on [`WriteOnly`].
995impl BufferViewMut {
996 /// Returns the length of this view; the number of bytes to be written.
997 pub fn len(&self) -> usize {
998 // cannot fail because we can't actually map more than isize::MAX bytes
999 usize::try_from(self.size.get()).unwrap()
1000 }
1001
1002 /// Returns `true` if the view has a length of 0.
1003 ///
1004 /// Note that this is currently impossible.
1005 pub fn is_empty(&self) -> bool {
1006 self.len() == 0
1007 }
1008
1009 /// Returns a [`WriteOnly`] reference to a portion of this.
1010 ///
1011 /// `.slice(..)` can be used to access the whole data.
1012 pub fn slice<'a, S: RangeBounds<usize>>(&'a mut self, bounds: S) -> WriteOnly<'a, [u8]> {
1013 // SAFETY: this is a write mapping
1014 unsafe { self.inner.write_slice() }.into_slice(bounds)
1015 }
1016
1017 /// Copies all elements from src into `self`.
1018 ///
1019 /// The length of `src` must be the same as `self`.
1020 ///
1021 /// This method is equivalent to
1022 /// [`self.slice(..).copy_from_slice(src)`][WriteOnly::copy_from_slice].
1023 pub fn copy_from_slice(&mut self, src: &[u8]) {
1024 self.slice(..).copy_from_slice(src)
1025 }
1026}
1027
1028#[track_caller]
1029fn check_buffer_bounds(
1030 buffer_size: BufferAddress,
1031 slice_offset: BufferAddress,
1032 slice_size: BufferSize,
1033) {
1034 // A slice of length 0 is invalid, so the offset must not be equal to or greater than the buffer size.
1035 if slice_offset >= buffer_size {
1036 panic!(
1037 "slice offset {} is out of range for buffer of size {}",
1038 slice_offset, buffer_size
1039 );
1040 }
1041
1042 // Detect integer overflow.
1043 let end = slice_offset.checked_add(slice_size.get());
1044 if end.is_none_or(|end| end > buffer_size) {
1045 panic!(
1046 "slice offset {} size {} is out of range for buffer of size {}",
1047 slice_offset, slice_size, buffer_size
1048 );
1049 }
1050}
1051
1052#[track_caller]
1053pub(crate) fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
1054 bounds: S,
1055 whole_size: BufferAddress,
1056) -> (BufferAddress, BufferSize) {
1057 let offset = match bounds.start_bound() {
1058 Bound::Included(&bound) => bound,
1059 Bound::Excluded(&bound) => bound + 1,
1060 Bound::Unbounded => 0,
1061 };
1062 let size = BufferSize::new(match bounds.end_bound() {
1063 Bound::Included(&bound) => bound + 1 - offset,
1064 Bound::Excluded(&bound) => bound - offset,
1065 Bound::Unbounded => whole_size - offset,
1066 })
1067 .expect("buffer slices can not be empty");
1068
1069 (offset, size)
1070}
1071
#[cfg(test)]
mod tests {
    use super::{
        check_buffer_bounds, range_overlaps, range_to_offset_size, BufferAddress, BufferSize,
    };

    /// Shorthand for building a `BufferSize`; panics if `value` is zero.
    fn bs(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    /// Bounded, unbounded, and half-bounded ranges all resolve correctly.
    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, bs(2)));
        assert_eq!(range_to_offset_size(2..5, whole), (2, bs(3)));
        assert_eq!(range_to_offset_size(.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(21.., whole), (21, bs(whole - 21)));
        assert_eq!(range_to_offset_size(0.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(..21, whole), (0, bs(21)));
    }

    /// A `start..start` range has zero length and must be rejected.
    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    /// A `..0` range has zero length and must be rejected.
    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    /// Slices that end exactly at, or before, the buffer's end are accepted,
    /// including extremes near `u64::MAX`.
    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, bs(50));
        check_buffer_bounds(200, 100, bs(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, bs(100));
        check_buffer_bounds(u64::MAX, 0, bs(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX - 1));
    }

    /// A slice extending one byte past the buffer's end must panic.
    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, bs(101));
    }

    /// `offset + size` overflowing `u64` must panic, not wrap.
    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX));
    }

    /// Exhaustive relative positions of two half-open ranges.
    #[test]
    fn range_overlapping() {
        // First range to the left
        assert_eq!(range_overlaps(&(0..1), &(1..3)), false);
        // First range overlaps left edge
        assert_eq!(range_overlaps(&(0..2), &(1..3)), true);
        // First range completely inside second
        assert_eq!(range_overlaps(&(1..2), &(0..3)), true);
        // First range completely surrounds second
        assert_eq!(range_overlaps(&(0..3), &(1..2)), true);
        // First range overlaps right edge
        assert_eq!(range_overlaps(&(1..3), &(0..2)), true);
        // First range entirely to the right
        assert_eq!(range_overlaps(&(2..3), &(0..2)), false);
    }
}