wgpu/api/buffer.rs
use alloc::{boxed::Box, sync::Arc, vec::Vec};
use core::{
    error, fmt,
    ops::{Bound, Deref, DerefMut, Range, RangeBounds},
};

use crate::util::Mutex;
use crate::*;

/// Handle to a GPU-accessible buffer.
///
/// A `Buffer` is a memory allocation for use by the GPU, somewhat analogous to
/// <code>[Box]<[\[u8\]][primitive@slice]></code> in Rust.
/// The contents of buffers are untyped bytes; it is up to the application to
/// specify the interpretation of the bytes when the buffer is used, in ways
/// such as [`VertexBufferLayout`].
/// A single buffer can be used to hold multiple independent pieces of data at
/// different offsets (e.g. both vertices and indices for one or more meshes).
///
/// A `Buffer`'s bytes have "interior mutability": functions like
/// [`Queue::write_buffer`] or [mapping] a buffer for writing only require a
/// `&Buffer`, not a `&mut Buffer`, even though they modify its contents. `wgpu`
/// prevents simultaneous reads and writes of buffer contents using run-time
/// checks.
///
/// Created with [`Device::create_buffer()`] or
/// [`DeviceExt::create_buffer_init()`].
///
/// Corresponds to [WebGPU `GPUBuffer`](https://gpuweb.github.io/gpuweb/#buffer-interface).
///
/// [mapping]: Buffer#mapping-buffers
///
/// # How to get your data into a buffer
///
/// Every `Buffer` starts with all bytes zeroed.
/// There are many ways to load data into a `Buffer`:
///
/// - When creating a buffer, you may set the [`mapped_at_creation`][mac] flag,
///   then write to its [`get_mapped_range_mut()`][Buffer::get_mapped_range_mut].
///   This only works when the buffer is created and has not yet been used by
///   the GPU, but it is all you need for buffers whose contents do not change
///   after creation.
/// - You may use [`DeviceExt::create_buffer_init()`] as a convenient way to
///   do that and copy data from a `&[u8]` you provide.
/// - After creation, you may use [`Buffer::map_async()`] to map it again;
///   however, you then need to wait until the GPU is no longer using the buffer
///   before you begin writing.
/// - You may use [`CommandEncoder::copy_buffer_to_buffer()`] to copy data into
///   this buffer from another buffer.
/// - You may use [`Queue::write_buffer()`] to copy data into the buffer from a
///   `&[u8]`. This uses a temporary “staging” buffer managed by `wgpu` to hold
///   the data; see the sketch after this list.
/// - [`Queue::write_buffer_with()`] allows you to write directly into temporary
///   storage instead of providing a slice you already prepared, which may
///   allow *your* code to save the allocation of a [`Vec`] or such.
/// - You may use [`util::StagingBelt`] to manage a set of temporary buffers.
///   This may be more efficient than [`Queue::write_buffer_with()`] when you
///   have many small copies to perform, but requires more steps to use, and
///   tuning of the belt buffer size.
/// - You may write your own staging buffer management customized to your
///   application, based on mapped buffers and
///   [`CommandEncoder::copy_buffer_to_buffer()`].
/// - A GPU computation’s results can be stored in a buffer:
///   - A [compute shader][ComputePipeline] may write to a buffer bound as a
///     [storage buffer][BufferBindingType::Storage].
///   - A render pass may render to a texture which is then copied to a buffer
///     using [`CommandEncoder::copy_texture_to_buffer()`].
///
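/// For example, a minimal sketch of the [`Queue::write_buffer()`] path,
/// assuming `buffer` was created with [`BufferUsages::COPY_DST`] and is at
/// least 16 bytes long:
///
/// ```no_run
/// # let (queue, buffer): (wgpu::Queue, wgpu::Buffer) = todo!();
/// let data: [f32; 4] = [1.0, 2.0, 3.0, 4.0];
/// // `wgpu` copies `data` into a staging buffer immediately, so `data` may be
/// // dropped as soon as this call returns.
/// queue.write_buffer(&buffer, 0, bytemuck::cast_slice(&data));
/// ```
///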
/// # Mapping buffers
///
/// If a `Buffer` is created with the appropriate [`usage`], it can be *mapped*:
/// you can make its contents accessible to the CPU as an ordinary `&[u8]` or
/// `&mut [u8]` slice of bytes. Buffers created with the
/// [`mapped_at_creation`][mac] flag set are also mapped initially.
///
/// Depending on the hardware, the buffer could be memory shared between CPU and
/// GPU, so that the CPU has direct access to the same bytes the GPU will
/// consult; or it may be ordinary CPU memory, whose contents the system must
/// copy to/from the GPU as needed. This crate's API is designed to work the
/// same way in either case: at any given time, a buffer is either mapped and
/// available to the CPU, or unmapped and ready for use by the GPU, but never
/// both. This makes it impossible for either side to observe changes by the
/// other immediately, and any necessary transfers can be carried out when the
/// buffer transitions from one state to the other.
///
/// There are two ways to map a buffer:
///
/// - If [`BufferDescriptor::mapped_at_creation`] is `true`, then the entire
///   buffer is mapped when it is created. This is the easiest way to initialize
///   a new buffer. You can set `mapped_at_creation` on any kind of buffer,
///   regardless of its [`usage`] flags.
///
/// - If the buffer's [`usage`] includes the [`MAP_READ`] or [`MAP_WRITE`]
///   flags, then you can call `buffer.slice(range).map_async(mode, callback)`
///   to map the portion of `buffer` given by `range`. This waits for the GPU to
///   finish using the buffer, and invokes `callback` as soon as the buffer is
///   safe for the CPU to access.
///
/// Once a buffer is mapped:
///
/// - You can call `buffer.slice(range).get_mapped_range()` to obtain a
///   [`BufferView`], which dereferences to a `&[u8]` that you can use to read
///   the buffer's contents.
///
/// - Or, you can call `buffer.slice(range).get_mapped_range_mut()` to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` that you can use to
///   read and write the buffer's contents.
///
/// The given `range` must fall within the mapped portion of the buffer. If you
/// attempt to access overlapping ranges, even for shared access only, these
/// methods panic.
///
/// While a buffer is mapped, you may not submit any commands to the GPU that
/// access it. You may record command buffers that use the buffer, but if you
/// submit them while the buffer is mapped, submission will panic.
///
/// When you are done using the buffer on the CPU, you must call
/// [`Buffer::unmap`] to make it available for use by the GPU again. All
/// [`BufferView`] and [`BufferViewMut`] views referring to the buffer must be
/// dropped before you unmap it; otherwise, [`Buffer::unmap`] will panic.
///
/// # Example
///
/// If `buffer` was created with [`BufferUsages::MAP_WRITE`], we could fill it
/// with `f32` values like this:
///
/// ```
/// # #[cfg(feature = "noop")]
/// # let (device, _queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
/// # #[cfg(not(feature = "noop"))]
/// # let device: wgpu::Device = { return; };
/// #
/// # let buffer = device.create_buffer(&wgpu::BufferDescriptor {
/// #     label: None,
/// #     size: 400,
/// #     usage: wgpu::BufferUsages::MAP_WRITE,
/// #     mapped_at_creation: false,
/// # });
/// let capturable = buffer.clone();
/// buffer.map_async(wgpu::MapMode::Write, .., move |result| {
///     if result.is_ok() {
///         let mut view = capturable.get_mapped_range_mut(..);
///         let floats: &mut [f32] = bytemuck::cast_slice_mut(&mut view);
///         floats.fill(42.0);
///         drop(view);
///         capturable.unmap();
///     }
/// });
/// ```
///
/// This code takes the following steps:
///
/// - First, it makes a cloned handle to the buffer for capture by
///   the callback passed to [`map_async`]. Since a [`map_async`] callback may be
///   invoked from another thread, interaction between the callback and the
///   thread calling [`map_async`] generally requires some sort of shared heap
///   data like this. In real code, there might be an [`Arc`] to some larger
///   structure that itself owns `buffer`.
///
/// - Then, it calls [`Buffer::map_async`] with an unbounded range (`..`) to
///   request that the buffer's entire contents be made accessible to the CPU
///   ("mapped"); this is shorthand for calling [`Buffer::slice`] and then
///   [`BufferSlice::map_async`]. Mapping may entail waiting for previously
///   enqueued operations on `buffer` to finish. Although [`map_async`] itself
///   always returns immediately, it saves the callback function to be invoked
///   later.
///
/// - When some later call to [`Device::poll`] or [`Instance::poll_all`] (not
///   shown in this example) determines that the buffer is mapped and ready for
///   the CPU to use, it invokes the callback function.
///
/// - The callback function calls [`Buffer::get_mapped_range_mut`] to obtain a
///   [`BufferViewMut`], which dereferences to a `&mut [u8]` slice referring to
///   the buffer's bytes.
///
/// - It then uses the [`bytemuck`] crate to turn the `&mut [u8]` into a `&mut
///   [f32]`, and calls the slice [`fill`] method to fill the buffer with a
///   useful value.
///
/// - Finally, the callback drops the view and calls [`Buffer::unmap`] to unmap
///   the buffer. In real code, the callback would also need to do some sort of
///   synchronization to let the rest of the program know that it has completed
///   its work.
///
/// If using [`map_async`] directly is awkward, you may find it more convenient to
/// use [`Queue::write_buffer`] and [`util::DownloadBuffer::read_buffer`].
/// However, those each have their own tradeoffs; the asynchronous nature of GPU
/// execution makes it hard to avoid friction altogether.
///
/// [`Arc`]: std::sync::Arc
/// [`map_async`]: BufferSlice::map_async
/// [`bytemuck`]: https://crates.io/crates/bytemuck
/// [`fill`]: slice::fill
///
/// ## Mapping buffers on the web
///
/// When compiled to WebAssembly and running in a browser content process,
/// `wgpu` implements its API in terms of the browser's WebGPU implementation.
/// In this context, `wgpu` is further isolated from the GPU:
///
/// - Depending on the browser's WebGPU implementation, mapping and unmapping
///   buffers probably entails copies between WebAssembly linear memory and the
///   graphics driver's buffers.
///
/// - All modern web browsers isolate web content in its own sandboxed process,
///   which can only interact with the GPU via interprocess communication (IPC).
///   Although most browsers' IPC systems use shared memory for large data
///   transfers, there will still probably need to be copies into and out of the
///   shared memory buffers.
///
/// All of these copies contribute to the cost of buffer mapping in this
/// configuration.
///
/// [`usage`]: BufferDescriptor::usage
/// [mac]: BufferDescriptor::mapped_at_creation
/// [`MAP_READ`]: BufferUsages::MAP_READ
/// [`MAP_WRITE`]: BufferUsages::MAP_WRITE
/// [`DeviceExt::create_buffer_init()`]: util::DeviceExt::create_buffer_init
#[derive(Debug, Clone)]
pub struct Buffer {
    pub(crate) inner: dispatch::DispatchBuffer,
    pub(crate) map_context: Arc<Mutex<MapContext>>,
    pub(crate) size: wgt::BufferAddress,
    pub(crate) usage: BufferUsages,
    // TODO: missing map_state https://www.w3.org/TR/webgpu/#dom-gpubuffer-mapstate
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Buffer: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Buffer => .inner);

impl Buffer {
    /// Return the binding view of the entire buffer.
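    ///
    /// For example, a minimal sketch of binding the whole buffer at binding 0;
    /// the `device`, `buffer`, and bind group `layout` here are assumed to
    /// already exist:
    ///
    /// ```no_run
    /// # let (device, buffer, layout): (wgpu::Device, wgpu::Buffer, wgpu::BindGroupLayout) = todo!();
    /// let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
    ///     label: None,
    ///     layout: &layout,
    ///     entries: &[wgpu::BindGroupEntry {
    ///         binding: 0,
    ///         resource: buffer.as_entire_binding(),
    ///     }],
    /// });
    /// ```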
    pub fn as_entire_binding(&self) -> BindingResource<'_> {
        BindingResource::Buffer(self.as_entire_buffer_binding())
    }

    /// Return the binding view of the entire buffer.
    pub fn as_entire_buffer_binding(&self) -> BufferBinding<'_> {
        BufferBinding {
            buffer: self,
            offset: 0,
            size: None,
        }
    }

    /// Get the [`wgpu_hal`] buffer from this `Buffer`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Buffer`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Deadlocks
    ///
    /// - The returned guard holds a read-lock on a device-local "destruction"
    ///   lock, which will cause all calls to `destroy` to block until the
    ///   guard is released.
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The buffer is not from the backend specified by `A`.
    /// - The buffer is from the `webgpu` or `custom` backend.
    /// - The buffer has had [`Self::destroy()`] called on it.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time, however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Buffer`]: hal::Api::Buffer
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Buffer> + WasmNotSendSync> {
        let buffer = self.inner.as_core_opt()?;
        unsafe { buffer.context.buffer_as_hal::<A>(buffer) }
    }

    /// Returns a [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`. Regardless of what sort of data `self` stores,
    /// `bounds` start and end are given in bytes.
    ///
    /// A [`BufferSlice`] can be used to supply vertex and index data, or to map
    /// buffer contents for access from the CPU. See the [`BufferSlice`]
    /// documentation for details.
    ///
    /// The `bounds` argument can be half or fully unbounded: for example,
    /// `buffer.slice(..)` refers to the entire buffer, and `buffer.slice(n..)`
    /// refers to the portion starting at the `n`th byte and extending to the
    /// end of the buffer.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'_> {
        let (offset, size) = range_to_offset_size(bounds, self.size);
        check_buffer_bounds(self.size, offset, size);
        BufferSlice {
            buffer: self,
            offset,
            size,
        }
    }

    /// Unmaps the buffer from host memory.
    ///
    /// This terminates the effect of all previous [`map_async()`](Self::map_async) operations and
    /// makes the buffer available for use by the GPU again.
    pub fn unmap(&self) {
        self.map_context.lock().reset();
        self.inner.unmap();
    }

    /// Destroy the associated native resources as soon as possible.
    pub fn destroy(&self) {
        self.inner.destroy();
    }

    /// Returns the length of the buffer allocation in bytes.
    ///
    /// This is always equal to the `size` that was specified when creating the buffer.
    pub fn size(&self) -> BufferAddress {
        self.size
    }

    /// Returns the allowed usages for this `Buffer`.
    ///
    /// This is always equal to the `usage` that was specified when creating the buffer.
    pub fn usage(&self) -> BufferUsages {
        self.usage
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing
    /// via [`get_mapped_range()`](Self::get_mapped_range).
    /// It is available once the `callback` is called with an [`Ok`] response.
    ///
    /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
    /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls one of the above functions after the GPU work
    /// has completed. There are no restrictions on the code you can run in the callback; however, on native, the
    /// call to the polling function will not return until the callback has returned, so prefer to keep callbacks
    /// short: use them to set flags, send messages, and the like.
    ///
    /// As long as a buffer is mapped, it is not available for use by any other commands;
    /// at all times, either the GPU or the CPU has exclusive access to the contents of the buffer.
    ///
    /// This can also be performed using [`BufferSlice::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
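    ///
    /// # Example
    ///
    /// A minimal sketch of reading a buffer back, assuming `buffer` was created
    /// with [`BufferUsages::MAP_READ`] and that blocking here with the
    /// `PollType::Wait`-based polling API is acceptable:
    ///
    /// ```no_run
    /// # let (device, buffer): (wgpu::Device, wgpu::Buffer) = todo!();
    /// buffer.map_async(wgpu::MapMode::Read, .., |result| {
    ///     result.expect("failed to map buffer");
    /// });
    /// // Block until the map is complete. A real application might instead poll
    /// // from an event loop, or on another thread.
    /// device.poll(wgpu::PollType::Wait).unwrap();
    /// let view = buffer.get_mapped_range(..);
    /// println!("first byte: {}", view[0]);
    /// drop(view);
    /// buffer.unmap();
    /// ```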
    pub fn map_async<S: RangeBounds<BufferAddress>>(
        &self,
        mode: MapMode,
        bounds: S,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        self.slice(bounds).map_async(mode, callback)
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferView {
        self.slice(bounds).get_mapped_range()
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// `bounds` may be a subrange of the bounds passed to [`Self::map_async()`],
    /// and multiple views may be obtained and used simultaneously as long as they do not overlap.
    ///
    /// This can also be performed using [`BufferSlice::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    /// - If the start and end of `bounds` are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range_mut<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferViewMut {
        self.slice(bounds).get_mapped_range_mut()
    }

    #[cfg(custom)]
    /// Returns the custom backend implementation of this `Buffer`, if the
    /// buffer comes from a custom backend whose buffer type is internally `T`.
    pub fn as_custom<T: custom::BufferInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }
}

/// A slice of a [`Buffer`], to be mapped, used for vertex or index data, or the like.
///
/// You can create a `BufferSlice` by calling [`Buffer::slice`]:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let slice = buffer.slice(10..20);
/// ```
///
/// This returns a slice referring to the second ten bytes of `buffer`. To get a
/// slice of the entire `Buffer`:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let whole_buffer_slice = buffer.slice(..);
/// ```
///
/// You can pass buffer slices to methods like [`RenderPass::set_vertex_buffer`]
/// and [`RenderPass::set_index_buffer`] to indicate which portion of the buffer
/// a draw call should consult. You can also convert it to a [`BufferBinding`]
/// with `.into()`.
///
/// To access the slice's contents on the CPU, you must first [map] the buffer,
/// and then call [`BufferSlice::get_mapped_range`] or
/// [`BufferSlice::get_mapped_range_mut`] to obtain a view of the slice's
/// contents. See the documentation on [mapping][map] for more details,
/// including example code.
///
/// Unlike a Rust shared slice `&[T]`, whose existence guarantees that
/// nobody else is modifying the `T` values to which it refers, a
/// [`BufferSlice`] doesn't guarantee that the buffer's contents aren't
/// changing. You can still record and submit commands operating on the
/// buffer while holding a [`BufferSlice`]. A [`BufferSlice`] simply
/// represents a certain range of the buffer's bytes.
///
/// The `BufferSlice` type is unique to the Rust API of `wgpu`. In the WebGPU
/// specification, an offset and size are specified as arguments to each call
/// working with the [`Buffer`], instead.
///
/// [map]: Buffer#mapping-buffers
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BufferSlice<'a> {
    pub(crate) buffer: &'a Buffer,
    pub(crate) offset: BufferAddress,
    pub(crate) size: BufferSize,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(BufferSlice<'_>: Send, Sync);

impl<'a> BufferSlice<'a> {
    /// Return another [`BufferSlice`] referring to the portion of `self`'s contents
    /// indicated by `bounds`.
    ///
    /// `bounds` is interpreted relative to `self`, not to the underlying buffer,
    /// and can be half or fully unbounded: for example, `slice.slice(..)` refers
    /// to the entire slice, and `slice.slice(n..)` refers to the portion starting
    /// at the `n`th byte of the slice and extending to its end.
    ///
    /// # Panics
    ///
    /// - If `bounds` is outside of the bounds of `self`.
    /// - If `bounds` has a length less than 1.
    #[track_caller]
    pub fn slice<S: RangeBounds<BufferAddress>>(&self, bounds: S) -> BufferSlice<'a> {
        let (offset, size) = range_to_offset_size(bounds, self.size.get());
        check_buffer_bounds(self.size.get(), offset, size);
        BufferSlice {
            buffer: self.buffer,
            offset: self.offset + offset, // check_buffer_bounds ensures this does not overflow
            size, // check_buffer_bounds ensures this does not extend past the end of `self`
        }
    }

    /// Map the buffer to host (CPU) memory, making it available for reading or writing
    /// via [`get_mapped_range()`](Self::get_mapped_range).
    /// It is available once the `callback` is called with an [`Ok`] response.
    ///
    /// For the callback to complete, either `queue.submit(..)`, `instance.poll_all(..)`, or `device.poll(..)`
    /// must be called elsewhere in the runtime, possibly integrated into an event loop or run on a separate thread.
    ///
    /// The callback will be called on the thread that first calls one of the above functions after the GPU work
    /// has completed. There are no restrictions on the code you can run in the callback; however, on native, the
    /// call to the polling function will not return until the callback has returned, so prefer to keep callbacks
    /// short: use them to set flags, send messages, and the like.
    ///
    /// As long as a buffer is mapped, it is not available for use by any other commands;
    /// at all times, either the GPU or the CPU has exclusive access to the contents of the buffer.
    ///
    /// This can also be performed using [`Buffer::map_async()`].
    ///
    /// # Panics
    ///
    /// - If the buffer is already mapped.
    /// - If the buffer’s [`BufferUsages`] do not allow the requested [`MapMode`].
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
    pub fn map_async(
        &self,
        mode: MapMode,
        callback: impl FnOnce(Result<(), BufferAsyncError>) + WasmNotSend + 'static,
    ) {
        let mut mc = self.buffer.map_context.lock();
        assert_eq!(mc.initial_range, 0..0, "Buffer is already mapped");
        let end = self.offset + self.size.get();
        mc.initial_range = self.offset..end;

        self.buffer
            .inner
            .map_async(mode, self.offset..end, Box::new(callback));
    }

    /// Gain read-only access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferView`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferView`] for details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range()`].
    ///
    /// # Panics
    ///
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`] within the buffer.
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range(&self) -> BufferView {
        let end = self.buffer.map_context.lock().add(self.offset, self.size);
        let range = self.buffer.inner.get_mapped_range(self.offset..end);
        BufferView {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
        }
    }

    /// Gain write access to the bytes of a [mapped] [`Buffer`].
    ///
    /// Returns a [`BufferViewMut`] referring to the buffer range represented by
    /// `self`. See the documentation for [`BufferViewMut`] for more details.
    ///
    /// Multiple views may be obtained and used simultaneously as long as they are from
    /// non-overlapping slices.
    ///
    /// This can also be performed using [`Buffer::get_mapped_range_mut()`].
    ///
    /// # Panics
    ///
    /// - If the endpoints of this slice are not aligned to [`MAP_ALIGNMENT`].
    /// - If the buffer to which `self` refers is not currently [mapped].
    /// - If you try to create overlapping views of a buffer, mutable or otherwise.
    ///
    /// [mapped]: Buffer#mapping-buffers
    pub fn get_mapped_range_mut(&self) -> BufferViewMut {
        let end = self.buffer.map_context.lock().add(self.offset, self.size);
        let range = self.buffer.inner.get_mapped_range(self.offset..end);
        BufferViewMut {
            buffer: self.buffer.clone(),
            size: self.size,
            offset: self.offset,
            inner: range,
            readable: self.buffer.usage.contains(BufferUsages::MAP_READ),
        }
    }

    /// Returns the buffer this is a slice of.
    ///
    /// You should usually not need to call this, and if you received the buffer from code you
    /// do not control, you should refrain from accessing the buffer outside the bounds of the
    /// slice. Nevertheless, it’s possible to get this access, so this method makes it simple.
    pub fn buffer(&self) -> &'a Buffer {
        self.buffer
    }

    /// Returns the offset in [`Self::buffer()`] at which this slice starts.
    pub fn offset(&self) -> BufferAddress {
        self.offset
    }

    /// Returns the size of this slice.
    pub fn size(&self) -> BufferSize {
        self.size
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BufferBinding<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BufferBinding`],
    /// provided that it will be used without a dynamic offset.
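    ///
    /// For example, a sketch (the buffer is assumed to be at least 256 bytes long):
    ///
    /// ```no_run
    /// # let buffer: wgpu::Buffer = todo!();
    /// let binding: wgpu::BufferBinding = buffer.slice(..256).into();
    /// assert_eq!(binding.offset, 0);
    /// ```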
    fn from(value: BufferSlice<'a>) -> Self {
        BufferBinding {
            buffer: value.buffer,
            offset: value.offset,
            size: Some(value.size),
        }
    }
}

impl<'a> From<BufferSlice<'a>> for crate::BindingResource<'a> {
    /// Convert a [`BufferSlice`] to an equivalent [`BindingResource::Buffer`],
    /// provided that it will be used without a dynamic offset.
    fn from(value: BufferSlice<'a>) -> Self {
        crate::BindingResource::Buffer(crate::BufferBinding::from(value))
    }
}

/// The mapped portion of a buffer, if any, and its outstanding views.
///
/// This ensures that views fall within the mapped range and don't overlap.
#[derive(Debug)]
pub(crate) struct MapContext {
    /// The range of the buffer that is mapped.
    ///
    /// This is `0..0` if the buffer is not mapped. This becomes non-empty when
    /// the buffer is mapped at creation time, and when you call `map_async` on
    /// some [`BufferSlice`] (so technically, it indicates the portion that is
    /// *or has been requested to be* mapped.)
    ///
    /// All [`BufferView`]s and [`BufferViewMut`]s must fall within this range.
    pub(crate) initial_range: Range<BufferAddress>,

    /// The ranges covered by all outstanding [`BufferView`]s and
    /// [`BufferViewMut`]s. These are non-overlapping, and are all contained
    /// within `initial_range`.
    sub_ranges: Vec<Range<BufferAddress>>,
}

impl MapContext {
    pub(crate) fn new() -> Self {
        Self {
            initial_range: 0..0,
            sub_ranges: Vec::new(),
        }
    }

    /// Record that the buffer is no longer mapped.
    fn reset(&mut self) {
        self.initial_range = 0..0;

        assert!(
            self.sub_ranges.is_empty(),
            "You cannot unmap a buffer that still has accessible mapped views"
        );
    }

    /// Record that the `size` bytes of the buffer at `offset` are now viewed.
    ///
    /// Return the byte offset within the buffer of the end of the viewed range.
    ///
    /// # Panics
    ///
    /// This panics if the given range overlaps with any existing range.
    fn add(&mut self, offset: BufferAddress, size: BufferSize) -> BufferAddress {
        let end = offset + size.get();
        assert!(self.initial_range.start <= offset && end <= self.initial_range.end);
        // This check is essential for avoiding undefined behavior: it is the
        // only thing that ensures that `&mut` references to the buffer's
        // contents don't alias anything else.
        for sub in self.sub_ranges.iter() {
            assert!(
                end <= sub.start || offset >= sub.end,
                "Intersecting map range with {sub:?}"
            );
        }
        self.sub_ranges.push(offset..end);
        end
    }

    /// Record that the `size` bytes of the buffer at `offset` are no longer viewed.
    ///
    /// # Panics
    ///
    /// This panics if the given range does not exactly match one previously
    /// passed to [`add`].
    ///
    /// [`add`]: MapContext::add
    fn remove(&mut self, offset: BufferAddress, size: BufferSize) {
        let end = offset + size.get();

        let index = self
            .sub_ranges
            .iter()
            .position(|r| *r == (offset..end))
            .expect("unable to remove range from map context");
        self.sub_ranges.swap_remove(index);
    }
}

/// Describes a [`Buffer`].
///
/// For use with [`Device::create_buffer`].
///
/// Corresponds to [WebGPU `GPUBufferDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpubufferdescriptor).
pub type BufferDescriptor<'a> = wgt::BufferDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(BufferDescriptor<'_>: Send, Sync);

/// Error occurred when trying to asynchronously map a buffer.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BufferAsyncError;
static_assertions::assert_impl_all!(BufferAsyncError: Send, Sync);

impl fmt::Display for BufferAsyncError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Error occurred when trying to async map a buffer")
    }
}

impl error::Error for BufferAsyncError {}

/// Type of buffer mapping.
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum MapMode {
    /// Map only for reading.
    Read,
    /// Map only for writing.
    Write,
}
static_assertions::assert_impl_all!(MapMode: Send, Sync);

/// A read-only view of a mapped buffer's bytes.
///
/// To get a `BufferView`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range()`.
///
/// `BufferView` dereferences to `&[u8]`, so you can use all the usual Rust
/// slice methods to access the buffer's contents. It also implements
/// `AsRef<[u8]>`, if that's more convenient.
///
/// Before the buffer can be unmapped, all `BufferView`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
/// [`map_async`]: BufferSlice::map_async
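///
/// A short sketch of typical use, assuming `buffer` is already mapped for
/// reading and holds at least four `f32`s:
///
/// ```no_run
/// # let buffer: wgpu::Buffer = todo!();
/// let view: wgpu::BufferView = buffer.get_mapped_range(..);
/// // Reinterpret the raw bytes as `f32`s.
/// let floats: &[f32] = bytemuck::cast_slice(&view);
/// println!("{:?}", &floats[..4]);
/// ```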
#[derive(Debug)]
pub struct BufferView {
    // `buffer`, `offset`, and `size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
}

#[cfg(webgpu)]
impl BufferView {
    /// Provides the same data as dereferencing the view, but as a `Uint8Array` in JS.
    /// This can be MUCH faster than dereferencing the view, which copies the data into
    /// the Rust/Wasm heap.
    pub fn as_uint8array(&self) -> &js_sys::Uint8Array {
        self.inner.as_uint8array()
    }
}

impl core::ops::Deref for BufferView {
    type Target = [u8];

    #[inline]
    fn deref(&self) -> &[u8] {
        self.inner.slice()
    }
}

impl AsRef<[u8]> for BufferView {
    #[inline]
    fn as_ref(&self) -> &[u8] {
        self.inner.slice()
    }
}

/// A write-only view of a mapped buffer's bytes.
///
/// To get a `BufferViewMut`, first [map] the buffer, and then
/// call `buffer.slice(range).get_mapped_range_mut()`.
///
/// `BufferViewMut` dereferences to `&mut [u8]`, so you can use all the usual
/// Rust slice methods to access the buffer's contents. It also implements
/// `AsMut<[u8]>`, if that's more convenient.
///
/// It is possible to read the buffer using this view, but doing so is not
/// recommended, as it is likely to be slow.
///
/// Before the buffer can be unmapped, all `BufferViewMut`s observing it
/// must be dropped. Otherwise, the call to [`Buffer::unmap`] will panic.
///
/// For example code, see the documentation on [mapping buffers][map].
///
/// [map]: Buffer#mapping-buffers
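///
/// A short sketch of initializing a buffer through `mapped_at_creation`,
/// assuming a `device` is available (the size and usage here are arbitrary):
///
/// ```no_run
/// # let device: wgpu::Device = todo!();
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: None,
///     size: 16,
///     usage: wgpu::BufferUsages::VERTEX,
///     mapped_at_creation: true,
/// });
/// // The buffer starts out mapped, so a view can be taken immediately.
/// let mut view: wgpu::BufferViewMut = buffer.get_mapped_range_mut(..);
/// view.fill(0xFF);
/// drop(view);
/// buffer.unmap();
/// ```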
#[derive(Debug)]
pub struct BufferViewMut {
    // `buffer`, `offset`, and `size` are similar to `BufferSlice`, except that they own the buffer.
    buffer: Buffer,
    offset: BufferAddress,
    size: BufferSize,
    inner: dispatch::DispatchBufferMappedRange,
    readable: bool,
}

impl AsMut<[u8]> for BufferViewMut {
    #[inline]
    fn as_mut(&mut self) -> &mut [u8] {
        self.inner.slice_mut()
    }
}

impl Deref for BufferViewMut {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        if !self.readable {
            log::warn!("Reading from a BufferViewMut is slow and not recommended.");
        }

        self.inner.slice()
    }
}

impl DerefMut for BufferViewMut {
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.inner.slice_mut()
    }
}

impl Drop for BufferView {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

impl Drop for BufferViewMut {
    fn drop(&mut self) {
        self.buffer
            .map_context
            .lock()
            .remove(self.offset, self.size);
    }
}

#[track_caller]
fn check_buffer_bounds(
    buffer_size: BufferAddress,
    slice_offset: BufferAddress,
    slice_size: BufferSize,
) {
    // A slice of length 0 is invalid, so the offset must not be equal to or greater than the buffer size.
    if slice_offset >= buffer_size {
        panic!(
            "slice offset {} is out of range for buffer of size {}",
            slice_offset, buffer_size
        );
    }

    // Detect integer overflow.
    let end = slice_offset.checked_add(slice_size.get());
    if end.is_none_or(|end| end > buffer_size) {
        panic!(
            "slice offset {} size {} is out of range for buffer of size {}",
            slice_offset, slice_size, buffer_size
        );
    }
}

#[track_caller]
fn range_to_offset_size<S: RangeBounds<BufferAddress>>(
    bounds: S,
    whole_size: BufferAddress,
) -> (BufferAddress, BufferSize) {
    let offset = match bounds.start_bound() {
        Bound::Included(&bound) => bound,
        Bound::Excluded(&bound) => bound + 1,
        Bound::Unbounded => 0,
    };
    let size = BufferSize::new(match bounds.end_bound() {
        Bound::Included(&bound) => bound + 1 - offset,
        Bound::Excluded(&bound) => bound - offset,
        Bound::Unbounded => whole_size - offset,
    })
    .expect("buffer slices can not be empty");

    (offset, size)
}

#[cfg(test)]
mod tests {
    use super::{check_buffer_bounds, range_to_offset_size, BufferAddress, BufferSize};

    fn bs(value: BufferAddress) -> BufferSize {
        BufferSize::new(value).unwrap()
    }

    #[test]
    fn range_to_offset_size_works() {
        let whole = 100;

        assert_eq!(range_to_offset_size(0..2, whole), (0, bs(2)));
        assert_eq!(range_to_offset_size(2..5, whole), (2, bs(3)));
        assert_eq!(range_to_offset_size(.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(21.., whole), (21, bs(whole - 21)));
        assert_eq!(range_to_offset_size(0.., whole), (0, bs(whole)));
        assert_eq!(range_to_offset_size(..21, whole), (0, bs(21)));
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_empty_range() {
        range_to_offset_size(123..123, 200);
    }

    #[test]
    #[should_panic = "buffer slices can not be empty"]
    fn range_to_offset_size_panics_for_unbounded_empty_range() {
        range_to_offset_size(..0, 100);
    }

    #[test]
    fn check_buffer_bounds_works_for_end_in_range() {
        check_buffer_bounds(200, 100, bs(50));
        check_buffer_bounds(200, 100, bs(100));
        check_buffer_bounds(u64::MAX, u64::MAX - 100, bs(100));
        check_buffer_bounds(u64::MAX, 0, bs(u64::MAX));
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX - 1));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_over_size() {
        check_buffer_bounds(200, 100, bs(101));
    }

    #[test]
    #[should_panic]
    fn check_buffer_bounds_panics_for_end_wraparound() {
        check_buffer_bounds(u64::MAX, 1, bs(u64::MAX));
    }
}
975}