wgpu/api/device.rs
use alloc::{boxed::Box, string::String, sync::Arc, vec};
#[cfg(wgpu_core)]
use core::ops::Deref;
use core::{error, fmt, future::Future, marker::PhantomData};

use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
use crate::api::tlas::{CreateTlasDescriptor, Tlas};
use crate::util::Mutex;
use crate::*;

/// Open connection to a graphics and/or compute device.
///
/// Responsible for the creation of most rendering and compute resources.
/// These are then used in commands, which are submitted to a [`Queue`].
///
/// A device may be requested from an adapter with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
#[derive(Debug, Clone)]
pub struct Device {
    pub(crate) inner: dispatch::DispatchDevice,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Device: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);

/// Describes a [`Device`].
///
/// For use with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);

impl Device {
    #[cfg(custom)]
    /// Returns the custom implementation of this `Device`, if the custom backend is in use
    /// and its inner type is `T`.
    pub fn as_custom<T: custom::DeviceInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }

    #[cfg(custom)]
    /// Creates a `Device` from a custom implementation.
    pub fn from_custom<T: custom::DeviceInterface>(device: T) -> Self {
        Self {
            inner: dispatch::DispatchDevice::custom(device),
        }
    }

    /// Constructs a stub device for testing using [`Backend::Noop`].
    ///
    /// This is a convenience function which avoids the configuration, `async`, and fallibility
    /// aspects of constructing a device through `Instance`.
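    ///
    /// A minimal sketch of how this might be used in a test (assuming the `noop` feature
    /// is enabled; the descriptor shown is just the default):
    ///
    /// ```rust,no_run
    /// let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    /// // `device` and `queue` can now be used like any other device/queue pair.
    /// # let _ = (device, queue);
    /// ```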
    #[cfg(feature = "noop")]
    pub fn noop(desc: &DeviceDescriptor<'_>) -> (Device, Queue) {
        use core::future::Future as _;
        use core::pin::pin;
        use core::task;
        let ctx = &mut task::Context::from_waker(task::Waker::noop());

        let instance = Instance::new(InstanceDescriptor {
            backends: Backends::NOOP,
            backend_options: BackendOptions {
                noop: NoopBackendOptions { enable: true },
                ..Default::default()
            },
            ..Default::default()
        });

        // Both of these futures are trivial and should complete instantaneously,
        // so we do not need an executor and can just poll them once.
        let task::Poll::Ready(Ok(adapter)) =
            pin!(instance.request_adapter(&RequestAdapterOptions::default())).poll(ctx)
        else {
            unreachable!()
        };
        let task::Poll::Ready(Ok(device_and_queue)) = pin!(adapter.request_device(desc)).poll(ctx)
        else {
            unreachable!()
        };
        device_and_queue
    }

    /// Checks for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
    ///
    /// The returned [`PollStatus`] indicates whether the queue is empty or whether
    /// submissions are still in flight. (Note that, unless access to the [`Queue`] is
    /// coordinated somehow, this information could be out of date by the time
    /// the caller receives it. `Queue`s can be shared between threads, so
    /// other threads could submit new work at any time.)
    ///
    /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
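    ///
    /// A small usage sketch (non-blocking variant; whether work remains is reported
    /// through the returned [`PollStatus`]):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// // Check for completed work and run any pending map callbacks without blocking.
    /// let _ = device.poll(wgpu::PollType::Poll);
    /// ```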
    pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
        self.inner.poll(poll_type.map_index(|s| s.index))
    }

    /// The [features][Features] which can be used on this device.
    ///
    /// This will be equal to the [`required_features`][DeviceDescriptor::required_features]
    /// specified when creating the device.
    /// No additional features can be used, even if the underlying adapter can support them.
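    ///
    /// For example, optional functionality can be gated on the returned set
    /// (a sketch; `TIMESTAMP_QUERY` is just an arbitrary feature to test for):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// if device.features().contains(wgpu::Features::TIMESTAMP_QUERY) {
    ///     // Timestamp query sets may be created on this device.
    /// }
    /// ```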
    #[must_use]
    pub fn features(&self) -> Features {
        self.inner.features()
    }

    /// The limits which can be used on this device.
    ///
    /// This will be equal to the [`required_limits`][DeviceDescriptor::required_limits]
    /// specified when creating the device.
    /// No better limits can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn limits(&self) -> Limits {
        self.inner.limits()
    }

    /// Creates a shader module.
    ///
    /// <div class="warning">
    // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
    // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
    ///
    /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
    /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
    /// However, on some build profiles and platforms, the default stack size for a thread may be
    /// exceeded before this limit is reached during parsing. Callers should ensure that there is
    /// enough stack space for this, particularly if calls to this method are exposed to user
    /// input.
    ///
    /// </div>
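    ///
    /// A minimal sketch of creating a module from inline WGSL (the shader text here is
    /// just a placeholder compute entry point):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///     label: Some("example shader"),
    ///     source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
    /// });
    /// ```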
    #[must_use]
    pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
        let module = self
            .inner
            .create_shader_module(desc, wgt::ShaderRuntimeChecks::checked());
        ShaderModule { inner: module }
    }

    /// Deprecated: Use [`create_shader_module_trusted`][csmt] instead.
    ///
    /// # Safety
    ///
    /// See [`create_shader_module_trusted`][csmt].
    ///
    /// [csmt]: Self::create_shader_module_trusted
    #[deprecated(
        since = "24.0.0",
        note = "Use `Device::create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())` instead."
    )]
    #[must_use]
    pub unsafe fn create_shader_module_unchecked(
        &self,
        desc: ShaderModuleDescriptor<'_>,
    ) -> ShaderModule {
        unsafe { self.create_shader_module_trusted(desc, crate::ShaderRuntimeChecks::unchecked()) }
    }

    /// Creates a shader module with flags to dictate runtime checks.
    ///
    /// When running on WebGPU, this will merely call [`create_shader_module`][csm].
    ///
    /// # Safety
    ///
    /// In contrast with [`create_shader_module`][csm], this function creates a shader module
    /// with user-customizable runtime checks. Disabling those checks allows shaders to perform
    /// operations which can lead to undefined behavior, such as indexing out of bounds, so it
    /// is the caller's responsibility to pass a shader which does not perform any such
    /// operations.
    ///
    /// See the documentation for [`ShaderRuntimeChecks`] for more information about specific checks.
    ///
    /// [csm]: Self::create_shader_module
    #[must_use]
    pub unsafe fn create_shader_module_trusted(
        &self,
        desc: ShaderModuleDescriptor<'_>,
        runtime_checks: crate::ShaderRuntimeChecks,
    ) -> ShaderModule {
        let module = self.inner.create_shader_module(desc, runtime_checks);
        ShaderModule { inner: module }
    }

    /// Creates a shader module which will bypass wgpu's shader tooling and validation and be used directly by the backend.
    ///
    /// # Safety
    ///
    /// This function passes data to the backend as-is and can potentially result in a
    /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid.
    #[must_use]
    pub unsafe fn create_shader_module_passthrough(
        &self,
        desc: ShaderModuleDescriptorPassthrough<'_>,
    ) -> ShaderModule {
        let module = unsafe { self.inner.create_shader_module_passthrough(&desc) };
        ShaderModule { inner: module }
    }

    /// Creates an empty [`CommandEncoder`].
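    ///
    /// A sketch of the usual encode-then-submit flow (the `queue` is assumed to come from
    /// the same device request as this `Device`):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// # let queue: wgpu::Queue = unreachable!();
    /// let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
    ///     label: Some("frame encoder"),
    /// });
    /// // ... record passes and copies on `encoder` ...
    /// queue.submit([encoder.finish()]);
    /// ```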
    #[must_use]
    pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
        let encoder = self.inner.create_command_encoder(desc);
        // Each encoder starts with its own deferred-action store that travels
        // with the CommandBuffer produced by finish().
        CommandEncoder {
            inner: encoder,
            actions: Default::default(),
        }
    }

    /// Creates an empty [`RenderBundleEncoder`].
    #[must_use]
    pub fn create_render_bundle_encoder<'a>(
        &self,
        desc: &RenderBundleEncoderDescriptor<'_>,
    ) -> RenderBundleEncoder<'a> {
        let encoder = self.inner.create_render_bundle_encoder(desc);
        RenderBundleEncoder {
            inner: encoder,
            _p: PhantomData,
        }
    }

    /// Creates a new [`BindGroup`].
    #[must_use]
    pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
        let group = self.inner.create_bind_group(desc);
        BindGroup { inner: group }
    }

    /// Creates a [`BindGroupLayout`].
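    ///
    /// A sketch of a layout with a single uniform buffer visible to the fragment stage
    /// (binding number and visibility are arbitrary examples):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
    ///     label: Some("uniforms"),
    ///     entries: &[wgpu::BindGroupLayoutEntry {
    ///         binding: 0,
    ///         visibility: wgpu::ShaderStages::FRAGMENT,
    ///         ty: wgpu::BindingType::Buffer {
    ///             ty: wgpu::BufferBindingType::Uniform,
    ///             has_dynamic_offset: false,
    ///             min_binding_size: None,
    ///         },
    ///         count: None,
    ///     }],
    /// });
    /// ```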
    #[must_use]
    pub fn create_bind_group_layout(
        &self,
        desc: &BindGroupLayoutDescriptor<'_>,
    ) -> BindGroupLayout {
        let layout = self.inner.create_bind_group_layout(desc);
        BindGroupLayout { inner: layout }
    }

    /// Creates a [`PipelineLayout`].
    #[must_use]
    pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
        let layout = self.inner.create_pipeline_layout(desc);
        PipelineLayout { inner: layout }
    }

    /// Creates a [`RenderPipeline`].
    #[must_use]
    pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_render_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a mesh shader based [`RenderPipeline`].
    #[must_use]
    pub fn create_mesh_pipeline(&self, desc: &MeshPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_mesh_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a [`ComputePipeline`].
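    ///
    /// A sketch of building a compute pipeline from an already-created shader module,
    /// letting wgpu infer the pipeline layout (`"main"` is an assumed entry point name):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// # let module: wgpu::ShaderModule = unreachable!();
    /// let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
    ///     label: Some("example compute pipeline"),
    ///     layout: None, // infer the layout from the shader
    ///     module: &module,
    ///     entry_point: Some("main"),
    ///     compilation_options: Default::default(),
    ///     cache: None,
    /// });
    /// ```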
    #[must_use]
    pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
        let pipeline = self.inner.create_compute_pipeline(desc);
        ComputePipeline { inner: pipeline }
    }

    /// Creates a [`Buffer`].
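    ///
    /// A sketch of creating a small readback buffer (the size and usage flags are
    /// arbitrary examples):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///     label: Some("readback buffer"),
    ///     size: 1024,
    ///     usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
    ///     mapped_at_creation: false,
    /// });
    /// ```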
    #[must_use]
    pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = self.inner.create_buffer(desc);

        Buffer {
            inner: buffer,
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Texture`].
    ///
    /// `desc` specifies the general format of the texture.
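    ///
    /// A sketch of creating a 2D texture usable as both a render target and a shader
    /// binding (the dimensions and format are arbitrary examples):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// let texture = device.create_texture(&wgpu::TextureDescriptor {
    ///     label: Some("render target"),
    ///     size: wgpu::Extent3d {
    ///         width: 256,
    ///         height: 256,
    ///         depth_or_array_layers: 1,
    ///     },
    ///     mip_level_count: 1,
    ///     sample_count: 1,
    ///     dimension: wgpu::TextureDimension::D2,
    ///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
    ///     usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
    ///     view_formats: &[],
    /// });
    /// ```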
    #[must_use]
    pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
        let texture = self.inner.create_texture(desc);

        Texture {
            inner: texture,
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Texture`] from a wgpu-hal Texture.
    ///
    /// # Types
    ///
    /// The type of `A::Texture` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Texture")]
    #[doc = crate::hal_type_metal!("Texture")]
    #[doc = crate::hal_type_dx12!("Texture")]
    #[doc = crate::hal_type_gles!("Texture")]
    ///
    /// # Safety
    ///
    /// - `hal_texture` must be created from this device's internal handle
    /// - `hal_texture` must be created respecting `desc`
    /// - `hal_texture` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_texture_from_hal<A: hal::Api>(
        &self,
        hal_texture: A::Texture,
        desc: &TextureDescriptor<'_>,
    ) -> Texture {
        let texture = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_texture_from_hal::<A>(hal_texture, core_device, desc)
        };
        Texture {
            inner: texture.into(),
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a new [`ExternalTexture`].
    #[must_use]
    pub fn create_external_texture(
        &self,
        desc: &ExternalTextureDescriptor<'_>,
        planes: &[&TextureView],
    ) -> ExternalTexture {
        let external_texture = self.inner.create_external_texture(desc, planes);

        ExternalTexture {
            inner: external_texture,
        }
    }

    /// Creates a [`Buffer`] from a wgpu-hal Buffer.
    ///
    /// # Types
    ///
    /// The type of `A::Buffer` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Safety
    ///
    /// - `hal_buffer` must be created from this device's internal handle
    /// - `hal_buffer` must be created respecting `desc`
    /// - `hal_buffer` must be initialized
    /// - `hal_buffer` must not have zero size
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_buffer_from_hal<A: hal::Api>(
        &self,
        hal_buffer: A::Buffer,
        desc: &BufferDescriptor<'_>,
    ) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
        };

        Buffer {
            inner: buffer.into(),
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Sampler`].
    ///
    /// `desc` specifies the behavior of the sampler.
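    ///
    /// A sketch of a simple bilinear sampler, leaving the remaining fields at their
    /// defaults:
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
    ///     label: Some("linear sampler"),
    ///     mag_filter: wgpu::FilterMode::Linear,
    ///     min_filter: wgpu::FilterMode::Linear,
    ///     ..Default::default()
    /// });
    /// ```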
    #[must_use]
    pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
        let sampler = self.inner.create_sampler(desc);
        Sampler { inner: sampler }
    }

    /// Creates a new [`QuerySet`].
    #[must_use]
    pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
        let query_set = self.inner.create_query_set(desc);
        QuerySet { inner: query_set }
    }

    /// Set a callback which will be called for all errors that are not handled in error scopes.
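    ///
    /// A sketch of installing a handler that simply logs uncaptured errors (the handler
    /// shown just writes to stderr):
    ///
    /// ```rust,no_run
    /// # use std::sync::Arc;
    /// # let device: wgpu::Device = unreachable!();
    /// device.on_uncaptured_error(Arc::new(|error: wgpu::Error| {
    ///     eprintln!("uncaptured wgpu error: {error}");
    /// }));
    /// ```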
    pub fn on_uncaptured_error(&self, handler: Arc<dyn UncapturedErrorHandler>) {
        self.inner.on_uncaptured_error(handler)
    }

    /// Push an error scope on this device's thread-local error scope
    /// stack. All operations on this device, or on resources created
    /// from this device, will have their errors captured by this scope
    /// until the scope is popped.
    ///
    /// Scopes must be popped in the reverse order of their creation. If
    /// a guard is dropped without being `pop()`ped, the scope will be
    /// popped, and the captured errors will be dropped.
    ///
    /// Multiple error scopes may be active at one time, forming a stack.
    /// Each error will be reported to the innermost scope that matches
    /// its filter.
    ///
    /// With the `std` feature enabled, this stack is **thread-local**.
    /// Without it, the stack is **global** to all threads.
    ///
    /// ```rust
    /// # async move {
    /// # let device: wgpu::Device = unreachable!();
    /// let error_scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
    ///
    /// // ...
    /// // do work that may produce validation errors
    /// // ...
    ///
    /// // pop the error scope and get a future for the result
    /// let error_future = error_scope.pop();
    ///
    /// // await the future to get the error, if any
    /// let error = error_future.await;
    /// # };
    /// ```
    pub fn push_error_scope(&self, filter: ErrorFilter) -> ErrorScopeGuard {
        let index = self.inner.push_error_scope(filter);
        ErrorScopeGuard {
            device: self.inner.clone(),
            index,
            popped: false,
            _phantom: PhantomData,
        }
    }

    /// Starts a capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`StartFrameCapture(device, NULL)`][rd].
    /// - Xcode: Creates a capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should not be any other captures currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - Debuggers need to capture both the recording of the commands and the
    ///   submission of the commands to the GPU. Try to wrap all of your
    ///   GPU work in a capture.
    /// - If you encounter issues, try waiting for the GPU to finish all work
    ///   before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv417StartFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "start_renderdoc_capture")]
    #[doc(alias = "start_xcode_capture")]
    pub unsafe fn start_graphics_debugger_capture(&self) {
        unsafe { self.inner.start_graphics_debugger_capture() }
    }

    /// Stops the current capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`EndFrameCapture(device, NULL)`][rd].
    /// - Xcode: Stops the capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should be a capture currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - If you encounter issues, try submitting all work to the GPU and waiting
    ///   for that work to finish before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv415EndFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "stop_renderdoc_capture")]
    #[doc(alias = "stop_xcode_capture")]
    pub unsafe fn stop_graphics_debugger_capture(&self) {
        unsafe { self.inner.stop_graphics_debugger_capture() }
    }

    /// Query internal counters from the native backend for debugging purposes.
    ///
    /// Some backends may not set all counters, or may not set any counter at all.
    /// The `counters` cargo feature must be enabled for any counter to be set.
    ///
    /// If a counter is not set, it contains its default value (zero).
    #[must_use]
    pub fn get_internal_counters(&self) -> wgt::InternalCounters {
        self.inner.get_internal_counters()
    }

    /// Generate a GPU memory allocation report if the underlying backend supports it.
    ///
    /// Backends that do not support producing these reports return `None`. A backend may
    /// support reporting and still return `None` if it is not performing sub-allocation,
    /// for example as a workaround for driver issues.
    #[must_use]
    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
        self.inner.generate_allocator_report()
    }

    /// Get the [`wgpu_hal`] device from this `Device`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct as the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the hal backend's [`A::Device`] type.
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Device")]
    #[doc = crate::hal_type_metal!("Device")]
    #[doc = crate::hal_type_dx12!("Device")]
    #[doc = crate::hal_type_gles!("Device")]
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The device is not from the backend specified by `A`.
    /// - The device is from the `webgpu` or `custom` backend.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Device`]: hal::Api::Device
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Device> + WasmNotSendSync> {
        let device = self.inner.as_core_opt()?;
        unsafe { device.context.device_as_hal::<A>(device) }
    }

    /// Destroy this device.
    pub fn destroy(&self) {
        self.inner.destroy()
    }

    /// Set a DeviceLostCallback on this device.
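    ///
    /// A sketch of a callback that simply logs why the device was lost (the format
    /// string is arbitrary):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// device.set_device_lost_callback(|reason, message| {
    ///     eprintln!("device lost ({reason:?}): {message}");
    /// });
    /// ```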
    pub fn set_device_lost_callback(
        &self,
        callback: impl Fn(DeviceLostReason, String) + Send + 'static,
    ) {
        self.inner.set_device_lost_callback(Box::new(callback))
    }

    /// Create a [`PipelineCache`] with initial data.
    ///
    /// This can be passed to [`Device::create_compute_pipeline`]
    /// and [`Device::create_render_pipeline`] to either accelerate these
    /// or add the cache results from those.
    ///
    /// # Safety
    ///
    /// If the `data` field of `desc` is set, it must have previously been returned from a call
    /// to [`PipelineCache::get_data`][^saving]. This `data` will only be used if it came
    /// from an adapter with the same [`util::pipeline_cache_key`].
    /// This *is* compatible across wgpu versions, as any data format change will
    /// be accounted for.
    ///
    /// It is *not* supported to bring caches from previous direct uses of backend APIs
    /// into this method.
    ///
    /// # Errors
    ///
    /// Returns an error value if:
    /// * the [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) feature is not enabled
    /// * this device is invalid; or
    /// * the device is out of memory
    ///
    /// This method also returns an error value if:
    /// * The `fallback` field on `desc` is false; and
    /// * the `data` provided would not be used[^data_not_used]
    ///
    /// If an error value is used in subsequent calls, default caching will be used.
    ///
    /// [^saving]: We do recognise that saving this data to disk means this condition
    /// is impossible to fully prove. Consider the risks for your own application in this case.
    ///
    /// [^data_not_used]: This data may not be used if the data was produced by a prior
    /// version of wgpu, was created for an incompatible adapter, or there was a GPU driver
    /// update. In some cases, the data might not be used and a real value is returned;
    /// this is left to the discretion of GPU drivers.
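    ///
    /// A sketch of restoring a cache from previously saved bytes, falling back to an
    /// empty cache when no data is available (`cached_bytes` is a hypothetical value
    /// loaded by the application, e.g. from disk):
    ///
    /// ```rust,no_run
    /// # let device: wgpu::Device = unreachable!();
    /// # let cached_bytes: Option<Vec<u8>> = None;
    /// let cache = unsafe {
    ///     device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
    ///         label: Some("pipeline cache"),
    ///         data: cached_bytes.as_deref(),
    ///         fallback: true,
    ///     })
    /// };
    /// ```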
    #[must_use]
    pub unsafe fn create_pipeline_cache(
        &self,
        desc: &PipelineCacheDescriptor<'_>,
    ) -> PipelineCache {
        let cache = unsafe { self.inner.create_pipeline_cache(desc) };
        PipelineCache { inner: cache }
    }
}

/// [`Features::EXPERIMENTAL_RAY_QUERY`] must be enabled on the device in order to call these functions.
impl Device {
    /// Create a bottom level acceleration structure, used inside a top level acceleration structure for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    /// - `sizes`: Size descriptor limiting what can be built into the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    /// If `sizes` is [`BlasGeometrySizeDescriptors::Triangles`], then the following must be satisfied:
    /// - For every geometry descriptor (called `geo_desc` here) of `sizes.descriptors`, the following must be satisfied:
    ///   - `geo_desc.vertex_format` must be within allowed formats (allowed formats for a given feature set
    ///     may be queried with [`Features::allowed_vertex_formats_for_blas`]).
    ///   - Both or neither of `geo_desc.index_format` and `geo_desc.index_count` must be provided.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    /// [`Features::allowed_vertex_formats_for_blas`]: wgt::Features::allowed_vertex_formats_for_blas
    #[must_use]
    pub fn create_blas(
        &self,
        desc: &CreateBlasDescriptor<'_>,
        sizes: BlasGeometrySizeDescriptors,
    ) -> Blas {
        let (handle, blas) = self.inner.create_blas(desc, sizes);

        Blas {
            inner: blas,
            handle,
        }
    }

    /// Create a top level acceleration structure, used for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    #[must_use]
    pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
        let tlas = self.inner.create_tlas(desc);

        Tlas {
            inner: tlas,
            instances: vec![None; desc.max_instances as usize],
            lowest_unmodified: 0,
        }
    }
}

/// Requesting a device from an [`Adapter`] failed.
#[derive(Clone, Debug)]
pub struct RequestDeviceError {
    pub(crate) inner: RequestDeviceErrorKind,
}
#[derive(Clone, Debug)]
pub(crate) enum RequestDeviceErrorKind {
    /// Error from [`wgpu_core`].
    // must match dependency cfg
    #[cfg(wgpu_core)]
    Core(wgc::instance::RequestDeviceError),

    /// Error from web API that was called by `wgpu` to request a device.
    ///
    /// (This is currently never used by the webgl backend, but it could be.)
    #[cfg(webgpu)]
    WebGpu(String),
}

static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);

impl fmt::Display for RequestDeviceError {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.fmt(_f),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(error) => {
                write!(_f, "{error}")
            }
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

impl error::Error for RequestDeviceError {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.source(),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(_) => None,
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

#[cfg(wgpu_core)]
impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
    fn from(error: wgc::instance::RequestDeviceError) -> Self {
        Self {
            inner: RequestDeviceErrorKind::Core(error),
        }
    }
}

/// The callback of [`Device::on_uncaptured_error()`].
///
/// It must be a function with this signature.
pub trait UncapturedErrorHandler: Fn(Error) + Send + Sync + 'static {}
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + Sync + 'static {}

/// Kinds of [`Error`]s a [`Device::push_error_scope()`] may be configured to catch.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
pub enum ErrorFilter {
    /// Catch only out-of-memory errors.
    OutOfMemory,
    /// Catch only validation errors.
    Validation,
    /// Catch only internal errors.
    Internal,
}
static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);

/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(send_sync)]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + Send + Sync + 'static>;
/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(not(send_sync))]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + 'static>;

/// Errors resulting from usage of GPU APIs.
///
/// By default, errors translate into panics. Depending on the backend and circumstances,
/// errors may occur synchronously or asynchronously. When errors need to be handled, use
/// [`Device::push_error_scope()`] or [`Device::on_uncaptured_error()`].
#[derive(Debug)]
pub enum Error {
    /// Out of memory.
    OutOfMemory {
        /// Lower level source of the error.
        source: ErrorSource,
    },
    /// Validation error, signifying a bug in code or data provided to `wgpu`.
    Validation {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the validation error.
        description: String,
    },
    /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
    ///
    /// These could be due to internal implementation or system limits being reached.
    Internal {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the internal GPU error.
        description: String,
    },
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Error: Send, Sync);

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::OutOfMemory { source } => Some(source.as_ref()),
            Error::Validation { source, .. } => Some(source.as_ref()),
            Error::Internal { source, .. } => Some(source.as_ref()),
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
            Error::Validation { description, .. } => f.write_str(description),
            Error::Internal { description, .. } => f.write_str(description),
        }
    }
}

/// Guard for an error scope pushed with [`Device::push_error_scope()`].
///
/// Call [`pop()`] to pop the scope and get a future for the result. If
/// the guard is dropped without being popped explicitly, the scope will still be popped,
/// and the captured errors will be dropped.
///
/// This guard is neither `Send` nor `Sync`, as error scopes are handled
/// on a per-thread basis when the `std` feature is enabled.
///
/// [`pop()`]: ErrorScopeGuard::pop
#[must_use = "Error scopes must be explicitly popped to retrieve errors they catch"]
pub struct ErrorScopeGuard {
    device: dispatch::DispatchDevice,
    index: u32,
    popped: bool,
    // Ensure the guard is !Send and !Sync
    _phantom: PhantomData<*mut ()>,
}

static_assertions::assert_not_impl_any!(ErrorScopeGuard: Send, Sync);

impl ErrorScopeGuard {
    /// Pops the error scope.
    ///
    /// Returns a future which resolves to the error captured by this scope, if any.
    /// The pop takes effect immediately; the future does not need to be awaited before
    /// doing work that is outside of this error scope.
    pub fn pop(mut self) -> impl Future<Output = Option<Error>> + WasmNotSend {
        self.popped = true;
        self.device.pop_error_scope(self.index)
    }
}

impl Drop for ErrorScopeGuard {
    fn drop(&mut self) {
        if !self.popped {
            drop(self.device.pop_error_scope(self.index));
        }
    }
}