wgpu/api/device.rs

use alloc::{boxed::Box, string::String, sync::Arc, vec};
#[cfg(wgpu_core)]
use core::ops::Deref;
use core::{error, fmt, future::Future, marker::PhantomData};

use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
use crate::api::tlas::{CreateTlasDescriptor, Tlas};
use crate::util::Mutex;
use crate::*;

/// Open connection to a graphics and/or compute device.
///
/// Responsible for the creation of most rendering and compute resources.
/// These are then used in commands, which are submitted to a [`Queue`].
///
/// A device may be requested from an adapter with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
#[derive(Debug, Clone)]
pub struct Device {
    pub(crate) inner: dispatch::DispatchDevice,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Device: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);

/// Describes a [`Device`].
///
/// For use with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);

impl Device {
    #[cfg(custom)]
    /// Returns the custom implementation of this `Device`, if it was created by a custom
    /// backend and its inner type is `T`.
    pub fn as_custom<T: custom::DeviceInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }

    #[cfg(custom)]
    /// Creates a `Device` from a custom implementation.
    pub fn from_custom<T: custom::DeviceInterface>(device: T) -> Self {
        Self {
            inner: dispatch::DispatchDevice::custom(device),
        }
    }

    /// Constructs a stub device for testing using [`Backend::Noop`].
    ///
    /// This is a convenience function which avoids the configuration, `async`, and fallibility
    /// aspects of constructing a device through `Instance`.
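    ///
    /// A minimal sketch of how a test might use this (requires the `noop` feature; the
    /// default `DeviceDescriptor` is used purely for illustration):
    ///
    /// ```
    /// let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    /// # let _ = (device, queue);
    /// ```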
    #[cfg(feature = "noop")]
    pub fn noop(desc: &DeviceDescriptor<'_>) -> (Device, Queue) {
        use core::future::Future as _;
        use core::pin::pin;
        use core::task;
        let ctx = &mut task::Context::from_waker(task::Waker::noop());

        let instance = Instance::new(&InstanceDescriptor {
            backends: Backends::NOOP,
            backend_options: BackendOptions {
                noop: NoopBackendOptions { enable: true },
                ..Default::default()
            },
            ..Default::default()
        });

        // Both of these futures are trivial and should complete instantaneously,
        // so we do not need an executor and can just poll them once.
        let task::Poll::Ready(Ok(adapter)) =
            pin!(instance.request_adapter(&RequestAdapterOptions::default())).poll(ctx)
        else {
            unreachable!()
        };
        let task::Poll::Ready(Ok(device_and_queue)) = pin!(adapter.request_device(desc)).poll(ctx)
        else {
            unreachable!()
        };
        device_and_queue
    }

    /// Check for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
    ///
    /// Returns a [`PollStatus`] indicating whether the queue is empty, or whether there are
    /// more queue submissions still in flight. (Note that, unless access to the [`Queue`] is
    /// coordinated somehow, this information could be out of date by the time
    /// the caller receives it. `Queue`s can be shared between threads, so
    /// other threads could submit new work at any time.)
    ///
    /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
    pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
        self.inner.poll(poll_type.map_index(|s| s.index))
    }

    /// The features which can be used on this device.
    ///
    /// No additional features can be used, even if the underlying adapter can support them.
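    ///
    /// A small illustrative check (the feature queried here is just an example):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let supports_timestamps = device.features().contains(wgpu::Features::TIMESTAMP_QUERY);
    /// # let _ = supports_timestamps;
    /// # }
    /// ```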
    #[must_use]
    pub fn features(&self) -> Features {
        self.inner.features()
    }

    /// The limits which can be used on this device.
    ///
    /// No better limits can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn limits(&self) -> Limits {
        self.inner.limits()
    }

    /// Creates a shader module.
    ///
    /// <div class="warning">
    // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
    // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
    ///
    /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
    /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
    /// However, on some build profiles and platforms, the default stack size for a thread may be
    /// exceeded before this limit is reached during parsing. Callers should ensure that there is
    /// enough stack space for this, particularly if calls to this method are exposed to user
    /// input.
    ///
    /// </div>
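    ///
    /// A minimal usage sketch (the label and WGSL source here are illustrative, and the
    /// default `wgsl` feature is assumed):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///     label: Some("example shader"),
    ///     source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
    /// });
    /// # let _ = module;
    /// # }
    /// ```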
    #[must_use]
    pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
        let module = self
            .inner
            .create_shader_module(desc, wgt::ShaderRuntimeChecks::checked());
        ShaderModule { inner: module }
    }

    /// Deprecated: Use [`create_shader_module_trusted`][csmt] instead.
    ///
    /// # Safety
    ///
    /// See [`create_shader_module_trusted`][csmt].
    ///
    /// [csmt]: Self::create_shader_module_trusted
    #[deprecated(
        since = "24.0.0",
        note = "Use `Device::create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())` instead."
    )]
    #[must_use]
    pub unsafe fn create_shader_module_unchecked(
        &self,
        desc: ShaderModuleDescriptor<'_>,
    ) -> ShaderModule {
        unsafe { self.create_shader_module_trusted(desc, crate::ShaderRuntimeChecks::unchecked()) }
    }

    /// Creates a shader module with flags to dictate runtime checks.
    ///
    /// When running on WebGPU, this will merely call [`create_shader_module`][csm].
    ///
    /// # Safety
    ///
    /// In contrast with [`create_shader_module`][csm], this function creates a shader module
    /// with user-customizable runtime checks, which allows shaders to perform operations that
    /// can lead to undefined behavior, such as indexing out of bounds. It is therefore the
    /// caller's responsibility to pass a shader which does not perform any such operations.
    ///
    /// See the documentation for [`ShaderRuntimeChecks`] for more information about specific checks.
    ///
    /// [csm]: Self::create_shader_module
    #[must_use]
    pub unsafe fn create_shader_module_trusted(
        &self,
        desc: ShaderModuleDescriptor<'_>,
        runtime_checks: crate::ShaderRuntimeChecks,
    ) -> ShaderModule {
        let module = self.inner.create_shader_module(desc, runtime_checks);
        ShaderModule { inner: module }
    }

    /// Creates a shader module which will bypass wgpu's shader tooling and validation and be used directly by the backend.
    ///
    /// # Safety
    ///
    /// This function passes data to the backend as-is and can potentially result in a
    /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid.
    #[must_use]
    pub unsafe fn create_shader_module_passthrough(
        &self,
        desc: ShaderModuleDescriptorPassthrough<'_>,
    ) -> ShaderModule {
        let module = unsafe { self.inner.create_shader_module_passthrough(&desc) };
        ShaderModule { inner: module }
    }

    /// Creates an empty [`CommandEncoder`].
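    ///
    /// A short usage sketch (assumes a `device` and `queue` are available; the label is
    /// illustrative, and real code would record passes or copies before finishing):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device, queue: &wgpu::Queue) {
    /// let encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
    ///     label: Some("example encoder"),
    /// });
    /// // Passes and copies would be recorded on the encoder here.
    /// queue.submit([encoder.finish()]);
    /// # }
    /// ```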
    #[must_use]
    pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
        let encoder = self.inner.create_command_encoder(desc);
        // Each encoder starts with its own deferred-action store that travels
        // with the CommandBuffer produced by finish().
        CommandEncoder {
            inner: encoder,
            actions: Default::default(),
        }
    }

    /// Creates an empty [`RenderBundleEncoder`].
    #[must_use]
    pub fn create_render_bundle_encoder<'a>(
        &self,
        desc: &RenderBundleEncoderDescriptor<'_>,
    ) -> RenderBundleEncoder<'a> {
        let encoder = self.inner.create_render_bundle_encoder(desc);
        RenderBundleEncoder {
            inner: encoder,
            _p: PhantomData,
        }
    }

    /// Creates a new [`BindGroup`].
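    ///
    /// A hedged sketch (assumes a compatible `layout` and `buffer` already exist and that
    /// binding 0 expects a buffer):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device, layout: &wgpu::BindGroupLayout, buffer: &wgpu::Buffer) {
    /// let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
    ///     label: Some("example bind group"),
    ///     layout,
    ///     entries: &[wgpu::BindGroupEntry {
    ///         binding: 0,
    ///         resource: buffer.as_entire_binding(),
    ///     }],
    /// });
    /// # let _ = bind_group;
    /// # }
    /// ```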
    #[must_use]
    pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
        let group = self.inner.create_bind_group(desc);
        BindGroup { inner: group }
    }

    /// Creates a [`BindGroupLayout`].
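    ///
    /// A sketch of a single-entry layout describing a read-only storage buffer visible to
    /// compute shaders (all values are illustrative):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
    ///     label: Some("example bind group layout"),
    ///     entries: &[wgpu::BindGroupLayoutEntry {
    ///         binding: 0,
    ///         visibility: wgpu::ShaderStages::COMPUTE,
    ///         ty: wgpu::BindingType::Buffer {
    ///             ty: wgpu::BufferBindingType::Storage { read_only: true },
    ///             has_dynamic_offset: false,
    ///             min_binding_size: None,
    ///         },
    ///         count: None,
    ///     }],
    /// });
    /// # let _ = layout;
    /// # }
    /// ```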
    #[must_use]
    pub fn create_bind_group_layout(
        &self,
        desc: &BindGroupLayoutDescriptor<'_>,
    ) -> BindGroupLayout {
        let layout = self.inner.create_bind_group_layout(desc);
        BindGroupLayout { inner: layout }
    }

    /// Creates a [`PipelineLayout`].
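    ///
    /// A short sketch (assumes a `bind_group_layout` created earlier; no push constants):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device, bind_group_layout: &wgpu::BindGroupLayout) {
    /// let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
    ///     label: Some("example pipeline layout"),
    ///     bind_group_layouts: &[bind_group_layout],
    ///     push_constant_ranges: &[],
    /// });
    /// # let _ = pipeline_layout;
    /// # }
    /// ```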
    #[must_use]
    pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
        let layout = self.inner.create_pipeline_layout(desc);
        PipelineLayout { inner: layout }
    }

    /// Creates a [`RenderPipeline`].
    #[must_use]
    pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_render_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a mesh shader based [`RenderPipeline`].
    #[must_use]
    pub fn create_mesh_pipeline(&self, desc: &MeshPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_mesh_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a [`ComputePipeline`].
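    ///
    /// A hedged sketch (assumes `module` contains a compute entry point named `main`; the
    /// layout is derived from the shader here, and no pipeline cache is used):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device, module: &wgpu::ShaderModule) {
    /// let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
    ///     label: Some("example compute pipeline"),
    ///     layout: None,
    ///     module,
    ///     entry_point: Some("main"),
    ///     compilation_options: Default::default(),
    ///     cache: None,
    /// });
    /// # let _ = pipeline;
    /// # }
    /// ```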
    #[must_use]
    pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
        let pipeline = self.inner.create_compute_pipeline(desc);
        ComputePipeline { inner: pipeline }
    }

    /// Creates a [`Buffer`].
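    ///
    /// A minimal sketch (size, usage, and label are illustrative):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///     label: Some("example buffer"),
    ///     size: 1024,
    ///     usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
    ///     mapped_at_creation: false,
    /// });
    /// # let _ = buffer;
    /// # }
    /// ```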
    #[must_use]
    pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = self.inner.create_buffer(desc);

        Buffer {
            inner: buffer,
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Texture`].
    ///
    /// `desc` specifies the general format of the texture.
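    ///
    /// A minimal sketch of a 2D RGBA texture (dimensions, format, and usages are illustrative):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let texture = device.create_texture(&wgpu::TextureDescriptor {
    ///     label: Some("example texture"),
    ///     size: wgpu::Extent3d {
    ///         width: 256,
    ///         height: 256,
    ///         depth_or_array_layers: 1,
    ///     },
    ///     mip_level_count: 1,
    ///     sample_count: 1,
    ///     dimension: wgpu::TextureDimension::D2,
    ///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
    ///     usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
    ///     view_formats: &[],
    /// });
    /// # let _ = texture;
    /// # }
    /// ```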
    #[must_use]
    pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
        let texture = self.inner.create_texture(desc);

        Texture {
            inner: texture,
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Texture`] from a wgpu-hal Texture.
    ///
    /// # Types
    ///
    /// The type of `A::Texture` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Texture")]
    #[doc = crate::hal_type_metal!("Texture")]
    #[doc = crate::hal_type_dx12!("Texture")]
    #[doc = crate::hal_type_gles!("Texture")]
    ///
    /// # Safety
    ///
    /// - `hal_texture` must be created from this device internal handle
    /// - `hal_texture` must be created respecting `desc`
    /// - `hal_texture` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_texture_from_hal<A: hal::Api>(
        &self,
        hal_texture: A::Texture,
        desc: &TextureDescriptor<'_>,
    ) -> Texture {
        let texture = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_texture_from_hal::<A>(hal_texture, core_device, desc)
        };
        Texture {
            inner: texture.into(),
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a new [`ExternalTexture`].
    #[must_use]
    pub fn create_external_texture(
        &self,
        desc: &ExternalTextureDescriptor<'_>,
        planes: &[&TextureView],
    ) -> ExternalTexture {
        let external_texture = self.inner.create_external_texture(desc, planes);

        ExternalTexture {
            inner: external_texture,
        }
    }

    /// Creates a [`Buffer`] from a wgpu-hal Buffer.
    ///
    /// # Types
    ///
    /// The type of `A::Buffer` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Safety
    ///
    /// - `hal_buffer` must be created from this device internal handle
    /// - `hal_buffer` must be created respecting `desc`
    /// - `hal_buffer` must be initialized
    /// - `hal_buffer` must not have zero size
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_buffer_from_hal<A: hal::Api>(
        &self,
        hal_buffer: A::Buffer,
        desc: &BufferDescriptor<'_>,
    ) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
        };

        Buffer {
            inner: buffer.into(),
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Sampler`].
    ///
    /// `desc` specifies the behavior of the sampler.
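    ///
    /// A short sketch relying on `SamplerDescriptor`'s `Default` implementation (the overridden
    /// fields are illustrative):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
    ///     label: Some("example sampler"),
    ///     mag_filter: wgpu::FilterMode::Linear,
    ///     min_filter: wgpu::FilterMode::Linear,
    ///     ..Default::default()
    /// });
    /// # let _ = sampler;
    /// # }
    /// ```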
    #[must_use]
    pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
        let sampler = self.inner.create_sampler(desc);
        Sampler { inner: sampler }
    }

    /// Creates a new [`QuerySet`].
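    ///
    /// A hedged sketch of a two-entry timestamp query set (timestamp queries require
    /// [`Features::TIMESTAMP_QUERY`] to be enabled on the device):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let query_set = device.create_query_set(&wgpu::QuerySetDescriptor {
    ///     label: Some("example timestamps"),
    ///     ty: wgpu::QueryType::Timestamp,
    ///     count: 2,
    /// });
    /// # let _ = query_set;
    /// # }
    /// ```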
    #[must_use]
    pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
        let query_set = self.inner.create_query_set(desc);
        QuerySet { inner: query_set }
    }

    /// Set a callback which will be called for all errors that are not handled in error scopes.
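    ///
    /// A small sketch (the handler shown just logs the error):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// use std::sync::Arc;
    ///
    /// device.on_uncaptured_error(Arc::new(|error: wgpu::Error| {
    ///     eprintln!("uncaptured wgpu error: {error}");
    /// }));
    /// # }
    /// ```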
    pub fn on_uncaptured_error(&self, handler: Arc<dyn UncapturedErrorHandler>) {
        self.inner.on_uncaptured_error(handler)
    }

    /// Push an error scope on this device's thread-local error scope
    /// stack. All operations on this device, or on resources created
    /// from this device, will have their errors captured by this scope
    /// until the scope is popped.
    ///
    /// Scopes must be popped in reverse order to their creation. If
    /// a guard is dropped without being `pop()`ped, the scope will be
    /// popped, and the captured errors will be dropped.
    ///
    /// Multiple error scopes may be active at one time, forming a stack.
    /// Each error will be reported to the inner-most scope that matches
    /// its filter.
    ///
    /// With the `std` feature enabled, this stack is **thread-local**.
    /// Without, this is **global** to all threads.
    ///
    /// ```rust
    /// # async move {
    /// # let device: wgpu::Device = unreachable!();
    /// let error_scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
    ///
    /// // ...
    /// // do work that may produce validation errors
    /// // ...
    ///
    /// // pop the error scope and get a future for the result
    /// let error_future = error_scope.pop();
    ///
    /// // await the future to get the error, if any
    /// let error = error_future.await;
    /// # };
    /// ```
    pub fn push_error_scope(&self, filter: ErrorFilter) -> ErrorScopeGuard {
        let index = self.inner.push_error_scope(filter);
        ErrorScopeGuard {
            device: self.inner.clone(),
            index,
            popped: false,
            _phantom: PhantomData,
        }
    }

    /// Starts a capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`StartFrameCapture(device, NULL)`][rd].
    /// - Xcode: Creates a capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should not be any other captures currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - Debuggers need to capture both the recording of the commands and the
    ///   submission of the commands to the GPU. Try to wrap all of your
    ///   gpu work in a capture.
    /// - If you encounter issues, try waiting for the GPU to finish all work
    ///   before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv417StartFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "start_renderdoc_capture")]
    #[doc(alias = "start_xcode_capture")]
    pub unsafe fn start_graphics_debugger_capture(&self) {
        unsafe { self.inner.start_graphics_debugger_capture() }
    }

    /// Stops the current capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`EndFrameCapture(device, NULL)`][rd].
    /// - Xcode: Stops the capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should be a capture currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - If you encounter issues, try submitting all work to the GPU and waiting
    ///   for that work to finish before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv415EndFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "stop_renderdoc_capture")]
    #[doc(alias = "stop_xcode_capture")]
    pub unsafe fn stop_graphics_debugger_capture(&self) {
        unsafe { self.inner.stop_graphics_debugger_capture() }
    }

    /// Query internal counters from the native backend for debugging purposes.
    ///
    /// Some backends may not set all counters, or may not set any counter at all.
    /// The `counters` cargo feature must be enabled for any counter to be set.
    ///
    /// If a counter is not set, it contains its default value (zero).
    #[must_use]
    pub fn get_internal_counters(&self) -> wgt::InternalCounters {
        self.inner.get_internal_counters()
    }

    /// Generate a GPU memory allocation report if the underlying backend supports it.
    ///
    /// Backends that do not support producing these reports return `None`. A backend may
    /// support them and still return `None` if it is not performing sub-allocation,
    /// for example as a workaround for driver issues.
    #[must_use]
    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
        self.inner.generate_allocator_report()
    }

    /// Get the [`wgpu_hal`] device from this `Device`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct as the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Device`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Device")]
    #[doc = crate::hal_type_metal!("Device")]
    #[doc = crate::hal_type_dx12!("Device")]
    #[doc = crate::hal_type_gles!("Device")]
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The device is not from the backend specified by `A`.
    /// - The device is from the `webgpu` or `custom` backend.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Device`]: hal::Api::Device
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Device> + WasmNotSendSync> {
        let device = self.inner.as_core_opt()?;
        unsafe { device.context.device_as_hal::<A>(device) }
    }

    /// Destroy this device.
    pub fn destroy(&self) {
        self.inner.destroy()
    }

    /// Set a DeviceLostCallback on this device.
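    ///
    /// A small sketch (the callback shown just logs the reason and message):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// device.set_device_lost_callback(|reason, message| {
    ///     eprintln!("device lost ({reason:?}): {message}");
    /// });
    /// # }
    /// ```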
    pub fn set_device_lost_callback(
        &self,
        callback: impl Fn(DeviceLostReason, String) + Send + 'static,
    ) {
        self.inner.set_device_lost_callback(Box::new(callback))
    }

    /// Create a [`PipelineCache`] with initial data
    ///
    /// This can be passed to [`Device::create_compute_pipeline`]
    /// and [`Device::create_render_pipeline`] either to accelerate these
    /// calls or to add their results to the cache.
    ///
    /// # Safety
    ///
    /// If the `data` field of `desc` is set, it must have previously been returned from a call
    /// to [`PipelineCache::get_data`][^saving]. This `data` will only be used if it came
    /// from an adapter with the same [`util::pipeline_cache_key`].
    /// This *is* compatible across wgpu versions, as any data format change will
    /// be accounted for.
    ///
    /// It is *not* supported to bring caches from previous direct uses of backend APIs
    /// into this method.
    ///
    /// # Errors
    ///
    /// Returns an error value if:
    ///  * the [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) feature is not enabled
    ///  * this device is invalid; or
    ///  * the device is out of memory
    ///
    /// This method also returns an error value if:
    ///  * The `fallback` field on `desc` is false; and
    ///  * the `data` provided would not be used[^data_not_used]
    ///
    /// If an error value is used in subsequent calls, default caching will be used.
    ///
    /// [^saving]: We do recognise that saving this data to disk means this condition
    /// is impossible to fully prove. Consider the risks for your own application in this case.
    ///
    /// [^data_not_used]: This data may not be used if: the data was produced by a prior
    /// version of wgpu; was created for an incompatible adapter; or there was a GPU driver
    /// update. In some cases, the data might not be used even though a real value is returned;
    /// this is left to the discretion of GPU drivers.
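    ///
    /// A hedged sketch creating an empty cache with no prior data (requires
    /// [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE); `fallback: true` lets the backend
    /// fall back to default caching):
    ///
    /// ```
    /// # fn example(device: &wgpu::Device) {
    /// let cache = unsafe {
    ///     device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
    ///         label: Some("example pipeline cache"),
    ///         data: None,
    ///         fallback: true,
    ///     })
    /// };
    /// # let _ = cache;
    /// # }
    /// ```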
    #[must_use]
    pub unsafe fn create_pipeline_cache(
        &self,
        desc: &PipelineCacheDescriptor<'_>,
    ) -> PipelineCache {
        let cache = unsafe { self.inner.create_pipeline_cache(desc) };
        PipelineCache { inner: cache }
    }
}

/// [`Features::EXPERIMENTAL_RAY_QUERY`] must be enabled on the device in order to call these functions.
impl Device {
    /// Create a bottom level acceleration structure, used inside a top level acceleration structure for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    /// - `sizes`: Size descriptor limiting what can be built into the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    /// If `sizes` is [`BlasGeometrySizeDescriptors::Triangles`], then the following must be satisfied:
    /// - For every geometry descriptor (called `geo_desc` for these purposes) of `sizes.descriptors`, the following must be satisfied:
    ///     - `geo_desc.vertex_format` must be within allowed formats (allowed formats for a given feature set
    ///       may be queried with [`Features::allowed_vertex_formats_for_blas`]).
    ///     - Both or neither of `geo_desc.index_format` and `geo_desc.index_count` must be provided.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    /// [`Features::allowed_vertex_formats_for_blas`]: wgt::Features::allowed_vertex_formats_for_blas
    #[must_use]
    pub fn create_blas(
        &self,
        desc: &CreateBlasDescriptor<'_>,
        sizes: BlasGeometrySizeDescriptors,
    ) -> Blas {
        let (handle, blas) = self.inner.create_blas(desc, sizes);

        Blas {
            inner: blas,
            handle,
        }
    }

    /// Create a top level acceleration structure, used for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    #[must_use]
    pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
        let tlas = self.inner.create_tlas(desc);

        Tlas {
            inner: tlas,
            instances: vec![None; desc.max_instances as usize],
            lowest_unmodified: 0,
        }
    }
}

/// Requesting a device from an [`Adapter`] failed.
#[derive(Clone, Debug)]
pub struct RequestDeviceError {
    pub(crate) inner: RequestDeviceErrorKind,
}
#[derive(Clone, Debug)]
pub(crate) enum RequestDeviceErrorKind {
    /// Error from [`wgpu_core`].
    // must match dependency cfg
    #[cfg(wgpu_core)]
    Core(wgc::instance::RequestDeviceError),

    /// Error from web API that was called by `wgpu` to request a device.
    ///
    /// (This is currently never used by the webgl backend, but it could be.)
    #[cfg(webgpu)]
    WebGpu(String),
}

static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);

impl fmt::Display for RequestDeviceError {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.fmt(_f),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(error) => {
                write!(_f, "{error}")
            }
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

impl error::Error for RequestDeviceError {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.source(),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(_) => None,
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

#[cfg(wgpu_core)]
impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
    fn from(error: wgc::instance::RequestDeviceError) -> Self {
        Self {
            inner: RequestDeviceErrorKind::Core(error),
        }
    }
}

/// The callback of [`Device::on_uncaptured_error()`].
///
/// It must be a function with this signature.
pub trait UncapturedErrorHandler: Fn(Error) + Send + Sync + 'static {}
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + Sync + 'static {}

/// Kinds of [`Error`]s a [`Device::push_error_scope()`] may be configured to catch.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
pub enum ErrorFilter {
    /// Catch only out-of-memory errors.
    OutOfMemory,
    /// Catch only validation errors.
    Validation,
    /// Catch only internal errors.
    Internal,
}
static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);

/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(send_sync)]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + Send + Sync + 'static>;
/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(not(send_sync))]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + 'static>;

/// Errors resulting from usage of GPU APIs.
///
/// By default, errors translate into panics. Depending on the backend and circumstances,
/// errors may occur synchronously or asynchronously. When errors need to be handled, use
/// [`Device::push_error_scope()`] or [`Device::on_uncaptured_error()`].
#[derive(Debug)]
pub enum Error {
    /// Out of memory.
    OutOfMemory {
        /// Lower level source of the error.
        source: ErrorSource,
    },
    /// Validation error, signifying a bug in code or data provided to `wgpu`.
    Validation {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the validation error.
        description: String,
    },
    /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
    ///
    /// These could be due to internal implementation or system limits being reached.
    Internal {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the internal GPU error.
        description: String,
    },
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Error: Send, Sync);

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::OutOfMemory { source } => Some(source.as_ref()),
            Error::Validation { source, .. } => Some(source.as_ref()),
            Error::Internal { source, .. } => Some(source.as_ref()),
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
            Error::Validation { description, .. } => f.write_str(description),
            Error::Internal { description, .. } => f.write_str(description),
        }
    }
}

/// Guard for an error scope pushed with [`Device::push_error_scope()`].
///
/// Call [`pop()`] to pop the scope and get a future for the result. If
/// the guard is dropped without being popped explicitly, the scope will still be popped,
/// and the captured errors will be dropped.
///
/// This guard is neither `Send` nor `Sync`, as error scopes are handled
/// on a per-thread basis when the `std` feature is enabled.
///
/// [`pop()`]: ErrorScopeGuard::pop
#[must_use = "Error scopes must be explicitly popped to retrieve errors they catch"]
pub struct ErrorScopeGuard {
    device: dispatch::DispatchDevice,
    index: u32,
    popped: bool,
    // Ensure the guard is !Send and !Sync
    _phantom: PhantomData<*mut ()>,
}

static_assertions::assert_not_impl_any!(ErrorScopeGuard: Send, Sync);

impl ErrorScopeGuard {
    /// Pops the error scope.
    ///
    /// Returns a future which resolves to the error captured by this scope, if any.
    /// The pop takes effect immediately; the future does not need to be awaited before doing work that is outside of this error scope.
    pub fn pop(mut self) -> impl Future<Output = Option<Error>> + WasmNotSend {
        self.popped = true;
        self.device.pop_error_scope(self.index)
    }
}

impl Drop for ErrorScopeGuard {
    fn drop(&mut self) {
        if !self.popped {
            drop(self.device.pop_error_scope(self.index));
        }
    }
}