wgpu/api/device.rs

use alloc::{boxed::Box, string::String, sync::Arc, vec};
#[cfg(wgpu_core)]
use core::ops::Deref;
use core::{error, fmt, future::Future};

use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
use crate::api::tlas::{CreateTlasDescriptor, Tlas};
use crate::util::Mutex;
use crate::*;

/// Open connection to a graphics and/or compute device.
///
/// Responsible for the creation of most rendering and compute resources.
/// These are then used in commands, which are submitted to a [`Queue`].
///
/// A device may be requested from an adapter with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
#[derive(Debug, Clone)]
pub struct Device {
    pub(crate) inner: dispatch::DispatchDevice,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Device: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);

/// Describes a [`Device`].
///
/// For use with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);

impl Device {
    #[cfg(custom)]
    /// Returns the custom implementation of `Device` (if the backend is custom and the inner type is `T`).
    pub fn as_custom<T: custom::DeviceInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }

    #[cfg(custom)]
    /// Creates a `Device` from a custom implementation.
    pub fn from_custom<T: custom::DeviceInterface>(device: T) -> Self {
        Self {
            inner: dispatch::DispatchDevice::custom(device),
        }
    }

    /// Constructs a stub device for testing using [`Backend::Noop`].
    ///
    /// This is a convenience function which avoids the configuration, `async`, and fallibility
    /// aspects of constructing a device through `Instance`.
    #[cfg(feature = "noop")]
    pub fn noop(desc: &DeviceDescriptor<'_>) -> (Device, Queue) {
        use core::future::Future as _;
        use core::pin::pin;
        use core::task;
        let ctx = &mut task::Context::from_waker(task::Waker::noop());

        let instance = Instance::new(&InstanceDescriptor {
            backends: Backends::NOOP,
            backend_options: BackendOptions {
                noop: NoopBackendOptions { enable: true },
                ..Default::default()
            },
            ..Default::default()
        });

        // Both of these futures are trivial and should complete instantaneously,
        // so we do not need an executor and can just poll them once.
        let task::Poll::Ready(Ok(adapter)) =
            pin!(instance.request_adapter(&RequestAdapterOptions::default())).poll(ctx)
        else {
            unreachable!()
        };
        let task::Poll::Ready(Ok(device_and_queue)) = pin!(adapter.request_device(desc)).poll(ctx)
        else {
            unreachable!()
        };
        device_and_queue
    }

    /// Check for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
    ///
    /// The returned [`PollStatus`] reports whether the queue is empty or whether there are
    /// queue submissions still in flight. (Note that, unless access to the [`Queue`] is
    /// coordinated somehow, this information could be out of date by the time
    /// the caller receives it. `Queue`s can be shared between threads, so
    /// other threads could submit new work at any time.)
    ///
    /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
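    ///
    /// A minimal sketch (with an illustrative helper name) of blocking until all submitted work
    /// has completed, assuming a `device` obtained elsewhere:
    ///
    /// ```no_run
    /// fn wait_for_gpu(device: &wgpu::Device) {
    ///     // Block until all previously submitted GPU work has finished.
    ///     device.poll(wgpu::PollType::Wait).expect("failed to wait for the GPU");
    /// }
    /// ```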
    pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
        self.inner.poll(poll_type.map_index(|s| s.index))
    }

    /// The features which can be used on this device.
    ///
    /// No additional features can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn features(&self) -> Features {
        self.inner.features()
    }

    /// The limits which can be used on this device.
    ///
    /// No better limits can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn limits(&self) -> Limits {
        self.inner.limits()
    }

    /// Creates a shader module.
    ///
    /// <div class="warning">
    // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
    // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
    ///
    /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
    /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
    /// However, on some build profiles and platforms, the default stack size for a thread may be
    /// exceeded before this limit is reached during parsing. Callers should ensure that there is
    /// enough stack space for this, particularly if calls to this method are exposed to user
    /// input.
    ///
    /// </div>
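    ///
    /// A minimal sketch (illustrative helper, trivial WGSL source), assuming the default `wgsl`
    /// feature is enabled:
    ///
    /// ```no_run
    /// fn load_shader(device: &wgpu::Device) -> wgpu::ShaderModule {
    ///     device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///         label: Some("example shader"),
    ///         // A trivial compute entry point, just to illustrate the call.
    ///         source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
    ///     })
    /// }
    /// ```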
    #[must_use]
    pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
        let module = self
            .inner
            .create_shader_module(desc, wgt::ShaderRuntimeChecks::checked());
        ShaderModule { inner: module }
    }

    /// Deprecated: Use [`create_shader_module_trusted`][csmt] instead.
    ///
    /// # Safety
    ///
    /// See [`create_shader_module_trusted`][csmt].
    ///
    /// [csmt]: Self::create_shader_module_trusted
    #[deprecated(
        since = "24.0.0",
        note = "Use `Device::create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())` instead."
    )]
    #[must_use]
    pub unsafe fn create_shader_module_unchecked(
        &self,
        desc: ShaderModuleDescriptor<'_>,
    ) -> ShaderModule {
        unsafe { self.create_shader_module_trusted(desc, crate::ShaderRuntimeChecks::unchecked()) }
    }

    /// Creates a shader module with flags to dictate runtime checks.
    ///
    /// When running on WebGPU, this will merely call [`create_shader_module`][csm].
    ///
    /// # Safety
    ///
    /// In contrast with [`create_shader_module`][csm], this function
    /// creates a shader module with user-customizable runtime checks, which allows shaders to
    /// perform operations that can lead to undefined behavior, such as indexing out of bounds.
    /// It is therefore the caller's responsibility to pass a shader which does not perform any of
    /// these operations.
    ///
    /// See the documentation for [`ShaderRuntimeChecks`] for more information about specific checks.
    ///
    /// [csm]: Self::create_shader_module
    #[must_use]
    pub unsafe fn create_shader_module_trusted(
        &self,
        desc: ShaderModuleDescriptor<'_>,
        runtime_checks: crate::ShaderRuntimeChecks,
    ) -> ShaderModule {
        let module = self.inner.create_shader_module(desc, runtime_checks);
        ShaderModule { inner: module }
    }

    /// Creates a shader module which will bypass wgpu's shader tooling and validation and be used directly by the backend.
    ///
    /// # Safety
    ///
    /// This function passes data to the backend as-is and can potentially result in a
    /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid.
    #[must_use]
    pub unsafe fn create_shader_module_passthrough(
        &self,
        desc: ShaderModuleDescriptorPassthrough<'_>,
    ) -> ShaderModule {
        let module = unsafe { self.inner.create_shader_module_passthrough(&desc) };
        ShaderModule { inner: module }
    }

    /// Creates an empty [`CommandEncoder`].
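    ///
    /// A minimal sketch (illustrative helper) of recording and submitting a command buffer,
    /// assuming a `device` and `queue` obtained elsewhere:
    ///
    /// ```no_run
    /// fn submit_empty_work(device: &wgpu::Device, queue: &wgpu::Queue) {
    ///     let encoder =
    ///         device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    ///     // Render/compute passes and copies would normally be recorded here.
    ///     queue.submit([encoder.finish()]);
    /// }
    /// ```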
    #[must_use]
    pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
        let encoder = self.inner.create_command_encoder(desc);
        // Each encoder starts with its own deferred-action store that travels
        // with the CommandBuffer produced by finish().
        CommandEncoder {
            inner: encoder,
            actions: Default::default(),
        }
    }

    /// Creates an empty [`RenderBundleEncoder`].
    #[must_use]
    pub fn create_render_bundle_encoder<'a>(
        &self,
        desc: &RenderBundleEncoderDescriptor<'_>,
    ) -> RenderBundleEncoder<'a> {
        let encoder = self.inner.create_render_bundle_encoder(desc);
        RenderBundleEncoder {
            inner: encoder,
            _p: core::marker::PhantomData,
        }
    }

    /// Creates a new [`BindGroup`].
    #[must_use]
    pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
        let group = self.inner.create_bind_group(desc);
        BindGroup { inner: group }
    }

    /// Creates a [`BindGroupLayout`].
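    ///
    /// A minimal sketch (illustrative helper) of a layout with a single uniform buffer binding:
    ///
    /// ```no_run
    /// fn uniform_layout(device: &wgpu::Device) -> wgpu::BindGroupLayout {
    ///     device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
    ///         label: Some("example bind group layout"),
    ///         entries: &[wgpu::BindGroupLayoutEntry {
    ///             binding: 0,
    ///             visibility: wgpu::ShaderStages::COMPUTE,
    ///             ty: wgpu::BindingType::Buffer {
    ///                 ty: wgpu::BufferBindingType::Uniform,
    ///                 has_dynamic_offset: false,
    ///                 min_binding_size: None,
    ///             },
    ///             count: None,
    ///         }],
    ///     })
    /// }
    /// ```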
    #[must_use]
    pub fn create_bind_group_layout(
        &self,
        desc: &BindGroupLayoutDescriptor<'_>,
    ) -> BindGroupLayout {
        let layout = self.inner.create_bind_group_layout(desc);
        BindGroupLayout { inner: layout }
    }

    /// Creates a [`PipelineLayout`].
    #[must_use]
    pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
        let layout = self.inner.create_pipeline_layout(desc);
        PipelineLayout { inner: layout }
    }

    /// Creates a [`RenderPipeline`].
    #[must_use]
    pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_render_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a mesh shader based [`RenderPipeline`].
    #[must_use]
    pub fn create_mesh_pipeline(&self, desc: &MeshPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_mesh_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a [`ComputePipeline`].
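    ///
    /// A minimal sketch (illustrative helper; the exact descriptor fields shown follow recent
    /// wgpu releases), assuming a compiled [`ShaderModule`] with a single compute entry point:
    ///
    /// ```no_run
    /// fn build_compute_pipeline(
    ///     device: &wgpu::Device,
    ///     module: &wgpu::ShaderModule,
    /// ) -> wgpu::ComputePipeline {
    ///     device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
    ///         label: Some("example compute pipeline"),
    ///         // `None` derives the pipeline layout from the shader.
    ///         layout: None,
    ///         module,
    ///         // `None` selects the entry point when the module has exactly one compute entry point.
    ///         entry_point: None,
    ///         compilation_options: Default::default(),
    ///         cache: None,
    ///     })
    /// }
    /// ```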
    #[must_use]
    pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
        let pipeline = self.inner.create_compute_pipeline(desc);
        ComputePipeline { inner: pipeline }
    }

    /// Creates a [`Buffer`].
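    ///
    /// A minimal sketch (illustrative helper) of a small readback buffer:
    ///
    /// ```no_run
    /// fn readback_buffer(device: &wgpu::Device) -> wgpu::Buffer {
    ///     device.create_buffer(&wgpu::BufferDescriptor {
    ///         label: Some("readback buffer"),
    ///         size: 1024,
    ///         usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
    ///         mapped_at_creation: false,
    ///     })
    /// }
    /// ```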
    #[must_use]
    pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = self.inner.create_buffer(desc);

        Buffer {
            inner: buffer,
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Texture`].
    ///
    /// `desc` specifies the general format of the texture.
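    ///
    /// A minimal sketch (illustrative helper) of a small 2D render target:
    ///
    /// ```no_run
    /// fn render_target(device: &wgpu::Device) -> wgpu::Texture {
    ///     device.create_texture(&wgpu::TextureDescriptor {
    ///         label: Some("render target"),
    ///         size: wgpu::Extent3d {
    ///             width: 256,
    ///             height: 256,
    ///             depth_or_array_layers: 1,
    ///         },
    ///         mip_level_count: 1,
    ///         sample_count: 1,
    ///         dimension: wgpu::TextureDimension::D2,
    ///         format: wgpu::TextureFormat::Rgba8UnormSrgb,
    ///         usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
    ///         view_formats: &[],
    ///     })
    /// }
    /// ```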
    #[must_use]
    pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
        let texture = self.inner.create_texture(desc);

        Texture {
            inner: texture,
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Texture`] from a wgpu-hal Texture.
    ///
    /// # Types
    ///
    /// The type of `A::Texture` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Texture")]
    #[doc = crate::hal_type_metal!("Texture")]
    #[doc = crate::hal_type_dx12!("Texture")]
    #[doc = crate::hal_type_gles!("Texture")]
    ///
    /// # Safety
    ///
    /// - `hal_texture` must be created from this device internal handle
    /// - `hal_texture` must be created respecting `desc`
    /// - `hal_texture` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_texture_from_hal<A: hal::Api>(
        &self,
        hal_texture: A::Texture,
        desc: &TextureDescriptor<'_>,
    ) -> Texture {
        let texture = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_texture_from_hal::<A>(hal_texture, core_device, desc)
        };
        Texture {
            inner: texture.into(),
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a new [`ExternalTexture`].
    #[must_use]
    pub fn create_external_texture(
        &self,
        desc: &ExternalTextureDescriptor<'_>,
        planes: &[&TextureView],
    ) -> ExternalTexture {
        let external_texture = self.inner.create_external_texture(desc, planes);

        ExternalTexture {
            inner: external_texture,
        }
    }

    /// Creates a [`Buffer`] from a wgpu-hal Buffer.
    ///
    /// # Types
    ///
    /// The type of `A::Buffer` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Safety
    ///
    /// - `hal_buffer` must be created from this device internal handle
    /// - `hal_buffer` must be created respecting `desc`
    /// - `hal_buffer` must be initialized
    /// - `hal_buffer` must not have zero size
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_buffer_from_hal<A: hal::Api>(
        &self,
        hal_buffer: A::Buffer,
        desc: &BufferDescriptor<'_>,
    ) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
        };

        Buffer {
            inner: buffer.into(),
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Sampler`].
    ///
    /// `desc` specifies the behavior of the sampler.
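    ///
    /// A minimal sketch (illustrative helper) of a bilinear sampler, leaving the remaining
    /// fields at their defaults:
    ///
    /// ```no_run
    /// fn linear_sampler(device: &wgpu::Device) -> wgpu::Sampler {
    ///     device.create_sampler(&wgpu::SamplerDescriptor {
    ///         label: Some("linear sampler"),
    ///         mag_filter: wgpu::FilterMode::Linear,
    ///         min_filter: wgpu::FilterMode::Linear,
    ///         ..Default::default()
    ///     })
    /// }
    /// ```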
    #[must_use]
    pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
        let sampler = self.inner.create_sampler(desc);
        Sampler { inner: sampler }
    }

    /// Creates a new [`QuerySet`].
    #[must_use]
    pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
        let query_set = self.inner.create_query_set(desc);
        QuerySet { inner: query_set }
    }

    /// Set a callback which will be called for all errors that are not handled in error scopes.
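    ///
    /// A minimal sketch (illustrative helper) that logs instead of panicking:
    ///
    /// ```no_run
    /// fn install_error_hook(device: &wgpu::Device) {
    ///     device.on_uncaptured_error(std::sync::Arc::new(|error: wgpu::Error| {
    ///         eprintln!("uncaptured wgpu error: {error}");
    ///     }));
    /// }
    /// ```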
    pub fn on_uncaptured_error(&self, handler: Arc<dyn UncapturedErrorHandler>) {
        self.inner.on_uncaptured_error(handler)
    }

    /// Push an error scope.
    pub fn push_error_scope(&self, filter: ErrorFilter) {
        self.inner.push_error_scope(filter)
    }

    /// Pop an error scope.
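    ///
    /// A minimal sketch (illustrative helper) of catching validation errors around a block of
    /// work, assuming an async context in which the returned future can be awaited:
    ///
    /// ```no_run
    /// async fn checked_work(device: &wgpu::Device) {
    ///     device.push_error_scope(wgpu::ErrorFilter::Validation);
    ///     // ... create resources or record work here ...
    ///     if let Some(error) = device.pop_error_scope().await {
    ///         eprintln!("validation error: {error}");
    ///     }
    /// }
    /// ```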
    pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
        self.inner.pop_error_scope()
    }

    /// Starts a capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`StartFrameCapture(device, NULL)`][rd].
    /// - Xcode: Creates a capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should not be any other captures currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - Debuggers need to capture both the recording of the commands and the
    ///   submission of the commands to the GPU. Try to wrap all of your
    ///   GPU work in a capture.
    /// - If you encounter issues, try waiting for the GPU to finish all work
    ///   before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv417StartFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "start_renderdoc_capture")]
    #[doc(alias = "start_xcode_capture")]
    pub unsafe fn start_graphics_debugger_capture(&self) {
        unsafe { self.inner.start_graphics_debugger_capture() }
    }

    /// Stops the current capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`EndFrameCapture(device, NULL)`][rd].
    /// - Xcode: Stops the capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should be a capture currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - If you encounter issues, try submitting all work to the GPU and waiting
    ///   for that work to finish before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv415EndFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "stop_renderdoc_capture")]
    #[doc(alias = "stop_xcode_capture")]
    pub unsafe fn stop_graphics_debugger_capture(&self) {
        unsafe { self.inner.stop_graphics_debugger_capture() }
    }

    /// Query internal counters from the native backend for debugging purposes.
    ///
    /// Some backends may not set all counters, or may not set any counter at all.
    /// The `counters` cargo feature must be enabled for any counter to be set.
    ///
    /// If a counter is not set, it contains its default value (zero).
    #[must_use]
    pub fn get_internal_counters(&self) -> wgt::InternalCounters {
        self.inner.get_internal_counters()
    }

    /// Generate a GPU memory allocation report if the underlying backend supports it.
    ///
    /// Backends that do not support producing these reports return `None`. A backend may
    /// support it and still return `None` if it is not performing sub-allocation,
    /// for example as a workaround for driver issues.
    #[must_use]
    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
        self.inner.generate_allocator_report()
    }

    /// Get the [`wgpu_hal`] device from this `Device`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Device`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Device")]
    #[doc = crate::hal_type_metal!("Device")]
    #[doc = crate::hal_type_dx12!("Device")]
    #[doc = crate::hal_type_gles!("Device")]
    ///
    /// # Errors
    ///
    /// This method will return None if:
    /// - The device is not from the backend specified by `A`.
    /// - The device is from the `webgpu` or `custom` backend.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Device`]: hal::Api::Device
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Device> + WasmNotSendSync> {
        let device = self.inner.as_core_opt()?;
        unsafe { device.context.device_as_hal::<A>(device) }
    }

    /// Destroy this device.
    pub fn destroy(&self) {
        self.inner.destroy()
    }

    /// Set a DeviceLostCallback on this device.
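    ///
    /// A minimal sketch (illustrative helper) that simply logs the reason:
    ///
    /// ```no_run
    /// fn watch_for_device_loss(device: &wgpu::Device) {
    ///     device.set_device_lost_callback(|reason, message| {
    ///         eprintln!("device lost ({reason:?}): {message}");
    ///     });
    /// }
    /// ```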
    pub fn set_device_lost_callback(
        &self,
        callback: impl Fn(DeviceLostReason, String) + Send + 'static,
    ) {
        self.inner.set_device_lost_callback(Box::new(callback))
    }

    /// Create a [`PipelineCache`] with initial data
    ///
    /// This can be passed to [`Device::create_compute_pipeline`]
    /// and [`Device::create_render_pipeline`] to either accelerate these
    /// or add the cache results from those.
    ///
    /// # Safety
    ///
    /// If the `data` field of `desc` is set, it must have previously been returned from a call
    /// to [`PipelineCache::get_data`][^saving]. This `data` will only be used if it came
    /// from an adapter with the same [`util::pipeline_cache_key`].
    /// This *is* compatible across wgpu versions, as any data format change will
    /// be accounted for.
    ///
    /// It is *not* supported to bring caches from previous direct uses of backend APIs
    /// into this method.
    ///
    /// # Errors
    ///
    /// Returns an error value if:
    ///  * the [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) feature is not enabled
    ///  * this device is invalid; or
    ///  * the device is out of memory
    ///
    /// This method also returns an error value if:
    ///  * The `fallback` field on `desc` is false; and
    ///  * the `data` provided would not be used[^data_not_used]
    ///
    /// If an error value is used in subsequent calls, default caching will be used.
    ///
    /// [^saving]: We do recognise that saving this data to disk means this condition
    /// is impossible to fully prove. Consider the risks for your own application in this case.
    ///
    /// [^data_not_used]: This data may not be used if: the data was produced by a prior
    /// version of wgpu; or was created for an incompatible adapter; or there was a GPU driver
    /// update. In some cases, the data might not be used and a real value is still returned;
    /// this is left to the discretion of GPU drivers.
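    ///
    /// A minimal sketch (illustrative helper; descriptor fields follow recent wgpu releases),
    /// assuming [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) is enabled and the caller
    /// loads any previously saved cache bytes:
    ///
    /// ```no_run
    /// fn load_cache(device: &wgpu::Device, saved: Option<&[u8]>) -> wgpu::PipelineCache {
    ///     // SAFETY: `saved`, if present, came from an earlier `PipelineCache::get_data` call
    ///     // for a compatible adapter, as described above.
    ///     unsafe {
    ///         device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
    ///             label: Some("pipeline cache"),
    ///             data: saved,
    ///             fallback: true,
    ///         })
    ///     }
    /// }
    /// ```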
    #[must_use]
    pub unsafe fn create_pipeline_cache(
        &self,
        desc: &PipelineCacheDescriptor<'_>,
    ) -> PipelineCache {
        let cache = unsafe { self.inner.create_pipeline_cache(desc) };
        PipelineCache { inner: cache }
    }
}

/// [`Features::EXPERIMENTAL_RAY_QUERY`] must be enabled on the device in order to call these functions.
impl Device {
    /// Create a bottom level acceleration structure, used inside a top level acceleration structure for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    /// - `sizes`: Size descriptor limiting what can be built into the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    /// If `sizes` is [`BlasGeometrySizeDescriptors::Triangles`], then the following must be satisfied:
    /// - For every geometry descriptor (referred to here as `geo_desc`) of `sizes.descriptors`, the following must be satisfied:
    ///     - `geo_desc.vertex_format` must be within allowed formats (allowed formats for a given feature set
    ///       may be queried with [`Features::allowed_vertex_formats_for_blas`]).
    ///     - Both or neither of `geo_desc.index_format` and `geo_desc.index_count` must be provided.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    /// [`Features::allowed_vertex_formats_for_blas`]: wgt::Features::allowed_vertex_formats_for_blas
    #[must_use]
    pub fn create_blas(
        &self,
        desc: &CreateBlasDescriptor<'_>,
        sizes: BlasGeometrySizeDescriptors,
    ) -> Blas {
        let (handle, blas) = self.inner.create_blas(desc, sizes);

        Blas {
            inner: blas,
            handle,
        }
    }

    /// Create a top level acceleration structure, used for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    #[must_use]
    pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
        let tlas = self.inner.create_tlas(desc);

        Tlas {
            inner: tlas,
            instances: vec![None; desc.max_instances as usize],
            lowest_unmodified: 0,
        }
    }
}

/// Requesting a device from an [`Adapter`] failed.
#[derive(Clone, Debug)]
pub struct RequestDeviceError {
    pub(crate) inner: RequestDeviceErrorKind,
}
#[derive(Clone, Debug)]
pub(crate) enum RequestDeviceErrorKind {
    /// Error from [`wgpu_core`].
    // must match dependency cfg
    #[cfg(wgpu_core)]
    Core(wgc::instance::RequestDeviceError),

    /// Error from web API that was called by `wgpu` to request a device.
    ///
    /// (This is currently never used by the webgl backend, but it could be.)
    #[cfg(webgpu)]
    WebGpu(String),
}

static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);

impl fmt::Display for RequestDeviceError {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.fmt(_f),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(error) => {
                write!(_f, "{error}")
            }
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

impl error::Error for RequestDeviceError {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.source(),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(_) => None,
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

#[cfg(wgpu_core)]
impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
    fn from(error: wgc::instance::RequestDeviceError) -> Self {
        Self {
            inner: RequestDeviceErrorKind::Core(error),
        }
    }
}

/// The callback of [`Device::on_uncaptured_error()`].
///
/// It must be a function with this signature.
pub trait UncapturedErrorHandler: Fn(Error) + Send + Sync + 'static {}
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + Sync + 'static {}

/// Kinds of [`Error`]s a [`Device::push_error_scope()`] may be configured to catch.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
pub enum ErrorFilter {
    /// Catch only out-of-memory errors.
    OutOfMemory,
    /// Catch only validation errors.
    Validation,
    /// Catch only internal errors.
    Internal,
}
static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);

/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(send_sync)]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + Send + Sync + 'static>;
/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(not(send_sync))]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + 'static>;

/// Errors resulting from usage of GPU APIs.
///
/// By default, errors translate into panics. Depending on the backend and circumstances,
/// errors may occur synchronously or asynchronously. When errors need to be handled, use
/// [`Device::push_error_scope()`] or [`Device::on_uncaptured_error()`].
#[derive(Debug)]
pub enum Error {
    /// Out of memory.
    OutOfMemory {
        /// Lower level source of the error.
        source: ErrorSource,
    },
    /// Validation error, signifying a bug in code or data provided to `wgpu`.
    Validation {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the validation error.
        description: String,
    },
    /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
    ///
    /// These could be due to internal implementation or system limits being reached.
    Internal {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the internal GPU error.
        description: String,
    },
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Error: Send, Sync);

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::OutOfMemory { source } => Some(source.as_ref()),
            Error::Validation { source, .. } => Some(source.as_ref()),
            Error::Internal { source, .. } => Some(source.as_ref()),
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
            Error::Validation { description, .. } => f.write_str(description),
            Error::Internal { description, .. } => f.write_str(description),
        }
    }
}