wgpu/api/device.rs
use alloc::{boxed::Box, string::String, sync::Arc, vec};
#[cfg(wgpu_core)]
use core::ops::Deref;
use core::{error, fmt, future::Future};

use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
use crate::api::tlas::{CreateTlasDescriptor, Tlas};
use crate::util::Mutex;
use crate::*;

/// Open connection to a graphics and/or compute device.
///
/// Responsible for the creation of most rendering and compute resources.
/// These are then used in commands, which are submitted to a [`Queue`].
///
/// A device may be requested from an adapter with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
#[derive(Debug, Clone)]
pub struct Device {
    pub(crate) inner: dispatch::DispatchDevice,
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Device: Send, Sync);

crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);

/// Describes a [`Device`].
///
/// For use with [`Adapter::request_device`].
///
/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);

impl Device {
    #[cfg(custom)]
    /// Returns the custom implementation of the device, if this is a custom
    /// backend and its inner type is `T`.
    pub fn as_custom<T: custom::DeviceInterface>(&self) -> Option<&T> {
        self.inner.as_custom()
    }

    #[cfg(custom)]
    /// Creates a `Device` from a custom implementation.
    pub fn from_custom<T: custom::DeviceInterface>(device: T) -> Self {
        Self {
            inner: dispatch::DispatchDevice::custom(device),
        }
    }

    /// Constructs a stub device for testing using [`Backend::Noop`].
    ///
    /// This is a convenience function which avoids the configuration, `async`, and fallibility
    /// aspects of constructing a device through `Instance`.
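    ///
    /// A minimal usage sketch (not compiled as a doctest here; it assumes the `noop`
    /// feature is enabled and that `DeviceDescriptor::default()` is acceptable for the test):
    ///
    /// ```ignore
    /// // Create a do-nothing device/queue pair for unit tests.
    /// let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
    /// let _ = (device, queue);
    /// ```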
    #[cfg(feature = "noop")]
    pub fn noop(desc: &DeviceDescriptor<'_>) -> (Device, Queue) {
        use core::future::Future as _;
        use core::pin::pin;
        use core::task;
        let ctx = &mut task::Context::from_waker(waker::noop_waker_ref());

        let instance = Instance::new(&InstanceDescriptor {
            backends: Backends::NOOP,
            backend_options: BackendOptions {
                noop: NoopBackendOptions { enable: true },
                ..Default::default()
            },
            ..Default::default()
        });

        // Both of these futures are trivial and should complete instantaneously,
        // so we do not need an executor and can just poll them once.
        let task::Poll::Ready(Ok(adapter)) =
            pin!(instance.request_adapter(&RequestAdapterOptions::default())).poll(ctx)
        else {
            unreachable!()
        };
        let task::Poll::Ready(Ok(device_and_queue)) = pin!(adapter.request_device(desc)).poll(ctx)
        else {
            unreachable!()
        };
        device_and_queue
    }

    /// Check for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
    ///
    /// The returned [`PollStatus`] reports whether the queue is empty or whether
    /// submissions are still in flight. (Note that, unless access to the [`Queue`] is
    /// coordinated somehow, this information could be out of date by the time
    /// the caller receives it. `Queue`s can be shared between threads, so
    /// other threads could submit new work at any time.)
    ///
    /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
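    ///
    /// A minimal sketch of blocking until previously submitted work has finished
    /// (assumes `device` is an existing [`Device`]):
    ///
    /// ```ignore
    /// // Block until the GPU has processed everything submitted so far.
    /// device.poll(wgpu::PollType::Wait).expect("poll failed");
    /// ```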
    pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
        self.inner.poll(poll_type.map_index(|s| s.index))
    }

    /// The features which can be used on this device.
    ///
    /// No additional features can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn features(&self) -> Features {
        self.inner.features()
    }

    /// The limits which can be used on this device.
    ///
    /// No better limits can be used, even if the underlying adapter can support them.
    #[must_use]
    pub fn limits(&self) -> Limits {
        self.inner.limits()
    }

    /// Creates a shader module.
    ///
    /// <div class="warning">
    // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
    // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
    ///
    /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
    /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
    /// However, on some build profiles and platforms, the default stack size for a thread may be
    /// exceeded before this limit is reached during parsing. Callers should ensure that there is
    /// enough stack space for this, particularly if calls to this method are exposed to user
    /// input.
    ///
    /// </div>
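    ///
    /// A minimal sketch of compiling a WGSL shader (the source string here is
    /// illustrative only):
    ///
    /// ```ignore
    /// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
    ///     label: Some("example shader"),
    ///     source: wgpu::ShaderSource::Wgsl(
    ///         "@compute @workgroup_size(1) fn main() {}".into(),
    ///     ),
    /// });
    /// ```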
    #[must_use]
    pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
        let module = self
            .inner
            .create_shader_module(desc, wgt::ShaderRuntimeChecks::checked());
        ShaderModule { inner: module }
    }

    /// Deprecated: Use [`create_shader_module_trusted`][csmt] instead.
    ///
    /// # Safety
    ///
    /// See [`create_shader_module_trusted`][csmt].
    ///
    /// [csmt]: Self::create_shader_module_trusted
    #[deprecated(
        since = "24.0.0",
        note = "Use `Device::create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())` instead."
    )]
    #[must_use]
    pub unsafe fn create_shader_module_unchecked(
        &self,
        desc: ShaderModuleDescriptor<'_>,
    ) -> ShaderModule {
        unsafe { self.create_shader_module_trusted(desc, crate::ShaderRuntimeChecks::unchecked()) }
    }

    /// Creates a shader module with flags to dictate runtime checks.
    ///
    /// When running on WebGPU, this will merely call [`create_shader_module`][csm].
    ///
    /// # Safety
    ///
    /// In contrast with [`create_shader_module`][csm], this function creates a shader module
    /// with user-customizable runtime checks, which allows shaders to perform operations that
    /// can lead to undefined behavior, such as indexing out of bounds. It is therefore the
    /// caller's responsibility to pass a shader which does not perform any of these operations.
    ///
    /// See the documentation for [`ShaderRuntimeChecks`][src] for more information about specific checks.
    ///
    /// [csm]: Self::create_shader_module
    /// [src]: crate::ShaderRuntimeChecks
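    ///
    /// A minimal sketch of opting out of runtime checks for a shader you fully trust
    /// (the source string is illustrative only):
    ///
    /// ```ignore
    /// let module = unsafe {
    ///     device.create_shader_module_trusted(
    ///         wgpu::ShaderModuleDescriptor {
    ///             label: Some("trusted shader"),
    ///             source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
    ///         },
    ///         wgpu::ShaderRuntimeChecks::unchecked(),
    ///     )
    /// };
    /// ```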
    #[must_use]
    pub unsafe fn create_shader_module_trusted(
        &self,
        desc: ShaderModuleDescriptor<'_>,
        runtime_checks: crate::ShaderRuntimeChecks,
    ) -> ShaderModule {
        let module = self.inner.create_shader_module(desc, runtime_checks);
        ShaderModule { inner: module }
    }

    /// Creates a shader module which will bypass wgpu's shader tooling and validation and be used directly by the backend.
    ///
    /// # Safety
    ///
    /// This function passes data to the backend as-is and can potentially result in a
    /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid.
    #[must_use]
    pub unsafe fn create_shader_module_passthrough(
        &self,
        desc: ShaderModuleDescriptorPassthrough<'_>,
    ) -> ShaderModule {
        let module = unsafe { self.inner.create_shader_module_passthrough(&desc) };
        ShaderModule { inner: module }
    }

    /// Creates an empty [`CommandEncoder`].
    #[must_use]
    pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
        let encoder = self.inner.create_command_encoder(desc);
        // Each encoder starts with its own deferred-action store that travels
        // with the CommandBuffer produced by finish().
        CommandEncoder {
            inner: encoder,
            actions: Default::default(),
        }
    }

    /// Creates an empty [`RenderBundleEncoder`].
    #[must_use]
    pub fn create_render_bundle_encoder<'a>(
        &self,
        desc: &RenderBundleEncoderDescriptor<'_>,
    ) -> RenderBundleEncoder<'a> {
        let encoder = self.inner.create_render_bundle_encoder(desc);
        RenderBundleEncoder {
            inner: encoder,
            _p: core::marker::PhantomData,
        }
    }

    /// Creates a new [`BindGroup`].
    #[must_use]
    pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
        let group = self.inner.create_bind_group(desc);
        BindGroup { inner: group }
    }

    /// Creates a [`BindGroupLayout`].
    #[must_use]
    pub fn create_bind_group_layout(
        &self,
        desc: &BindGroupLayoutDescriptor<'_>,
    ) -> BindGroupLayout {
        let layout = self.inner.create_bind_group_layout(desc);
        BindGroupLayout { inner: layout }
    }

    /// Creates a [`PipelineLayout`].
    #[must_use]
    pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
        let layout = self.inner.create_pipeline_layout(desc);
        PipelineLayout { inner: layout }
    }

    /// Creates a [`RenderPipeline`].
    #[must_use]
    pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_render_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a mesh shader based [`RenderPipeline`].
    #[must_use]
    pub fn create_mesh_pipeline(&self, desc: &MeshPipelineDescriptor<'_>) -> RenderPipeline {
        let pipeline = self.inner.create_mesh_pipeline(desc);
        RenderPipeline { inner: pipeline }
    }

    /// Creates a [`ComputePipeline`].
    #[must_use]
    pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
        let pipeline = self.inner.create_compute_pipeline(desc);
        ComputePipeline { inner: pipeline }
    }

    /// Creates a [`Buffer`].
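    ///
    /// A minimal sketch of creating a small buffer usable as a copy source and
    /// destination (the label and size are illustrative only):
    ///
    /// ```ignore
    /// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
    ///     label: Some("example buffer"),
    ///     size: 1024,
    ///     usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::COPY_SRC,
    ///     mapped_at_creation: false,
    /// });
    /// ```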
    #[must_use]
    pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = self.inner.create_buffer(desc);

        Buffer {
            inner: buffer,
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Texture`].
    ///
    /// `desc` specifies the general format of the texture.
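    ///
    /// A minimal sketch of creating a small 2D render-target texture (all values are
    /// illustrative only):
    ///
    /// ```ignore
    /// let texture = device.create_texture(&wgpu::TextureDescriptor {
    ///     label: Some("example texture"),
    ///     size: wgpu::Extent3d { width: 256, height: 256, depth_or_array_layers: 1 },
    ///     mip_level_count: 1,
    ///     sample_count: 1,
    ///     dimension: wgpu::TextureDimension::D2,
    ///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
    ///     usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
    ///     view_formats: &[],
    /// });
    /// ```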
    #[must_use]
    pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
        let texture = self.inner.create_texture(desc);

        Texture {
            inner: texture,
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a [`Texture`] from a wgpu-hal Texture.
    ///
    /// # Types
    ///
    /// The type of `A::Texture` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Texture")]
    #[doc = crate::hal_type_metal!("Texture")]
    #[doc = crate::hal_type_dx12!("Texture")]
    #[doc = crate::hal_type_gles!("Texture")]
    ///
    /// # Safety
    ///
    /// - `hal_texture` must be created from this device internal handle
    /// - `hal_texture` must be created respecting `desc`
    /// - `hal_texture` must be initialized
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_texture_from_hal<A: hal::Api>(
        &self,
        hal_texture: A::Texture,
        desc: &TextureDescriptor<'_>,
    ) -> Texture {
        let texture = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_texture_from_hal::<A>(hal_texture, core_device, desc)
        };
        Texture {
            inner: texture.into(),
            descriptor: TextureDescriptor {
                label: None,
                view_formats: &[],
                ..desc.clone()
            },
        }
    }

    /// Creates a new [`ExternalTexture`].
    #[must_use]
    pub fn create_external_texture(
        &self,
        desc: &ExternalTextureDescriptor<'_>,
        planes: &[&TextureView],
    ) -> ExternalTexture {
        let external_texture = self.inner.create_external_texture(desc, planes);

        ExternalTexture {
            inner: external_texture,
        }
    }

    /// Creates a [`Buffer`] from a wgpu-hal Buffer.
    ///
    /// # Types
    ///
    /// The type of `A::Buffer` depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Buffer")]
    #[doc = crate::hal_type_metal!("Buffer")]
    #[doc = crate::hal_type_dx12!("Buffer")]
    #[doc = crate::hal_type_gles!("Buffer")]
    ///
    /// # Safety
    ///
    /// - `hal_buffer` must be created from this device internal handle
    /// - `hal_buffer` must be created respecting `desc`
    /// - `hal_buffer` must be initialized
    /// - `hal_buffer` must not have zero size
    #[cfg(wgpu_core)]
    #[must_use]
    pub unsafe fn create_buffer_from_hal<A: hal::Api>(
        &self,
        hal_buffer: A::Buffer,
        desc: &BufferDescriptor<'_>,
    ) -> Buffer {
        let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));

        let buffer = unsafe {
            let core_device = self.inner.as_core();
            core_device
                .context
                .create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
        };

        Buffer {
            inner: buffer.into(),
            map_context: Arc::new(Mutex::new(map_context)),
            size: desc.size,
            usage: desc.usage,
        }
    }

    /// Creates a new [`Sampler`].
    ///
    /// `desc` specifies the behavior of the sampler.
    #[must_use]
    pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
        let sampler = self.inner.create_sampler(desc);
        Sampler { inner: sampler }
    }

    /// Creates a new [`QuerySet`].
    #[must_use]
    pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
        let query_set = self.inner.create_query_set(desc);
        QuerySet { inner: query_set }
    }

    /// Set a callback which will be called for all errors that are not handled in error scopes.
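    ///
    /// A minimal sketch of installing a global handler (the logging is illustrative only):
    ///
    /// ```ignore
    /// device.on_uncaptured_error(std::sync::Arc::new(|error| {
    ///     eprintln!("uncaptured wgpu error: {error}");
    /// }));
    /// ```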
    pub fn on_uncaptured_error(&self, handler: Arc<dyn UncapturedErrorHandler>) {
        self.inner.on_uncaptured_error(handler)
    }

    /// Push an error scope.
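    ///
    /// A minimal sketch of the push/pop pattern for catching validation errors
    /// (assumes an async context; the names are illustrative only):
    ///
    /// ```ignore
    /// device.push_error_scope(wgpu::ErrorFilter::Validation);
    /// // ... create resources that might fail validation ...
    /// if let Some(error) = device.pop_error_scope().await {
    ///     eprintln!("validation error: {error}");
    /// }
    /// ```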
    pub fn push_error_scope(&self, filter: ErrorFilter) {
        self.inner.push_error_scope(filter)
    }

    /// Pop an error scope.
    pub fn pop_error_scope(&self) -> impl Future<Output = Option<Error>> + WasmNotSend {
        self.inner.pop_error_scope()
    }

    /// Starts a capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`StartFrameCapture(device, NULL)`][rd].
    /// - Xcode: Creates a capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should not be any other captures currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - Debuggers need to capture both the recording of the commands and the
    ///   submission of the commands to the GPU. Try to wrap all of your
    ///   gpu work in a capture.
    /// - If you encounter issues, try waiting for the GPU to finish all work
    ///   before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv417StartFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
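    ///
    /// A minimal sketch of wrapping a submission in a capture (assumes `device`,
    /// `queue`, and `encoder` already exist; all names are illustrative only):
    ///
    /// ```ignore
    /// unsafe { device.start_graphics_debugger_capture() };
    /// queue.submit(Some(encoder.finish()));
    /// // Give the GPU time to finish before closing the capture.
    /// device.poll(wgpu::PollType::Wait).ok();
    /// unsafe { device.stop_graphics_debugger_capture() };
    /// ```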
    #[doc(alias = "start_renderdoc_capture")]
    #[doc(alias = "start_xcode_capture")]
    pub unsafe fn start_graphics_debugger_capture(&self) {
        unsafe { self.inner.start_graphics_debugger_capture() }
    }

    /// Stops the current capture in the attached graphics debugger.
    ///
    /// This behaves differently depending on which graphics debugger is attached:
    ///
    /// - Renderdoc: Calls [`EndFrameCapture(device, NULL)`][rd].
    /// - Xcode: Stops the capture with [`MTLCaptureManager`][xcode].
    /// - None: No action is taken.
    ///
    /// # Safety
    ///
    /// - There should be a capture currently active.
    /// - All other safety rules are defined by the graphics debugger, see the
    ///   documentation for the specific debugger.
    /// - In general, graphics debuggers can easily cause crashes, so this isn't
    ///   ever guaranteed to be sound.
    ///
    /// # Tips
    ///
    /// - If you encounter issues, try submitting all work to the GPU and waiting
    ///   for that work to finish before stopping the capture.
    ///
    /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv415EndFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
    /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
    #[doc(alias = "stop_renderdoc_capture")]
    #[doc(alias = "stop_xcode_capture")]
    pub unsafe fn stop_graphics_debugger_capture(&self) {
        unsafe { self.inner.stop_graphics_debugger_capture() }
    }

    /// Query internal counters from the native backend for debugging purposes.
    ///
    /// Some backends may not set all counters, or may not set any counter at all.
    /// The `counters` cargo feature must be enabled for any counter to be set.
    ///
    /// If a counter is not set, it contains its default value (zero).
    #[must_use]
    pub fn get_internal_counters(&self) -> wgt::InternalCounters {
        self.inner.get_internal_counters()
    }

    /// Generate a GPU memory allocation report if the underlying backend supports it.
    ///
    /// Backends that do not support producing these reports return `None`. A backend may
    /// support it and still return `None` if it is not performing sub-allocation,
    /// for example as a workaround for driver issues.
    #[must_use]
    pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
        self.inner.generate_allocator_report()
    }

    /// Get the [`wgpu_hal`] device from this `Device`.
    ///
    /// Find the Api struct corresponding to the active backend in [`wgpu_hal::api`],
    /// and pass that struct to the `A` type parameter.
    ///
    /// Returns a guard that dereferences to the type of the hal backend
    /// which implements [`A::Device`].
    ///
    /// # Types
    ///
    /// The returned type depends on the backend:
    ///
    #[doc = crate::hal_type_vulkan!("Device")]
    #[doc = crate::hal_type_metal!("Device")]
    #[doc = crate::hal_type_dx12!("Device")]
    #[doc = crate::hal_type_gles!("Device")]
    ///
    /// # Errors
    ///
    /// This method will return `None` if:
    /// - The device is not from the backend specified by `A`.
    /// - The device is from the `webgpu` or `custom` backend.
    ///
    /// # Safety
    ///
    /// - The returned resource must not be destroyed unless the guard
    ///   is the last reference to it and it is not in use by the GPU.
    ///   The guard and handle may be dropped at any time however.
    /// - All the safety requirements of wgpu-hal must be upheld.
    ///
    /// [`A::Device`]: hal::Api::Device
    #[cfg(wgpu_core)]
    pub unsafe fn as_hal<A: hal::Api>(
        &self,
    ) -> Option<impl Deref<Target = A::Device> + WasmNotSendSync> {
        let device = self.inner.as_core_opt()?;
        unsafe { device.context.device_as_hal::<A>(device) }
    }

    /// Destroy this device.
    pub fn destroy(&self) {
        self.inner.destroy()
    }

    /// Set a DeviceLostCallback on this device.
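    ///
    /// A minimal sketch of installing a callback (the logging is illustrative only):
    ///
    /// ```ignore
    /// device.set_device_lost_callback(|reason, message| {
    ///     eprintln!("device lost ({reason:?}): {message}");
    /// });
    /// ```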
    pub fn set_device_lost_callback(
        &self,
        callback: impl Fn(DeviceLostReason, String) + Send + 'static,
    ) {
        self.inner.set_device_lost_callback(Box::new(callback))
    }

    /// Create a [`PipelineCache`] with initial data.
    ///
    /// This can be passed to [`Device::create_compute_pipeline`]
    /// and [`Device::create_render_pipeline`] to either accelerate these
    /// or add the cache results from those.
    ///
    /// # Safety
    ///
    /// If the `data` field of `desc` is set, it must have previously been returned from a call
    /// to [`PipelineCache::get_data`][^saving]. This `data` will only be used if it came
    /// from an adapter with the same [`util::pipeline_cache_key`].
    /// This *is* compatible across wgpu versions, as any data format change will
    /// be accounted for.
    ///
    /// It is *not* supported to bring caches from previous direct uses of backend APIs
    /// into this method.
    ///
    /// # Errors
    ///
    /// Returns an error value if:
    /// * the [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) feature is not enabled
    /// * this device is invalid; or
    /// * the device is out of memory
    ///
    /// This method also returns an error value if:
    /// * the `fallback` field on `desc` is false; and
    /// * the `data` provided would not be used[^data_not_used]
    ///
    /// If an error value is used in subsequent calls, default caching will be used.
    ///
    /// [^saving]: We do recognise that saving this data to disk means this condition
    /// is impossible to fully prove. Consider the risks for your own application in this case.
    ///
    /// [^data_not_used]: This data may not be used if: the data was produced by a prior
    /// version of wgpu; or was created for an incompatible adapter, or there was a GPU driver
    /// update. In some cases, the data might not be used and a real value is returned;
    /// this is left to the discretion of GPU drivers.
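    ///
    /// A minimal sketch of restoring a cache from disk, if present (`cache_path` is a
    /// hypothetical location chosen by the application):
    ///
    /// ```ignore
    /// let cache_data = std::fs::read(&cache_path).ok();
    /// let cache = unsafe {
    ///     device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
    ///         label: Some("pipeline cache"),
    ///         data: cache_data.as_deref(),
    ///         fallback: true,
    ///     })
    /// };
    /// ```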
    #[must_use]
    pub unsafe fn create_pipeline_cache(
        &self,
        desc: &PipelineCacheDescriptor<'_>,
    ) -> PipelineCache {
        let cache = unsafe { self.inner.create_pipeline_cache(desc) };
        PipelineCache { inner: cache }
    }
}

/// [`Features::EXPERIMENTAL_RAY_QUERY`] must be enabled on the device in order to call these functions.
impl Device {
    /// Create a bottom level acceleration structure, used inside a top level acceleration structure for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    /// - `sizes`: Size descriptor limiting what can be built into the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    /// If `sizes` is [`BlasGeometrySizeDescriptors::Triangles`], then the following must be satisfied:
    /// - For every geometry descriptor (for these purposes called `geo_desc`) of `sizes.descriptors`, the following must be satisfied:
    ///   - `geo_desc.vertex_format` must be within allowed formats (allowed formats for a given feature set
    ///     may be queried with [`Features::allowed_vertex_formats_for_blas`]).
    ///   - Both or neither of `geo_desc.index_format` and `geo_desc.index_count` must be provided.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    /// [`Features::allowed_vertex_formats_for_blas`]: wgt::Features::allowed_vertex_formats_for_blas
    #[must_use]
    pub fn create_blas(
        &self,
        desc: &CreateBlasDescriptor<'_>,
        sizes: BlasGeometrySizeDescriptors,
    ) -> Blas {
        let (handle, blas) = self.inner.create_blas(desc, sizes);

        Blas {
            inner: blas,
            handle,
        }
    }

    /// Create a top level acceleration structure, used for ray tracing.
    /// - `desc`: The descriptor of the acceleration structure.
    ///
    /// # Validation
    /// If any of the following is not satisfied, a validation error is generated.
    ///
    /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
    ///
    /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
    #[must_use]
    pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
        let tlas = self.inner.create_tlas(desc);

        Tlas {
            inner: tlas,
            instances: vec![None; desc.max_instances as usize],
            lowest_unmodified: 0,
        }
    }
}

/// Requesting a device from an [`Adapter`] failed.
#[derive(Clone, Debug)]
pub struct RequestDeviceError {
    pub(crate) inner: RequestDeviceErrorKind,
}
#[derive(Clone, Debug)]
pub(crate) enum RequestDeviceErrorKind {
    /// Error from [`wgpu_core`].
    // must match dependency cfg
    #[cfg(wgpu_core)]
    Core(wgc::instance::RequestDeviceError),

    /// Error from web API that was called by `wgpu` to request a device.
    ///
    /// (This is currently never used by the webgl backend, but it could be.)
    #[cfg(webgpu)]
    WebGpu(String),
}

static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);

impl fmt::Display for RequestDeviceError {
    fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.fmt(_f),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(error) => {
                write!(_f, "{error}")
            }
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

impl error::Error for RequestDeviceError {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match &self.inner {
            #[cfg(wgpu_core)]
            RequestDeviceErrorKind::Core(error) => error.source(),
            #[cfg(webgpu)]
            RequestDeviceErrorKind::WebGpu(_) => None,
            #[cfg(not(any(webgpu, wgpu_core)))]
            _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
        }
    }
}

#[cfg(wgpu_core)]
impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
    fn from(error: wgc::instance::RequestDeviceError) -> Self {
        Self {
            inner: RequestDeviceErrorKind::Core(error),
        }
    }
}

/// The callback of [`Device::on_uncaptured_error()`].
///
/// It must be a function with this signature.
pub trait UncapturedErrorHandler: Fn(Error) + Send + Sync + 'static {}
impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + Sync + 'static {}

/// Kinds of [`Error`]s a [`Device::push_error_scope()`] may be configured to catch.
#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
pub enum ErrorFilter {
    /// Catch only out-of-memory errors.
    OutOfMemory,
    /// Catch only validation errors.
    Validation,
    /// Catch only internal errors.
    Internal,
}
static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);

/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(send_sync)]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + Send + Sync + 'static>;
/// Lower level source of the error.
///
/// `Send + Sync` varies depending on configuration.
#[cfg(not(send_sync))]
#[cfg_attr(docsrs, doc(cfg(all())))]
pub type ErrorSource = Box<dyn error::Error + 'static>;

/// Errors resulting from usage of GPU APIs.
///
/// By default, errors translate into panics. Depending on the backend and circumstances,
/// errors may occur synchronously or asynchronously. When errors need to be handled, use
/// [`Device::push_error_scope()`] or [`Device::on_uncaptured_error()`].
#[derive(Debug)]
pub enum Error {
    /// Out of memory.
    OutOfMemory {
        /// Lower level source of the error.
        source: ErrorSource,
    },
    /// Validation error, signifying a bug in code or data provided to `wgpu`.
    Validation {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the validation error.
        description: String,
    },
    /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
    ///
    /// These could be due to internal implementation or system limits being reached.
    Internal {
        /// Lower level source of the error.
        source: ErrorSource,
        /// Description of the internal GPU error.
        description: String,
    },
}
#[cfg(send_sync)]
static_assertions::assert_impl_all!(Error: Send, Sync);

impl error::Error for Error {
    fn source(&self) -> Option<&(dyn error::Error + 'static)> {
        match self {
            Error::OutOfMemory { source } => Some(source.as_ref()),
            Error::Validation { source, .. } => Some(source.as_ref()),
            Error::Internal { source, .. } => Some(source.as_ref()),
        }
    }
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
            Error::Validation { description, .. } => f.write_str(description),
            Error::Internal { description, .. } => f.write_str(description),
        }
    }
}

// Copied from [`futures::task::noop_waker`].
// Needed until MSRV is 1.85 with `task::Waker::noop()` available
#[cfg(feature = "noop")]
mod waker {
    use core::ptr::null;
    use core::task::{RawWaker, RawWakerVTable, Waker};

    unsafe fn noop_clone(_data: *const ()) -> RawWaker {
        noop_raw_waker()
    }

    unsafe fn noop(_data: *const ()) {}

    const NOOP_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(noop_clone, noop, noop, noop);

    const fn noop_raw_waker() -> RawWaker {
        RawWaker::new(null(), &NOOP_WAKER_VTABLE)
    }

    /// Get a static reference to a [`Waker`] which
    /// does nothing when `wake()` is called on it.
    #[inline]
    pub fn noop_waker_ref() -> &'static Waker {
        struct SyncRawWaker(RawWaker);
        unsafe impl Sync for SyncRawWaker {}

        static NOOP_WAKER_INSTANCE: SyncRawWaker = SyncRawWaker(noop_raw_waker());

        // SAFETY: `Waker` is #[repr(transparent)] over its `RawWaker`.
        unsafe { &*(&NOOP_WAKER_INSTANCE.0 as *const RawWaker as *const Waker) }
    }
}