wgpu/api/device.rs
1use alloc::{boxed::Box, string::String, sync::Arc, vec};
2#[cfg(wgpu_core)]
3use core::ops::Deref;
4use core::{error, fmt, future::Future, marker::PhantomData};
5
6use crate::api::blas::{Blas, BlasGeometrySizeDescriptors, CreateBlasDescriptor};
7use crate::api::tlas::{CreateTlasDescriptor, Tlas};
8use crate::util::Mutex;
9use crate::*;
10
11/// Open connection to a graphics and/or compute device.
12///
13/// Responsible for the creation of most rendering and compute resources.
14/// These are then used in commands, which are submitted to a [`Queue`].
15///
16/// A device may be requested from an adapter with [`Adapter::request_device`].
17///
18/// Corresponds to [WebGPU `GPUDevice`](https://gpuweb.github.io/gpuweb/#gpu-device).
19#[derive(Debug, Clone)]
20pub struct Device {
21 pub(crate) inner: dispatch::DispatchDevice,
22}
23#[cfg(send_sync)]
24static_assertions::assert_impl_all!(Device: Send, Sync);
25
26crate::cmp::impl_eq_ord_hash_proxy!(Device => .inner);
27
28/// Describes a [`Device`].
29///
30/// For use with [`Adapter::request_device`].
31///
32/// Corresponds to [WebGPU `GPUDeviceDescriptor`](
33/// https://gpuweb.github.io/gpuweb/#dictdef-gpudevicedescriptor).
34pub type DeviceDescriptor<'a> = wgt::DeviceDescriptor<Label<'a>>;
35static_assertions::assert_impl_all!(DeviceDescriptor<'_>: Send, Sync);
36
37impl Device {
38 #[cfg(custom)]
39 /// Returns the custom implementation of this `Device`, if it uses a custom backend whose inner type is `T`.
40 pub fn as_custom<T: custom::DeviceInterface>(&self) -> Option<&T> {
41 self.inner.as_custom()
42 }
43
44 #[cfg(custom)]
45 /// Creates a `Device` from a custom implementation.
46 pub fn from_custom<T: custom::DeviceInterface>(device: T) -> Self {
47 Self {
48 inner: dispatch::DispatchDevice::custom(device),
49 }
50 }
51
52 /// Constructs a stub device for testing using [`Backend::Noop`].
53 ///
54 /// This is a convenience function which avoids the configuration, `async`, and fallibility
55 /// aspects of constructing a device through `Instance`.
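///
/// A minimal sketch of its use in a test (assuming the `noop` feature is enabled and that
/// `DeviceDescriptor::default()` is an acceptable configuration):
///
/// ```rust
/// # #[cfg(feature = "noop")]
/// # fn example() {
/// let (device, queue) = wgpu::Device::noop(&wgpu::DeviceDescriptor::default());
/// // `device` and `queue` can now be handed to the code under test.
/// # }
/// ```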
56 #[cfg(feature = "noop")]
57 pub fn noop(desc: &DeviceDescriptor<'_>) -> (Device, Queue) {
58 use core::future::Future as _;
59 use core::pin::pin;
60 use core::task;
61 let ctx = &mut task::Context::from_waker(task::Waker::noop());
62
63 let instance = Instance::new(InstanceDescriptor {
64 backends: Backends::NOOP,
65 backend_options: BackendOptions {
66 noop: NoopBackendOptions { enable: true },
67 ..Default::default()
68 },
69 ..Default::default()
70 });
71
72 // Both of these futures are trivial and should complete instantaneously,
73 // so we do not need an executor and can just poll them once.
74 let task::Poll::Ready(Ok(adapter)) =
75 pin!(instance.request_adapter(&RequestAdapterOptions::default())).poll(ctx)
76 else {
77 unreachable!()
78 };
79 let task::Poll::Ready(Ok(device_and_queue)) = pin!(adapter.request_device(desc)).poll(ctx)
80 else {
81 unreachable!()
82 };
83 device_and_queue
84 }
85
86 /// Check for resource cleanups and mapping callbacks. Will block if [`PollType::Wait`] is passed.
87 ///
88 /// Returns a [`PollStatus`] indicating whether the queue is empty or whether there
89 /// are more queue submissions still in flight. (Note that, unless access to the
90 /// [`Queue`] is coordinated somehow, this information could be out of date by the
91 /// time the caller receives it. `Queue`s can be shared between threads, so other
92 /// threads could submit new work at any time.)
93 ///
94 /// When running on WebGPU, this is a no-op. `Device`s are automatically polled.
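///
/// For example, a sketch of blocking until previously submitted work has completed
/// (assuming the blocking [`PollType::Wait`] variant):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// device.poll(wgpu::PollType::Wait).expect("failed to wait for the GPU");
/// # }
/// ```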
95 pub fn poll(&self, poll_type: PollType) -> Result<crate::PollStatus, crate::PollError> {
96 self.inner.poll(poll_type.map_index(|s| s.index))
97 }
98
99 /// The [features][Features] which can be used on this device.
100 ///
101 /// This will be equal to the [`required_features`][DeviceDescriptor::required_features]
102 /// specified when creating the device.
103 /// No additional features can be used, even if the underlying adapter can support them.
104 #[must_use]
105 pub fn features(&self) -> Features {
106 self.inner.features()
107 }
108
109 /// The limits which can be used on this device.
110 ///
111 /// This will be equal to the [`required_limits`][DeviceDescriptor::required_limits]
112 /// specified when creating the device.
113 /// No better limits can be used, even if the underlying adapter can support them.
114 #[must_use]
115 pub fn limits(&self) -> Limits {
116 self.inner.limits()
117 }
118
119 /// Get info about the adapter that this device was created from.
120 pub fn adapter_info(&self) -> AdapterInfo {
121 self.inner.adapter_info()
122 }
123
124 /// Creates a shader module.
125 ///
126 /// <div class="warning">
127 // NOTE: Keep this in sync with `naga::front::wgsl::parse_str`!
128 // NOTE: Keep this in sync with `wgpu_core::Global::device_create_shader_module`!
129 ///
130 /// This function may consume a lot of stack space. Compiler-enforced limits for parsing
131 /// recursion exist; if shader compilation runs into them, it will return an error gracefully.
132 /// However, on some build profiles and platforms, the default stack size for a thread may be
133 /// exceeded before this limit is reached during parsing. Callers should ensure that there is
134 /// enough stack space for this, particularly if calls to this method are exposed to user
135 /// input.
136 ///
137 /// </div>
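///
/// For example, a sketch of creating a module from WGSL source (the shader text here is
/// illustrative; any valid WGSL will do):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// let module = device.create_shader_module(wgpu::ShaderModuleDescriptor {
///     label: Some("example shader"),
///     source: wgpu::ShaderSource::Wgsl("@compute @workgroup_size(1) fn main() {}".into()),
/// });
/// # }
/// ```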
138 #[must_use]
139 pub fn create_shader_module(&self, desc: ShaderModuleDescriptor<'_>) -> ShaderModule {
140 let module = self
141 .inner
142 .create_shader_module(desc, wgt::ShaderRuntimeChecks::checked());
143 ShaderModule { inner: module }
144 }
145
146 /// Deprecated: Use [`create_shader_module_trusted`][csmt] instead.
147 ///
148 /// # Safety
149 ///
150 /// See [`create_shader_module_trusted`][csmt].
151 ///
152 /// [csmt]: Self::create_shader_module_trusted
153 #[deprecated(
154 since = "24.0.0",
155 note = "Use `Device::create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())` instead."
156 )]
157 #[must_use]
158 pub unsafe fn create_shader_module_unchecked(
159 &self,
160 desc: ShaderModuleDescriptor<'_>,
161 ) -> ShaderModule {
162 unsafe { self.create_shader_module_trusted(desc, crate::ShaderRuntimeChecks::unchecked()) }
163 }
164
165 /// Creates a shader module with flags to dictate runtime checks.
166 ///
167 /// When running on WebGPU, this will merely call [`create_shader_module`][csm].
168 ///
169 /// # Safety
170 ///
171 /// In contrast with [`create_shader_module`][csm], this function
172 /// creates a shader module with user-customizable runtime checks. Disabling those checks
173 /// allows shaders to perform operations that can lead to undefined behavior, such as
174 /// indexing out of bounds, so it is the caller's responsibility to pass a shader that
175 /// does not perform any such operations.
176 ///
177 /// See the documentation for [`ShaderRuntimeChecks`] for more information about specific checks.
178 ///
179 /// [csm]: Self::create_shader_module
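///
/// A sketch of opting out of runtime checks for a shader that is known to be well behaved
/// (the descriptor is assumed to come from elsewhere):
///
/// ```rust
/// # fn example(device: &wgpu::Device, desc: wgpu::ShaderModuleDescriptor<'_>) {
/// // SAFETY: the shader is trusted not to perform out-of-bounds accesses or similar.
/// let module = unsafe {
///     device.create_shader_module_trusted(desc, wgpu::ShaderRuntimeChecks::unchecked())
/// };
/// # }
/// ```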
180 #[must_use]
181 pub unsafe fn create_shader_module_trusted(
182 &self,
183 desc: ShaderModuleDescriptor<'_>,
184 runtime_checks: crate::ShaderRuntimeChecks,
185 ) -> ShaderModule {
186 let module = self.inner.create_shader_module(desc, runtime_checks);
187 ShaderModule { inner: module }
188 }
189
190 /// Creates a shader module which will bypass wgpu's shader tooling and validation and be used directly by the backend.
191 ///
192 /// # Safety
193 ///
194 /// This function passes data to the backend as-is and can potentially result in a
195 /// driver crash or bogus behaviour. No attempt is made to ensure that data is valid.
196 #[must_use]
197 pub unsafe fn create_shader_module_passthrough(
198 &self,
199 desc: ShaderModuleDescriptorPassthrough<'_>,
200 ) -> ShaderModule {
201 let module = unsafe { self.inner.create_shader_module_passthrough(&desc) };
202 ShaderModule { inner: module }
203 }
204
205 /// Creates an empty [`CommandEncoder`].
206 #[must_use]
207 pub fn create_command_encoder(&self, desc: &CommandEncoderDescriptor<'_>) -> CommandEncoder {
208 let encoder = self.inner.create_command_encoder(desc);
209 // Each encoder starts with its own deferred-action store that travels
210 // with the CommandBuffer produced by finish().
211 CommandEncoder {
212 inner: encoder,
213 actions: Default::default(),
214 }
215 }
216
217 /// Creates an empty [`RenderBundleEncoder`].
218 #[must_use]
219 pub fn create_render_bundle_encoder<'a>(
220 &self,
221 desc: &RenderBundleEncoderDescriptor<'_>,
222 ) -> RenderBundleEncoder<'a> {
223 let encoder = self.inner.create_render_bundle_encoder(desc);
224 RenderBundleEncoder {
225 inner: encoder,
226 _p: PhantomData,
227 }
228 }
229
230 /// Creates a new [`BindGroup`].
231 #[must_use]
232 pub fn create_bind_group(&self, desc: &BindGroupDescriptor<'_>) -> BindGroup {
233 let group = self.inner.create_bind_group(desc);
234 BindGroup { inner: group }
235 }
236
237 /// Creates a [`BindGroupLayout`].
238 #[must_use]
239 pub fn create_bind_group_layout(
240 &self,
241 desc: &BindGroupLayoutDescriptor<'_>,
242 ) -> BindGroupLayout {
243 let layout = self.inner.create_bind_group_layout(desc);
244 BindGroupLayout { inner: layout }
245 }
246
247 /// Creates a [`PipelineLayout`].
248 #[must_use]
249 pub fn create_pipeline_layout(&self, desc: &PipelineLayoutDescriptor<'_>) -> PipelineLayout {
250 let layout = self.inner.create_pipeline_layout(desc);
251 PipelineLayout { inner: layout }
252 }
253
254 /// Creates a [`RenderPipeline`].
255 #[must_use]
256 pub fn create_render_pipeline(&self, desc: &RenderPipelineDescriptor<'_>) -> RenderPipeline {
257 let pipeline = self.inner.create_render_pipeline(desc);
258 RenderPipeline { inner: pipeline }
259 }
260
261 /// Creates a mesh-shader-based [`RenderPipeline`].
262 #[must_use]
263 pub fn create_mesh_pipeline(&self, desc: &MeshPipelineDescriptor<'_>) -> RenderPipeline {
264 let pipeline = self.inner.create_mesh_pipeline(desc);
265 RenderPipeline { inner: pipeline }
266 }
267
268 /// Creates a [`ComputePipeline`].
269 #[must_use]
270 pub fn create_compute_pipeline(&self, desc: &ComputePipelineDescriptor<'_>) -> ComputePipeline {
271 let pipeline = self.inner.create_compute_pipeline(desc);
272 ComputePipeline { inner: pipeline }
273 }
274
275 /// Creates a [`Buffer`].
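///
/// A sketch of creating a small storage buffer (the size and usages are illustrative):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// let buffer = device.create_buffer(&wgpu::BufferDescriptor {
///     label: Some("example buffer"),
///     size: 1024,
///     usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
///     mapped_at_creation: false,
/// });
/// # }
/// ```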
276 #[must_use]
277 pub fn create_buffer(&self, desc: &BufferDescriptor<'_>) -> Buffer {
278 let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));
279
280 let buffer = self.inner.create_buffer(desc);
281
282 Buffer {
283 inner: buffer,
284 map_context: Arc::new(Mutex::new(map_context)),
285 size: desc.size,
286 usage: desc.usage,
287 }
288 }
289
290 /// Creates a new [`Texture`].
291 ///
292 /// `desc` specifies the general format of the texture.
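///
/// A sketch of creating a 256x256 RGBA texture (the format and usages are illustrative):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// let texture = device.create_texture(&wgpu::TextureDescriptor {
///     label: Some("example texture"),
///     size: wgpu::Extent3d { width: 256, height: 256, depth_or_array_layers: 1 },
///     mip_level_count: 1,
///     sample_count: 1,
///     dimension: wgpu::TextureDimension::D2,
///     format: wgpu::TextureFormat::Rgba8UnormSrgb,
///     usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
///     view_formats: &[],
/// });
/// # }
/// ```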
293 #[must_use]
294 pub fn create_texture(&self, desc: &TextureDescriptor<'_>) -> Texture {
295 let texture = self.inner.create_texture(desc);
296
297 Texture {
298 inner: texture,
299 descriptor: TextureDescriptor {
300 label: None,
301 view_formats: &[],
302 ..desc.clone()
303 },
304 }
305 }
306
307 /// Creates a [`Texture`] from a wgpu-hal Texture.
308 ///
309 /// # Types
310 ///
311 /// The type of `A::Texture` depends on the backend:
312 ///
313 #[doc = crate::hal_type_vulkan!("Texture")]
314 #[doc = crate::hal_type_metal!("Texture")]
315 #[doc = crate::hal_type_dx12!("Texture")]
316 #[doc = crate::hal_type_gles!("Texture")]
317 ///
318 /// # Safety
319 ///
320 /// - `hal_texture` must be created from this device's internal handle
321 /// - `hal_texture` must be created respecting `desc`
322 /// - `hal_texture` must be initialized
323 #[cfg(wgpu_core)]
324 #[must_use]
325 pub unsafe fn create_texture_from_hal<A: hal::Api>(
326 &self,
327 hal_texture: A::Texture,
328 desc: &TextureDescriptor<'_>,
329 ) -> Texture {
330 let texture = unsafe {
331 let core_device = self.inner.as_core();
332 core_device
333 .context
334 .create_texture_from_hal::<A>(hal_texture, core_device, desc)
335 };
336 Texture {
337 inner: texture.into(),
338 descriptor: TextureDescriptor {
339 label: None,
340 view_formats: &[],
341 ..desc.clone()
342 },
343 }
344 }
345
346 /// Creates a new [`ExternalTexture`].
347 #[must_use]
348 pub fn create_external_texture(
349 &self,
350 desc: &ExternalTextureDescriptor<'_>,
351 planes: &[&TextureView],
352 ) -> ExternalTexture {
353 let external_texture = self.inner.create_external_texture(desc, planes);
354
355 ExternalTexture {
356 inner: external_texture,
357 }
358 }
359
360 /// Creates a [`Buffer`] from a wgpu-hal Buffer.
361 ///
362 /// # Types
363 ///
364 /// The type of `A::Buffer` depends on the backend:
365 ///
366 #[doc = crate::hal_type_vulkan!("Buffer")]
367 #[doc = crate::hal_type_metal!("Buffer")]
368 #[doc = crate::hal_type_dx12!("Buffer")]
369 #[doc = crate::hal_type_gles!("Buffer")]
370 ///
371 /// # Safety
372 ///
373 /// - `hal_buffer` must be created from this device's internal handle
374 /// - `hal_buffer` must be created respecting `desc`
375 /// - `hal_buffer` must be initialized
376 /// - `hal_buffer` must not have zero size
377 #[cfg(wgpu_core)]
378 #[must_use]
379 pub unsafe fn create_buffer_from_hal<A: hal::Api>(
380 &self,
381 hal_buffer: A::Buffer,
382 desc: &BufferDescriptor<'_>,
383 ) -> Buffer {
384 let map_context = MapContext::new(desc.mapped_at_creation.then_some(0..desc.size));
385
386 let buffer = unsafe {
387 let core_device = self.inner.as_core();
388 core_device
389 .context
390 .create_buffer_from_hal::<A>(hal_buffer, core_device, desc)
391 };
392
393 Buffer {
394 inner: buffer.into(),
395 map_context: Arc::new(Mutex::new(map_context)),
396 size: desc.size,
397 usage: desc.usage,
398 }
399 }
400
401 /// Creates a new [`Sampler`].
402 ///
403 /// `desc` specifies the behavior of the sampler.
404 #[must_use]
405 pub fn create_sampler(&self, desc: &SamplerDescriptor<'_>) -> Sampler {
406 let sampler = self.inner.create_sampler(desc);
407 Sampler { inner: sampler }
408 }
409
410 /// Creates a new [`QuerySet`].
411 #[must_use]
412 pub fn create_query_set(&self, desc: &QuerySetDescriptor<'_>) -> QuerySet {
413 let query_set = self.inner.create_query_set(desc);
414 QuerySet { inner: query_set }
415 }
416
417 /// Set a callback which will be called for all errors that are not handled in error scopes.
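///
/// A sketch of installing a handler that simply logs uncaptured errors (assuming `std`
/// is available for printing):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// device.on_uncaptured_error(std::sync::Arc::new(|error: wgpu::Error| {
///     eprintln!("uncaptured wgpu error: {error}");
/// }));
/// # }
/// ```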
418 pub fn on_uncaptured_error(&self, handler: Arc<dyn UncapturedErrorHandler>) {
419 self.inner.on_uncaptured_error(handler)
420 }
421
422 /// Push an error scope on this device's thread-local error scope
423 /// stack. All operations on this device, or on resources created
424 /// from this device, will have their errors captured by this scope
425 /// until the scope is popped.
426 ///
427 /// Scopes must be popped in reverse order to their creation. If
428 /// a guard is dropped without being `pop()`ped, the scope will be
429 /// popped, and the captured errors will be dropped.
430 ///
431 /// Multiple error scopes may be active at one time, forming a stack.
432 /// Each error will be reported to the inner-most scope that matches
433 /// its filter.
434 ///
435 /// With the `std` feature enabled, this stack is **thread-local**;
436 /// without it, the stack is **global** across all threads.
437 ///
438 /// ```rust
439 /// # async move {
440 /// # let device: wgpu::Device = unreachable!();
441 /// let error_scope = device.push_error_scope(wgpu::ErrorFilter::Validation);
442 ///
443 /// // ...
444 /// // do work that may produce validation errors
445 /// // ...
446 ///
447 /// // pop the error scope and get a future for the result
448 /// let error_future = error_scope.pop();
449 ///
450 /// // await the future to get the error, if any
451 /// let error = error_future.await;
452 /// # };
453 /// ```
454 pub fn push_error_scope(&self, filter: ErrorFilter) -> ErrorScopeGuard {
455 let index = self.inner.push_error_scope(filter);
456 ErrorScopeGuard {
457 device: self.inner.clone(),
458 index,
459 popped: false,
460 _phantom: PhantomData,
461 }
462 }
463
464 /// Starts a capture in the attached graphics debugger.
465 ///
466 /// This behaves differently depending on which graphics debugger is attached:
467 ///
468 /// - Renderdoc: Calls [`StartFrameCapture(device, NULL)`][rd].
469 /// - Xcode: Creates a capture with [`MTLCaptureManager`][xcode].
470 /// - None: No action is taken.
471 ///
472 /// # Safety
473 ///
474 /// - There should not be any other captures currently active.
475 /// - All other safety rules are defined by the graphics debugger, see the
476 /// documentation for the specific debugger.
477 /// - In general, graphics debuggers can easily cause crashes, so this isn't
478 /// ever guaranteed to be sound.
479 ///
480 /// # Tips
481 ///
482 /// - Debuggers need to capture both the recording of the commands and the
483 /// submission of the commands to the GPU. Try to wrap all of your
484 /// GPU work in a capture.
485 /// - If you encounter issues, try waiting for the GPU to finish all work
486 /// before stopping the capture.
487 ///
488 /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv417StartFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
489 /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
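///
/// A sketch of wrapping a frame's recording and submission in a capture (assumes a
/// debugger is attached; `command_buffer` stands in for the frame's recorded work):
///
/// ```rust
/// # fn example(device: &wgpu::Device, queue: &wgpu::Queue, command_buffer: wgpu::CommandBuffer) {
/// unsafe { device.start_graphics_debugger_capture() };
/// queue.submit([command_buffer]);
/// unsafe { device.stop_graphics_debugger_capture() };
/// # }
/// ```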
490 #[doc(alias = "start_renderdoc_capture")]
491 #[doc(alias = "start_xcode_capture")]
492 pub unsafe fn start_graphics_debugger_capture(&self) {
493 unsafe { self.inner.start_graphics_debugger_capture() }
494 }
495
496 /// Stops the current capture in the attached graphics debugger.
497 ///
498 /// This behaves differently depending on which graphics debugger is attached:
499 ///
500 /// - Renderdoc: Calls [`EndFrameCapture(device, NULL)`][rd].
501 /// - Xcode: Stops the capture with [`MTLCaptureManager`][xcode].
502 /// - None: No action is taken.
503 ///
504 /// # Safety
505 ///
506 /// - There should be a capture currently active.
507 /// - All other safety rules are defined by the graphics debugger, see the
508 /// documentation for the specific debugger.
509 /// - In general, graphics debuggers can easily cause crashes, so this isn't
510 /// ever guaranteed to be sound.
511 ///
512 /// # Tips
513 ///
514 /// - If you encounter issues, try submitting all work to the GPU and waiting
515 /// for that work to finish before stopping the capture.
516 ///
517 /// [rd]: https://renderdoc.org/docs/in_application_api.html#_CPPv415EndFrameCapture23RENDERDOC_DevicePointer22RENDERDOC_WindowHandle
518 /// [xcode]: https://developer.apple.com/documentation/metal/mtlcapturemanager
519 #[doc(alias = "stop_renderdoc_capture")]
520 #[doc(alias = "stop_xcode_capture")]
521 pub unsafe fn stop_graphics_debugger_capture(&self) {
522 unsafe { self.inner.stop_graphics_debugger_capture() }
523 }
524
525 /// Query internal counters from the native backend for debugging purposes.
526 ///
527 /// Some backends may not set all counters, or may not set any counter at all.
528 /// The `counters` cargo feature must be enabled for any counter to be set.
529 ///
530 /// If a counter is not set, it contains its default value (zero).
531 #[must_use]
532 pub fn get_internal_counters(&self) -> wgt::InternalCounters {
533 self.inner.get_internal_counters()
534 }
535
536 /// Generate a GPU memory allocation report if the underlying backend supports it.
537 ///
538 /// Backends that do not support producing these reports return `None`. A backend may
539 /// support it and still return `None` if it is not performing sub-allocation,
540 /// for example as a workaround for driver issues.
541 #[must_use]
542 pub fn generate_allocator_report(&self) -> Option<wgt::AllocatorReport> {
543 self.inner.generate_allocator_report()
544 }
545
546 /// Get the [`wgpu_hal`] device from this `Device`.
547 ///
548 /// Find the `Api` struct corresponding to the active backend in [`wgpu_hal::api`],
549 /// and pass that struct as the `A` type parameter.
550 ///
551 /// Returns a guard that dereferences to [`A::Device`], the hal device type
552 /// of the active backend.
553 ///
554 /// # Types
555 ///
556 /// The returned type depends on the backend:
557 ///
558 #[doc = crate::hal_type_vulkan!("Device")]
559 #[doc = crate::hal_type_metal!("Device")]
560 #[doc = crate::hal_type_dx12!("Device")]
561 #[doc = crate::hal_type_gles!("Device")]
562 ///
563 /// # Errors
564 ///
565 /// This method will return None if:
566 /// - The device is not from the backend specified by `A`.
567 /// - The device is from the `webgpu` or `custom` backend.
568 ///
569 /// # Safety
570 ///
571 /// - The returned resource must not be destroyed unless the guard
572 /// is the last reference to it and it is not in use by the GPU.
573 /// The guard and handle may be dropped at any time however.
574 /// - All the safety requirements of wgpu-hal must be upheld.
575 ///
576 /// [`A::Device`]: hal::Api::Device
577 #[cfg(wgpu_core)]
578 pub unsafe fn as_hal<A: hal::Api>(
579 &self,
580 ) -> Option<impl Deref<Target = A::Device> + WasmNotSendSync> {
581 let device = self.inner.as_core_opt()?;
582 unsafe { device.context.device_as_hal::<A>(device) }
583 }
584
585 /// Destroy this device.
586 pub fn destroy(&self) {
587 self.inner.destroy()
588 }
589
590 /// Set a DeviceLostCallback on this device.
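///
/// A sketch of a callback that logs why the device was lost (assuming `std` for printing):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// device.set_device_lost_callback(|reason, message| {
///     eprintln!("device lost ({reason:?}): {message}");
/// });
/// # }
/// ```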
591 pub fn set_device_lost_callback(
592 &self,
593 callback: impl Fn(DeviceLostReason, String) + Send + 'static,
594 ) {
595 self.inner.set_device_lost_callback(Box::new(callback))
596 }
597
598 /// Create a [`PipelineCache`] with initial data
599 ///
600 /// This can be passed to [`Device::create_compute_pipeline`]
601 /// and [`Device::create_render_pipeline`] either to accelerate those calls
602 /// or to add their results to the cache.
603 ///
604 /// # Safety
605 ///
606 /// If the `data` field of `desc` is set, it must have previously been returned from a call
607 /// to [`PipelineCache::get_data`][^saving]. This `data` will only be used if it came
608 /// from an adapter with the same [`util::pipeline_cache_key`].
609 /// This *is* compatible across wgpu versions, as any data format change will
610 /// be accounted for.
611 ///
612 /// It is *not* supported to bring caches from previous direct uses of backend APIs
613 /// into this method.
614 ///
615 /// # Errors
616 ///
617 /// Returns an error value if:
618 /// * the [`PIPELINE_CACHE`](wgt::Features::PIPELINE_CACHE) feature is not enabled
619 /// * this device is invalid; or
620 /// * the device is out of memory
621 ///
622 /// This method also returns an error value if:
623 /// * The `fallback` field on `desc` is false; and
624 /// * the `data` provided would not be used[^data_not_used]
625 ///
626 /// If an error value is used in subsequent calls, default caching will be used.
627 ///
628 /// [^saving]: We do recognise that saving this data to disk means this condition
629 /// is impossible to fully prove. Consider the risks for your own application in this case.
630 ///
631 /// [^data_not_used]: This data may not be used if: the data was produced by a prior
632 /// version of wgpu; was created for an incompatible adapter; or there was a GPU driver
633 /// update. In some cases, the data might not be used yet a valid cache is still returned;
634 /// this is left to the discretion of GPU drivers.
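///
/// A sketch of restoring a cache from previously saved bytes (`cache_bytes` is assumed to
/// have come from an earlier [`PipelineCache::get_data`] call on a matching adapter):
///
/// ```rust
/// # fn example(device: &wgpu::Device, cache_bytes: &[u8]) {
/// // SAFETY: `cache_bytes` was produced by `PipelineCache::get_data` on the same adapter.
/// let cache = unsafe {
///     device.create_pipeline_cache(&wgpu::PipelineCacheDescriptor {
///         label: Some("example pipeline cache"),
///         data: Some(cache_bytes),
///         fallback: true,
///     })
/// };
/// # }
/// ```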
635 #[must_use]
636 pub unsafe fn create_pipeline_cache(
637 &self,
638 desc: &PipelineCacheDescriptor<'_>,
639 ) -> PipelineCache {
640 let cache = unsafe { self.inner.create_pipeline_cache(desc) };
641 PipelineCache { inner: cache }
642 }
643}
644
645/// [`Features::EXPERIMENTAL_RAY_QUERY`] must be enabled on the device in order to call these functions.
646impl Device {
647 /// Create a bottom level acceleration structure, used inside a top level acceleration structure for ray tracing.
648 /// - `desc`: The descriptor of the acceleration structure.
649 /// - `sizes`: Size descriptor limiting what can be built into the acceleration structure.
650 ///
651 /// # Validation
652 /// If any of the following is not satisfied, a validation error is generated:
653 ///
654 /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
655 /// If `sizes` is [`BlasGeometrySizeDescriptors::Triangles`], then the following must also be satisfied:
656 /// - For every geometry descriptor (called `geo_desc` here) in `sizes.descriptors`:
657 /// - `geo_desc.vertex_format` must be within allowed formats (allowed formats for a given feature set
658 /// may be queried with [`Features::allowed_vertex_formats_for_blas`]).
659 /// - Both or neither of `geo_desc.index_format` and `geo_desc.index_count` must be provided.
660 ///
661 /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
662 /// [`Features::allowed_vertex_formats_for_blas`]: wgt::Features::allowed_vertex_formats_for_blas
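///
/// A sketch of creating a BLAS sized for a single, non-indexed triangle geometry (the
/// descriptor fields and flags here are illustrative):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// let size = wgpu::BlasTriangleGeometrySizeDescriptor {
///     vertex_format: wgpu::VertexFormat::Float32x3,
///     vertex_count: 3,
///     index_format: None,
///     index_count: None,
///     flags: wgpu::AccelerationStructureGeometryFlags::OPAQUE,
/// };
/// let blas = device.create_blas(
///     &wgpu::CreateBlasDescriptor {
///         label: Some("example blas"),
///         flags: wgpu::AccelerationStructureFlags::PREFER_FAST_TRACE,
///         update_mode: wgpu::AccelerationStructureUpdateMode::Build,
///     },
///     wgpu::BlasGeometrySizeDescriptors::Triangles { descriptors: vec![size] },
/// );
/// # }
/// ```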
663 #[must_use]
664 pub fn create_blas(
665 &self,
666 desc: &CreateBlasDescriptor<'_>,
667 sizes: BlasGeometrySizeDescriptors,
668 ) -> Blas {
669 let (handle, blas) = self.inner.create_blas(desc, sizes);
670
671 Blas {
672 inner: blas,
673 handle,
674 }
675 }
676
677 /// Create a top level acceleration structure, used for ray tracing.
678 /// - `desc`: The descriptor of the acceleration structure.
679 ///
680 /// # Validation
681 /// If any of the following is not satisfied, a validation error is generated:
682 ///
683 /// The device ***must*** have [`Features::EXPERIMENTAL_RAY_QUERY`] enabled.
684 ///
685 /// [`Features::EXPERIMENTAL_RAY_QUERY`]: wgt::Features::EXPERIMENTAL_RAY_QUERY
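///
/// A sketch of creating a TLAS with room for 16 instances (the values are illustrative):
///
/// ```rust
/// # fn example(device: &wgpu::Device) {
/// let tlas = device.create_tlas(&wgpu::CreateTlasDescriptor {
///     label: Some("example tlas"),
///     max_instances: 16,
///     flags: wgpu::AccelerationStructureFlags::PREFER_FAST_TRACE,
///     update_mode: wgpu::AccelerationStructureUpdateMode::Build,
/// });
/// # }
/// ```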
686 #[must_use]
687 pub fn create_tlas(&self, desc: &CreateTlasDescriptor<'_>) -> Tlas {
688 let tlas = self.inner.create_tlas(desc);
689
690 Tlas {
691 inner: tlas,
692 instances: vec![None; desc.max_instances as usize],
693 lowest_unmodified: 0,
694 }
695 }
696}
697
698/// Requesting a device from an [`Adapter`] failed.
699#[derive(Clone, Debug)]
700pub struct RequestDeviceError {
701 pub(crate) inner: RequestDeviceErrorKind,
702}
703#[derive(Clone, Debug)]
704pub(crate) enum RequestDeviceErrorKind {
705 /// Error from [`wgpu_core`].
706 // must match dependency cfg
707 #[cfg(wgpu_core)]
708 Core(wgc::instance::RequestDeviceError),
709
710 /// Error from web API that was called by `wgpu` to request a device.
711 ///
712 /// (This is currently never used by the webgl backend, but it could be.)
713 #[cfg(webgpu)]
714 WebGpu(String),
715}
716
717static_assertions::assert_impl_all!(RequestDeviceError: Send, Sync);
718
719impl fmt::Display for RequestDeviceError {
720 fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
721 match &self.inner {
722 #[cfg(wgpu_core)]
723 RequestDeviceErrorKind::Core(error) => error.fmt(_f),
724 #[cfg(webgpu)]
725 RequestDeviceErrorKind::WebGpu(error) => {
726 write!(_f, "{error}")
727 }
728 #[cfg(not(any(webgpu, wgpu_core)))]
729 _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
730 }
731 }
732}
733
734impl error::Error for RequestDeviceError {
735 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
736 match &self.inner {
737 #[cfg(wgpu_core)]
738 RequestDeviceErrorKind::Core(error) => error.source(),
739 #[cfg(webgpu)]
740 RequestDeviceErrorKind::WebGpu(_) => None,
741 #[cfg(not(any(webgpu, wgpu_core)))]
742 _ => unimplemented!("unknown `RequestDeviceErrorKind`"),
743 }
744 }
745}
746
747#[cfg(wgpu_core)]
748impl From<wgc::instance::RequestDeviceError> for RequestDeviceError {
749 fn from(error: wgc::instance::RequestDeviceError) -> Self {
750 Self {
751 inner: RequestDeviceErrorKind::Core(error),
752 }
753 }
754}
755
756/// The callback of [`Device::on_uncaptured_error()`].
757///
758/// It must be a function with this signature.
759pub trait UncapturedErrorHandler: Fn(Error) + Send + Sync + 'static {}
760impl<T> UncapturedErrorHandler for T where T: Fn(Error) + Send + Sync + 'static {}
761
762/// Kinds of [`Error`]s a [`Device::push_error_scope()`] may be configured to catch.
763#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd)]
764pub enum ErrorFilter {
765 /// Catch only out-of-memory errors.
766 OutOfMemory,
767 /// Catch only validation errors.
768 Validation,
769 /// Catch only internal errors.
770 Internal,
771}
772static_assertions::assert_impl_all!(ErrorFilter: Send, Sync);
773
774/// Lower level source of the error.
775///
776/// `Send + Sync` varies depending on configuration.
777#[cfg(send_sync)]
778#[cfg_attr(docsrs, doc(cfg(all())))]
779pub type ErrorSource = Box<dyn error::Error + Send + Sync + 'static>;
780/// Lower level source of the error.
781///
782/// `Send + Sync` varies depending on configuration.
783#[cfg(not(send_sync))]
784#[cfg_attr(docsrs, doc(cfg(all())))]
785pub type ErrorSource = Box<dyn error::Error + 'static>;
786
787/// Errors resulting from usage of GPU APIs.
788///
789/// By default, errors translate into panics. Depending on the backend and circumstances,
790/// errors may occur synchronously or asynchronously. When errors need to be handled, use
791/// [`Device::push_error_scope()`] or [`Device::on_uncaptured_error()`].
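///
/// For example, a handler might match on the variants like this (a sketch; the messages
/// are illustrative):
///
/// ```rust
/// # fn example(error: wgpu::Error) {
/// match error {
///     wgpu::Error::OutOfMemory { .. } => eprintln!("out of GPU memory"),
///     wgpu::Error::Validation { description, .. } => eprintln!("validation error: {description}"),
///     wgpu::Error::Internal { description, .. } => eprintln!("internal error: {description}"),
/// }
/// # }
/// ```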
792#[derive(Debug)]
793pub enum Error {
794 /// Out of memory.
795 OutOfMemory {
796 /// Lower level source of the error.
797 source: ErrorSource,
798 },
799 /// Validation error, signifying a bug in code or data provided to `wgpu`.
800 Validation {
801 /// Lower level source of the error.
802 source: ErrorSource,
803 /// Description of the validation error.
804 description: String,
805 },
806 /// Internal error. Used for signalling any failures not explicitly expected by WebGPU.
807 ///
808 /// These could be due to internal implementation or system limits being reached.
809 Internal {
810 /// Lower level source of the error.
811 source: ErrorSource,
812 /// Description of the internal GPU error.
813 description: String,
814 },
815}
816#[cfg(send_sync)]
817static_assertions::assert_impl_all!(Error: Send, Sync);
818
819impl error::Error for Error {
820 fn source(&self) -> Option<&(dyn error::Error + 'static)> {
821 match self {
822 Error::OutOfMemory { source } => Some(source.as_ref()),
823 Error::Validation { source, .. } => Some(source.as_ref()),
824 Error::Internal { source, .. } => Some(source.as_ref()),
825 }
826 }
827}
828
829impl fmt::Display for Error {
830 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
831 match self {
832 Error::OutOfMemory { .. } => f.write_str("Out of Memory"),
833 Error::Validation { description, .. } => f.write_str(description),
834 Error::Internal { description, .. } => f.write_str(description),
835 }
836 }
837}
838
839/// Guard for an error scope pushed with [`Device::push_error_scope()`].
840///
841/// Call [`pop()`] to pop the scope and get a future for the result. If
842/// the guard is dropped without being popped explicitly, the scope will still be popped,
843/// and the captured errors will be dropped.
844///
845/// This guard is neither `Send` nor `Sync`, as error scopes are handled
846/// on a per-thread basis when the `std` feature is enabled.
847///
848/// [`pop()`]: ErrorScopeGuard::pop
849#[must_use = "Error scopes must be explicitly popped to retrieve errors they catch"]
850pub struct ErrorScopeGuard {
851 device: dispatch::DispatchDevice,
852 index: u32,
853 popped: bool,
854 // Ensure the guard is !Send and !Sync
855 _phantom: PhantomData<*mut ()>,
856}
857
858static_assertions::assert_not_impl_any!(ErrorScopeGuard: Send, Sync);
859
860impl ErrorScopeGuard {
861 /// Pops the error scope.
862 ///
863 /// Returns a future which resolves to the error captured by this scope, if any.
864 /// The pop takes effect immediately; the future does not need to be awaited before doing work that is outside of this error scope.
865 pub fn pop(mut self) -> impl Future<Output = Option<Error>> + WasmNotSend {
866 self.popped = true;
867 self.device.pop_error_scope(self.index)
868 }
869}
870
871impl Drop for ErrorScopeGuard {
872 fn drop(&mut self) {
873 if !self.popped {
874 drop(self.device.pop_error_scope(self.index));
875 }
876 }
877}