use alloc::{
    borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};

use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;

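/// State threaded through the compilation of one pipeline stage: the pipeline
/// layout, plus the reflection outputs (sampler pairings, name-to-slot
/// bindings, and push-constant items) that each compiled stage fills in.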
struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    push_constant_items: &'a mut Vec<naga::back::glsl::PushConstantItem>,
    multiview_mask: Option<NonZeroU32>,
    clip_distance_count: &'a mut u32,
}

impl CompilationContext<'_> {
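    /// Records the results of GLSL reflection for one stage: buffer and
    /// texture names are mapped to linear binding slots, samplers are paired
    /// with the textures that use them, and varyings are bound to explicit
    /// locations on `program`.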
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
            }
        }

        *self.push_constant_items = reflection_info.push_constant_items;

        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
}

impl super::Device {
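    /// Wraps an existing GL texture object in a [`super::Texture`].
    ///
    /// # Safety
    ///
    /// - `name` must name a valid texture object compatible with `desc`.
    /// - If `drop_callback` is `None`, wgpu-hal assumes ownership of the
    ///   texture; otherwise it must stay valid until the callback runs.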
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

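    /// Wraps an existing GL renderbuffer object in a [`super::Texture`].
    ///
    /// # Safety
    ///
    /// - `name` must name a valid renderbuffer object compatible with `desc`.
    /// - If `drop_callback` is `None`, wgpu-hal assumes ownership of the
    ///   renderbuffer; otherwise it must stay valid until the callback runs.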
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

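    /// Compiles a single GLSL shader object, returning the driver's info log
    /// as a [`crate::PipelineError::Linkage`] on failure.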
    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {raw:?}");

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::warn!("\tCompile: {msg}");
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {msg}");
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

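    /// Translates one naga stage to GLSL (after applying pipeline-constant
    /// overrides), feeds the reflection results into `context`, and compiles
    /// the generated source.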
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_owned(),
            multiview: context
                .multiview_mask
                .map(|a| NonZeroU32::new(a.get().count_ones()).unwrap()),
        };

        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.source.module,
            &stage.module.source.info,
            Some((naga_stage, stage.entry_point)),
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        let version = gl.version();
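        // ReadZeroSkipWrite needs the capabilities of desktop GL 4.3+ in the
        // generated GLSL; older or embedded contexts fall back to unchecked
        // image loads.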
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        let policies = naga::proc::BoundsCheckPolicies {
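            // Only image loads get an explicit policy here; other accesses are
            // left to the driver's robustness guarantees.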
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
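        // The naga options are shared via the pipeline layout; clone them only
        // when this stage overrides workgroup-memory zero-initialization.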
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{output}");

        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
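        // Programs are keyed by their stages and binding layout and shared
        // between pipelines; a new program is only built on a cache miss.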
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview_mask,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut push_constant_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                push_constant_items.push(Vec::new());
                push_constant_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                push_constant_items: pc_item,
                multiview_mask,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

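        // A vertex-only pipeline still gets a trivial fragment shader attached
        // so that the program links consistently across drivers.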
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}");
            log::info!("Only vertex shader is present. Creating an empty fragment shader");
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {program:?}");

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::warn!("\tLink: {msg}");
        }

        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            unsafe { gl.use_program(Some(program)) };
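            // Without explicit binding layouts in the shader, rebind each named
            // resource to its linear slot while the program is bound.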
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {name:?} from program {program:?}");
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!("Unable to re-map shader storage block {name} to {index}");
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        for (stage_idx, stage_items) in push_constant_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.source.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "push constant item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::PushConstantDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

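        // When native draw parameters are unavailable, naga emits a uniform
        // carrying the first-instance value; record its location if present.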
        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            push_constant_descs: uniforms,
            clip_distance_count,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(PrivateCapabilities::BUFFER_ALLOCATION);

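        // With emulated mapping, MAP_WRITE buffers are backed purely by host
        // memory: no GL buffer object is created at all.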
        if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
                offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
            });
        }

        let gl = &self.shared.context.lock();

        let is_host_visible = desc
            .usage
            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(wgt::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
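            // Query results are written into the buffer with glBufferSubData-style
            // updates, which immutable storage only allows with DYNAMIC_STORAGE_BIT.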
            if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(wgt::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
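                // Device-local only: the usage hint is advisory, and
                // DYNAMIC_DRAW is a safe default for GL-side writes.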
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

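        // Non-coherent writable mappings must be flushed by hand; see
        // `flush_mapped_ranges`.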
        if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.map_or(0, |buf| buf.0.get());
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
            Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        self.counters.buffers.add(1);

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
            offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
        })
    }

    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }

        self.counters.buffers.sub(1);
    }

    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }

    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
                *lock(&buffer.offset_of_current_mapping) = 0;
            }
        }
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
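        // Nothing to do here: reads go through the shadow allocation or a
        // fresh `map_buffer_range`, so no cached range needs invalidation.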
    }

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ
            | wgt::TextureUses::TRANSIENT;
        let format_desc = self.shared.describe_texture_format(desc.format);

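        // A 2D, single-layer texture that is only ever a render target can
        // live in a renderbuffer, which is cheaper than a sampleable texture.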
        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
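            // Note: integer and non-filterable float formats reject the
            // default LINEAR filtering, so switch them to NEAREST before
            // defining the storage.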
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }

    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

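        // Dropping the guard explicitly runs the drop callback for externally
        // owned textures.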
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }

    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }

    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        if desc.anisotropy_clamp != 1 {
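            // A clamp of 1 means anisotropic filtering is disabled; only write
            // the parameter for larger values.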
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        if let Some(compare) = desc.compare {
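            // Comparison samplers need COMPARE_MODE enabled in addition to the
            // mapped compare function.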
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }

    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Queue>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        self.counters.command_encoders.add(1);

        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
            counters: Arc::clone(&self.counters),
        })
    }

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        self.counters.bind_group_layouts.add(1);
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }

    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
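        // Always write gl_PointSize: drivers ignore it for non-point
        // topologies, while some GLES drivers require it for point lists.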
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                    wgt::BindingType::ExternalTexture => unimplemented!(),
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            });
        }

        self.counters.pipeline_layouts.add(1);

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }

    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
        self.counters.pipeline_layouts.sub(1);
    }

    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<
            super::BindGroupLayout,
            super::Buffer,
            super::Sampler,
            super::TextureView,
            super::AccelerationStructure,
        >,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

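        // Pair each bind group entry with its layout entry by binding slot.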
        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
            let layout = desc
                .layout
                .entries
                .iter()
                .find(|layout_entry| layout_entry.binding == entry.binding)
                .expect("internal error: no layout entry found with binding slot");
            (entry, layout)
        });
        for (entry, layout) in layout_and_entry_iter {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { view_dimension, .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    if view.array_layers.start != 0 {
                        log::error!(
                            "Unable to create a sampled texture binding for non-zero array layer.\n{}",
                            "This is an implementation problem of wgpu-hal/gles backend."
                        )
                    }
                    let (raw, target) = view.inner.as_native();

                    super::Texture::log_failing_target_heuristics(view_dimension, target);

                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                        mip_levels: view.mip_levels.clone(),
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                wgt::BindingType::ExternalTexture => unimplemented!(),
            };
            contents.push(binding);
        }

        self.counters.bind_groups.add(1);

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }

    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
        self.counters.bind_groups.sub(1);
    }

    unsafe fn create_shader_module(
        &self,
        desc: &crate::ShaderModuleDescriptor,
        shader: crate::ShaderInput,
    ) -> Result<super::ShaderModule, crate::ShaderError> {
        self.counters.shader_modules.add(1);

        Ok(super::ShaderModule {
            source: match shader {
                crate::ShaderInput::Naga(naga) => naga,
                crate::ShaderInput::Glsl { .. } => unimplemented!(),
                crate::ShaderInput::SpirV(_)
                | crate::ShaderInput::Msl { .. }
                | crate::ShaderInput::Dxil { .. }
                | crate::ShaderInput::Hlsl { .. } => {
                    unreachable!()
                }
            },
            label: desc.label.map(|str| str.to_string()),
            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
        })
    }

    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }

    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let (vertex_stage, vertex_buffers) = match &desc.vertex_processor {
            crate::VertexProcessor::Standard {
                vertex_buffers,
                ref vertex_stage,
            } => (vertex_stage, vertex_buffers),
            crate::VertexProcessor::Mesh { .. } => unreachable!(),
        };
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner = unsafe {
            self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview_mask)
        }?;

        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in vertex_buffers.iter().enumerate() {
                buffers.push(super::VertexBufferDesc {
                    step: vb_layout.step_mode,
                    stride: vb_layout.array_stride as u32,
                });
                for vat in vb_layout.attributes.iter() {
                    let format_desc = conv::describe_vertex_format(vat.format);
                    attributes.push(super::AttributeDesc {
                        location: vat.shader_location,
                        offset: vat.offset as u32,
                        buffer_index: index as u32,
                        format_desc,
                    });
                }
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }

    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
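        // If only two strong references remain, they are `pipeline` itself and
        // the entry in `program_cache`, so the program can be evicted and
        // deleted.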
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }

    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
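        // Same reasoning as in `destroy_render_pipeline`: two remaining strong
        // references mean `pipeline` plus the cache entry.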
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }

    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
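        // The GLES backend has no persistent pipeline cache; return the unit
        // cache so callers can proceed.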
        Ok(super::PipelineCache)
    }
    unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

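            // The GL target is not fixed per query here; `QuerySet::target`
            // below is applied when a query is begun or a timestamp is written.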
            queries.push(query);
        }

        self.counters.query_sets.add(1);

        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }

    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }

    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }

    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }

    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout: Option<core::time::Duration>,
    ) -> Result<bool, crate::DeviceError> {
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
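        // Browsers clamp `client_wait_sync` timeouts (MAX_CLIENT_WAIT_TIMEOUT_WEBGL
        // is typically 0), so on WebGL/Emscripten we can only poll.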
1578 let timeout_ns = if cfg!(any(webgl, Emscripten)) {
1583 0
1584 } else {
1585 timeout
1586 .map(|t| t.as_nanos().min(u32::MAX as u128) as u32)
1587 .unwrap_or(u32::MAX)
1588 };
1589 fence.wait(gl, wait_value, timeout_ns)
1590 }
1591
1592 unsafe fn start_graphics_debugger_capture(&self) -> bool {
1593 #[cfg(all(native, feature = "renderdoc"))]
1594 return unsafe {
1595 self.render_doc
1596 .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
1597 };
1598 #[allow(unreachable_code)]
1599 false
1600 }
1601 unsafe fn stop_graphics_debugger_capture(&self) {
1602 #[cfg(all(native, feature = "renderdoc"))]
1603 unsafe {
1604 self.render_doc
1605 .end_frame_capture(ptr::null_mut(), ptr::null_mut())
1606 }
1607 }
1608 unsafe fn create_acceleration_structure(
1609 &self,
1610 _desc: &crate::AccelerationStructureDescriptor,
1611 ) -> Result<super::AccelerationStructure, crate::DeviceError> {
1612 unimplemented!()
1613 }
1614 unsafe fn get_acceleration_structure_build_sizes<'a>(
1615 &self,
1616 _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
1617 ) -> crate::AccelerationStructureBuildSizes {
1618 unimplemented!()
1619 }
1620 unsafe fn get_acceleration_structure_device_address(
1621 &self,
1622 _acceleration_structure: &super::AccelerationStructure,
1623 ) -> wgt::BufferAddress {
1624 unimplemented!()
1625 }
1626 unsafe fn destroy_acceleration_structure(
1627 &self,
1628 _acceleration_structure: super::AccelerationStructure,
1629 ) {
1630 }
1631
1632 fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
1633 unimplemented!()
1634 }
1635
1636 fn get_internal_counters(&self) -> wgt::HalCounters {
1637 self.counters.as_ref().clone()
1638 }
1639
1640 fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
1641 Ok(())
1642 }
1643}
1644
1645#[cfg(send_sync)]
1646unsafe impl Sync for super::Device {}
1647#[cfg(send_sync)]
1648unsafe impl Send for super::Device {}