1use alloc::{
2 borrow::{Cow, ToOwned},
3 format,
4 string::String,
5 string::ToString as _,
6 sync::Arc,
7 vec,
8 vec::Vec,
9};
10use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};
11
12use arrayvec::ArrayVec;
13use glow::HasContext;
14use naga::FastHashMap;
15
16use super::{conv, lock, MaybeMutex, PrivateCapabilities};
17use crate::auxil::map_naga_stage;
18use crate::TlasInstance;
19
/// One pipeline stage: the naga stage kind paired with the user-supplied
/// programmable-stage description it came from.
type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
/// Maps a GLSL uniform/block name to the binding register kind and the
/// linear slot it should be bound to.
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;
25
/// Mutable state threaded through the compilation of a single shader stage.
///
/// One context is built per stage in `create_program`; the `&mut` fields
/// accumulate reflection results across all stages into pipeline-wide tables.
struct CompilationContext<'a> {
    /// Pipeline layout used to resolve `(group, binding)` pairs to linear slots.
    layout: &'a super::PipelineLayout,
    /// Per-texture-slot sampler assignment discovered from combined image/samplers.
    sampler_map: &'a mut super::SamplerBindMap,
    /// Accumulated GLSL name -> (register, slot) bindings.
    name_binding_map: &'a mut NameBindingMap,
    /// Immediate data items reported by the GLSL backend for this stage.
    immediates_items: &'a mut Vec<naga::back::glsl::ImmediateItem>,
    /// Multiview view mask, if multiview rendering is enabled.
    multiview_mask: Option<NonZeroU32>,
    /// Output: number of clip distances written by the vertex stage.
    clip_distance_count: &'a mut u32,
}
34
35impl CompilationContext<'_> {
    /// Applies the reflection data produced by the GLSL backend for one shader
    /// stage to the pipeline-wide tables held by this context.
    ///
    /// Records buffer and texture name->slot mappings, associates samplers with
    /// the textures they are combined with, pins vertex-attribute and
    /// fragment-output locations on `program` (before the caller links it),
    /// and captures the stage's immediate items and, for vertex shaders, the
    /// clip-distance count.
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        // Record uniform/storage-buffer bindings for globals the entry point uses.
        for (handle, var) in module.global_variables.iter() {
            // Skip globals the entry point never touches.
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                // Other address spaces (handles, private, etc.) are not buffers.
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            // The backend may omit names for variables it eliminated.
            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        // Record texture/image bindings, and remember which sampler slot each
        // combined texture is paired with.
        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        // Bind attribute / fragment-output locations; effective once the
        // caller links the program.
        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                // The GLES backend never produces these stages.
                naga::ShaderStage::Task
                | naga::ShaderStage::Mesh
                | naga::ShaderStage::RayGeneration
                | naga::ShaderStage::AnyHit
                | naga::ShaderStage::ClosestHit
                | naga::ShaderStage::Miss => unreachable!(),
            }
        }

        *self.immediates_items = reflection_info.immediates_items;

        // Only the vertex stage reports clip distances.
        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
123}
124
125impl super::Device {
    /// Wraps an externally created GL texture object as a [`super::Texture`].
    ///
    /// If `drop_callback` is `None`, the device takes ownership and will
    /// delete the texture in `destroy_texture`; otherwise the callback is
    /// invoked instead and the GL object is left alive.
    ///
    /// # Safety
    ///
    /// `name` must be a valid texture object in this device's GL context
    /// whose storage matches `desc`. (NOTE(review): inferred from usage —
    /// confirm against callers.)
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }
152
    /// Wraps an externally created GL renderbuffer object as a
    /// [`super::Texture`].
    ///
    /// Ownership semantics are the same as [`Self::texture_from_raw`]: with no
    /// `drop_callback` the renderbuffer is deleted in `destroy_texture`.
    ///
    /// # Safety
    ///
    /// `name` must be a valid renderbuffer object in this device's GL context
    /// whose storage matches `desc`. (NOTE(review): inferred from usage —
    /// confirm against callers.)
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }
178
    /// Compiles one GLSL shader of the given stage.
    ///
    /// Returns the GL shader object on success, or `PipelineError::Linkage`
    /// carrying the driver's info log on failure. The shader object is
    /// deleted before returning an error, so nothing leaks.
    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            // These stages are never produced by the GLES backend.
            naga::ShaderStage::Task
            | naga::ShaderStage::Mesh
            | naga::ShaderStage::RayGeneration
            | naga::ShaderStage::AnyHit
            | naga::ShaderStage::ClosestHit
            | naga::ShaderStage::Miss => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        // Label the object so it shows up named in GL debugging tools.
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {raw:?}");

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            // Drivers may emit warnings even on successful compilation.
            if !msg.is_empty() {
                log::debug!("\tCompile message: {msg}");
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {msg}");
            // Release the shader object before reporting the error.
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }
225
    /// Produces GLSL source for one stage (via naga, or verbatim for
    /// passthrough modules), records its reflection data through `context`,
    /// and compiles it into a GL shader object.
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let source = 'outer: {
            use naga::back::glsl;
            let pipeline_options = glsl::PipelineOptions {
                shader_stage: naga_stage,
                entry_point: stage.entry_point.to_owned(),
                // The backend takes a view count: count the set bits of the
                // view mask (non-zero whenever the mask is non-zero).
                multiview: context
                    .multiview_mask
                    .map(|a| NonZeroU32::new(a.get().count_ones()).unwrap()),
            };

            let naga = match stage.module.source {
                super::ShaderModuleSource::Naga(ref naga) => naga,
                // Passthrough modules are already GLSL; skip naga entirely.
                super::ShaderModuleSource::Passthrough { ref source } => {
                    break 'outer Cow::Borrowed(source);
                }
            };

            // Bake pipeline-overridable constants into the module.
            let (module, info) = naga::back::pipeline_constants::process_overrides(
                &naga.module,
                &naga.info,
                Some((naga_stage, stage.entry_point)),
                stage.constants,
            )
            .map_err(|e| {
                let msg = format!("{e}");
                crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
            })?;

            let entry_point_index = module
                .entry_points
                .iter()
                .position(|ep| ep.name.as_str() == stage.entry_point)
                .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

            use naga::proc::BoundsCheckPolicy;
            let version = gl.version();
            // Image-load bounds checking is only enabled on desktop GL 4.3+;
            // everywhere else it stays unchecked.
            let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
                BoundsCheckPolicy::ReadZeroSkipWrite
            } else {
                BoundsCheckPolicy::Unchecked
            };

            // All other accesses are left unchecked here.
            let policies = naga::proc::BoundsCheckPolicies {
                index: BoundsCheckPolicy::Unchecked,
                buffer: BoundsCheckPolicy::Unchecked,
                image_load: image_check,
                binding_array: BoundsCheckPolicy::Unchecked,
            };

            let mut output = String::new();
            // Clone the layout's writer options only if this stage's
            // workgroup-memory zeroing setting differs from them.
            let needs_temp_options = stage.zero_initialize_workgroup_memory
                != context.layout.naga_options.zero_initialize_workgroup_memory;
            let mut temp_options;
            let naga_options = if needs_temp_options {
                temp_options = context.layout.naga_options.clone();
                temp_options.zero_initialize_workgroup_memory =
                    stage.zero_initialize_workgroup_memory;
                &temp_options
            } else {
                &context.layout.naga_options
            };
            let mut writer = glsl::Writer::new(
                &mut output,
                &module,
                &info,
                naga_options,
                &pipeline_options,
                policies,
            )
            .map_err(|e| {
                let msg = format!("{e}");
                crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
            })?;

            let reflection_info = writer.write().map_err(|e| {
                let msg = format!("{e}");
                crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
            })?;

            log::debug!("Naga generated shader:\n{output}");

            // Feed the reflection results into the pipeline-wide tables.
            context.consume_reflection(
                gl,
                &module,
                info.get_entry_point(entry_point_index),
                reflection_info,
                naga_stage,
                program,
            );
            Cow::Owned(output)
        };

        unsafe { Self::compile_shader(gl, &source, naga_stage, stage.module.label.as_deref()) }
    }
331
    /// Looks up or builds the linked GL program for the given set of stages.
    ///
    /// Programs are cached in `shared.program_cache`, keyed by the stages
    /// (module id, entry point, workgroup-zeroing flag, constant hash) and
    /// the layout's binding-to-slot tables; `create_program` is only invoked
    /// on a cache miss.
    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let group_to_binding_to_slot = layout
            .group_infos
            .iter()
            .map(|group| group.as_ref().map(|group| group.binding_to_slot.clone()))
            .collect::<Vec<_>>();
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
                constant_hash: Self::create_constant_hash(stage),
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        // The cache stores the whole Result, so failed compilations are also
        // cached and cloned back out here.
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview_mask,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }
383
384 fn create_constant_hash(stage: &crate::ProgrammableStage<super::ShaderModule>) -> Vec<u8> {
385 let mut buf: Vec<u8> = Vec::new();
386
387 for (key, value) in stage.constants.iter() {
388 buf.extend_from_slice(key.as_bytes());
389 buf.extend_from_slice(&value.to_ne_bytes());
390 }
391
392 buf
393 }
394
    /// Compiles every stage, links them into a GL program, and resolves all
    /// bindings into a [`super::PipelineInner`].
    ///
    /// Steps: compile each stage (gathering reflection into shared tables),
    /// add a dummy fragment shader for vertex-only pipelines, link, then —
    /// on drivers without explicit binding layout support — rebind uniform
    /// blocks and texture units by name. Finally resolves immediate-data
    /// uniform locations and the first-instance uniform.
    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        // Render the version into the string used for the dummy shader below.
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        // Tables filled in by each stage's reflection pass.
        let mut name_binding_map = NameBindingMap::default();
        let mut immediates_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            // One immediates list per stage, index-aligned with `shaders`.
            let pc_item = {
                immediates_items.push(Vec::new());
                immediates_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                immediates_items: pc_item,
                multiview_mask,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // A program needs both vertex and fragment stages to link; supply an
        // empty fragment shader for vertex-only pipelines.
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}",);
            log::debug!("Only vertex shader is present. Creating an empty fragment shader",);
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        // Shader objects can be deleted once the program is linked.
        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {program:?}");

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::debug!("\tLink message: {msg}");
        }

        // Without explicit binding layouts in the shader, rebind everything
        // by name after linking.
        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // `uniform_1_i32` below operates on the currently bound program.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {name:?} from program {program:?}");
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        // Storage blocks cannot be rebound at runtime here.
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!("Unable to re-map shader storage block {name} to {index}");
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        // Resolve a uniform location for every immediate item of every stage.
        for (stage_idx, stage_items) in immediates_items.into_iter().enumerate() {
            for item in stage_items {
                let source = &shaders[stage_idx].1.module.source;
                let super::ShaderModuleSource::Naga(naga_module) = source else {
                    unreachable!("Passthrough shaders don't currently support immediates on GLES");
                };
                let type_inner = &naga_module.module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "immediate data item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                // Items optimized away by the driver have no location.
                if let Some(location) = location {
                    uniforms.push(super::ImmediateDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        // The vertex stage may reference the synthetic first-instance uniform.
        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            immediates_descs: uniforms,
            clip_distance_count,
        }))
    }
552}
553
554impl crate::Device for super::Device {
555 type A = super::Api;
556
557 unsafe fn create_buffer(
558 &self,
559 desc: &crate::BufferDescriptor,
560 ) -> Result<super::Buffer, crate::DeviceError> {
561 let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
562 glow::ELEMENT_ARRAY_BUFFER
563 } else {
564 glow::ARRAY_BUFFER
565 };
566
567 let emulate_map = self
568 .shared
569 .workarounds
570 .contains(super::Workarounds::EMULATE_BUFFER_MAP)
571 || !self
572 .shared
573 .private_caps
574 .contains(PrivateCapabilities::BUFFER_ALLOCATION);
575
576 if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
577 return Ok(super::Buffer {
578 raw: None,
579 target,
580 size: desc.size,
581 map_flags: 0,
582 mapped: false.into(),
583 data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
584 offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
585 });
586 }
587
588 let gl = &self.shared.context.lock();
589
590 let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
591 glow::ELEMENT_ARRAY_BUFFER
592 } else {
593 glow::ARRAY_BUFFER
594 };
595
596 let is_host_visible = desc
597 .usage
598 .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
599 let is_coherent = desc
600 .memory_flags
601 .contains(crate::MemoryFlags::PREFER_COHERENT);
602
603 let mut map_flags = 0;
604 if desc.usage.contains(wgt::BufferUses::MAP_READ) {
605 map_flags |= glow::MAP_READ_BIT;
606 }
607 if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
608 map_flags |= glow::MAP_WRITE_BIT;
609 }
610
611 let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
612 unsafe { gl.bind_buffer(target, raw) };
613 let raw_size = desc
614 .size
615 .try_into()
616 .map_err(|_| crate::DeviceError::OutOfMemory)?;
617
618 if self
619 .shared
620 .private_caps
621 .contains(PrivateCapabilities::BUFFER_ALLOCATION)
622 {
623 if is_host_visible {
624 map_flags |= glow::MAP_PERSISTENT_BIT;
625 if is_coherent {
626 map_flags |= glow::MAP_COHERENT_BIT;
627 }
628 }
629 if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
631 map_flags |= glow::DYNAMIC_STORAGE_BIT;
632 }
633 unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
634 } else {
635 assert!(!is_coherent);
636 let usage = if is_host_visible {
637 if desc.usage.contains(wgt::BufferUses::MAP_READ) {
638 glow::STREAM_READ
639 } else {
640 glow::DYNAMIC_DRAW
641 }
642 } else {
643 glow::DYNAMIC_DRAW
647 };
648 unsafe { gl.buffer_data_size(target, raw_size, usage) };
649 }
650
651 unsafe { gl.bind_buffer(target, None) };
652
653 if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
654 map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
655 }
656 #[cfg(native)]
659 if let Some(label) = desc.label {
660 if self
661 .shared
662 .private_caps
663 .contains(PrivateCapabilities::DEBUG_FNS)
664 {
665 let name = raw.map_or(0, |buf| buf.0.get());
666 unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
667 }
668 }
669
670 let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
671 Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
672 } else {
673 None
674 };
675
676 self.counters.buffers.add(1);
677
678 Ok(super::Buffer {
679 raw,
680 target,
681 size: desc.size,
682 mapped: false.into(),
683 map_flags,
684 data,
685 offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
686 })
687 }
688
689 unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
690 if let Some(raw) = buffer.raw {
691 let gl = &self.shared.context.lock();
692 unsafe { gl.delete_buffer(raw) };
693 }
694
695 self.counters.buffers.sub(1);
696 }
697
    /// Accounts for a buffer created outside of `create_buffer`; only the
    /// internal counter is touched.
    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }
701
    /// Maps `range` of the buffer, returning a host pointer.
    ///
    /// Three cases:
    /// - no GL object (`raw == None`): point directly into the host-side
    ///   allocation;
    /// - emulated `MAP_READ` (`raw` and `data` both present): copy the GL
    ///   buffer contents into the host shadow and point into that;
    /// - real GL mapping: `glMapBufferRange` with the buffer's map flags.
    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                // Purely host-backed buffer: hand out a pointer into its data.
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    // Emulated MAP_READ: refresh the whole host shadow copy.
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    // Real GL mapping; remember the offset so flushes can be
                    // made relative to the mapping start.
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    let range_start: i32 = range
                        .start
                        .try_into()
                        .expect("Buffer range invalid for GLES");
                    let range_length: i32 = (range.end - range.start)
                        .try_into()
                        .expect("Buffer range invalid for GLES");
                    if range_length != 0 {
                        buffer.mapped.set(true);
                        unsafe {
                            gl.map_buffer_range(
                                buffer.target,
                                range_start,
                                range_length,
                                buffer.map_flags,
                            )
                        }
                    } else {
                        // Zero-length maps get a dangling (but non-null) pointer.
                        ptr::dangling_mut()
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
757 unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
758 if buffer.mapped.replace(false) {
759 if let Some(raw) = buffer.raw {
760 if buffer.data.is_none() {
761 let gl = &self.shared.context.lock();
762 unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
763 unsafe { gl.unmap_buffer(buffer.target) };
764 unsafe { gl.bind_buffer(buffer.target, None) };
765 *lock(&buffer.offset_of_current_mapping) = 0;
766 }
767 }
768 }
769 }
    /// Flushes written ranges of a non-coherent, explicitly-flushed mapping.
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if buffer.mapped.get() {
            if let Some(raw) = buffer.raw {
                // Buffers with a host shadow (`data`) were mapped from the
                // shadow copy, not through GL, so only real GL mappings need
                // an explicit flush.
                if buffer.data.is_none() {
                    let gl = &self.shared.context.lock();
                    unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                    for range in ranges {
                        // `glFlushMappedBufferRange` offsets are relative to
                        // the start of the current mapping, not the buffer.
                        let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                        unsafe {
                            gl.flush_mapped_buffer_range(
                                buffer.target,
                                (range.start - offset_of_current_mapping) as i32,
                                (range.end - range.start) as i32,
                            )
                        };
                    }
                }
            }
        }
    }
    /// Intentionally a no-op: `map_buffer` always produces fresh data (it
    /// copies GL contents into the host shadow, or maps through GL directly),
    /// so there is nothing to invalidate here. (NOTE(review): relies on the
    /// map-time refresh above — confirm if mapping behavior changes.)
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
    }
796
    /// Creates a texture, backed either by a renderbuffer (for plain 2D
    /// single-layer render targets) or a full texture object.
    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        // Usages that a renderbuffer can satisfy (attachment-only, never
        // sampled or used as storage).
        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ
            | wgt::TextureUses::TRANSIENT;
        let format_desc = self.shared.describe_texture_format(desc.format);

        // Use a renderbuffer when the texture is only ever a 2D single-layer
        // attachment; otherwise allocate a real texture object.
        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
            // Non-filterable formats must not use the (default) linear
            // filtering; force nearest.
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    // Prefer immutable storage; otherwise allocate every mip
                    // level manually with `tex_image_3d`.
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        // 3D textures halve depth per mip as well.
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        // Array textures keep their layer count across mips.
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    // Same immutable-vs-manual split for non-layered targets.
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        // Cube maps allocate each face of each mip separately.
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }
1032
    /// Destroys a texture, deleting the GL object only when wgpu-hal owns it
    /// (no drop guard was attached by `texture_from_raw*`).
    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                // Framebuffer 0 and external framebuffers are not ours to delete.
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

        // Runs the user-supplied drop callback, if one was attached.
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }
1057
    /// Accounts for a texture created outside of `create_texture`; only the
    /// internal counter is touched.
    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }
1061
    /// Creates a texture view. On GLES views are lightweight: they share the
    /// texture's inner handle and just record the selected aspect/mip/layer
    /// ranges. (NOTE(review): the view keeps `texture.format`, so format
    /// reinterpretation via `desc` is not applied here — confirm intended.)
    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }
1077
    /// Views own no GL objects (they share the texture's), so only the
    /// counter is updated.
    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }
1081
    /// Creates a GL sampler object configured from the descriptor: filtering,
    /// wrap modes, border color, LOD clamp, anisotropy, and comparison mode.
    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        // The mipmap filter is folded into the GL min filter.
        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        // Address modes: [0] = u/S, [1] = v/T, [2] = w/R.
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        // 1 is the default (no anisotropy); only set the parameter otherwise.
        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        // Comparison samplers (e.g. shadow sampling).
        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }
1178
    /// Deletes the GL sampler object owned by `sampler`.
    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }
1184
1185 unsafe fn create_command_encoder(
1186 &self,
1187 _desc: &crate::CommandEncoderDescriptor<super::Queue>,
1188 ) -> Result<super::CommandEncoder, crate::DeviceError> {
1189 self.counters.command_encoders.add(1);
1190
1191 Ok(super::CommandEncoder {
1192 cmd_buffer: super::CommandBuffer::default(),
1193 state: Default::default(),
1194 private_caps: self.shared.private_caps,
1195 counters: Arc::clone(&self.counters),
1196 })
1197 }
1198
1199 unsafe fn create_bind_group_layout(
1200 &self,
1201 desc: &crate::BindGroupLayoutDescriptor,
1202 ) -> Result<super::BindGroupLayout, crate::DeviceError> {
1203 self.counters.bind_group_layouts.add(1);
1204 Ok(super::BindGroupLayout {
1205 entries: Arc::from(desc.entries),
1206 })
1207 }
1208
    /// Destroys a bind group layout; only updates the resource counter,
    /// the `Arc`'d entries are freed when the last reference is dropped.
    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }
1212
    /// Creates a pipeline layout, assigning flat GL binding slots.
    ///
    /// GL has no bind-group concept: every (group, binding) pair is assigned
    /// a running per-register-kind counter (samplers, textures, images,
    /// uniform buffers, storage buffers). The result feeds both naga's GLSL
    /// `binding_map` and the per-group `binding_to_slot` tables.
    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        // Running slot counters, one per register kind.
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        // GLSL writer flags, toggled based on driver capabilities.
        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            let Some(bg_layout) = bg_layout else {
                // Hole in the layout: record `None` to keep group indices aligned.
                group_infos.push(None);
                continue;
            };

            // Indexed by binding number; `!0` marks unused binding numbers.
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                // Pick the counter for this entry's register kind.
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                    wgt::BindingType::ExternalTexture => unimplemented!(),
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                // Arrayed bindings consume one slot per element.
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(Some(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            }));
        }

        self.counters.pipeline_layouts.add(1);

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }
1306
    /// Destroys a pipeline layout; nothing GL-side to release, only the
    /// resource counter is updated.
    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
        self.counters.pipeline_layouts.sub(1);
    }
1310
    /// Creates a bind group by resolving each entry against its layout entry
    /// and recording the raw GL data needed at bind time.
    ///
    /// The layout entry's type (not the resource itself) decides how
    /// `entry.resource_index` is interpreted.
    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<
            super::BindGroupLayout,
            super::Buffer,
            super::Sampler,
            super::TextureView,
            super::AccelerationStructure,
        >,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

        // Pair every bind group entry with its matching layout entry by
        // binding number.
        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
            let layout = desc
                .layout
                .entries
                .iter()
                .find(|layout_entry| layout_entry.binding == entry.binding)
                .expect("internal error: no layout entry found with binding slot");
            (entry, layout)
        });
        for (entry, layout) in layout_and_entry_iter {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        // A missing size means "bind to the end of the buffer".
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { view_dimension, .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    // Sampled bindings of non-zero base layers are not
                    // supported by this backend; log rather than fail.
                    if view.array_layers.start != 0 {
                        log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
                            "This is an implementation problem of wgpu-hal/gles backend.")
                    }
                    let (raw, target) = view.inner.as_native();

                    super::Texture::log_failing_target_heuristics(view_dimension, target);

                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                        mip_levels: view.mip_levels.clone(),
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
                        // Array dimensions bind the whole layer range
                        // (`None`); other dimensions bind a single layer.
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                wgt::BindingType::ExternalTexture => unimplemented!(),
            };
            contents.push(binding);
        }

        self.counters.bind_groups.add(1);

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }
1398
    /// Destroys a bind group; no GL objects are owned, only the counter
    /// is updated.
    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
        self.counters.bind_groups.sub(1);
    }
1402
1403 unsafe fn create_shader_module(
1404 &self,
1405 desc: &crate::ShaderModuleDescriptor,
1406 shader: crate::ShaderInput,
1407 ) -> Result<super::ShaderModule, crate::ShaderError> {
1408 self.counters.shader_modules.add(1);
1409
1410 Ok(super::ShaderModule {
1411 source: match shader {
1412 crate::ShaderInput::Naga(naga) => super::ShaderModuleSource::Naga(naga),
1413 crate::ShaderInput::Glsl { shader, .. } => super::ShaderModuleSource::Passthrough {
1415 source: shader.to_owned(),
1416 },
1417 crate::ShaderInput::SpirV(_)
1418 | crate::ShaderInput::MetalLib { .. }
1419 | crate::ShaderInput::Msl { .. }
1420 | crate::ShaderInput::Dxil { .. }
1421 | crate::ShaderInput::Hlsl { .. } => {
1422 unreachable!()
1423 }
1424 },
1425 label: desc.label.map(|str| str.to_string()),
1426 id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
1427 })
1428 }
1429
    /// Destroys a shader module; the source is simply dropped, only the
    /// counter is updated.
    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }
1433
    /// Creates a render pipeline: links the vertex/fragment program, then
    /// lowers vertex buffer layouts, color targets and depth/stencil state
    /// into the backend's own descriptors.
    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        // Mesh shading is not supported by this backend.
        let (vertex_stage, vertex_buffers) = match &desc.vertex_processor {
            crate::VertexProcessor::Standard {
                vertex_buffers,
                ref vertex_stage,
            } => (vertex_stage, vertex_buffers),
            crate::VertexProcessor::Mesh { .. } => unreachable!(),
        };
        let gl = &self.shared.context.lock();
        // Vertex stage always present; fragment stage is optional.
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner = unsafe {
            self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview_mask)
        }?;

        // Flatten vertex buffer layouts into per-buffer strides and a single
        // attribute list; `None` marks an unused buffer slot.
        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in vertex_buffers.iter().enumerate() {
                let vb_desc = if let Some(vb_layout) = vb_layout {
                    for vat in vb_layout.attributes.iter() {
                        let format_desc = conv::describe_vertex_format(vat.format);
                        attributes.push(super::AttributeDesc {
                            location: vat.shader_location,
                            offset: vat.offset as u32,
                            buffer_index: index as u32,
                            format_desc,
                        });
                    }
                    Some(super::VertexBufferDesc {
                        step: vb_layout.step_mode,
                        stride: vb_layout.array_stride as u32,
                    })
                } else {
                    None
                };
                buffers.push(vb_desc);
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        // Only the present (`Some`) color targets are kept.
        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare.unwrap_or_default()),
                mask: ds.depth_write_enabled.unwrap_or_default(),
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }
1522
1523 unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
1524 if Arc::strong_count(&pipeline.inner) == 2 {
1529 let gl = &self.shared.context.lock();
1530 let mut program_cache = self.shared.program_cache.lock();
1531 program_cache.retain(|_, v| match *v {
1532 Ok(ref p) => p.program != pipeline.inner.program,
1533 Err(_) => false,
1534 });
1535 unsafe { gl.delete_program(pipeline.inner.program) };
1536 }
1537
1538 self.counters.render_pipelines.sub(1);
1539 }
1540
1541 unsafe fn create_compute_pipeline(
1542 &self,
1543 desc: &crate::ComputePipelineDescriptor<
1544 super::PipelineLayout,
1545 super::ShaderModule,
1546 super::PipelineCache,
1547 >,
1548 ) -> Result<super::ComputePipeline, crate::PipelineError> {
1549 let gl = &self.shared.context.lock();
1550 let mut shaders = ArrayVec::new();
1551 shaders.push((naga::ShaderStage::Compute, &desc.stage));
1552 let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;
1553
1554 self.counters.compute_pipelines.add(1);
1555
1556 Ok(super::ComputePipeline { inner })
1557 }
1558
1559 unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
1560 if Arc::strong_count(&pipeline.inner) == 2 {
1565 let gl = &self.shared.context.lock();
1566 let mut program_cache = self.shared.program_cache.lock();
1567 program_cache.retain(|_, v| match *v {
1568 Ok(ref p) => p.program != pipeline.inner.program,
1569 Err(_) => false,
1570 });
1571 unsafe { gl.delete_program(pipeline.inner.program) };
1572 }
1573
1574 self.counters.compute_pipelines.sub(1);
1575 }
1576
    /// Pipeline caches are a no-op in this backend; a unit value is returned.
    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
        Ok(super::PipelineCache)
    }
1585 unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}
1586
1587 #[cfg_attr(target_arch = "wasm32", allow(unused))]
1588 unsafe fn create_query_set(
1589 &self,
1590 desc: &wgt::QuerySetDescriptor<crate::Label>,
1591 ) -> Result<super::QuerySet, crate::DeviceError> {
1592 let gl = &self.shared.context.lock();
1593
1594 let mut queries = Vec::with_capacity(desc.count as usize);
1595 for _ in 0..desc.count {
1596 let query =
1597 unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;
1598
1599 queries.push(query);
1606 }
1607
1608 self.counters.query_sets.add(1);
1609
1610 Ok(super::QuerySet {
1611 queries: queries.into_boxed_slice(),
1612 target: match desc.ty {
1613 wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
1614 wgt::QueryType::Timestamp => glow::TIMESTAMP,
1615 _ => unimplemented!(),
1616 },
1617 })
1618 }
1619
1620 unsafe fn destroy_query_set(&self, set: super::QuerySet) {
1621 let gl = &self.shared.context.lock();
1622 for &query in set.queries.iter() {
1623 unsafe { gl.delete_query(query) };
1624 }
1625 self.counters.query_sets.sub(1);
1626 }
1627
    /// Creates a fence; the backing mechanism is chosen from the shared
    /// backend options.
    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }
1632
    /// Destroys a fence, releasing any GL sync objects it holds.
    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }
1638
    /// Returns the latest value the fence has been signaled to, polling the
    /// GL context for completed sync objects.
    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
1646 unsafe fn wait(
1647 &self,
1648 fence: &super::Fence,
1649 wait_value: crate::FenceValue,
1650 timeout: Option<core::time::Duration>,
1651 ) -> Result<bool, crate::DeviceError> {
1652 if fence.satisfied(wait_value) {
1653 return Ok(true);
1654 }
1655
1656 let gl = &self.shared.context.lock();
1657 let timeout_ns = if cfg!(any(webgl, Emscripten)) {
1662 0
1663 } else {
1664 timeout
1665 .map(|t| t.as_nanos().min(u32::MAX as u128) as u32)
1666 .unwrap_or(u32::MAX)
1667 };
1668 fence.wait(gl, wait_value, timeout_ns)
1669 }
1670
    /// Starts a RenderDoc frame capture when built natively with the
    /// `renderdoc` feature; otherwise returns `false`.
    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        // Unreachable when the renderdoc path above is compiled in.
        #[allow(unreachable_code)]
        false
    }
    /// Ends a RenderDoc frame capture; a no-op unless built natively with
    /// the `renderdoc` feature.
    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
    /// Ray-tracing acceleration structures are not supported by the GLES
    /// backend.
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }
    /// Not supported: acceleration structures are unavailable in this backend.
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    /// Not supported: acceleration structures are unavailable in this backend.
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    /// No-op: acceleration structures cannot be created in this backend, so
    /// there is nothing to destroy.
    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }
1710
    /// Not supported: TLAS instances are unavailable in this backend.
    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }
1714
    /// Returns a snapshot of the device's internal resource counters.
    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }
1718
    /// Out-of-memory detection is not implemented for this backend; always
    /// reports success.
    fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
        Ok(())
    }
1722}
1723
// SAFETY: only compiled under the `send_sync` cfg. Presumably that cfg is set
// only on platforms where the underlying GL context wrapper may be shared and
// moved across threads — NOTE(review): confirm against the context types in
// `super` that `Device` holds.
#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}