1use alloc::{
2 borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
3};
4use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};
5
6use arrayvec::ArrayVec;
7use glow::HasContext;
8use naga::FastHashMap;
9
10use super::{conv, lock, MaybeMutex, PrivateCapabilities};
11use crate::auxil::map_naga_stage;
12use crate::TlasInstance;
13
/// A naga shader stage paired with the programmable-stage description
/// (module, entry point, constants) it will be compiled from.
type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
/// Maps a GLSL resource name to the binding register it belongs to and the
/// linear slot assigned by the pipeline layout.
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;
19
/// Mutable state threaded through the compilation of a single shader stage.
///
/// The borrowed maps accumulate results across all stages of one program.
struct CompilationContext<'a> {
    // Pipeline layout used to translate naga bindings into linear slots.
    layout: &'a super::PipelineLayout,
    // Per-texture-slot sampler slot assignments, filled from reflection.
    sampler_map: &'a mut super::SamplerBindMap,
    // Accumulated name -> (register, slot) assignments for this program.
    name_binding_map: &'a mut NameBindingMap,
    // Output list of immediate (push-constant-style) items for this stage.
    immediates_items: &'a mut Vec<naga::back::glsl::ImmediateItem>,
    // Multiview view mask, if the pipeline renders to multiple views.
    multiview_mask: Option<NonZeroU32>,
    // Output: number of clip distances declared by the vertex stage.
    clip_distance_count: &'a mut u32,
}
28
impl CompilationContext<'_> {
    /// Consumes the reflection data produced by the GLSL backend for one
    /// stage and records the results into the maps this context borrows.
    ///
    /// Specifically: maps uniform/storage buffer names to layout slots,
    /// maps texture names (and their paired samplers) to slots, binds
    /// vertex attribute / fragment output locations on `program`, and
    /// captures the stage's immediate items and (for vertex) the clip
    /// distance count.
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            // Skip globals the entry point never uses.
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                // Other address spaces don't need name-based rebinding.
                _ => continue,
            };

            // Buffers in these spaces always carry an explicit binding.
            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            // Storage images live in a separate register from sampled textures.
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            // Remember which sampler slot is paired with this texture slot.
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        // Bind stage inputs/outputs by name; must happen before linking.
        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                // Mesh/ray-tracing stages are never compiled for GL.
                naga::ShaderStage::Task
                | naga::ShaderStage::Mesh
                | naga::ShaderStage::RayGeneration
                | naga::ShaderStage::AnyHit
                | naga::ShaderStage::ClosestHit
                | naga::ShaderStage::Miss => unreachable!(),
            }
        }

        *self.immediates_items = reflection_info.immediates_items;

        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
}
118
119impl super::Device {
    /// Wraps an externally created GL texture object in a [`super::Texture`].
    ///
    /// `drop_callback`, if provided, is invoked when the returned texture is
    /// dropped; otherwise ownership of the GL object stays with the caller.
    ///
    /// # Safety
    ///
    /// `name` must be a valid GL texture object in the device's context, and
    /// `desc` must accurately describe it.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }
146
    /// Wraps an externally created GL renderbuffer in a [`super::Texture`].
    ///
    /// `drop_callback`, if provided, is invoked when the returned texture is
    /// dropped; otherwise ownership of the GL object stays with the caller.
    ///
    /// # Safety
    ///
    /// `name` must be a valid GL renderbuffer object in the device's context,
    /// and `desc` must accurately describe it.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }
172
    /// Compiles one GLSL source string for the given stage.
    ///
    /// On success returns the GL shader object (caller owns it); on failure
    /// the object is deleted and the driver's info log is returned as a
    /// [`crate::PipelineError::Linkage`].
    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            // Mesh/ray-tracing stages are never produced for the GL backend.
            naga::ShaderStage::Task
            | naga::ShaderStage::Mesh
            | naga::ShaderStage::RayGeneration
            | naga::ShaderStage::AnyHit
            | naga::ShaderStage::ClosestHit
            | naga::ShaderStage::Miss => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        // Attach a debug label when the driver exposes debug functions.
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {raw:?}");

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            // Drivers may emit warnings even on successful compilation.
            if !msg.is_empty() {
                log::debug!("\tCompile message: {msg}");
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {msg}");
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }
219
    /// Translates one naga shader stage to GLSL, records its reflection info
    /// into `context`, and compiles the result into a GL shader object.
    ///
    /// Errors map to `PipelineConstants` (override resolution failure),
    /// `EntryPoint` (missing entry point), or `Linkage` (translation or
    /// compilation failure).
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_owned(),
            // The GLSL backend wants a view *count*, derived here from the
            // number of set bits in the view mask.
            multiview: context
                .multiview_mask
                .map(|a| NonZeroU32::new(a.get().count_ones()).unwrap()),
        };

        // Resolve pipeline-overridable constants before code generation.
        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.source.module,
            &stage.module.source.info,
            Some((naga_stage, stage.entry_point)),
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        let version = gl.version();
        // NOTE(review): image-load bounds checks are only enabled on desktop
        // GL >= (4, 3) here — confirm this version gate matches the intended
        // GLSL feature availability.
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        let policies = naga::proc::BoundsCheckPolicies {
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        // Clone layout options only when this stage's workgroup-memory
        // zeroing setting differs from the layout default.
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{output}");

        // Record bindings/varyings; must happen before the program is linked.
        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }
314
    /// Returns a linked GL program for the given stages and layout, using the
    /// shared program cache to avoid relinking identical pipelines.
    ///
    /// The cache key covers the stage descriptions (module id, entry point,
    /// constants hash, workgroup-zeroing flag) and the layout's slot
    /// assignments. On a cache miss, [`Self::create_program`] is invoked
    /// while the cache lock is held.
    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let group_to_binding_to_slot = layout
            .group_infos
            .iter()
            .map(|group| group.as_ref().map(|group| group.binding_to_slot.clone()))
            .collect::<Vec<_>>();
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
                constant_hash: Self::create_constant_hash(stage),
            });
        }
        // try_lock: pipeline creation is not expected to be reentrant here.
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview_mask,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }
366
367 fn create_constant_hash(stage: &crate::ProgrammableStage<super::ShaderModule>) -> Vec<u8> {
368 let mut buf: Vec<u8> = Vec::new();
369
370 for (key, value) in stage.constants.iter() {
371 buf.extend_from_slice(key.as_bytes());
372 buf.extend_from_slice(&value.to_ne_bytes());
373 }
374
375 buf
376 }
377
    /// Compiles every stage, links them into a fresh GL program, and gathers
    /// the metadata the backend needs at draw time.
    ///
    /// After a successful link this also:
    /// - adds a dummy fragment shader when only a vertex stage was supplied
    ///   (some drivers refuse to link vertex-only programs),
    /// - rebinds named uniform blocks / textures to their layout slots when
    ///   the driver lacks explicit binding layout support,
    /// - queries uniform locations for immediate (push-constant) items and
    ///   the synthetic first-instance uniform.
    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        // Render the version into the form used by `#version` directives.
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut immediates_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            // One immediates list per stage, in stage order.
            let pc_item = {
                immediates_items.push(Vec::new());
                immediates_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                immediates_items: pc_item,
                multiview_mask,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // A vertex-only program may not link on some drivers; pair it with
        // an empty fragment shader.
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}",);
            log::debug!("Only vertex shader is present. Creating an empty fragment shader",);
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        // Shader objects are no longer needed once the program is linked.
        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {program:?}");

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::debug!("\tLink message: {msg}");
        }

        // Without explicit binding layouts in GLSL, assign slots by name
        // after linking.
        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // `uniform_1_i32` below operates on the currently bound program.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {name:?} from program {program:?}");
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        // SSBO bindings cannot be remapped at runtime on this
                        // path; treat it as a device loss.
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!("Unable to re-map shader storage block {name} to {index}");
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        // Resolve a GL uniform location for every immediate-data item the
        // backend will need to upload at draw time.
        for (stage_idx, stage_items) in immediates_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.source.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "immediate data item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                // Items optimized out by the driver have no location.
                if let Some(location) = location {
                    uniforms.push(super::ImmediateDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        // Synthetic uniform used to emulate base-instance semantics.
        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            immediates_descs: uniforms,
            clip_distance_count,
        }))
    }
530}
531
532impl crate::Device for super::Device {
533 type A = super::Api;
534
535 unsafe fn create_buffer(
536 &self,
537 desc: &crate::BufferDescriptor,
538 ) -> Result<super::Buffer, crate::DeviceError> {
539 let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
540 glow::ELEMENT_ARRAY_BUFFER
541 } else {
542 glow::ARRAY_BUFFER
543 };
544
545 let emulate_map = self
546 .shared
547 .workarounds
548 .contains(super::Workarounds::EMULATE_BUFFER_MAP)
549 || !self
550 .shared
551 .private_caps
552 .contains(PrivateCapabilities::BUFFER_ALLOCATION);
553
554 if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
555 return Ok(super::Buffer {
556 raw: None,
557 target,
558 size: desc.size,
559 map_flags: 0,
560 data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
561 offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
562 });
563 }
564
565 let gl = &self.shared.context.lock();
566
567 let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
568 glow::ELEMENT_ARRAY_BUFFER
569 } else {
570 glow::ARRAY_BUFFER
571 };
572
573 let is_host_visible = desc
574 .usage
575 .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
576 let is_coherent = desc
577 .memory_flags
578 .contains(crate::MemoryFlags::PREFER_COHERENT);
579
580 let mut map_flags = 0;
581 if desc.usage.contains(wgt::BufferUses::MAP_READ) {
582 map_flags |= glow::MAP_READ_BIT;
583 }
584 if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
585 map_flags |= glow::MAP_WRITE_BIT;
586 }
587
588 let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
589 unsafe { gl.bind_buffer(target, raw) };
590 let raw_size = desc
591 .size
592 .try_into()
593 .map_err(|_| crate::DeviceError::OutOfMemory)?;
594
595 if self
596 .shared
597 .private_caps
598 .contains(PrivateCapabilities::BUFFER_ALLOCATION)
599 {
600 if is_host_visible {
601 map_flags |= glow::MAP_PERSISTENT_BIT;
602 if is_coherent {
603 map_flags |= glow::MAP_COHERENT_BIT;
604 }
605 }
606 if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
608 map_flags |= glow::DYNAMIC_STORAGE_BIT;
609 }
610 unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
611 } else {
612 assert!(!is_coherent);
613 let usage = if is_host_visible {
614 if desc.usage.contains(wgt::BufferUses::MAP_READ) {
615 glow::STREAM_READ
616 } else {
617 glow::DYNAMIC_DRAW
618 }
619 } else {
620 glow::DYNAMIC_DRAW
624 };
625 unsafe { gl.buffer_data_size(target, raw_size, usage) };
626 }
627
628 unsafe { gl.bind_buffer(target, None) };
629
630 if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
631 map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
632 }
633 #[cfg(native)]
636 if let Some(label) = desc.label {
637 if self
638 .shared
639 .private_caps
640 .contains(PrivateCapabilities::DEBUG_FNS)
641 {
642 let name = raw.map_or(0, |buf| buf.0.get());
643 unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
644 }
645 }
646
647 let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
648 Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
649 } else {
650 None
651 };
652
653 self.counters.buffers.add(1);
654
655 Ok(super::Buffer {
656 raw,
657 target,
658 size: desc.size,
659 map_flags,
660 data,
661 offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
662 })
663 }
664
665 unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
666 if let Some(raw) = buffer.raw {
667 let gl = &self.shared.context.lock();
668 unsafe { gl.delete_buffer(raw) };
669 }
670
671 self.counters.buffers.sub(1);
672 }
673
    /// Accounts for a buffer created outside this device (raw handle import)
    /// so the resource counters stay balanced with `destroy_buffer`.
    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }
677
    /// Maps `range` of `buffer` for host access.
    ///
    /// Three paths:
    /// - no GL object: return a pointer into the CPU vector (emulated
    ///   MAP_WRITE buffer);
    /// - GL object plus CPU shadow (emulated MAP_READ): copy the buffer
    ///   contents into the shadow and return a pointer into it;
    /// - otherwise: `glMapBufferRange` and remember the mapped offset for
    ///   later flushes.
    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    // Emulated MAP_READ: snapshot the whole buffer into the
                    // CPU shadow and hand out a pointer into it.
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    // Remember where the mapping starts; flush offsets are
                    // relative to it.
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
718 unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
719 if let Some(raw) = buffer.raw {
720 if buffer.data.is_none() {
721 let gl = &self.shared.context.lock();
722 unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
723 unsafe { gl.unmap_buffer(buffer.target) };
724 unsafe { gl.bind_buffer(buffer.target, None) };
725 *lock(&buffer.offset_of_current_mapping) = 0;
726 }
727 }
728 }
    /// Flushes explicitly-mapped, non-coherent write ranges so the GPU can
    /// observe them. No-op for emulated buffers (CPU shadow present) and
    /// buffers without a GL object.
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    // Flush offsets are relative to the start of the current
                    // mapping, not the buffer.
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }
    /// Intentionally a no-op: reads on this backend either go through the
    /// CPU shadow (refreshed on every `map_buffer`) or a mapping whose
    /// visibility GL already guarantees, so no explicit invalidation is
    /// needed.
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
    }
753
754 unsafe fn create_texture(
755 &self,
756 desc: &crate::TextureDescriptor,
757 ) -> Result<super::Texture, crate::DeviceError> {
758 let gl = &self.shared.context.lock();
759
760 let render_usage = wgt::TextureUses::COLOR_TARGET
761 | wgt::TextureUses::DEPTH_STENCIL_WRITE
762 | wgt::TextureUses::DEPTH_STENCIL_READ
763 | wgt::TextureUses::TRANSIENT;
764 let format_desc = self.shared.describe_texture_format(desc.format);
765
766 let inner = if render_usage.contains(desc.usage)
767 && desc.dimension == wgt::TextureDimension::D2
768 && desc.size.depth_or_array_layers == 1
769 {
770 let raw = unsafe { gl.create_renderbuffer().unwrap() };
771 unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
772 if desc.sample_count > 1 {
773 unsafe {
774 gl.renderbuffer_storage_multisample(
775 glow::RENDERBUFFER,
776 desc.sample_count as i32,
777 format_desc.internal,
778 desc.size.width as i32,
779 desc.size.height as i32,
780 )
781 };
782 } else {
783 unsafe {
784 gl.renderbuffer_storage(
785 glow::RENDERBUFFER,
786 format_desc.internal,
787 desc.size.width as i32,
788 desc.size.height as i32,
789 )
790 };
791 }
792
793 #[cfg(native)]
794 if let Some(label) = desc.label {
795 if self
796 .shared
797 .private_caps
798 .contains(PrivateCapabilities::DEBUG_FNS)
799 {
800 let name = raw.0.get();
801 unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
802 }
803 }
804
805 unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
806 super::TextureInner::Renderbuffer { raw }
807 } else {
808 let raw = unsafe { gl.create_texture().unwrap() };
809 let target = super::Texture::get_info_from_desc(desc);
810
811 unsafe { gl.bind_texture(target, Some(raw)) };
812 match desc.format.sample_type(None, Some(self.shared.features)) {
814 Some(
815 wgt::TextureSampleType::Float { filterable: false }
816 | wgt::TextureSampleType::Uint
817 | wgt::TextureSampleType::Sint,
818 ) => {
819 unsafe {
821 gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
822 };
823 unsafe {
824 gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
825 };
826 }
827 _ => {}
828 }
829
830 if conv::is_layered_target(target) {
831 unsafe {
832 if self
833 .shared
834 .private_caps
835 .contains(PrivateCapabilities::TEXTURE_STORAGE)
836 {
837 gl.tex_storage_3d(
838 target,
839 desc.mip_level_count as i32,
840 format_desc.internal,
841 desc.size.width as i32,
842 desc.size.height as i32,
843 desc.size.depth_or_array_layers as i32,
844 )
845 } else if target == glow::TEXTURE_3D {
846 let mut width = desc.size.width;
847 let mut height = desc.size.width;
848 let mut depth = desc.size.depth_or_array_layers;
849 for i in 0..desc.mip_level_count {
850 gl.tex_image_3d(
851 target,
852 i as i32,
853 format_desc.internal as i32,
854 width as i32,
855 height as i32,
856 depth as i32,
857 0,
858 format_desc.external,
859 format_desc.data_type,
860 glow::PixelUnpackData::Slice(None),
861 );
862 width = max(1, width / 2);
863 height = max(1, height / 2);
864 depth = max(1, depth / 2);
865 }
866 } else {
867 let mut width = desc.size.width;
868 let mut height = desc.size.width;
869 for i in 0..desc.mip_level_count {
870 gl.tex_image_3d(
871 target,
872 i as i32,
873 format_desc.internal as i32,
874 width as i32,
875 height as i32,
876 desc.size.depth_or_array_layers as i32,
877 0,
878 format_desc.external,
879 format_desc.data_type,
880 glow::PixelUnpackData::Slice(None),
881 );
882 width = max(1, width / 2);
883 height = max(1, height / 2);
884 }
885 }
886 };
887 } else if desc.sample_count > 1 {
888 unsafe {
889 gl.tex_storage_2d_multisample(
890 target,
891 desc.sample_count as i32,
892 format_desc.internal,
893 desc.size.width as i32,
894 desc.size.height as i32,
895 true,
896 )
897 };
898 } else {
899 unsafe {
900 if self
901 .shared
902 .private_caps
903 .contains(PrivateCapabilities::TEXTURE_STORAGE)
904 {
905 gl.tex_storage_2d(
906 target,
907 desc.mip_level_count as i32,
908 format_desc.internal,
909 desc.size.width as i32,
910 desc.size.height as i32,
911 )
912 } else if target == glow::TEXTURE_CUBE_MAP {
913 let mut width = desc.size.width;
914 let mut height = desc.size.width;
915 for i in 0..desc.mip_level_count {
916 for face in [
917 glow::TEXTURE_CUBE_MAP_POSITIVE_X,
918 glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
919 glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
920 glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
921 glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
922 glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
923 ] {
924 gl.tex_image_2d(
925 face,
926 i as i32,
927 format_desc.internal as i32,
928 width as i32,
929 height as i32,
930 0,
931 format_desc.external,
932 format_desc.data_type,
933 glow::PixelUnpackData::Slice(None),
934 );
935 }
936 width = max(1, width / 2);
937 height = max(1, height / 2);
938 }
939 } else {
940 let mut width = desc.size.width;
941 let mut height = desc.size.width;
942 for i in 0..desc.mip_level_count {
943 gl.tex_image_2d(
944 target,
945 i as i32,
946 format_desc.internal as i32,
947 width as i32,
948 height as i32,
949 0,
950 format_desc.external,
951 format_desc.data_type,
952 glow::PixelUnpackData::Slice(None),
953 );
954 width = max(1, width / 2);
955 height = max(1, height / 2);
956 }
957 }
958 };
959 }
960
961 #[cfg(native)]
962 if let Some(label) = desc.label {
963 if self
964 .shared
965 .private_caps
966 .contains(PrivateCapabilities::DEBUG_FNS)
967 {
968 let name = raw.0.get();
969 unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
970 }
971 }
972
973 unsafe { gl.bind_texture(target, None) };
974 super::TextureInner::Texture { raw, target }
975 };
976
977 self.counters.textures.add(1);
978
979 Ok(super::Texture {
980 inner,
981 drop_guard: None,
982 mip_level_count: desc.mip_level_count,
983 array_layer_count: desc.array_layer_count(),
984 format: desc.format,
985 format_desc,
986 copy_size: desc.copy_extent(),
987 })
988 }
989
    /// Destroys the texture's GL object unless it is externally owned (a
    /// drop guard is present) or has no owned GL object (default/external
    /// framebuffers).
    unsafe fn destroy_texture(&self, texture: super::Texture) {
        // A drop guard means the GL object belongs to whoever imported it.
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

        // Runs the external owner's drop callback, if any.
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }
1014
    /// Accounts for a texture created outside this device (raw handle
    /// import) so the resource counters stay balanced with `destroy_texture`.
    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }
1018
1019 unsafe fn create_texture_view(
1020 &self,
1021 texture: &super::Texture,
1022 desc: &crate::TextureViewDescriptor,
1023 ) -> Result<super::TextureView, crate::DeviceError> {
1024 self.counters.texture_views.add(1);
1025 Ok(super::TextureView {
1026 inner: texture.inner.clone(),
1028 aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
1029 mip_levels: desc.range.mip_range(texture.mip_level_count),
1030 array_layers: desc.range.layer_range(texture.array_layer_count),
1031 format: texture.format,
1032 })
1033 }
1034
    /// Views own no GL objects (they clone the texture's inner handle), so
    /// only the counter needs updating.
    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }
1038
    /// Creates a GL sampler object and configures filtering, wrap modes,
    /// border color, LOD clamp, anisotropy, and depth comparison from `desc`.
    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        // GL folds the mipmap filter into the min filter enum.
        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        // 1 is the GL default; only set anisotropy when it differs.
        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        // Comparison samplers need COMPARE_REF_TO_TEXTURE mode enabled.
        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }
1135
    /// Deletes the GL sampler object and updates the counter.
    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }
1141
1142 unsafe fn create_command_encoder(
1143 &self,
1144 _desc: &crate::CommandEncoderDescriptor<super::Queue>,
1145 ) -> Result<super::CommandEncoder, crate::DeviceError> {
1146 self.counters.command_encoders.add(1);
1147
1148 Ok(super::CommandEncoder {
1149 cmd_buffer: super::CommandBuffer::default(),
1150 state: Default::default(),
1151 private_caps: self.shared.private_caps,
1152 counters: Arc::clone(&self.counters),
1153 })
1154 }
1155
1156 unsafe fn create_bind_group_layout(
1157 &self,
1158 desc: &crate::BindGroupLayoutDescriptor,
1159 ) -> Result<super::BindGroupLayout, crate::DeviceError> {
1160 self.counters.bind_group_layouts.add(1);
1161 Ok(super::BindGroupLayout {
1162 entries: Arc::from(desc.entries),
1163 })
1164 }
1165
    /// Bind group layouts own no GL objects; only the counter is updated.
    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }
1169
    /// Builds a pipeline layout by assigning every bind group entry a flat
    /// per-kind slot index (sampler, texture, image, UBO, SSBO), and by
    /// recording the naga GLSL writer options that later shader translation
    /// will use.
    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        // Running totals per resource kind; a counter's current value is the
        // next free slot of that kind. NOTE(review): these are u8, so more
        // than 255 resources of one kind would silently wrap — confirm this
        // is bounded by device limits elsewhere.
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        // GLSL writer flags depend on what the driver advertises.
        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            // Empty/omitted groups keep their index so later groups still
            // line up with their `group_index`.
            let Some(bg_layout) = bg_layout else {
                group_infos.push(None);
                continue;
            };

            // Dense table indexed by binding number; `!0` marks bindings the
            // layout doesn't declare.
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                // Pick the counter for this resource kind.
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                    wgt::BindingType::ExternalTexture => unimplemented!(),
                };

                // Record the slot both for runtime rebinding and for the
                // naga GLSL writer's binding map.
                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                // Array bindings consume one slot per element.
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(Some(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            }));
        }

        self.counters.pipeline_layouts.add(1);

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }
1263
    /// Pipeline layouts own no GL objects; only the counter is updated.
    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
        self.counters.pipeline_layouts.sub(1);
    }
1267
    /// Resolves each bind group entry against its layout entry and captures
    /// the raw GL handles (buffer/sampler/texture/image) needed at bind time.
    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<
            super::BindGroupLayout,
            super::Buffer,
            super::Sampler,
            super::TextureView,
            super::AccelerationStructure,
        >,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

        // Pair every entry with the layout entry that shares its binding
        // number; a missing layout entry is an internal invariant violation.
        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
            let layout = desc
                .layout
                .entries
                .iter()
                .find(|layout_entry| layout_entry.binding == entry.binding)
                .expect("internal error: no layout entry found with binding slot");
            (entry, layout)
        });
        for (entry, layout) in layout_and_entry_iter {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        // An absent size means "the rest of the buffer".
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { view_dimension, .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    // Known backend limitation: sampled views must start at
                    // array layer 0.
                    if view.array_layers.start != 0 {
                        log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
                            "This is an implementation problem of wgpu-hal/gles backend.")
                    }
                    let (raw, target) = view.inner.as_native();

                    super::Texture::log_failing_target_heuristics(view_dimension, target);

                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                        mip_levels: view.mip_levels.clone(),
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
                        // Array dimensions bind the whole array (layer =
                        // None); other dimensions bind a single layer.
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                wgt::BindingType::ExternalTexture => unimplemented!(),
            };
            contents.push(binding);
        }

        self.counters.bind_groups.add(1);

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }
1355
    /// Bind groups only hold raw handles; nothing to free besides the counter.
    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
        self.counters.bind_groups.sub(1);
    }
1359
1360 unsafe fn create_shader_module(
1361 &self,
1362 desc: &crate::ShaderModuleDescriptor,
1363 shader: crate::ShaderInput,
1364 ) -> Result<super::ShaderModule, crate::ShaderError> {
1365 self.counters.shader_modules.add(1);
1366
1367 Ok(super::ShaderModule {
1368 source: match shader {
1369 crate::ShaderInput::Naga(naga) => naga,
1370 crate::ShaderInput::Glsl { .. } => unimplemented!(),
1372 crate::ShaderInput::SpirV(_)
1373 | crate::ShaderInput::MetalLib { .. }
1374 | crate::ShaderInput::Msl { .. }
1375 | crate::ShaderInput::Dxil { .. }
1376 | crate::ShaderInput::Hlsl { .. } => {
1377 unreachable!()
1378 }
1379 },
1380 label: desc.label.map(|str| str.to_string()),
1381 id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
1382 })
1383 }
1384
    /// Shader modules hold only CPU-side data; just update the counter.
    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }
1388
1389 unsafe fn create_render_pipeline(
1390 &self,
1391 desc: &crate::RenderPipelineDescriptor<
1392 super::PipelineLayout,
1393 super::ShaderModule,
1394 super::PipelineCache,
1395 >,
1396 ) -> Result<super::RenderPipeline, crate::PipelineError> {
1397 let (vertex_stage, vertex_buffers) = match &desc.vertex_processor {
1398 crate::VertexProcessor::Standard {
1399 vertex_buffers,
1400 ref vertex_stage,
1401 } => (vertex_stage, vertex_buffers),
1402 crate::VertexProcessor::Mesh { .. } => unreachable!(),
1403 };
1404 let gl = &self.shared.context.lock();
1405 let mut shaders = ArrayVec::new();
1406 shaders.push((naga::ShaderStage::Vertex, vertex_stage));
1407 if let Some(ref fs) = desc.fragment_stage {
1408 shaders.push((naga::ShaderStage::Fragment, fs));
1409 }
1410 let inner = unsafe {
1411 self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview_mask)
1412 }?;
1413
1414 let (vertex_buffers, vertex_attributes) = {
1415 let mut buffers = Vec::new();
1416 let mut attributes = Vec::new();
1417 for (index, vb_layout) in vertex_buffers.iter().enumerate() {
1418 buffers.push(super::VertexBufferDesc {
1419 step: vb_layout.step_mode,
1420 stride: vb_layout.array_stride as u32,
1421 });
1422 for vat in vb_layout.attributes.iter() {
1423 let format_desc = conv::describe_vertex_format(vat.format);
1424 attributes.push(super::AttributeDesc {
1425 location: vat.shader_location,
1426 offset: vat.offset as u32,
1427 buffer_index: index as u32,
1428 format_desc,
1429 });
1430 }
1431 }
1432 (buffers.into_boxed_slice(), attributes.into_boxed_slice())
1433 };
1434
1435 let color_targets = {
1436 let mut targets = Vec::new();
1437 for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
1438 targets.push(super::ColorTargetDesc {
1439 mask: ct.write_mask,
1440 blend: ct.blend.as_ref().map(conv::map_blend),
1441 });
1442 }
1443 targets.into_boxed_slice()
1446 };
1447
1448 self.counters.render_pipelines.add(1);
1449
1450 Ok(super::RenderPipeline {
1451 inner,
1452 primitive: desc.primitive,
1453 vertex_buffers,
1454 vertex_attributes,
1455 color_targets,
1456 depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
1457 function: conv::map_compare_func(ds.depth_compare.unwrap_or_default()),
1458 mask: ds.depth_write_enabled.unwrap_or_default(),
1459 }),
1460 depth_bias: desc
1461 .depth_stencil
1462 .as_ref()
1463 .map(|ds| ds.bias)
1464 .unwrap_or_default(),
1465 stencil: desc
1466 .depth_stencil
1467 .as_ref()
1468 .map(|ds| conv::map_stencil(&ds.stencil)),
1469 alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
1470 })
1471 }
1472
    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        // A strong count of exactly 2 is taken to mean the only remaining
        // owners are `pipeline` itself plus the program cache entry, so the
        // GL program can be evicted and deleted. NOTE(review): relies on the
        // cache holding exactly one Arc per program — confirm against
        // `create_pipeline`.
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            // Drop cache entries referring to this program; failed (Err)
            // entries are evicted unconditionally.
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }
1490
    /// Creates a compute pipeline by linking the single compute stage
    /// through the shared `create_pipeline` path.
    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        // Multiview does not apply to compute, hence the `None` mask.
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }
1508
    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        // Same ownership heuristic as `destroy_render_pipeline`: a strong
        // count of 2 means only this pipeline and the program cache entry
        // remain, so the GL program can be deleted. NOTE(review): assumes
        // the cache holds exactly one Arc per program.
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            // Evict entries for this program (and any failed entries).
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }
1526
    /// Pipeline caching is a no-op on this backend; a unit marker value is
    /// returned so callers can treat caches uniformly.
    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
        Ok(super::PipelineCache)
    }
1535 unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}
1536
1537 #[cfg_attr(target_arch = "wasm32", allow(unused))]
1538 unsafe fn create_query_set(
1539 &self,
1540 desc: &wgt::QuerySetDescriptor<crate::Label>,
1541 ) -> Result<super::QuerySet, crate::DeviceError> {
1542 let gl = &self.shared.context.lock();
1543
1544 let mut queries = Vec::with_capacity(desc.count as usize);
1545 for _ in 0..desc.count {
1546 let query =
1547 unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;
1548
1549 queries.push(query);
1556 }
1557
1558 self.counters.query_sets.add(1);
1559
1560 Ok(super::QuerySet {
1561 queries: queries.into_boxed_slice(),
1562 target: match desc.ty {
1563 wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
1564 wgt::QueryType::Timestamp => glow::TIMESTAMP,
1565 _ => unimplemented!(),
1566 },
1567 })
1568 }
1569
    /// Deletes every GL query object owned by the set.
    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }
1577
    /// Creates a fence; no GL context access is needed at creation time.
    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }
1582
    /// Releases any GL state owned by the fence under the context lock.
    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }
1588
    /// Queries the latest fence value observable through the GL context.
    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    /// Waits until the fence reaches `wait_value` or the timeout elapses.
    /// Returns `Ok(true)` when the value was reached.
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout: Option<core::time::Duration>,
    ) -> Result<bool, crate::DeviceError> {
        // Fast path: already satisfied, no need to take the context lock.
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
        // On WebGL/Emscripten the timeout is forced to 0 (non-blocking
        // check). NOTE(review): presumably because WebGL's clientWaitSync
        // rejects non-zero timeouts — confirm.
        let timeout_ns = if cfg!(any(webgl, Emscripten)) {
            0
        } else {
            // Clamp to u32::MAX nanoseconds; `None` means wait as long as
            // the u32 range allows.
            timeout
                .map(|t| t.as_nanos().min(u32::MAX as u128) as u32)
                .unwrap_or(u32::MAX)
        };
        fence.wait(gl, wait_value, timeout_ns)
    }
1620
    /// Starts a RenderDoc frame capture on native builds with the
    /// `renderdoc` feature; otherwise returns `false` (capture unavailable).
    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        // Reached only when the cfg above is disabled.
        #[allow(unreachable_code)]
        false
    }
    /// Ends a RenderDoc frame capture; a no-op without renderdoc support.
    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
    /// Ray-tracing acceleration structures are not implemented in this
    /// backend.
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }
    /// Not implemented: acceleration structures are unsupported here.
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    /// Not implemented: acceleration structures are unsupported here.
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    /// No-op: acceleration structures are never created by this backend, so
    /// there is nothing to destroy.
    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }
1660
    /// Not implemented: TLAS instances are part of the unsupported
    /// ray-tracing path.
    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }
1664
    /// Returns a snapshot (clone) of the device's internal resource counters.
    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }
1668
    /// This backend performs no out-of-memory polling; always reports `Ok`.
    fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
        Ok(())
    }
1672}
1673
#[cfg(send_sync)]
// SAFETY: NOTE(review) — presumably sound because GL access goes through the
// shared context lock; confirm no thread-affine GL state is reachable from
// `Device` without that lock.
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
// SAFETY: NOTE(review) — same reasoning as the `Sync` impl above; confirm.
unsafe impl Send for super::Device {}