use alloc::{
    borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};

use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
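// Maps the GLSL name that the naga backend assigns to a binding (for example,
// a generated uniform block name) to the register and linear slot it must be
// rebound to once the program is linked. Entries are produced in
// `CompilationContext::consume_reflection` below.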
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;

struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    push_constant_items: &'a mut Vec<naga::back::glsl::PushConstantItem>,
    multiview: Option<NonZeroU32>,
    clip_distance_count: &'a mut u32,
}

impl CompilationContext<'_> {
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
            }
        }

        *self.push_constant_items = reflection_info.push_constant_items;

        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
}

impl super::Device {
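    /// Wraps a raw GL texture object as a [`super::Texture`].
    ///
    /// No validation is performed against `desc`; the caller must ensure the
    /// texture's storage matches the descriptor (format, extent, mip count).
    /// If `drop_callback` is `None`, wgpu-hal takes ownership and deletes the
    /// texture on destruction; otherwise the callback fires instead and the
    /// caller retains ownership.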
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

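    /// Wraps a raw GL renderbuffer object as a [`super::Texture`].
    ///
    /// Same contract as [`Self::texture_from_raw`], except that `name` must
    /// refer to a live GL renderbuffer rather than a texture.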
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {raw:?}");

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::warn!("\tCompile: {msg}");
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {msg}");
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

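    // Translates one naga stage into GLSL: applies pipeline-constant
    // overrides, selects bounds-check policies based on the GL version, runs
    // the naga GLSL writer, and records the writer's reflection info in the
    // `CompilationContext` before compiling the generated source.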
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_owned(),
            multiview: context.multiview,
        };

        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.naga.module,
            &stage.module.naga.info,
            Some((naga_stage, stage.entry_point)),
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        let version = gl.version();
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        let policies = naga::proc::BoundsCheckPolicies {
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{output}");

        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

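    // Fetches a linked program from the shared program cache, or compiles and
    // links one via `create_program` on a cache miss. The cache key combines
    // the shader stages with the layout's binding-to-slot tables, so pipelines
    // that differ only in fixed-function state can share one GL program.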
    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut push_constant_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                push_constant_items.push(Vec::new());
                push_constant_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                push_constant_items: pc_item,
                multiview,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // A program needs a fragment stage to draw; if only a vertex shader
        // was provided, link a trivial empty fragment shader alongside it.
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}");
            log::info!("Only vertex shader is present. Creating an empty fragment shader");
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {program:?}");

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::warn!("\tLink: {msg}");
        }

        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // Without explicit binding layout support in the shader, every
            // named resource has to be rebound to its slot after linking.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {name:?} from program {program:?}");
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!("Unable to re-map shader storage block {name} to {index}");
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        for (stage_idx, stage_items) in push_constant_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.naga.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "push constant item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::PushConstantDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            push_constant_descs: uniforms,
            clip_distance_count,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(PrivateCapabilities::BUFFER_ALLOCATION);

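        // If mapping must be emulated (or real buffer allocation is missing),
        // back mappable-for-write buffers purely with CPU memory: `raw` stays
        // `None` and the `data` vec holds the contents.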
        if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
                offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
            });
        }

        let gl = &self.shared.context.lock();

        let is_host_visible = desc
            .usage
            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(wgt::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
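            // Query results get written into this buffer after creation,
            // which on immutable storage requires DYNAMIC_STORAGE_BIT to be
            // set at allocation time.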
            if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(wgt::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.map_or(0, |buf| buf.0.get());
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
            Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        self.counters.buffers.add(1);

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
            offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
        })
    }

    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }

        self.counters.buffers.sub(1);
    }

    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }

    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
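        // Three paths: a CPU-backed buffer hands out a pointer into its vec; a
        // MAP_READ shadow allocation is refreshed from the GL buffer first;
        // otherwise the GL buffer is mapped directly via `map_buffer_range`.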
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
                *lock(&buffer.offset_of_current_mapping) = 0;
            }
        }
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {}

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ;
        let format_desc = self.shared.describe_texture_format(desc.format);

        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }

    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }

    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }

    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }

    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Queue>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        self.counters.command_encoders.add(1);

        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
            counters: Arc::clone(&self.counters),
        })
    }

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        self.counters.bind_group_layouts.add(1);
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }

    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
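        // Always write `gl_PointSize` in the vertex stage: some drivers
        // require it for point-list primitives, and it is ignored otherwise.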
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                    wgt::BindingType::ExternalTexture => unimplemented!(),
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            });
        }

        self.counters.pipeline_layouts.add(1);

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }

    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
        self.counters.pipeline_layouts.sub(1);
    }

    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<
            super::BindGroupLayout,
            super::Buffer,
            super::Sampler,
            super::TextureView,
            super::AccelerationStructure,
        >,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
            let layout = desc
                .layout
                .entries
                .iter()
                .find(|layout_entry| layout_entry.binding == entry.binding)
                .expect("internal error: no layout entry found with binding slot");
            (entry, layout)
        });
        for (entry, layout) in layout_and_entry_iter {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { view_dimension, .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    if view.array_layers.start != 0 {
                        log::error!(
                            "Unable to create a sampled texture binding for non-zero array layer.\n{}",
                            "This is an implementation problem of wgpu-hal/gles backend.",
                        )
                    }
                    let (raw, target) = view.inner.as_native();

                    super::Texture::log_failing_target_heuristics(view_dimension, target);

                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                        mip_levels: view.mip_levels.clone(),
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                wgt::BindingType::ExternalTexture => unimplemented!(),
            };
            contents.push(binding);
        }

        self.counters.bind_groups.add(1);

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }

    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
        self.counters.bind_groups.sub(1);
    }

    unsafe fn create_shader_module(
        &self,
        desc: &crate::ShaderModuleDescriptor,
        shader: crate::ShaderInput,
    ) -> Result<super::ShaderModule, crate::ShaderError> {
        self.counters.shader_modules.add(1);

        Ok(super::ShaderModule {
            naga: match shader {
                crate::ShaderInput::SpirV(_) => {
                    panic!("`Features::SPIRV_SHADER_PASSTHROUGH` is not enabled")
                }
                crate::ShaderInput::Msl { .. } => {
                    panic!("`Features::MSL_SHADER_PASSTHROUGH` is not enabled")
                }
                crate::ShaderInput::Naga(naga) => naga,
                crate::ShaderInput::Dxil { .. } | crate::ShaderInput::Hlsl { .. } => {
                    panic!("`Features::HLSL_DXIL_SHADER_PASSTHROUGH` is not enabled")
                }
            },
            label: desc.label.map(|str| str.to_string()),
            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
        })
    }

    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }

    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, &desc.vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner =
            unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?;

        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in desc.vertex_buffers.iter().enumerate() {
                buffers.push(super::VertexBufferDesc {
                    step: vb_layout.step_mode,
                    stride: vb_layout.array_stride as u32,
                });
                for vat in vb_layout.attributes.iter() {
                    let format_desc = conv::describe_vertex_format(vat.format);
                    attributes.push(super::AttributeDesc {
                        location: vat.shader_location,
                        offset: vat.offset as u32,
                        buffer_index: index as u32,
                        format_desc,
                    });
                }
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }
    unsafe fn create_mesh_pipeline(
        &self,
        _desc: &crate::MeshPipelineDescriptor<
            <Self::A as crate::Api>::PipelineLayout,
            <Self::A as crate::Api>::ShaderModule,
            <Self::A as crate::Api>::PipelineCache,
        >,
    ) -> Result<<Self::A as crate::Api>::RenderPipeline, crate::PipelineError> {
        unreachable!()
    }

    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        if Arc::strong_count(&pipeline.inner) == 2 {
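            // A strong count of 2 means only this pipeline and the program
            // cache still reference the program, so evict and delete it.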
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }

    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        if Arc::strong_count(&pipeline.inner) == 2 {
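            // A strong count of 2 means only this pipeline and the program
            // cache still reference the program, so evict and delete it.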
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }

    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
        Ok(super::PipelineCache)
    }
    unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

            queries.push(query);
        }

        self.counters.query_sets.add(1);

        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }

    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }

    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }

    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }

    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout_ms: u32,
    ) -> Result<bool, crate::DeviceError> {
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
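        // On WebGL, `client_wait_sync` must not block, so the timeout is
        // clamped to zero and the call only polls the fence.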
        let timeout_ns = if cfg!(any(webgl, Emscripten)) {
            0
        } else {
            (timeout_ms as u64 * 1_000_000).min(!0u32 as u64)
        };
        fence.wait(gl, wait_value, timeout_ns)
    }

    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        #[allow(unreachable_code)]
        false
    }
    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }

    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }

    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }

    fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
        Ok(())
    }
}

#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}