use alloc::{
    borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};

use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;

struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    push_constant_items: &'a mut Vec<naga::back::glsl::PushConstantItem>,
    multiview: Option<NonZeroU32>,
    clip_distance_count: &'a mut u32,
}

impl CompilationContext<'_> {
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
            }
        }

        *self.push_constant_items = reflection_info.push_constant_items;

        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
}

impl super::Device {
    #[cfg(any(native, Emscripten))]
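    /// Wraps an externally created GL texture object so it can be used as a
    /// [`super::Texture`].
    ///
    /// # Safety
    ///
    /// - `name` must be a valid GL texture object in this device's context,
    ///   with storage matching `desc`.
    /// - If `drop_callback` is [`None`], wgpu-hal takes ownership and deletes the
    ///   texture when it is destroyed; if it is [`Some`], the texture must stay
    ///   valid until the callback is invoked.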
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    #[cfg(any(native, Emscripten))]
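    /// Like [`Self::texture_from_raw`], but wraps an externally created GL
    /// renderbuffer object.
    ///
    /// # Safety
    ///
    /// - `name` must be a valid GL renderbuffer object in this device's context,
    ///   with storage matching `desc`.
    /// - If `drop_callback` is [`None`], wgpu-hal takes ownership and deletes the
    ///   renderbuffer when it is destroyed; if it is [`Some`], it must stay valid
    ///   until the callback is invoked.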
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            naga::ShaderStage::Task | naga::ShaderStage::Mesh => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {raw:?}");

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::warn!("\tCompile: {msg}");
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {msg}");
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_owned(),
            multiview: context.multiview,
        };

        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.source.module,
            &stage.module.source.info,
            Some((naga_stage, stage.entry_point)),
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        let version = gl.version();
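        // `ReadZeroSkipWrite` image bounds checks need desktop GL 4.3+ features;
        // on embedded (ES) or older contexts, leave image loads unchecked.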
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        let policies = naga::proc::BoundsCheckPolicies {
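            // Index and buffer accesses are either covered by GLSL itself or not
            // implemented for this backend yet.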
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
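            // Cloning the layout's options is not free, so it only happens when this
            // stage actually overrides workgroup-memory zero-initialization.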
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{output}");

        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
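        // Look the program up in the shared cache first: pipelines with identical
        // stages and binding layouts reuse a single linked GL program.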
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut push_constant_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                push_constant_items.push(Vec::new());
                push_constant_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                push_constant_items: pc_item,
                multiview,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        if has_stages == wgt::ShaderStages::VERTEX {
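            // Some drivers refuse to link a program that has no fragment stage,
            // so pair a lone vertex shader with an empty one.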
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}");
            log::info!("Only vertex shader is present. Creating an empty fragment shader");
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {program:?}");

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::warn!("\tLink: {msg}");
        }

        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            unsafe { gl.use_program(Some(program)) };
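            // Without explicit binding layout support in the shader, each named
            // resource has to be rebound to its linear slot after linking.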
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {name:?} from program {program:?}");
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!("Unable to re-map shader storage block {name} to {index}");
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        for (stage_idx, stage_items) in push_constant_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.source.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "push constant item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::PushConstantDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
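            // `None` here just means the uniform was optimized out; we then never set it.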
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            push_constant_descs: uniforms,
            clip_distance_count,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(PrivateCapabilities::BUFFER_ALLOCATION);

        if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
                offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
            });
        }

        let gl = &self.shared.context.lock();

        let is_host_visible = desc
            .usage
            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(wgt::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
            if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
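                // Query results are resolved into the buffer via `glBufferSubData`,
                // which immutable storage only allows with DYNAMIC_STORAGE_BIT.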
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(wgt::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
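                // Prefer DYNAMIC_DRAW even for device-local buffers: some drivers take
                // the usage hint literally, and updating a STATIC_DRAW buffer after
                // creation can be very slow.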
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.map_or(0, |buf| buf.0.get());
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
            Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        self.counters.buffers.add(1);

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
            offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
        })
    }

    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }

        self.counters.buffers.sub(1);
    }

    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }

    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
                *lock(&buffer.offset_of_current_mapping) = 0;
            }
        }
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
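        // No-op: `map_buffer` re-reads the buffer contents on every call, so there
        // is nothing to invalidate here.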
    }

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ
            | wgt::TextureUses::TRANSIENT;
        let format_desc = self.shared.describe_texture_format(desc.format);

        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
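            // Note: the filtering parameters have to be set before the storage is
            // defined; integer and non-filterable formats must not be left on the
            // default linear filter.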
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }

    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

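        // Explicitly drop the guard for clarity; the end of scope would drop it anyway.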
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }

    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }

    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        if desc.anisotropy_clamp != 1 {
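            // `anisotropy_clamp == 1` means anisotropic filtering is disabled.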
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        if let Some(compare) = desc.compare {
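            // Comparison samplers need COMPARE_REF_TO_TEXTURE mode in addition to
            // the comparison function itself.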
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }

    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Queue>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        self.counters.command_encoders.add(1);

        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
            counters: Arc::clone(&self.counters),
        })
    }

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        self.counters.bind_group_layouts.add(1);
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }

    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
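        // Always write gl_PointSize; drivers ignore it for non-point topologies.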
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
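            // One slot per binding index, sized to the largest binding number;
            // `!0` marks bindings that never receive a slot.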
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                    wgt::BindingType::ExternalTexture => unimplemented!(),
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            });
        }

        self.counters.pipeline_layouts.add(1);

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }

    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
        self.counters.pipeline_layouts.sub(1);
    }

    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<
            super::BindGroupLayout,
            super::Buffer,
            super::Sampler,
            super::TextureView,
            super::AccelerationStructure,
        >,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
            let layout = desc
                .layout
                .entries
                .iter()
                .find(|layout_entry| layout_entry.binding == entry.binding)
                .expect("internal error: no layout entry found with binding slot");
            (entry, layout)
        });
        for (entry, layout) in layout_and_entry_iter {
            let binding = match layout.ty {
                wgt::BindingType::Buffer { .. } => {
                    let bb = &desc.buffers[entry.resource_index as usize];
                    super::RawBinding::Buffer {
                        raw: bb.buffer.raw.unwrap(),
                        offset: bb.offset as i32,
                        size: match bb.size {
                            Some(s) => s.get() as i32,
                            None => (bb.buffer.size - bb.offset) as i32,
                        },
                    }
                }
                wgt::BindingType::Sampler { .. } => {
                    let sampler = desc.samplers[entry.resource_index as usize];
                    super::RawBinding::Sampler(sampler.raw)
                }
                wgt::BindingType::Texture { view_dimension, .. } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    if view.array_layers.start != 0 {
                        log::error!(
                            "Unable to create a sampled texture binding for non-zero array layer.\n{}",
                            "This is an implementation problem of wgpu-hal/gles backend."
                        )
                    }
                    let (raw, target) = view.inner.as_native();

                    super::Texture::log_failing_target_heuristics(view_dimension, target);

                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                        mip_levels: view.mip_levels.clone(),
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                wgt::BindingType::ExternalTexture => unimplemented!(),
            };
            contents.push(binding);
        }

        self.counters.bind_groups.add(1);

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }

    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
        self.counters.bind_groups.sub(1);
    }

    unsafe fn create_shader_module(
        &self,
        desc: &crate::ShaderModuleDescriptor,
        shader: crate::ShaderInput,
    ) -> Result<super::ShaderModule, crate::ShaderError> {
        self.counters.shader_modules.add(1);

        Ok(super::ShaderModule {
            source: match shader {
                crate::ShaderInput::Naga(naga) => naga,
                crate::ShaderInput::Glsl { .. } => unimplemented!(),
                crate::ShaderInput::SpirV(_)
                | crate::ShaderInput::Msl { .. }
                | crate::ShaderInput::Dxil { .. }
                | crate::ShaderInput::Hlsl { .. } => {
                    unreachable!()
                }
            },
            label: desc.label.map(|str| str.to_string()),
            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
        })
    }

    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }

    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let (vertex_stage, vertex_buffers) = match &desc.vertex_processor {
            crate::VertexProcessor::Standard {
                vertex_buffers,
                ref vertex_stage,
            } => (vertex_stage, vertex_buffers),
            crate::VertexProcessor::Mesh { .. } => unreachable!(),
        };
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner =
            unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview) }?;

        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in vertex_buffers.iter().enumerate() {
                buffers.push(super::VertexBufferDesc {
                    step: vb_layout.step_mode,
                    stride: vb_layout.array_stride as u32,
                });
                for vat in vb_layout.attributes.iter() {
                    let format_desc = conv::describe_vertex_format(vat.format);
                    attributes.push(super::AttributeDesc {
                        location: vat.shader_location,
                        offset: vat.offset as u32,
                        buffer_index: index as u32,
                        format_desc,
                    });
                }
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
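            // Note: if the targets' blend states differ and INDEPENDENT_BLEND is not
            // available, this pipeline may not bind correctly.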
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }

    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
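        // If only two strong references remain, they are this `pipeline` and the
        // entry in the shared program cache, so the program can safely be evicted
        // and deleted.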
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }

    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
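        // Same reasoning as in `destroy_render_pipeline`: two remaining strong
        // references mean only this pipeline and the program cache hold the program.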
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }

    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
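        // The GLES backend has no binary pipeline cache; returning the unit type
        // keeps the API surface uniform.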
        Ok(super::PipelineCache)
    }
    unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

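            // Note: the query cannot usefully be labeled here; a GL query only
            // becomes a real object once it is first used.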
            queries.push(query);
        }

        self.counters.query_sets.add(1);

        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }

    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }

    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }

    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }

    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout: Option<core::time::Duration>,
    ) -> Result<bool, crate::DeviceError> {
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
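        // On WebGL, `glClientWaitSync` must effectively be called with a zero
        // timeout: browsers clamp MAX_CLIENT_WAIT_TIMEOUT_WEBGL to at or near 0.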
        let timeout_ns = if cfg!(any(webgl, Emscripten)) {
            0
        } else {
            timeout
                .map(|t| t.as_nanos().min(u32::MAX as u128) as u32)
                .unwrap_or(u32::MAX)
        };
        fence.wait(gl, wait_value, timeout_ns)
    }

    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        #[allow(unreachable_code)]
        false
    }
    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }

    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }

    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }

    fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
        Ok(())
    }
}

#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}