wgpu_hal/gles/device.rs

use alloc::{
    borrow::{Cow, ToOwned},
    format,
    string::String,
    string::ToString as _,
    sync::Arc,
    vec,
    vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};

use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;

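/// State threaded through the translation of a single shader stage, plus the
/// outputs gathered from the GLSL writer's reflection info.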
struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    immediates_items: &'a mut Vec<naga::back::glsl::ImmediateItem>,
    multiview_mask: Option<NonZeroU32>,
    clip_distance_count: &'a mut u32,
}

impl CompilationContext<'_> {
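    /// Record how each named uniform, texture, and storage resource of one
    /// compiled stage maps to a linear binding slot, bind vertex attribute
    /// and fragment output locations on `program`, and capture the stage's
    /// immediate-data items (and, for vertex stages, its clip distance count).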
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                naga::ShaderStage::Task
                | naga::ShaderStage::Mesh
                | naga::ShaderStage::RayGeneration
                | naga::ShaderStage::AnyHit
                | naga::ShaderStage::ClosestHit
                | naga::ShaderStage::Miss => unreachable!(),
            }
        }

        *self.immediates_items = reflection_info.immediates_items;

        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
}

impl super::Device {
    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a texture
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the texture. If
    ///   `drop_callback` is [`Some`], the texture must be valid until the callback is called.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a renderbuffer
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the renderbuffer. If
    ///   `drop_callback` is [`Some`], the renderbuffer must be valid until the callback is called.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

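    /// Compile a single GLSL shader for `naga_stage`, attaching `label` as a
    /// debug name when the context supports it. On failure, the compile log
    /// is returned as a [`crate::PipelineError::Linkage`].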
    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            naga::ShaderStage::Task
            | naga::ShaderStage::Mesh
            | naga::ShaderStage::RayGeneration
            | naga::ShaderStage::AnyHit
            | naga::ShaderStage::ClosestHit
            | naga::ShaderStage::Miss => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {raw:?}");

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::debug!("\tCompile message: {msg}");
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {msg}");
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

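    /// Translate one stage to GLSL (or take a passthrough source as-is),
    /// consume its reflection info into `context`, and compile the result.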
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let source = 'outer: {
            use naga::back::glsl;
            let pipeline_options = glsl::PipelineOptions {
                shader_stage: naga_stage,
                entry_point: stage.entry_point.to_owned(),
                multiview: context
                    .multiview_mask
                    .map(|a| NonZeroU32::new(a.get().count_ones()).unwrap()),
            };

            let naga = match stage.module.source {
                super::ShaderModuleSource::Naga(ref naga) => naga,
                super::ShaderModuleSource::Passthrough { ref source } => {
                    break 'outer Cow::Borrowed(source);
                }
            };

            let (module, info) = naga::back::pipeline_constants::process_overrides(
                &naga.module,
                &naga.info,
                Some((naga_stage, stage.entry_point)),
                stage.constants,
            )
            .map_err(|e| {
                let msg = format!("{e}");
                crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
            })?;

            let entry_point_index = module
                .entry_points
                .iter()
                .position(|ep| ep.name.as_str() == stage.entry_point)
                .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

            use naga::proc::BoundsCheckPolicy;
            // The image bounds checks require the TEXTURE_LEVELS feature available in GL core 4.3+.
            let version = gl.version();
            let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
                BoundsCheckPolicy::ReadZeroSkipWrite
            } else {
                BoundsCheckPolicy::Unchecked
            };
            // Other bounds checks are either provided by GLSL itself or not implemented yet.
            let policies = naga::proc::BoundsCheckPolicies {
                index: BoundsCheckPolicy::Unchecked,
                buffer: BoundsCheckPolicy::Unchecked,
                image_load: image_check,
                binding_array: BoundsCheckPolicy::Unchecked,
            };

            let mut output = String::new();
            let needs_temp_options = stage.zero_initialize_workgroup_memory
                != context.layout.naga_options.zero_initialize_workgroup_memory;
            let mut temp_options;
            let naga_options = if needs_temp_options {
                // Cloning the naga_options could be expensive, so only do it
                // when the stage actually overrides a layout-level option.
                temp_options = context.layout.naga_options.clone();
                temp_options.zero_initialize_workgroup_memory =
                    stage.zero_initialize_workgroup_memory;
                &temp_options
            } else {
                &context.layout.naga_options
            };
            let mut writer = glsl::Writer::new(
                &mut output,
                &module,
                &info,
                naga_options,
                &pipeline_options,
                policies,
            )
            .map_err(|e| {
                let msg = format!("{e}");
                crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
            })?;

            let reflection_info = writer.write().map_err(|e| {
                let msg = format!("{e}");
                crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
            })?;

            log::debug!("Naga generated shader:\n{output}");

            context.consume_reflection(
                gl,
                &module,
                info.get_entry_point(entry_point_index),
                reflection_info,
                naga_stage,
                program,
            );
            Cow::Owned(output)
        };

        unsafe { Self::compile_shader(gl, &source, naga_stage, stage.module.label.as_deref()) }
    }

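    /// Fetch a linked program from the shared program cache, creating and
    /// linking it on a cache miss.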
    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let group_to_binding_to_slot = layout
            .group_infos
            .iter()
            .map(|group| group.as_ref().map(|group| group.binding_to_slot.clone()))
            .collect::<Vec<_>>();
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
                constant_hash: Self::create_constant_hash(stage),
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        // This guard ensures that we can't accidentally destroy a program whilst we're about to reuse it.
        // The only place that destroys a pipeline is also locking on `program_cache`.
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview_mask,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

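    /// Serialize the stage's pipeline constants into a byte string for use in
    /// the program cache key.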
    fn create_constant_hash(stage: &crate::ProgrammableStage<super::ShaderModule>) -> Vec<u8> {
        let mut buf: Vec<u8> = Vec::new();

        for (key, value) in stage.constants.iter() {
            buf.extend_from_slice(key.as_bytes());
            buf.extend_from_slice(&value.to_ne_bytes());
        }

        buf
    }

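    /// Compile every stage, link them into one program, and resolve the
    /// name-to-slot bindings and immediate-data uniform locations.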
    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut immediates_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                immediates_items.push(Vec::new());
                immediates_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                immediates_items: pc_item,
                multiview_mask,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // Create an empty fragment shader if only a vertex shader is present.
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}",);
            log::debug!("Only vertex shader is present. Creating an empty fragment shader",);
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {program:?}");

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::debug!("\tLink message: {msg}");
        }

        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // This remapping is only needed if we aren't able to put the binding layout
            // in the shader. We can't remap storage buffers this way.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {name:?} from program {program:?}");
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!("Unable to re-map shader storage block {name} to {index}");
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        for (stage_idx, stage_items) in immediates_items.into_iter().enumerate() {
            for item in stage_items {
                let source = &shaders[stage_idx].1.module.source;
                let super::ShaderModuleSource::Naga(naga_module) = source else {
                    // `ImmediateItem` can only be constructed from a naga module, as it requires a type handle.
                    // Passthrough shaders will have empty `immediates_items`.
                    unreachable!("Passthrough shaders don't currently support immediates on GLES");
                };
                let type_inner = &naga_module.module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "immediate data item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::ImmediateDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            // If this returns `None` (the uniform isn't active), that's fine; we just won't set it.
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            immediates_descs: uniforms,
            clip_distance_count,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

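        // Mappable buffers are emulated with a CPU-side allocation when the
        // buffer-map workaround is active or persistent buffer storage is
        // unavailable.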
        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(PrivateCapabilities::BUFFER_ALLOCATION);

        if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
                offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
            });
        }

        let gl = &self.shared.context.lock();

        let is_host_visible = desc
            .usage
            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(wgt::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
            // TODO: may also be required for other calls involving `buffer_sub_data_u8_slice` (e.g. copy buffer to buffer and clear buffer)
            if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(wgt::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
                // Even if the usage doesn't contain SRC_READ, we update it internally at least once
                // Some vendors take usage very literally and STATIC_DRAW will freeze us with an empty buffer
                // https://github.com/gfx-rs/wgpu/issues/3371
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        //TODO: do we need `glow::MAP_UNSYNCHRONIZED_BIT`?

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.map_or(0, |buf| buf.0.get());
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
            Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        self.counters.buffers.add(1);

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
            offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
        })
    }

    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }

        self.counters.buffers.sub(1);
    }

    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }

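    /// Map a buffer range. Emulated buffers hand out a pointer into their
    /// CPU-side allocation; MAP_READ emulation reads the GL buffer back into
    /// a shadow allocation first; otherwise the GL buffer is mapped directly.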
    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }
    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
                *lock(&buffer.offset_of_current_mapping) = 0;
            }
        }
    }
    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }
    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
        //TODO: do we need to do anything?
    }

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ
            | wgt::TextureUses::TRANSIENT;
        let format_desc = self.shared.describe_texture_format(desc.format);

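        // A renderbuffer is cheaper than a full texture, so use one whenever
        // the texture is only ever used as a single-layer 2D render target.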
        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
            // Note: this has to be done before defining the storage!
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    // reset default filtering mode
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }

    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

        // For clarity, we explicitly drop the drop guard. This has no real
        // semantic effect: the function takes ownership of the texture, so the
        // guard would be dropped at the end of the scope anyway.
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }

    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            //TODO: use `conv::map_view_dimension(desc.dimension)`?
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }

    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        // If clamp is not 1, we know anisotropy is supported up to 16x
        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0);

        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }

    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Queue>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        self.counters.command_encoders.add(1);

        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
            counters: Arc::clone(&self.counters),
        })
    }

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        self.counters.bind_group_layouts.add(1);
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }

    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
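        // Bind group resources are flattened into one linear slot range per
        // register type; these counters hand out the next free slot across
        // all groups.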
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
        // We always force point size to be written and it will be ignored by the driver if it's not a point list primitive.
        // https://github.com/gfx-rs/wgpu/pull/3440/files#r1095726950
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            let Some(bg_layout) = bg_layout else {
                group_infos.push(None);
                continue;
            };

            // Create a vector big enough to hold all the bindings, filled with `!0`.
            let mut binding_to_slot = vec![
                !0;
                bg_layout
                    .entries
                    .iter()
                    .map(|b| b.binding)
                    .max()
                    .map_or(0, |idx| idx as usize + 1)
            ]
            .into_boxed_slice();

            for entry in bg_layout.entries.iter() {
                let counter = match entry.ty {
                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
                    wgt::BindingType::Texture { .. } => &mut num_textures,
                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Uniform,
                        ..
                    } => &mut num_uniform_buffers,
                    wgt::BindingType::Buffer {
                        ty: wgt::BufferBindingType::Storage { .. },
                        ..
                    } => &mut num_storage_buffers,
                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                    wgt::BindingType::ExternalTexture => unimplemented!(),
                };

                binding_to_slot[entry.binding as usize] = *counter;
                let br = naga::ResourceBinding {
                    group: group_index as u32,
                    binding: entry.binding,
                };
                binding_map.insert(br, *counter);
                *counter += entry.count.map_or(1, |c| c.get() as u8);
            }

            group_infos.push(Some(super::BindGroupLayoutInfo {
                entries: Arc::clone(&bg_layout.entries),
                binding_to_slot,
            }));
        }

        self.counters.pipeline_layouts.add(1);

        Ok(super::PipelineLayout {
            group_infos: group_infos.into_boxed_slice(),
            naga_options: glsl::Options {
                version: self.shared.shading_language_version,
                writer_flags,
                binding_map,
                zero_initialize_workgroup_memory: true,
            },
        })
    }

    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
        self.counters.pipeline_layouts.sub(1);
    }

    unsafe fn create_bind_group(
        &self,
        desc: &crate::BindGroupDescriptor<
            super::BindGroupLayout,
            super::Buffer,
            super::Sampler,
            super::TextureView,
            super::AccelerationStructure,
        >,
    ) -> Result<super::BindGroup, crate::DeviceError> {
        let mut contents = Vec::new();

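        // Pair each bind group entry with its layout entry so the resource
        // type is known when building the raw binding.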
1302        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
1303            let layout = desc
1304                .layout
1305                .entries
1306                .iter()
1307                .find(|layout_entry| layout_entry.binding == entry.binding)
1308                .expect("internal error: no layout entry found with binding slot");
1309            (entry, layout)
1310        });
1311        for (entry, layout) in layout_and_entry_iter {
1312            let binding = match layout.ty {
1313                wgt::BindingType::Buffer { .. } => {
1314                    let bb = &desc.buffers[entry.resource_index as usize];
1315                    super::RawBinding::Buffer {
1316                        raw: bb.buffer.raw.unwrap(),
1317                        offset: bb.offset as i32,
1318                        size: match bb.size {
1319                            Some(s) => s.get() as i32,
1320                            None => (bb.buffer.size - bb.offset) as i32,
1321                        },
1322                    }
1323                }
1324                wgt::BindingType::Sampler { .. } => {
1325                    let sampler = desc.samplers[entry.resource_index as usize];
1326                    super::RawBinding::Sampler(sampler.raw)
1327                }
1328                wgt::BindingType::Texture { view_dimension, .. } => {
1329                    let view = desc.textures[entry.resource_index as usize].view;
1330                    if view.array_layers.start != 0 {
1331                        log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
1332                            "This is an implementation problem of wgpu-hal/gles backend.")
1333                    }
1334                    let (raw, target) = view.inner.as_native();
1335
                    super::Texture::log_failing_target_heuristics(view_dimension, target);

                    super::RawBinding::Texture {
                        raw,
                        target,
                        aspects: view.aspects,
                        mip_levels: view.mip_levels.clone(),
                    }
                }
                wgt::BindingType::StorageTexture {
                    access,
                    format,
                    view_dimension,
                } => {
                    let view = desc.textures[entry.resource_index as usize].view;
                    let format_desc = self.shared.describe_texture_format(format);
                    let (raw, _target) = view.inner.as_native();
                    super::RawBinding::Image(super::ImageBinding {
                        raw,
                        mip_level: view.mip_levels.start,
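                        // Arrayed view dimensions bind the whole texture as a
                        // layered image; otherwise a single layer is selected.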
                        array_layer: match view_dimension {
                            wgt::TextureViewDimension::D2Array
                            | wgt::TextureViewDimension::CubeArray => None,
                            _ => Some(view.array_layers.start),
                        },
                        access: conv::map_storage_access(access),
                        format: format_desc.internal,
                    })
                }
                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
                wgt::BindingType::ExternalTexture => unimplemented!(),
            };
            contents.push(binding);
        }

        self.counters.bind_groups.add(1);

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }

    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
        self.counters.bind_groups.sub(1);
    }

    unsafe fn create_shader_module(
        &self,
        desc: &crate::ShaderModuleDescriptor,
        shader: crate::ShaderInput,
    ) -> Result<super::ShaderModule, crate::ShaderError> {
        self.counters.shader_modules.add(1);

        Ok(super::ShaderModule {
            source: match shader {
                crate::ShaderInput::Naga(naga) => super::ShaderModuleSource::Naga(naga),
                // The backend doesn't yet expose this feature, so it should be fine.
                crate::ShaderInput::Glsl { shader, .. } => super::ShaderModuleSource::Passthrough {
                    source: shader.to_owned(),
                },
                crate::ShaderInput::SpirV(_)
                | crate::ShaderInput::MetalLib { .. }
                | crate::ShaderInput::Msl { .. }
                | crate::ShaderInput::Dxil { .. }
                | crate::ShaderInput::Hlsl { .. } => {
                    unreachable!()
                }
            },
            label: desc.label.map(|str| str.to_string()),
            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
        })
    }

    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }

    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let (vertex_stage, vertex_buffers) = match &desc.vertex_processor {
            crate::VertexProcessor::Standard {
                vertex_buffers,
                ref vertex_stage,
            } => (vertex_stage, vertex_buffers),
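            // Mesh shading is not exposed by this backend, so a mesh
            // pipeline descriptor can never reach us here.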
            crate::VertexProcessor::Mesh { .. } => unreachable!(),
        };
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner = unsafe {
            self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview_mask)
        }?;

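        // Flatten the vertex layout into two parallel tables: one stride/step
        // descriptor per buffer slot, and one flat list of attributes that
        // reference their buffer by index.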
        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in vertex_buffers.iter().enumerate() {
                let vb_desc = if let Some(vb_layout) = vb_layout {
                    for vat in vb_layout.attributes.iter() {
                        let format_desc = conv::describe_vertex_format(vat.format);
                        attributes.push(super::AttributeDesc {
                            location: vat.shader_location,
                            offset: vat.offset as u32,
                            buffer_index: index as u32,
                            format_desc,
                        });
                    }
                    Some(super::VertexBufferDesc {
                        step: vb_layout.step_mode,
                        stride: vb_layout.array_stride as u32,
                    })
                } else {
                    None
                };
                buffers.push(vb_desc);
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            // Note: if any of the states are different and the `INDEPENDENT_BLEND`
            // flag is not exposed, then this pipeline will not bind correctly.
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare.unwrap_or_default()),
                mask: ds.depth_write_enabled.unwrap_or_default(),
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }

    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`.
        // This is safe to assume as long as:
        // - `RenderPipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }

    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        // If the pipeline only has 2 strong references remaining, they're `pipeline` and `program_cache`.
        // This is safe to assume as long as:
        // - `ComputePipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }

    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
        // Even though the cache doesn't do anything, we still return something here
        // as the least bad option
        Ok(super::PipelineCache)
    }
    unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

            // In general, we aren't able to label queries.
            //
            // We could take a timestamp here to "initialize" the query,
            // but that's a bit of a hack, and we don't want to insert
            // random timestamps into the command stream if we don't have to.

            queries.push(query);
        }

        self.counters.query_sets.add(1);

        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }

    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }

    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }

    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }

    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout: Option<core::time::Duration>,
    ) -> Result<bool, crate::DeviceError> {
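        // Fast path: if we've already observed this fence value, answer
        // without taking the context lock.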
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
        // MAX_CLIENT_WAIT_TIMEOUT_WEBGL is:
        // - 1s in Gecko https://searchfox.org/mozilla-central/rev/754074e05178e017ef6c3d8e30428ffa8f1b794d/dom/canvas/WebGLTypes.h#1386
        // - 0 in WebKit https://github.com/WebKit/WebKit/blob/4ef90d4672ca50267c0971b85db403d9684508ea/Source/WebCore/html/canvas/WebGL2RenderingContext.cpp#L110
        // - 0 in Chromium https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/modules/webgl/webgl2_rendering_context_base.cc;l=112;drc=a3cb0ac4c71ec04abfeaed199e5d63230eca2551
        let timeout_ns = if cfg!(any(webgl, Emscripten)) {
            0
        } else {
            timeout
                .map(|t| t.as_nanos().min(u32::MAX as u128) as u32)
                .unwrap_or(u32::MAX)
        };
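        // For example, `Some(Duration::from_millis(5))` becomes 5_000_000 ns,
        // while `None` saturates to `u32::MAX` ns (~4.3 s), the largest
        // timeout we can encode here.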
        fence.wait(gl, wait_value, timeout_ns)
    }

    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        #[allow(unreachable_code)]
        false
    }
    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }

    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }

    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }

    fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
        Ok(())
    }
}

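// NOTE(assumption): these impls are presumed sound because, on configurations
// where `send_sync` is enabled, access to the underlying GL context goes
// through the module's `MaybeMutex` locking; the raw context itself is not
// thread-safe.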
#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}