// wgpu_hal/gles/device.rs

use alloc::{
    borrow::ToOwned, format, string::String, string::ToString as _, sync::Arc, vec, vec::Vec,
};
use core::{cmp::max, convert::TryInto, num::NonZeroU32, ptr, sync::atomic::Ordering};

use arrayvec::ArrayVec;
use glow::HasContext;
use naga::FastHashMap;

use super::{conv, lock, MaybeMutex, PrivateCapabilities};
use crate::auxil::map_naga_stage;
use crate::TlasInstance;

type ShaderStage<'a> = (
    naga::ShaderStage,
    &'a crate::ProgrammableStage<'a, super::ShaderModule>,
);
type NameBindingMap = FastHashMap<String, (super::BindingRegister, u8)>;

struct CompilationContext<'a> {
    layout: &'a super::PipelineLayout,
    sampler_map: &'a mut super::SamplerBindMap,
    name_binding_map: &'a mut NameBindingMap,
    immediates_items: &'a mut Vec<naga::back::glsl::ImmediateItem>,
    multiview_mask: Option<NonZeroU32>,
    clip_distance_count: &'a mut u32,
}

impl CompilationContext<'_> {
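    /// Records the bindings reported by naga's GLSL backend so they can be
    /// rebound after the program is linked: buffer blocks and texture/sampler
    /// uniforms are resolved to linear slots via the pipeline layout, and
    /// texture-sampler pairings are captured in `sampler_map`.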
    fn consume_reflection(
        self,
        gl: &glow::Context,
        module: &naga::Module,
        ep_info: &naga::valid::FunctionInfo,
        reflection_info: naga::back::glsl::ReflectionInfo,
        naga_stage: naga::ShaderStage,
        program: glow::Program,
    ) {
        for (handle, var) in module.global_variables.iter() {
            if ep_info[handle].is_empty() {
                continue;
            }
            let register = match var.space {
                naga::AddressSpace::Uniform => super::BindingRegister::UniformBuffers,
                naga::AddressSpace::Storage { .. } => super::BindingRegister::StorageBuffers,
                _ => continue,
            };

            let br = var.binding.as_ref().unwrap();
            let slot = self.layout.get_slot(br);

            let name = match reflection_info.uniforms.get(&handle) {
                Some(name) => name.clone(),
                None => continue,
            };
            log::trace!(
                "Rebind buffer: {:?} -> {}, register={:?}, slot={}",
                var.name.as_ref(),
                &name,
                register,
                slot
            );
            self.name_binding_map.insert(name, (register, slot));
        }

        for (name, mapping) in reflection_info.texture_mapping {
            let var = &module.global_variables[mapping.texture];
            let register = match module.types[var.ty].inner {
                naga::TypeInner::Image {
                    class: naga::ImageClass::Storage { .. },
                    ..
                } => super::BindingRegister::Images,
                _ => super::BindingRegister::Textures,
            };

            let tex_br = var.binding.as_ref().unwrap();
            let texture_linear_index = self.layout.get_slot(tex_br);

            self.name_binding_map
                .insert(name, (register, texture_linear_index));
            if let Some(sampler_handle) = mapping.sampler {
                let sam_br = module.global_variables[sampler_handle]
                    .binding
                    .as_ref()
                    .unwrap();
                let sampler_linear_index = self.layout.get_slot(sam_br);
                self.sampler_map[texture_linear_index as usize] = Some(sampler_linear_index);
            }
        }

        for (name, location) in reflection_info.varying {
            match naga_stage {
                naga::ShaderStage::Vertex => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_attrib_location(program, location.location, &name) }
                }
                naga::ShaderStage::Fragment => {
                    assert_eq!(location.index, 0);
                    unsafe { gl.bind_frag_data_location(program, location.location, &name) }
                }
                naga::ShaderStage::Compute => {}
                naga::ShaderStage::Task
                | naga::ShaderStage::Mesh
                | naga::ShaderStage::RayGeneration
                | naga::ShaderStage::AnyHit
                | naga::ShaderStage::ClosestHit
                | naga::ShaderStage::Miss => unreachable!(),
            }
        }

        *self.immediates_items = reflection_info.immediates_items;

        if naga_stage == naga::ShaderStage::Vertex {
            *self.clip_distance_count = reflection_info.clip_distance_count;
        }
    }
}

impl super::Device {
    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a texture
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the texture. If
    ///   `drop_callback` is [`Some`], the texture must be valid until the callback is called.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Texture {
                raw: glow::NativeTexture(name),
                target: super::Texture::get_info_from_desc(desc),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

    /// # Safety
    ///
    /// - `name` must be created respecting `desc`
    /// - `name` must be a renderbuffer
    /// - If `drop_callback` is [`None`], wgpu-hal will take ownership of the renderbuffer. If
    ///   `drop_callback` is [`Some`], the renderbuffer must be valid until the callback is called.
    #[cfg(any(native, Emscripten))]
    pub unsafe fn texture_from_raw_renderbuffer(
        &self,
        name: NonZeroU32,
        desc: &crate::TextureDescriptor,
        drop_callback: Option<crate::DropCallback>,
    ) -> super::Texture {
        super::Texture {
            inner: super::TextureInner::Renderbuffer {
                raw: glow::NativeRenderbuffer(name),
            },
            drop_guard: crate::DropGuard::from_option(drop_callback),
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc: self.shared.describe_texture_format(desc.format),
            copy_size: desc.copy_extent(),
        }
    }

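    /// Compiles a single GLSL shader of the given stage, attaching a debug
    /// label when the context supports it. On failure the shader object is
    /// deleted and the driver's info log is returned as a `Linkage` error.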
    unsafe fn compile_shader(
        gl: &glow::Context,
        shader: &str,
        naga_stage: naga::ShaderStage,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
    ) -> Result<glow::Shader, crate::PipelineError> {
        let target = match naga_stage {
            naga::ShaderStage::Vertex => glow::VERTEX_SHADER,
            naga::ShaderStage::Fragment => glow::FRAGMENT_SHADER,
            naga::ShaderStage::Compute => glow::COMPUTE_SHADER,
            naga::ShaderStage::Task
            | naga::ShaderStage::Mesh
            | naga::ShaderStage::RayGeneration
            | naga::ShaderStage::AnyHit
            | naga::ShaderStage::ClosestHit
            | naga::ShaderStage::Miss => unreachable!(),
        };

        let raw = unsafe { gl.create_shader(target) }.unwrap();
        #[cfg(native)]
        if gl.supports_debug() {
            let name = raw.0.get();
            unsafe { gl.object_label(glow::SHADER, name, label) };
        }

        unsafe { gl.shader_source(raw, shader) };
        unsafe { gl.compile_shader(raw) };

        log::debug!("\tCompiled shader {raw:?}");

        let compiled_ok = unsafe { gl.get_shader_compile_status(raw) };
        let msg = unsafe { gl.get_shader_info_log(raw) };
        if compiled_ok {
            if !msg.is_empty() {
                log::debug!("\tCompile message: {msg}");
            }
            Ok(raw)
        } else {
            log::error!("\tShader compilation failed: {msg}");
            unsafe { gl.delete_shader(raw) };
            Err(crate::PipelineError::Linkage(
                map_naga_stage(naga_stage),
                msg,
            ))
        }
    }

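    /// Translates a naga module to GLSL for one pipeline stage: pipeline
    /// constants are resolved, bounds-check policies are chosen based on the
    /// GL version, the generated source is compiled, and the reflection info
    /// is fed back into the `CompilationContext`.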
    fn create_shader(
        gl: &glow::Context,
        naga_stage: naga::ShaderStage,
        stage: &crate::ProgrammableStage<super::ShaderModule>,
        context: CompilationContext,
        program: glow::Program,
    ) -> Result<glow::Shader, crate::PipelineError> {
        use naga::back::glsl;
        let pipeline_options = glsl::PipelineOptions {
            shader_stage: naga_stage,
            entry_point: stage.entry_point.to_owned(),
            multiview: context
                .multiview_mask
                .map(|a| NonZeroU32::new(a.get().count_ones()).unwrap()),
        };

        let (module, info) = naga::back::pipeline_constants::process_overrides(
            &stage.module.source.module,
            &stage.module.source.info,
            Some((naga_stage, stage.entry_point)),
            stage.constants,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::PipelineConstants(map_naga_stage(naga_stage), msg)
        })?;

        let entry_point_index = module
            .entry_points
            .iter()
            .position(|ep| ep.name.as_str() == stage.entry_point)
            .ok_or(crate::PipelineError::EntryPoint(naga_stage))?;

        use naga::proc::BoundsCheckPolicy;
        // The image bounds checks require the TEXTURE_LEVELS feature available in GL core 4.3+.
        let version = gl.version();
        let image_check = if !version.is_embedded && (version.major, version.minor) >= (4, 3) {
            BoundsCheckPolicy::ReadZeroSkipWrite
        } else {
            BoundsCheckPolicy::Unchecked
        };

        // Other bounds checks are either provided by glsl or not implemented yet.
        let policies = naga::proc::BoundsCheckPolicies {
            index: BoundsCheckPolicy::Unchecked,
            buffer: BoundsCheckPolicy::Unchecked,
            image_load: image_check,
            binding_array: BoundsCheckPolicy::Unchecked,
        };

        let mut output = String::new();
        let needs_temp_options = stage.zero_initialize_workgroup_memory
            != context.layout.naga_options.zero_initialize_workgroup_memory;
        let mut temp_options;
        let naga_options = if needs_temp_options {
            // Cloning the naga_options could be expensive, so we only do it
            // when we cannot avoid it.
            temp_options = context.layout.naga_options.clone();
            temp_options.zero_initialize_workgroup_memory = stage.zero_initialize_workgroup_memory;
            &temp_options
        } else {
            &context.layout.naga_options
        };
        let mut writer = glsl::Writer::new(
            &mut output,
            &module,
            &info,
            naga_options,
            &pipeline_options,
            policies,
        )
        .map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        let reflection_info = writer.write().map_err(|e| {
            let msg = format!("{e}");
            crate::PipelineError::Linkage(map_naga_stage(naga_stage), msg)
        })?;

        log::debug!("Naga generated shader:\n{output}");

        context.consume_reflection(
            gl,
            &module,
            info.get_entry_point(entry_point_index),
            reflection_info,
            naga_stage,
            program,
        );

        unsafe { Self::compile_shader(gl, &output, naga_stage, stage.module.label.as_deref()) }
    }

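    /// Looks up (or creates and caches) a linked program for the given shader
    /// stages and layout. The cache key includes per-stage entry points,
    /// workgroup-memory zeroing, pipeline constants, and the layout's
    /// binding-to-slot tables, so distinct layouts never alias a program.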
    unsafe fn create_pipeline<'a>(
        &self,
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let mut program_stages = ArrayVec::new();
        let mut group_to_binding_to_slot = Vec::with_capacity(layout.group_infos.len());
        for group in &*layout.group_infos {
            group_to_binding_to_slot.push(group.binding_to_slot.clone());
        }
        for &(naga_stage, stage) in &shaders {
            program_stages.push(super::ProgramStage {
                naga_stage: naga_stage.to_owned(),
                shader_id: stage.module.id,
                entry_point: stage.entry_point.to_owned(),
                zero_initialize_workgroup_memory: stage.zero_initialize_workgroup_memory,
                constant_hash: Self::create_constant_hash(stage),
            });
        }
        let mut guard = self
            .shared
            .program_cache
            .try_lock()
            .expect("Couldn't acquire program_cache lock");
        // This guard ensures that we can't accidentally destroy a program whilst we're about to reuse it
        // The only place that destroys a pipeline is also locking on `program_cache`
        let program = guard
            .entry(super::ProgramCacheKey {
                stages: program_stages,
                group_to_binding_to_slot: group_to_binding_to_slot.into_boxed_slice(),
            })
            .or_insert_with(|| unsafe {
                Self::create_program(
                    gl,
                    shaders,
                    layout,
                    label,
                    multiview_mask,
                    self.shared.shading_language_version,
                    self.shared.private_caps,
                )
            })
            .to_owned()?;
        drop(guard);

        Ok(program)
    }

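    /// Despite the name, this returns the raw concatenation of pipeline
    /// constant keys and values; it is only used as part of the program
    /// cache key, where an exact byte comparison is what we want.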
    fn create_constant_hash(stage: &crate::ProgrammableStage<super::ShaderModule>) -> Vec<u8> {
        let mut buf: Vec<u8> = Vec::new();

        for (key, value) in stage.constants.iter() {
            buf.extend_from_slice(key.as_bytes());
            buf.extend_from_slice(&value.to_ne_bytes());
        }

        buf
    }

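    /// Compiles and links all stages into one GL program, synthesizing an
    /// empty fragment shader for vertex-only pipelines, then applies the
    /// name-based binding remapping (when explicit binding layouts aren't
    /// available) and resolves immediate-data uniform locations.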
    unsafe fn create_program<'a>(
        gl: &glow::Context,
        shaders: ArrayVec<ShaderStage<'a>, { crate::MAX_CONCURRENT_SHADER_STAGES }>,
        layout: &super::PipelineLayout,
        #[cfg_attr(target_arch = "wasm32", allow(unused))] label: Option<&str>,
        multiview_mask: Option<NonZeroU32>,
        glsl_version: naga::back::glsl::Version,
        private_caps: PrivateCapabilities,
    ) -> Result<Arc<super::PipelineInner>, crate::PipelineError> {
        let glsl_version = match glsl_version {
            naga::back::glsl::Version::Embedded { version, .. } => format!("{version} es"),
            naga::back::glsl::Version::Desktop(version) => format!("{version}"),
        };
        let program = unsafe { gl.create_program() }.unwrap();
        #[cfg(native)]
        if let Some(label) = label {
            if private_caps.contains(PrivateCapabilities::DEBUG_FNS) {
                let name = program.0.get();
                unsafe { gl.object_label(glow::PROGRAM, name, Some(label)) };
            }
        }

        let mut name_binding_map = NameBindingMap::default();
        let mut immediates_items = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut sampler_map = [None; super::MAX_TEXTURE_SLOTS];
        let mut has_stages = wgt::ShaderStages::empty();
        let mut shaders_to_delete = ArrayVec::<_, { crate::MAX_CONCURRENT_SHADER_STAGES }>::new();
        let mut clip_distance_count = 0;

        for &(naga_stage, stage) in &shaders {
            has_stages |= map_naga_stage(naga_stage);
            let pc_item = {
                immediates_items.push(Vec::new());
                immediates_items.last_mut().unwrap()
            };
            let context = CompilationContext {
                layout,
                sampler_map: &mut sampler_map,
                name_binding_map: &mut name_binding_map,
                immediates_items: pc_item,
                multiview_mask,
                clip_distance_count: &mut clip_distance_count,
            };

            let shader = Self::create_shader(gl, naga_stage, stage, context, program)?;
            shaders_to_delete.push(shader);
        }

        // Create an empty fragment shader if only a vertex shader is present
        if has_stages == wgt::ShaderStages::VERTEX {
            let shader_src = format!("#version {glsl_version}\n void main(void) {{}}");
            log::debug!("Only vertex shader is present. Creating an empty fragment shader");
            let shader = unsafe {
                Self::compile_shader(
                    gl,
                    &shader_src,
                    naga::ShaderStage::Fragment,
                    Some("(wgpu internal) dummy fragment shader"),
                )
            }?;
            shaders_to_delete.push(shader);
        }

        for &shader in shaders_to_delete.iter() {
            unsafe { gl.attach_shader(program, shader) };
        }
        unsafe { gl.link_program(program) };

        for shader in shaders_to_delete {
            unsafe { gl.delete_shader(shader) };
        }

        log::debug!("\tLinked program {program:?}");

        let linked_ok = unsafe { gl.get_program_link_status(program) };
        let msg = unsafe { gl.get_program_info_log(program) };
        if !linked_ok {
            return Err(crate::PipelineError::Linkage(has_stages, msg));
        }
        if !msg.is_empty() {
            log::debug!("\tLink message: {msg}");
        }

        if !private_caps.contains(PrivateCapabilities::SHADER_BINDING_LAYOUT) {
            // This remapping is only needed if we aren't able to put the binding layout
            // in the shader. We can't remap storage buffers this way.
            unsafe { gl.use_program(Some(program)) };
            for (ref name, (register, slot)) in name_binding_map {
                log::trace!("Get binding {name:?} from program {program:?}");
                match register {
                    super::BindingRegister::UniformBuffers => {
                        let index = unsafe { gl.get_uniform_block_index(program, name) }.unwrap();
                        log::trace!("\tBinding slot {slot} to block index {index}");
                        unsafe { gl.uniform_block_binding(program, index, slot as _) };
                    }
                    super::BindingRegister::StorageBuffers => {
                        let index =
                            unsafe { gl.get_shader_storage_block_index(program, name) }.unwrap();
                        log::error!("Unable to re-map shader storage block {name} to {index}");
                        return Err(crate::DeviceError::Lost.into());
                    }
                    super::BindingRegister::Textures | super::BindingRegister::Images => {
                        let location = unsafe { gl.get_uniform_location(program, name) };
                        unsafe { gl.uniform_1_i32(location.as_ref(), slot as _) };
                    }
                }
            }
        }

        let mut uniforms = ArrayVec::new();

        for (stage_idx, stage_items) in immediates_items.into_iter().enumerate() {
            for item in stage_items {
                let naga_module = &shaders[stage_idx].1.module.source.module;
                let type_inner = &naga_module.types[item.ty].inner;

                let location = unsafe { gl.get_uniform_location(program, &item.access_path) };

                log::trace!(
                    "immediate data item: name={}, ty={:?}, offset={}, location={:?}",
                    item.access_path,
                    type_inner,
                    item.offset,
                    location,
                );

                if let Some(location) = location {
                    uniforms.push(super::ImmediateDesc {
                        location,
                        offset: item.offset,
                        size_bytes: type_inner.size(naga_module.to_ctx()),
                        ty: type_inner.clone(),
                    });
                }
            }
        }

        let first_instance_location = if has_stages.contains(wgt::ShaderStages::VERTEX) {
            // If this returns None (the uniform isn't active), that's fine; we just won't set it.
            unsafe { gl.get_uniform_location(program, naga::back::glsl::FIRST_INSTANCE_BINDING) }
        } else {
            None
        };

        Ok(Arc::new(super::PipelineInner {
            program,
            sampler_map,
            first_instance_location,
            immediates_descs: uniforms,
            clip_distance_count,
        }))
    }
}

impl crate::Device for super::Device {
    type A = super::Api;

    unsafe fn create_buffer(
        &self,
        desc: &crate::BufferDescriptor,
    ) -> Result<super::Buffer, crate::DeviceError> {
        let target = if desc.usage.contains(wgt::BufferUses::INDEX) {
            glow::ELEMENT_ARRAY_BUFFER
        } else {
            glow::ARRAY_BUFFER
        };

        let emulate_map = self
            .shared
            .workarounds
            .contains(super::Workarounds::EMULATE_BUFFER_MAP)
            || !self
                .shared
                .private_caps
                .contains(PrivateCapabilities::BUFFER_ALLOCATION);

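        // If mapping must be emulated (or GL buffer storage isn't available),
        // back MAP_WRITE buffers with a CPU-side allocation instead of a GL
        // buffer object; the command stream reads from this allocation when
        // the buffer is used.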
        if emulate_map && desc.usage.intersects(wgt::BufferUses::MAP_WRITE) {
            return Ok(super::Buffer {
                raw: None,
                target,
                size: desc.size,
                map_flags: 0,
                data: Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize]))),
                offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
            });
        }

        let gl = &self.shared.context.lock();

        let is_host_visible = desc
            .usage
            .intersects(wgt::BufferUses::MAP_READ | wgt::BufferUses::MAP_WRITE);
        let is_coherent = desc
            .memory_flags
            .contains(crate::MemoryFlags::PREFER_COHERENT);

        let mut map_flags = 0;
        if desc.usage.contains(wgt::BufferUses::MAP_READ) {
            map_flags |= glow::MAP_READ_BIT;
        }
        if desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_WRITE_BIT;
        }

        let raw = Some(unsafe { gl.create_buffer() }.map_err(|_| crate::DeviceError::OutOfMemory)?);
        unsafe { gl.bind_buffer(target, raw) };
        let raw_size = desc
            .size
            .try_into()
            .map_err(|_| crate::DeviceError::OutOfMemory)?;

        if self
            .shared
            .private_caps
            .contains(PrivateCapabilities::BUFFER_ALLOCATION)
        {
            if is_host_visible {
                map_flags |= glow::MAP_PERSISTENT_BIT;
                if is_coherent {
                    map_flags |= glow::MAP_COHERENT_BIT;
                }
            }
            // TODO: may also be required for other calls involving `buffer_sub_data_u8_slice` (e.g. copy buffer to buffer and clear buffer)
            if desc.usage.intersects(wgt::BufferUses::QUERY_RESOLVE) {
                map_flags |= glow::DYNAMIC_STORAGE_BIT;
            }
            unsafe { gl.buffer_storage(target, raw_size, None, map_flags) };
        } else {
            assert!(!is_coherent);
            let usage = if is_host_visible {
                if desc.usage.contains(wgt::BufferUses::MAP_READ) {
                    glow::STREAM_READ
                } else {
                    glow::DYNAMIC_DRAW
                }
            } else {
                // Even if the usage doesn't contain SRC_READ, we update it internally at least once
                // Some vendors take usage very literally and STATIC_DRAW will freeze us with an empty buffer
                // https://github.com/gfx-rs/wgpu/issues/3371
                glow::DYNAMIC_DRAW
            };
            unsafe { gl.buffer_data_size(target, raw_size, usage) };
        }

        unsafe { gl.bind_buffer(target, None) };

        if !is_coherent && desc.usage.contains(wgt::BufferUses::MAP_WRITE) {
            map_flags |= glow::MAP_FLUSH_EXPLICIT_BIT;
        }
        //TODO: do we need `glow::MAP_UNSYNCHRONIZED_BIT`?

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.map_or(0, |buf| buf.0.get());
                unsafe { gl.object_label(glow::BUFFER, name, Some(label)) };
            }
        }

        let data = if emulate_map && desc.usage.contains(wgt::BufferUses::MAP_READ) {
            Some(Arc::new(MaybeMutex::new(vec![0; desc.size as usize])))
        } else {
            None
        };

        self.counters.buffers.add(1);

        Ok(super::Buffer {
            raw,
            target,
            size: desc.size,
            map_flags,
            data,
            offset_of_current_mapping: Arc::new(MaybeMutex::new(0)),
        })
    }

    unsafe fn destroy_buffer(&self, buffer: super::Buffer) {
        if let Some(raw) = buffer.raw {
            let gl = &self.shared.context.lock();
            unsafe { gl.delete_buffer(raw) };
        }

        self.counters.buffers.sub(1);
    }

    unsafe fn add_raw_buffer(&self, _buffer: &super::Buffer) {
        self.counters.buffers.add(1);
    }

    unsafe fn map_buffer(
        &self,
        buffer: &super::Buffer,
        range: crate::MemoryRange,
    ) -> Result<crate::BufferMapping, crate::DeviceError> {
        let is_coherent = buffer.map_flags & glow::MAP_COHERENT_BIT != 0;
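        // Buffers without a GL object are the emulated-mapping case: hand out
        // a pointer into the CPU-side allocation. For real buffers, MAP_READ
        // emulation snapshots the contents into the shadow allocation, while
        // everything else goes through glMapBufferRange.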
        let ptr = match buffer.raw {
            None => {
                let mut vec = lock(buffer.data.as_ref().unwrap());
                let slice = &mut vec.as_mut_slice()[range.start as usize..range.end as usize];
                slice.as_mut_ptr()
            }
            Some(raw) => {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                let ptr = if let Some(ref map_read_allocation) = buffer.data {
                    let mut guard = lock(map_read_allocation);
                    let slice = guard.as_mut_slice();
                    unsafe { self.shared.get_buffer_sub_data(gl, buffer.target, 0, slice) };
                    slice.as_mut_ptr()
                } else {
                    *lock(&buffer.offset_of_current_mapping) = range.start;
                    unsafe {
                        gl.map_buffer_range(
                            buffer.target,
                            range.start as i32,
                            (range.end - range.start) as i32,
                            buffer.map_flags,
                        )
                    }
                };
                unsafe { gl.bind_buffer(buffer.target, None) };
                ptr
            }
        };
        Ok(crate::BufferMapping {
            ptr: ptr::NonNull::new(ptr).ok_or(crate::DeviceError::Lost)?,
            is_coherent,
        })
    }

    unsafe fn unmap_buffer(&self, buffer: &super::Buffer) {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                unsafe { gl.unmap_buffer(buffer.target) };
                unsafe { gl.bind_buffer(buffer.target, None) };
                *lock(&buffer.offset_of_current_mapping) = 0;
            }
        }
    }

    unsafe fn flush_mapped_ranges<I>(&self, buffer: &super::Buffer, ranges: I)
    where
        I: Iterator<Item = crate::MemoryRange>,
    {
        if let Some(raw) = buffer.raw {
            if buffer.data.is_none() {
                let gl = &self.shared.context.lock();
                unsafe { gl.bind_buffer(buffer.target, Some(raw)) };
                for range in ranges {
                    let offset_of_current_mapping = *lock(&buffer.offset_of_current_mapping);
                    unsafe {
                        gl.flush_mapped_buffer_range(
                            buffer.target,
                            (range.start - offset_of_current_mapping) as i32,
                            (range.end - range.start) as i32,
                        )
                    };
                }
            }
        }
    }

    unsafe fn invalidate_mapped_ranges<I>(&self, _buffer: &super::Buffer, _ranges: I) {
        //TODO: do we need to do anything?
    }

    unsafe fn create_texture(
        &self,
        desc: &crate::TextureDescriptor,
    ) -> Result<super::Texture, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let render_usage = wgt::TextureUses::COLOR_TARGET
            | wgt::TextureUses::DEPTH_STENCIL_WRITE
            | wgt::TextureUses::DEPTH_STENCIL_READ
            | wgt::TextureUses::TRANSIENT;
        let format_desc = self.shared.describe_texture_format(desc.format);

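        // Textures used only as render targets (plain 2D, single layer) can
        // be backed by a renderbuffer, which drivers can often allocate and
        // resolve more efficiently than a full texture.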
        let inner = if render_usage.contains(desc.usage)
            && desc.dimension == wgt::TextureDimension::D2
            && desc.size.depth_or_array_layers == 1
        {
            let raw = unsafe { gl.create_renderbuffer().unwrap() };
            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, Some(raw)) };
            if desc.sample_count > 1 {
                unsafe {
                    gl.renderbuffer_storage_multisample(
                        glow::RENDERBUFFER,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            } else {
                unsafe {
                    gl.renderbuffer_storage(
                        glow::RENDERBUFFER,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                    )
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::RENDERBUFFER, name, Some(label)) };
                }
            }

            unsafe { gl.bind_renderbuffer(glow::RENDERBUFFER, None) };
            super::TextureInner::Renderbuffer { raw }
        } else {
            let raw = unsafe { gl.create_texture().unwrap() };
            let target = super::Texture::get_info_from_desc(desc);

            unsafe { gl.bind_texture(target, Some(raw)) };
            //Note: this has to be done before defining the storage!
            match desc.format.sample_type(None, Some(self.shared.features)) {
                Some(
                    wgt::TextureSampleType::Float { filterable: false }
                    | wgt::TextureSampleType::Uint
                    | wgt::TextureSampleType::Sint,
                ) => {
                    // reset default filtering mode
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MIN_FILTER, glow::NEAREST as i32)
                    };
                    unsafe {
                        gl.tex_parameter_i32(target, glow::TEXTURE_MAG_FILTER, glow::NEAREST as i32)
                    };
                }
                _ => {}
            }

            if conv::is_layered_target(target) {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_3d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                            desc.size.depth_or_array_layers as i32,
                        )
                    } else if target == glow::TEXTURE_3D {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        let mut depth = desc.size.depth_or_array_layers;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                depth as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                            depth = max(1, depth / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_3d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                desc.size.depth_or_array_layers as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            } else if desc.sample_count > 1 {
                unsafe {
                    gl.tex_storage_2d_multisample(
                        target,
                        desc.sample_count as i32,
                        format_desc.internal,
                        desc.size.width as i32,
                        desc.size.height as i32,
                        true,
                    )
                };
            } else {
                unsafe {
                    if self
                        .shared
                        .private_caps
                        .contains(PrivateCapabilities::TEXTURE_STORAGE)
                    {
                        gl.tex_storage_2d(
                            target,
                            desc.mip_level_count as i32,
                            format_desc.internal,
                            desc.size.width as i32,
                            desc.size.height as i32,
                        )
                    } else if target == glow::TEXTURE_CUBE_MAP {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            for face in [
                                glow::TEXTURE_CUBE_MAP_POSITIVE_X,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_X,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Y,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Y,
                                glow::TEXTURE_CUBE_MAP_POSITIVE_Z,
                                glow::TEXTURE_CUBE_MAP_NEGATIVE_Z,
                            ] {
                                gl.tex_image_2d(
                                    face,
                                    i as i32,
                                    format_desc.internal as i32,
                                    width as i32,
                                    height as i32,
                                    0,
                                    format_desc.external,
                                    format_desc.data_type,
                                    glow::PixelUnpackData::Slice(None),
                                );
                            }
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    } else {
                        let mut width = desc.size.width;
                        let mut height = desc.size.height;
                        for i in 0..desc.mip_level_count {
                            gl.tex_image_2d(
                                target,
                                i as i32,
                                format_desc.internal as i32,
                                width as i32,
                                height as i32,
                                0,
                                format_desc.external,
                                format_desc.data_type,
                                glow::PixelUnpackData::Slice(None),
                            );
                            width = max(1, width / 2);
                            height = max(1, height / 2);
                        }
                    }
                };
            }

            #[cfg(native)]
            if let Some(label) = desc.label {
                if self
                    .shared
                    .private_caps
                    .contains(PrivateCapabilities::DEBUG_FNS)
                {
                    let name = raw.0.get();
                    unsafe { gl.object_label(glow::TEXTURE, name, Some(label)) };
                }
            }

            unsafe { gl.bind_texture(target, None) };
            super::TextureInner::Texture { raw, target }
        };

        self.counters.textures.add(1);

        Ok(super::Texture {
            inner,
            drop_guard: None,
            mip_level_count: desc.mip_level_count,
            array_layer_count: desc.array_layer_count(),
            format: desc.format,
            format_desc,
            copy_size: desc.copy_extent(),
        })
    }

    unsafe fn destroy_texture(&self, texture: super::Texture) {
        if texture.drop_guard.is_none() {
            let gl = &self.shared.context.lock();
            match texture.inner {
                super::TextureInner::Renderbuffer { raw, .. } => {
                    unsafe { gl.delete_renderbuffer(raw) };
                }
                super::TextureInner::DefaultRenderbuffer => {}
                super::TextureInner::Texture { raw, .. } => {
                    unsafe { gl.delete_texture(raw) };
                }
                #[cfg(webgl)]
                super::TextureInner::ExternalFramebuffer { .. } => {}
                #[cfg(native)]
                super::TextureInner::ExternalNativeFramebuffer { .. } => {}
            }
        }

        // For clarity, we explicitly drop the drop guard, although this has no real semantic
        // effect: the end of the scope would drop it anyway, since this function takes
        // ownership of the texture.
        drop(texture.drop_guard);

        self.counters.textures.sub(1);
    }

    unsafe fn add_raw_texture(&self, _texture: &super::Texture) {
        self.counters.textures.add(1);
    }

    unsafe fn create_texture_view(
        &self,
        texture: &super::Texture,
        desc: &crate::TextureViewDescriptor,
    ) -> Result<super::TextureView, crate::DeviceError> {
        self.counters.texture_views.add(1);
        Ok(super::TextureView {
            //TODO: use `conv::map_view_dimension(desc.dimension)`?
            inner: texture.inner.clone(),
            aspects: crate::FormatAspects::new(texture.format, desc.range.aspect),
            mip_levels: desc.range.mip_range(texture.mip_level_count),
            array_layers: desc.range.layer_range(texture.array_layer_count),
            format: texture.format,
        })
    }

    unsafe fn destroy_texture_view(&self, _view: super::TextureView) {
        self.counters.texture_views.sub(1);
    }

    unsafe fn create_sampler(
        &self,
        desc: &crate::SamplerDescriptor,
    ) -> Result<super::Sampler, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let raw = unsafe { gl.create_sampler().unwrap() };

        let (min, mag) =
            conv::map_filter_modes(desc.min_filter, desc.mag_filter, desc.mipmap_filter);

        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MIN_FILTER, min as i32) };
        unsafe { gl.sampler_parameter_i32(raw, glow::TEXTURE_MAG_FILTER, mag as i32) };

        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_S,
                conv::map_address_mode(desc.address_modes[0]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_T,
                conv::map_address_mode(desc.address_modes[1]) as i32,
            )
        };
        unsafe {
            gl.sampler_parameter_i32(
                raw,
                glow::TEXTURE_WRAP_R,
                conv::map_address_mode(desc.address_modes[2]) as i32,
            )
        };

        if let Some(border_color) = desc.border_color {
            let border = match border_color {
                wgt::SamplerBorderColor::TransparentBlack | wgt::SamplerBorderColor::Zero => {
                    [0.0; 4]
                }
                wgt::SamplerBorderColor::OpaqueBlack => [0.0, 0.0, 0.0, 1.0],
                wgt::SamplerBorderColor::OpaqueWhite => [1.0; 4],
            };
            unsafe { gl.sampler_parameter_f32_slice(raw, glow::TEXTURE_BORDER_COLOR, &border) };
        }

        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MIN_LOD, desc.lod_clamp.start) };
        unsafe { gl.sampler_parameter_f32(raw, glow::TEXTURE_MAX_LOD, desc.lod_clamp.end) };

        // If clamp is not 1, we know anisotropy is supported up to 16x
        if desc.anisotropy_clamp != 1 {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_MAX_ANISOTROPY,
                    desc.anisotropy_clamp as i32,
                )
            };
        }

        //set_param_float(glow::TEXTURE_LOD_BIAS, info.lod_bias.0);

        if let Some(compare) = desc.compare {
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_MODE,
                    glow::COMPARE_REF_TO_TEXTURE as i32,
                )
            };
            unsafe {
                gl.sampler_parameter_i32(
                    raw,
                    glow::TEXTURE_COMPARE_FUNC,
                    conv::map_compare_func(compare) as i32,
                )
            };
        }

        #[cfg(native)]
        if let Some(label) = desc.label {
            if self
                .shared
                .private_caps
                .contains(PrivateCapabilities::DEBUG_FNS)
            {
                let name = raw.0.get();
                unsafe { gl.object_label(glow::SAMPLER, name, Some(label)) };
            }
        }

        self.counters.samplers.add(1);

        Ok(super::Sampler { raw })
    }

    unsafe fn destroy_sampler(&self, sampler: super::Sampler) {
        let gl = &self.shared.context.lock();
        unsafe { gl.delete_sampler(sampler.raw) };
        self.counters.samplers.sub(1);
    }

    unsafe fn create_command_encoder(
        &self,
        _desc: &crate::CommandEncoderDescriptor<super::Queue>,
    ) -> Result<super::CommandEncoder, crate::DeviceError> {
        self.counters.command_encoders.add(1);

        Ok(super::CommandEncoder {
            cmd_buffer: super::CommandBuffer::default(),
            state: Default::default(),
            private_caps: self.shared.private_caps,
            counters: Arc::clone(&self.counters),
        })
    }

    unsafe fn create_bind_group_layout(
        &self,
        desc: &crate::BindGroupLayoutDescriptor,
    ) -> Result<super::BindGroupLayout, crate::DeviceError> {
        self.counters.bind_group_layouts.add(1);
        Ok(super::BindGroupLayout {
            entries: Arc::from(desc.entries),
        })
    }

    unsafe fn destroy_bind_group_layout(&self, _bg_layout: super::BindGroupLayout) {
        self.counters.bind_group_layouts.sub(1);
    }

    unsafe fn create_pipeline_layout(
        &self,
        desc: &crate::PipelineLayoutDescriptor<super::BindGroupLayout>,
    ) -> Result<super::PipelineLayout, crate::DeviceError> {
        use naga::back::glsl;

        let mut group_infos = Vec::with_capacity(desc.bind_group_layouts.len());
        let mut num_samplers = 0u8;
        let mut num_textures = 0u8;
        let mut num_images = 0u8;
        let mut num_uniform_buffers = 0u8;
        let mut num_storage_buffers = 0u8;

        let mut writer_flags = glsl::WriterFlags::ADJUST_COORDINATE_SPACE;
        writer_flags.set(
            glsl::WriterFlags::TEXTURE_SHADOW_LOD,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::SHADER_TEXTURE_SHADOW_LOD),
        );
        writer_flags.set(
            glsl::WriterFlags::DRAW_PARAMETERS,
            self.shared
                .private_caps
                .contains(PrivateCapabilities::FULLY_FEATURED_INSTANCING),
        );
        // We always force point size to be written and it will be ignored by the driver if it's not a point list primitive.
        // https://github.com/gfx-rs/wgpu/pull/3440/files#r1095726950
        writer_flags.set(glsl::WriterFlags::FORCE_POINT_SIZE, true);
        let mut binding_map = glsl::BindingMap::default();

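        // Assign each binding a linear slot in its register class (samplers,
        // textures, images, uniform and storage buffers are counted
        // separately). The GLSL backend bakes these slots into the generated
        // source via `binding_map`.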
        for (group_index, bg_layout) in desc.bind_group_layouts.iter().enumerate() {
            // Create a vector large enough to hold all the bindings, filled with `!0`.
1202            let mut binding_to_slot = vec![
1203                !0;
1204                bg_layout
1205                    .entries
1206                    .iter()
1207                    .map(|b| b.binding)
1208                    .max()
1209                    .map_or(0, |idx| idx as usize + 1)
1210            ]
1211            .into_boxed_slice();
1212
1213            for entry in bg_layout.entries.iter() {
1214                let counter = match entry.ty {
1215                    wgt::BindingType::Sampler { .. } => &mut num_samplers,
1216                    wgt::BindingType::Texture { .. } => &mut num_textures,
1217                    wgt::BindingType::StorageTexture { .. } => &mut num_images,
1218                    wgt::BindingType::Buffer {
1219                        ty: wgt::BufferBindingType::Uniform,
1220                        ..
1221                    } => &mut num_uniform_buffers,
1222                    wgt::BindingType::Buffer {
1223                        ty: wgt::BufferBindingType::Storage { .. },
1224                        ..
1225                    } => &mut num_storage_buffers,
1226                    wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
1227                    wgt::BindingType::ExternalTexture => unimplemented!(),
1228                };
1229
1230                binding_to_slot[entry.binding as usize] = *counter;
1231                let br = naga::ResourceBinding {
1232                    group: group_index as u32,
1233                    binding: entry.binding,
1234                };
1235                binding_map.insert(br, *counter);
1236                *counter += entry.count.map_or(1, |c| c.get() as u8);
1237            }
1238
1239            group_infos.push(super::BindGroupLayoutInfo {
1240                entries: Arc::clone(&bg_layout.entries),
1241                binding_to_slot,
1242            });
1243        }
1244
1245        self.counters.pipeline_layouts.add(1);
1246
1247        Ok(super::PipelineLayout {
1248            group_infos: group_infos.into_boxed_slice(),
1249            naga_options: glsl::Options {
1250                version: self.shared.shading_language_version,
1251                writer_flags,
1252                binding_map,
1253                zero_initialize_workgroup_memory: true,
1254            },
1255        })
1256    }
1257
1258    unsafe fn destroy_pipeline_layout(&self, _pipeline_layout: super::PipelineLayout) {
1259        self.counters.pipeline_layouts.sub(1);
1260    }
1261
1262    unsafe fn create_bind_group(
1263        &self,
1264        desc: &crate::BindGroupDescriptor<
1265            super::BindGroupLayout,
1266            super::Buffer,
1267            super::Sampler,
1268            super::TextureView,
1269            super::AccelerationStructure,
1270        >,
1271    ) -> Result<super::BindGroup, crate::DeviceError> {
1272        let mut contents = Vec::new();
1273
1274        let layout_and_entry_iter = desc.entries.iter().map(|entry| {
1275            let layout = desc
1276                .layout
1277                .entries
1278                .iter()
1279                .find(|layout_entry| layout_entry.binding == entry.binding)
1280                .expect("internal error: no layout entry found with binding slot");
1281            (entry, layout)
1282        });
1283        for (entry, layout) in layout_and_entry_iter {
1284            let binding = match layout.ty {
1285                wgt::BindingType::Buffer { .. } => {
1286                    let bb = &desc.buffers[entry.resource_index as usize];
1287                    super::RawBinding::Buffer {
1288                        raw: bb.buffer.raw.unwrap(),
1289                        offset: bb.offset as i32,
1290                        size: match bb.size {
1291                            Some(s) => s.get() as i32,
1292                            None => (bb.buffer.size - bb.offset) as i32,
1293                        },
1294                    }
1295                }
1296                wgt::BindingType::Sampler { .. } => {
1297                    let sampler = desc.samplers[entry.resource_index as usize];
1298                    super::RawBinding::Sampler(sampler.raw)
1299                }
1300                wgt::BindingType::Texture { view_dimension, .. } => {
1301                    let view = desc.textures[entry.resource_index as usize].view;
1302                    if view.array_layers.start != 0 {
1303                        log::error!("Unable to create a sampled texture binding for non-zero array layer.\n{}",
1304                            "This is an implementation problem of wgpu-hal/gles backend.")
1305                    }
1306                    let (raw, target) = view.inner.as_native();
1307
1308                    super::Texture::log_failing_target_heuristics(view_dimension, target);
1309
1310                    super::RawBinding::Texture {
1311                        raw,
1312                        target,
1313                        aspects: view.aspects,
1314                        mip_levels: view.mip_levels.clone(),
1315                    }
1316                }
1317                wgt::BindingType::StorageTexture {
1318                    access,
1319                    format,
1320                    view_dimension,
1321                } => {
1322                    let view = desc.textures[entry.resource_index as usize].view;
1323                    let format_desc = self.shared.describe_texture_format(format);
1324                    let (raw, _target) = view.inner.as_native();
1325                    super::RawBinding::Image(super::ImageBinding {
1326                        raw,
1327                        mip_level: view.mip_levels.start,
1328                        array_layer: match view_dimension {
1329                            wgt::TextureViewDimension::D2Array
1330                            | wgt::TextureViewDimension::CubeArray => None,
1331                            _ => Some(view.array_layers.start),
1332                        },
1333                        access: conv::map_storage_access(access),
1334                        format: format_desc.internal,
1335                    })
1336                }
1337                wgt::BindingType::AccelerationStructure { .. } => unimplemented!(),
1338                wgt::BindingType::ExternalTexture => unimplemented!(),
1339            };
1340            contents.push(binding);
1341        }

        self.counters.bind_groups.add(1);

        Ok(super::BindGroup {
            contents: contents.into_boxed_slice(),
        })
    }

    unsafe fn destroy_bind_group(&self, _group: super::BindGroup) {
        self.counters.bind_groups.sub(1);
    }

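    // Note: this backend stores the naga IR in the shader module as-is;
    // translation to GLSL is deferred until the module is linked into a
    // program in `create_pipeline`.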
    unsafe fn create_shader_module(
        &self,
        desc: &crate::ShaderModuleDescriptor,
        shader: crate::ShaderInput,
    ) -> Result<super::ShaderModule, crate::ShaderError> {
        self.counters.shader_modules.add(1);

        Ok(super::ShaderModule {
            source: match shader {
                crate::ShaderInput::Naga(naga) => naga,
                // The backend doesn't expose the GLSL passthrough feature yet,
                // so raw GLSL input should never reach this point.
                crate::ShaderInput::Glsl { .. } => unimplemented!(),
                crate::ShaderInput::SpirV(_)
                | crate::ShaderInput::MetalLib { .. }
                | crate::ShaderInput::Msl { .. }
                | crate::ShaderInput::Dxil { .. }
                | crate::ShaderInput::Hlsl { .. } => {
                    unreachable!()
                }
            },
            label: desc.label.map(|s| s.to_string()),
            id: self.shared.next_shader_id.fetch_add(1, Ordering::Relaxed),
        })
    }

    unsafe fn destroy_shader_module(&self, _module: super::ShaderModule) {
        self.counters.shader_modules.sub(1);
    }

    unsafe fn create_render_pipeline(
        &self,
        desc: &crate::RenderPipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::RenderPipeline, crate::PipelineError> {
        let (vertex_stage, vertex_buffers) = match &desc.vertex_processor {
            crate::VertexProcessor::Standard {
                vertex_buffers,
                ref vertex_stage,
            } => (vertex_stage, vertex_buffers),
            crate::VertexProcessor::Mesh { .. } => unreachable!(),
        };
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Vertex, vertex_stage));
        if let Some(ref fs) = desc.fragment_stage {
            shaders.push((naga::ShaderStage::Fragment, fs));
        }
        let inner = unsafe {
            self.create_pipeline(gl, shaders, desc.layout, desc.label, desc.multiview_mask)
        }?;

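        // Flatten the nested `VertexBufferLayout`s into two parallel lists:
        // one descriptor per vertex buffer and one per attribute, with each
        // attribute recording the index of the buffer it reads from.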
        let (vertex_buffers, vertex_attributes) = {
            let mut buffers = Vec::new();
            let mut attributes = Vec::new();
            for (index, vb_layout) in vertex_buffers.iter().enumerate() {
                buffers.push(super::VertexBufferDesc {
                    step: vb_layout.step_mode,
                    stride: vb_layout.array_stride as u32,
                });
                for vat in vb_layout.attributes.iter() {
                    let format_desc = conv::describe_vertex_format(vat.format);
                    attributes.push(super::AttributeDesc {
                        location: vat.shader_location,
                        offset: vat.offset as u32,
                        buffer_index: index as u32,
                        format_desc,
                    });
                }
            }
            (buffers.into_boxed_slice(), attributes.into_boxed_slice())
        };

        let color_targets = {
            let mut targets = Vec::new();
            for ct in desc.color_targets.iter().filter_map(|at| at.as_ref()) {
                targets.push(super::ColorTargetDesc {
                    mask: ct.write_mask,
                    blend: ct.blend.as_ref().map(conv::map_blend),
                });
            }
            // Note: if any of the blend states differ and the `INDEPENDENT_BLEND`
            // flag is not exposed, this pipeline will not bind correctly.
            targets.into_boxed_slice()
        };

        self.counters.render_pipelines.add(1);

        Ok(super::RenderPipeline {
            inner,
            primitive: desc.primitive,
            vertex_buffers,
            vertex_attributes,
            color_targets,
            depth: desc.depth_stencil.as_ref().map(|ds| super::DepthState {
                function: conv::map_compare_func(ds.depth_compare),
                mask: ds.depth_write_enabled,
            }),
            depth_bias: desc
                .depth_stencil
                .as_ref()
                .map(|ds| ds.bias)
                .unwrap_or_default(),
            stencil: desc
                .depth_stencil
                .as_ref()
                .map(|ds| conv::map_stencil(&ds.stencil)),
            alpha_to_coverage_enabled: desc.multisample.alpha_to_coverage_enabled,
        })
    }

    unsafe fn destroy_render_pipeline(&self, pipeline: super::RenderPipeline) {
        // If the pipeline has only 2 strong references remaining, they are
        // `pipeline` itself and the entry in `program_cache`.
        // This is safe to assume as long as:
        // - `RenderPipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
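            // Evict the cache entry for this program; `retain` also drops any
            // cached compilation errors (`Err` entries) in passing.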
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.render_pipelines.sub(1);
    }

    unsafe fn create_compute_pipeline(
        &self,
        desc: &crate::ComputePipelineDescriptor<
            super::PipelineLayout,
            super::ShaderModule,
            super::PipelineCache,
        >,
    ) -> Result<super::ComputePipeline, crate::PipelineError> {
        let gl = &self.shared.context.lock();
        let mut shaders = ArrayVec::new();
        shaders.push((naga::ShaderStage::Compute, &desc.stage));
        let inner = unsafe { self.create_pipeline(gl, shaders, desc.layout, desc.label, None) }?;

        self.counters.compute_pipelines.add(1);

        Ok(super::ComputePipeline { inner })
    }

    unsafe fn destroy_compute_pipeline(&self, pipeline: super::ComputePipeline) {
        // If the pipeline has only 2 strong references remaining, they are
        // `pipeline` itself and the entry in `program_cache`.
        // This is safe to assume as long as:
        // - `ComputePipeline` can't be cloned
        // - The only place that we can get a new reference is during `program_cache.lock()`
        if Arc::strong_count(&pipeline.inner) == 2 {
            let gl = &self.shared.context.lock();
            let mut program_cache = self.shared.program_cache.lock();
            program_cache.retain(|_, v| match *v {
                Ok(ref p) => p.program != pipeline.inner.program,
                Err(_) => false,
            });
            unsafe { gl.delete_program(pipeline.inner.program) };
        }

        self.counters.compute_pipelines.sub(1);
    }

    unsafe fn create_pipeline_cache(
        &self,
        _: &crate::PipelineCacheDescriptor<'_>,
    ) -> Result<super::PipelineCache, crate::PipelineCacheError> {
        // Even though the cache doesn't do anything, we still return something
        // here as the least bad option.
        Ok(super::PipelineCache)
    }
    unsafe fn destroy_pipeline_cache(&self, _: super::PipelineCache) {}

    #[cfg_attr(target_arch = "wasm32", allow(unused))]
    unsafe fn create_query_set(
        &self,
        desc: &wgt::QuerySetDescriptor<crate::Label>,
    ) -> Result<super::QuerySet, crate::DeviceError> {
        let gl = &self.shared.context.lock();

        let mut queries = Vec::with_capacity(desc.count as usize);
        for _ in 0..desc.count {
            let query =
                unsafe { gl.create_query() }.map_err(|_| crate::DeviceError::OutOfMemory)?;

            // In general, we aren't able to label individual queries.
            //
            // We could take a timestamp here to "initialize" the query,
            // but that's a bit of a hack, and we don't want to insert
            // random timestamps into the command stream if we don't have to.

            queries.push(query);
        }

        self.counters.query_sets.add(1);

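        // Map the wgpu query type to its GL query target. Occlusion queries use
        // the conservative variant: wgpu's occlusion result is effectively a
        // boolean "any samples passed", so the conservative test suffices and
        // may allow the driver a cheaper implementation.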
        Ok(super::QuerySet {
            queries: queries.into_boxed_slice(),
            target: match desc.ty {
                wgt::QueryType::Occlusion => glow::ANY_SAMPLES_PASSED_CONSERVATIVE,
                wgt::QueryType::Timestamp => glow::TIMESTAMP,
                _ => unimplemented!(),
            },
        })
    }

    unsafe fn destroy_query_set(&self, set: super::QuerySet) {
        let gl = &self.shared.context.lock();
        for &query in set.queries.iter() {
            unsafe { gl.delete_query(query) };
        }
        self.counters.query_sets.sub(1);
    }

    unsafe fn create_fence(&self) -> Result<super::Fence, crate::DeviceError> {
        self.counters.fences.add(1);
        Ok(super::Fence::new(&self.shared.options))
    }

    unsafe fn destroy_fence(&self, fence: super::Fence) {
        let gl = &self.shared.context.lock();
        fence.destroy(gl);
        self.counters.fences.sub(1);
    }

    unsafe fn get_fence_value(
        &self,
        fence: &super::Fence,
    ) -> Result<crate::FenceValue, crate::DeviceError> {
        #[cfg_attr(target_arch = "wasm32", allow(clippy::needless_borrow))]
        Ok(fence.get_latest(&self.shared.context.lock()))
    }
    unsafe fn wait(
        &self,
        fence: &super::Fence,
        wait_value: crate::FenceValue,
        timeout: Option<core::time::Duration>,
    ) -> Result<bool, crate::DeviceError> {
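        // Fast path: the fence has already reached `wait_value`, so we can
        // answer without touching the GL context at all.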
        if fence.satisfied(wait_value) {
            return Ok(true);
        }

        let gl = &self.shared.context.lock();
        // MAX_CLIENT_WAIT_TIMEOUT_WEBGL is:
        // - 1s in Gecko https://searchfox.org/mozilla-central/rev/754074e05178e017ef6c3d8e30428ffa8f1b794d/dom/canvas/WebGLTypes.h#1386
        // - 0 in WebKit https://github.com/WebKit/WebKit/blob/4ef90d4672ca50267c0971b85db403d9684508ea/Source/WebCore/html/canvas/WebGL2RenderingContext.cpp#L110
        // - 0 in Chromium https://source.chromium.org/chromium/chromium/src/+/main:third_party/blink/renderer/modules/webgl/webgl2_rendering_context_base.cc;l=112;drc=a3cb0ac4c71ec04abfeaed199e5d63230eca2551
        let timeout_ns = if cfg!(any(webgl, Emscripten)) {
            0
        } else {
            timeout
                .map(|t| t.as_nanos().min(u32::MAX as u128) as u32)
                .unwrap_or(u32::MAX)
        };
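        // On WebGL, a zero timeout turns the wait into a pure poll; on native,
        // the requested duration is clamped to the `u32` nanosecond range that
        // `Fence::wait` accepts.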
        fence.wait(gl, wait_value, timeout_ns)
    }

    unsafe fn start_graphics_debugger_capture(&self) -> bool {
        #[cfg(all(native, feature = "renderdoc"))]
        return unsafe {
            self.render_doc
                .start_frame_capture(self.shared.context.raw_context(), ptr::null_mut())
        };
        #[allow(unreachable_code)]
        false
    }
    unsafe fn stop_graphics_debugger_capture(&self) {
        #[cfg(all(native, feature = "renderdoc"))]
        unsafe {
            self.render_doc
                .end_frame_capture(ptr::null_mut(), ptr::null_mut())
        }
    }
    unsafe fn create_acceleration_structure(
        &self,
        _desc: &crate::AccelerationStructureDescriptor,
    ) -> Result<super::AccelerationStructure, crate::DeviceError> {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_build_sizes<'a>(
        &self,
        _desc: &crate::GetAccelerationStructureBuildSizesDescriptor<'a, super::Buffer>,
    ) -> crate::AccelerationStructureBuildSizes {
        unimplemented!()
    }
    unsafe fn get_acceleration_structure_device_address(
        &self,
        _acceleration_structure: &super::AccelerationStructure,
    ) -> wgt::BufferAddress {
        unimplemented!()
    }
    unsafe fn destroy_acceleration_structure(
        &self,
        _acceleration_structure: super::AccelerationStructure,
    ) {
    }

    fn tlas_instance_to_bytes(&self, _instance: TlasInstance) -> Vec<u8> {
        unimplemented!()
    }

    fn get_internal_counters(&self) -> wgt::HalCounters {
        self.counters.as_ref().clone()
    }

    fn check_if_oom(&self) -> Result<(), crate::DeviceError> {
        Ok(())
    }
}

#[cfg(send_sync)]
unsafe impl Sync for super::Device {}
#[cfg(send_sync)]
unsafe impl Send for super::Device {}