naga/back/glsl/mod.rs
1/*!
2Backend for [GLSL][glsl] (OpenGL Shading Language).
3
4The main structure is [`Writer`], it maintains internal state that is used
5to output a [`Module`](crate::Module) into glsl
6
7# Supported versions
8### Core
9- 330
10- 400
11- 410
12- 420
13- 430
14- 450
15
16### ES
17- 300
18- 310
19
20[glsl]: https://www.khronos.org/registry/OpenGL/index_gl.php
21*/
22
// GLSL is mostly a superset of C, but it also removes some parts of it. This is a list of relevant
// aspects for this backend.
25//
26// The most notable change is the introduction of the version preprocessor directive that must
27// always be the first line of a glsl file and is written as
28// `#version number profile`
29// `number` is the version itself (i.e. 300) and `profile` is the
// shader profile. We only support "core" and "es"; the former is used in desktop applications and
// the latter is used in embedded contexts, mobile devices and browsers. Each one has its own
// versions (at the time of writing this the latest version for "core" is 460 and for "es" is 320)
33//
// Another important preprocessor addition is the extension directive, which is written as
// `#extension name: behaviour`
// Extensions provide increased features in a plugin fashion, but they aren't required to be
// supported (hence why they are called extensions). That is also why `behaviour` exists: it
// specifies whether the extension is strictly required or should only be enabled if needed.
// In our case, when we use extensions we always set behaviour to `require`.
40//
41// The only thing that glsl removes that makes a difference are pointers.
42//
// Additions that are relevant for the backend are the discard keyword, the introduction of
// vectors, matrices, samplers and image types, and functions that provide common shader operations
45
46pub use features::Features;
47
48use alloc::{
49 borrow::ToOwned,
50 format,
51 string::{String, ToString},
52 vec,
53 vec::Vec,
54};
55use core::{
56 cmp::Ordering,
57 fmt::{self, Error as FmtError, Write},
58 mem,
59};
60
61use hashbrown::hash_map;
62use thiserror::Error;
63
64use crate::{
65 back::{self, Baked},
66 common,
67 proc::{self, NameKey},
68 valid, Handle, ShaderStage, TypeInner,
69};
70use features::FeaturesManager;
71
72/// Contains the features related code and the features querying method
73mod features;
74/// Contains a constant with a slice of all the reserved keywords RESERVED_KEYWORDS
75mod keywords;
76
/// List of supported `core` GLSL versions.
pub const SUPPORTED_CORE_VERSIONS: &[u16] = &[140, 150, 330, 400, 410, 420, 430, 440, 450, 460];
/// List of supported `es` GLSL versions.
pub const SUPPORTED_ES_VERSIONS: &[u16] = &[300, 310, 320];

/// The suffix of the variable that will hold the calculated clamped level
/// of detail for bounds checking in `ImageLoad`
const CLAMPED_LOD_SUFFIX: &str = "_clamped_lod";

/// Name of the emitted helper function that wraps GLSL `modf` and returns a
/// result struct instead of using an out parameter.
pub(crate) const MODF_FUNCTION: &str = "naga_modf";
/// Name of the emitted helper function that wraps (or polyfills) GLSL `frexp`
/// and returns a result struct instead of using an out parameter.
pub(crate) const FREXP_FUNCTION: &str = "naga_frexp";

/// Name of the uniform that supplies the first-instance value when
/// `WriterFlags::DRAW_PARAMETERS` is not used.
// Must match code in glsl_built_in
pub const FIRST_INSTANCE_BINDING: &str = "naga_vs_first_instance";
91
/// One entry of a [`BindingMap`] in its serialized form: a resource binding
/// paired with the binding slot value it maps to.
///
/// The map is (de)serialized as a sequence of these pairs; see
/// [`deserialize_binding_map`].
#[cfg(any(feature = "serialize", feature = "deserialize"))]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
struct BindingMapSerialization {
    // The naga IR resource binding used as the map key.
    resource_binding: crate::ResourceBinding,
    // The value assigned to that resource (used as a GL `binding = ..` slot).
    bind_target: u8,
}
99
/// Deserializes a [`BindingMap`] from a sequence of
/// [`BindingMapSerialization`] entries.
///
/// Later entries with a duplicate `resource_binding` overwrite earlier ones,
/// matching ordinary map insertion semantics.
#[cfg(feature = "deserialize")]
fn deserialize_binding_map<'de, D>(deserializer: D) -> Result<BindingMap, D::Error>
where
    D: serde::Deserializer<'de>,
{
    use serde::Deserialize;

    // Read the flat entry list, then rebuild the ordered map from it.
    let entries = Vec::<BindingMapSerialization>::deserialize(deserializer)?;
    Ok(entries
        .into_iter()
        .map(|entry| (entry.resource_binding, entry.bind_target))
        .collect())
}
114
/// Mapping between resources and bindings.
///
/// Keys are naga IR resource bindings; values are the slot numbers assigned
/// to them. A `BTreeMap` is used so iteration order is deterministic.
pub type BindingMap = alloc::collections::BTreeMap<crate::ResourceBinding, u8>;
117
impl crate::AtomicFunction {
    /// Returns the name suffix used to build the GLSL atomic builtin for this
    /// operation (e.g. `"Add"` for `atomicAdd`).
    const fn to_glsl(self) -> &'static str {
        match self {
            // Subtract shares "Add"; presumably the caller negates the
            // operand — TODO confirm at the call site.
            Self::Add | Self::Subtract => "Add",
            Self::And => "And",
            Self::InclusiveOr => "Or",
            Self::ExclusiveOr => "Xor",
            Self::Min => "Min",
            Self::Max => "Max",
            Self::Exchange { compare: None } => "Exchange",
            // Compare-exchange has no simple name suffix and is not yet
            // handled here.
            Self::Exchange { compare: Some(_) } => "", //TODO
        }
    }
}
132
133impl crate::AddressSpace {
134 /// Whether a variable with this address space can be initialized
135 const fn initializable(&self) -> bool {
136 match *self {
137 crate::AddressSpace::Function | crate::AddressSpace::Private => true,
138 crate::AddressSpace::WorkGroup
139 | crate::AddressSpace::Uniform
140 | crate::AddressSpace::Storage { .. }
141 | crate::AddressSpace::Handle
142 | crate::AddressSpace::PushConstant => false,
143 }
144 }
145}
146
/// A GLSL version.
#[derive(Debug, Copy, Clone, PartialEq)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub enum Version {
    /// `core` GLSL; e.g. `Desktop(330)` renders as `#version 330 core`.
    Desktop(u16),
    /// `es` GLSL; e.g. `version: 300` renders as `#version 300 es`.
    Embedded {
        /// The numeric ES version (300, 310, 320).
        version: u16,
        /// Whether the target is WebGL rather than native GLES.
        is_webgl: bool,
    },
}
157
158impl Version {
159 /// Create a new gles version
160 pub const fn new_gles(version: u16) -> Self {
161 Self::Embedded {
162 version,
163 is_webgl: false,
164 }
165 }
166
167 /// Returns true if self is `Version::Embedded` (i.e. is a es version)
168 const fn is_es(&self) -> bool {
169 match *self {
170 Version::Desktop(_) => false,
171 Version::Embedded { .. } => true,
172 }
173 }
174
175 /// Returns true if targeting WebGL
176 const fn is_webgl(&self) -> bool {
177 match *self {
178 Version::Desktop(_) => false,
179 Version::Embedded { is_webgl, .. } => is_webgl,
180 }
181 }
182
183 /// Checks the list of currently supported versions and returns true if it contains the
184 /// specified version
185 ///
186 /// # Notes
187 /// As an invalid version number will never be added to the supported version list
188 /// so this also checks for version validity
189 fn is_supported(&self) -> bool {
190 match *self {
191 Version::Desktop(v) => SUPPORTED_CORE_VERSIONS.contains(&v),
192 Version::Embedded { version: v, .. } => SUPPORTED_ES_VERSIONS.contains(&v),
193 }
194 }
195
196 fn supports_io_locations(&self) -> bool {
197 *self >= Version::Desktop(330) || *self >= Version::new_gles(300)
198 }
199
200 /// Checks if the version supports all of the explicit layouts:
201 /// - `location=` qualifiers for bindings
202 /// - `binding=` qualifiers for resources
203 ///
204 /// Note: `location=` for vertex inputs and fragment outputs is supported
205 /// unconditionally for GLES 300.
206 fn supports_explicit_locations(&self) -> bool {
207 *self >= Version::Desktop(420) || *self >= Version::new_gles(310)
208 }
209
210 fn supports_early_depth_test(&self) -> bool {
211 *self >= Version::Desktop(130) || *self >= Version::new_gles(310)
212 }
213
214 fn supports_std140_layout(&self) -> bool {
215 *self >= Version::Desktop(140) || *self >= Version::new_gles(300)
216 }
217
218 fn supports_std430_layout(&self) -> bool {
219 *self >= Version::Desktop(430) || *self >= Version::new_gles(310)
220 }
221
222 fn supports_fma_function(&self) -> bool {
223 *self >= Version::Desktop(400) || *self >= Version::new_gles(320)
224 }
225
226 fn supports_integer_functions(&self) -> bool {
227 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
228 }
229
230 fn supports_frexp_function(&self) -> bool {
231 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
232 }
233
234 fn supports_derivative_control(&self) -> bool {
235 *self >= Version::Desktop(450)
236 }
237
238 // For supports_pack_unpack_4x8, supports_pack_unpack_snorm_2x16, supports_pack_unpack_unorm_2x16
239 // see:
240 // https://registry.khronos.org/OpenGL-Refpages/gl4/html/unpackUnorm.xhtml
241 // https://registry.khronos.org/OpenGL-Refpages/es3/html/unpackUnorm.xhtml
242 // https://registry.khronos.org/OpenGL-Refpages/gl4/html/packUnorm.xhtml
243 // https://registry.khronos.org/OpenGL-Refpages/es3/html/packUnorm.xhtml
244 fn supports_pack_unpack_4x8(&self) -> bool {
245 *self >= Version::Desktop(400) || *self >= Version::new_gles(310)
246 }
247 fn supports_pack_unpack_snorm_2x16(&self) -> bool {
248 *self >= Version::Desktop(420) || *self >= Version::new_gles(300)
249 }
250 fn supports_pack_unpack_unorm_2x16(&self) -> bool {
251 *self >= Version::Desktop(400) || *self >= Version::new_gles(300)
252 }
253
254 // https://registry.khronos.org/OpenGL-Refpages/gl4/html/unpackHalf2x16.xhtml
255 // https://registry.khronos.org/OpenGL-Refpages/gl4/html/packHalf2x16.xhtml
256 // https://registry.khronos.org/OpenGL-Refpages/es3/html/unpackHalf2x16.xhtml
257 // https://registry.khronos.org/OpenGL-Refpages/es3/html/packHalf2x16.xhtml
258 fn supports_pack_unpack_half_2x16(&self) -> bool {
259 *self >= Version::Desktop(420) || *self >= Version::new_gles(300)
260 }
261}
262
263impl PartialOrd for Version {
264 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
265 match (*self, *other) {
266 (Version::Desktop(x), Version::Desktop(y)) => Some(x.cmp(&y)),
267 (Version::Embedded { version: x, .. }, Version::Embedded { version: y, .. }) => {
268 Some(x.cmp(&y))
269 }
270 _ => None,
271 }
272 }
273}
274
275impl fmt::Display for Version {
276 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
277 match *self {
278 Version::Desktop(v) => write!(f, "{v} core"),
279 Version::Embedded { version: v, .. } => write!(f, "{v} es"),
280 }
281 }
282}
283
bitflags::bitflags! {
    /// Configuration flags for the [`Writer`].
    #[cfg_attr(feature = "serialize", derive(serde::Serialize))]
    #[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    pub struct WriterFlags: u32 {
        /// Flip output Y and extend Z from (0, 1) to (-1, 1).
        const ADJUST_COORDINATE_SPACE = 0x1;
        /// Supports GL_EXT_texture_shadow_lod on the host, which provides
        /// additional functions on shadows and arrays of shadows.
        const TEXTURE_SHADOW_LOD = 0x2;
        /// Supports ARB_shader_draw_parameters on the host, which provides
        /// support for `gl_BaseInstanceARB`, `gl_BaseVertexARB`, `gl_DrawIDARB`, and `gl_DrawID`.
        const DRAW_PARAMETERS = 0x4;
        // NOTE(review): 0x8 is skipped — possibly a retired flag; confirm
        // before reusing the bit.
        /// Include unused global variables, constants and functions. By default the output will exclude
        /// global variables that are not used in the specified entrypoint (including indirect use),
        /// all constant declarations, and functions that use excluded global variables.
        const INCLUDE_UNUSED_ITEMS = 0x10;
        /// Emit `PointSize` output builtin to vertex shaders, which is
        /// required for drawing with `PointList` topology.
        ///
        /// https://registry.khronos.org/OpenGL/specs/es/3.2/GLSL_ES_Specification_3.20.html#built-in-language-variables
        /// The variable gl_PointSize is intended for a shader to write the size of the point to be rasterized. It is measured in pixels.
        /// If gl_PointSize is not written to, its value is undefined in subsequent pipe stages.
        const FORCE_POINT_SIZE = 0x20;
    }
}
311
/// Configuration used in the [`Writer`].
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
#[cfg_attr(feature = "deserialize", serde(default))]
pub struct Options {
    /// The GLSL version to be used.
    pub version: Version,
    /// Configuration flags for the [`Writer`].
    pub writer_flags: WriterFlags,
    /// Map of resources association to binding locations.
    ///
    /// Deserialized from a sequence of entries rather than a map; see
    /// [`deserialize_binding_map`].
    #[cfg_attr(
        feature = "deserialize",
        serde(deserialize_with = "deserialize_binding_map")
    )]
    pub binding_map: BindingMap,
    /// Should workgroup variables be zero initialized (by polyfilling)?
    pub zero_initialize_workgroup_memory: bool,
}
331
332impl Default for Options {
333 fn default() -> Self {
334 Options {
335 version: Version::new_gles(310),
336 writer_flags: WriterFlags::ADJUST_COORDINATE_SPACE,
337 binding_map: BindingMap::default(),
338 zero_initialize_workgroup_memory: true,
339 }
340 }
341}
342
/// A subset of options meant to be changed per pipeline.
#[derive(Debug, Clone)]
#[cfg_attr(feature = "serialize", derive(serde::Serialize))]
#[cfg_attr(feature = "deserialize", derive(serde::Deserialize))]
pub struct PipelineOptions {
    /// The stage of the entry point.
    pub shader_stage: ShaderStage,
    /// The name of the entry point.
    ///
    /// If no entry point that matches is found while creating a [`Writer`], an
    /// error will be thrown.
    pub entry_point: String,
    /// How many views to render to, if doing multiview rendering.
    ///
    /// Only acted on for WebGL vertex shaders, where it emits a
    /// `layout(num_views = ..) in;` declaration.
    pub multiview: Option<core::num::NonZeroU32>,
}
358
/// Location information for a varying, exposed through
/// [`ReflectionInfo::varying`].
#[derive(Debug)]
pub struct VaryingLocation {
    /// The location of the global.
    /// This corresponds to `layout(location = ..)` in GLSL.
    pub location: u32,
    /// The index which can be used for dual source blending.
    /// This corresponds to `layout(index = ..)` in GLSL.
    pub index: u32,
}
368
/// Reflection info for texture mappings and uniforms.
///
/// Produced by [`Writer::write`] so callers can relate the generated GLSL
/// names back to the module's globals.
#[derive(Debug)]
pub struct ReflectionInfo {
    /// Mapping between texture names and variables/samplers.
    pub texture_mapping: crate::FastHashMap<String, TextureMapping>,
    /// Mapping between uniform variables and names.
    pub uniforms: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// Mapping between names and attribute locations.
    pub varying: crate::FastHashMap<String, VaryingLocation>,
    /// List of push constant items in the shader.
    pub push_constant_items: Vec<PushConstantItem>,
    /// Number of user-defined clip planes. Only applicable to vertex shaders.
    pub clip_distance_count: u32,
}
383
/// Mapping between a texture and its sampler, if it exists.
///
/// GLSL pre-Vulkan has no concept of separate textures and samplers. Instead, everything is a
/// `gsamplerN` where `g` is the scalar type and `N` is the dimension. But naga uses separate textures
/// and samplers in the IR, so the backend produces a [`FastHashMap`](crate::FastHashMap) with the texture name
/// as a key and a [`TextureMapping`] as a value. This way, the user knows where to bind.
///
/// [`Storage`](crate::ImageClass::Storage) images produce `gimageN` and don't have an associated sampler,
/// so the [`sampler`](Self::sampler) field will be [`None`].
///
/// Stored in [`ReflectionInfo::texture_mapping`].
#[derive(Debug, Clone)]
pub struct TextureMapping {
    /// Handle to the image global variable.
    pub texture: Handle<crate::GlobalVariable>,
    /// Handle to the associated sampler global variable, if it exists.
    pub sampler: Option<Handle<crate::GlobalVariable>>,
}
400
/// All information to bind a single uniform value to the shader.
///
/// Push constants are emulated using traditional uniforms in OpenGL.
///
/// These are composed of a set of primitives (scalar, vector, matrix) that
/// are given names. Because they are not backed by the concept of a buffer,
/// we must do the work of calculating the offset of each primitive in the
/// push constant block.
#[derive(Debug, Clone)]
pub struct PushConstantItem {
    /// GL uniform name for the item. This name is the same as if you were
    /// to access it directly from a GLSL shader.
    ///
    /// With the following example, these are the names that will be
    /// generated, one name per GLSL uniform:
    ///
    /// ```glsl
    /// struct InnerStruct {
    ///     value: f32,
    /// }
    ///
    /// struct PushConstant {
    ///     InnerStruct inner;
    ///     vec4 array[2];
    /// }
    ///
    /// uniform PushConstants _push_constant_binding_cs;
    /// ```
    ///
    /// ```text
    /// - _push_constant_binding_cs.inner.value
    /// - _push_constant_binding_cs.array[0]
    /// - _push_constant_binding_cs.array[1]
    /// ```
    ///
    pub access_path: String,
    /// Type of the uniform. This will only ever be a scalar, vector, or matrix.
    pub ty: Handle<crate::Type>,
    /// The offset in the push constant memory block this uniform maps to.
    ///
    /// The size of the uniform can be derived from the type.
    pub offset: u32,
}
444
/// Helper structure that generates a number
#[derive(Default)]
struct IdGenerator(u32);

impl IdGenerator {
    /// Generates a number that's guaranteed to be unique for this `IdGenerator`
    fn generate(&mut self) -> u32 {
        // Hand out the current counter value, then advance it.
        let id = self.0;
        self.0 += 1;
        id
    }
}
458
/// Assorted options needed for generating varyings.
#[derive(Clone, Copy)]
struct VaryingOptions {
    // True when generating an output varying (as opposed to an input).
    output: bool,
    // True when the selected version targets WebGL.
    targeting_webgl: bool,
    // True when `WriterFlags::DRAW_PARAMETERS` is set.
    draw_parameters: bool,
}

impl VaryingOptions {
    /// Derives the varying options from the writer-level [`Options`], with
    /// `output` selecting the varying direction.
    const fn from_writer_options(options: &Options, output: bool) -> Self {
        Self {
            output,
            targeting_webgl: options.version.is_webgl(),
            draw_parameters: options.writer_flags.contains(WriterFlags::DRAW_PARAMETERS),
        }
    }
}
476
/// Helper wrapper used to get a name for a varying
///
/// Varying have different naming schemes depending on their binding:
/// - Varyings with builtin bindings get their name from [`glsl_built_in`].
/// - Varyings with location bindings are named `_S_location_X` where `S` is a
///   prefix identifying which pipeline stage the varying connects, and `X` is
///   the location.
struct VaryingName<'a> {
    // The binding (location or builtin) this varying corresponds to.
    binding: &'a crate::Binding,
    // The pipeline stage of the entry point being written.
    stage: ShaderStage,
    // Options controlling how builtin names are rendered.
    options: VaryingOptions,
}
489impl fmt::Display for VaryingName<'_> {
490 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
491 match *self.binding {
492 crate::Binding::Location {
493 blend_src: Some(1), ..
494 } => {
495 write!(f, "_fs2p_location1",)
496 }
497 crate::Binding::Location { location, .. } => {
498 let prefix = match (self.stage, self.options.output) {
499 (ShaderStage::Compute, _) => unreachable!(),
500 // pipeline to vertex
501 (ShaderStage::Vertex, false) => "p2vs",
502 // vertex to fragment
503 (ShaderStage::Vertex, true) | (ShaderStage::Fragment, false) => "vs2fs",
504 // fragment to pipeline
505 (ShaderStage::Fragment, true) => "fs2p",
506 (ShaderStage::Task | ShaderStage::Mesh, _) => unreachable!(),
507 };
508 write!(f, "_{prefix}_location{location}",)
509 }
510 crate::Binding::BuiltIn(built_in) => {
511 write!(f, "{}", glsl_built_in(built_in, self.options))
512 }
513 }
514 }
515}
516
517impl ShaderStage {
518 const fn to_str(self) -> &'static str {
519 match self {
520 ShaderStage::Compute => "cs",
521 ShaderStage::Fragment => "fs",
522 ShaderStage::Vertex => "vs",
523 ShaderStage::Task | ShaderStage::Mesh => unreachable!(),
524 }
525 }
526}
527
/// Shorthand result used internally by the backend
///
/// The success type defaults to `()` for write-only operations.
type BackendResult<T = ()> = Result<T, Error>;
530
/// A GLSL compilation error.
#[derive(Debug, Error)]
pub enum Error {
    /// An error occurred while writing to the output.
    #[error("Format error")]
    FmtError(#[from] FmtError),
    /// The specified [`Version`] doesn't have all required [`Features`].
    ///
    /// Contains the missing [`Features`].
    #[error("The selected version doesn't support {0:?}")]
    MissingFeatures(Features),
    /// [`AddressSpace::PushConstant`](crate::AddressSpace::PushConstant) was used more than
    /// once in the entry point, which isn't supported.
    #[error("Multiple push constants aren't supported")]
    MultiplePushConstants,
    /// The specified [`Version`] isn't supported.
    #[error("The specified version isn't supported")]
    VersionNotSupported,
    /// The entry point couldn't be found.
    #[error("The requested entry point couldn't be found")]
    EntryPointNotFound,
    /// A call was made to an unsupported external.
    #[error("A call was made to an unsupported external: {0}")]
    UnsupportedExternal(String),
    /// A scalar with an unsupported width was requested.
    #[error("A scalar with an unsupported width was requested: {0:?}")]
    UnsupportedScalar(crate::Scalar),
    /// An image was used with multiple samplers, which isn't supported.
    #[error("A image was used with multiple samplers")]
    ImageMultipleSamplers,
    /// A free-form error message.
    #[error("{0}")]
    Custom(String),
    /// Override expressions were still present when writing began.
    #[error("overrides should not be present at this stage")]
    Override,
    /// [`crate::Sampling::First`] is unsupported.
    #[error("`{:?}` sampling is unsupported", crate::Sampling::First)]
    FirstSamplingNotSupported,
    /// An array size expression couldn't be resolved.
    #[error(transparent)]
    ResolveArraySizeError(#[from] proc::ResolveArraySizeError),
}
571
/// Binary operation with a different logic on the GLSL side.
///
/// Used to classify how a naga binary operator must be emitted.
enum BinaryOperation {
    /// Vector comparison should use the function like `greaterThan()`, etc.
    VectorCompare,
    /// Vector component wise operation; used to polyfill unsupported ops like `|` and `&` for `bvecN`'s
    VectorComponentWise,
    /// GLSL `%` is SPIR-V `OpUMod/OpSMod` and `mod()` is `OpFMod`, but [`BinaryOperator::Modulo`](crate::BinaryOperator::Modulo) is `OpFRem`.
    ///
    /// Modulo therefore needs dedicated handling rather than a plain operator.
    Modulo,
    /// Any plain operation. No additional logic required.
    Other,
}
583
/// Writer responsible for all code generation.
pub struct Writer<'a, W> {
    // Inputs
    /// The module being written.
    module: &'a crate::Module,
    /// The module analysis.
    info: &'a valid::ModuleInfo,
    /// The output writer.
    out: W,
    /// User defined configuration to be used.
    options: &'a Options,
    /// The bound checking policies to be used
    policies: proc::BoundsCheckPolicies,

    // Internal State
    /// Features manager used to store all the needed features and write them.
    features: FeaturesManager,
    /// Generates collision-free names for module items.
    namer: proc::Namer,
    /// A map with all the names needed for writing the module
    /// (generated by a [`Namer`](crate::proc::Namer)).
    names: crate::FastHashMap<NameKey, String>,
    /// A map with the names of global variables needed for reflections.
    reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// The selected entry point.
    entry_point: &'a crate::EntryPoint,
    /// The index of the selected entry point.
    entry_point_idx: proc::EntryPointIndex,
    /// A generator for unique block numbers.
    block_id: IdGenerator,
    /// Set of expressions that have associated temporary variables.
    named_expressions: crate::NamedExpressions,
    /// Set of expressions that need to be baked to avoid unnecessary repetition in output
    need_bake_expressions: back::NeedBakeExpressions,
    /// Information about nesting of loops and switches.
    ///
    /// Used for forwarding continue statements in switches that have been
    /// transformed to `do {} while(false);` loops.
    continue_ctx: back::continue_forward::ContinueCtx,
    /// How many views to render to, if doing multiview rendering.
    multiview: Option<core::num::NonZeroU32>,
    /// Mapping of varying variables to their location. Needed for reflections.
    varying: crate::FastHashMap<String, VaryingLocation>,
    /// Number of user-defined clip planes. Only non-zero for vertex shaders.
    clip_distance_count: u32,
}
629
630impl<'a, W: Write> Writer<'a, W> {
    /// Creates a new [`Writer`] instance.
    ///
    /// # Errors
    /// - If the version specified is invalid or not supported.
    /// - If the entry point couldn't be found in the module.
    /// - If the version specified doesn't support some used features.
    pub fn new(
        out: W,
        module: &'a crate::Module,
        info: &'a valid::ModuleInfo,
        options: &'a Options,
        pipeline_options: &'a PipelineOptions,
        policies: proc::BoundsCheckPolicies,
    ) -> Result<Self, Error> {
        // Check if the requested version is supported
        if !options.version.is_supported() {
            log::error!("Version {}", options.version);
            return Err(Error::VersionNotSupported);
        }

        // Try to find the entry point and corresponding index; both the
        // stage and the name must match the pipeline options.
        let ep_idx = module
            .entry_points
            .iter()
            .position(|ep| {
                pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
            })
            .ok_or(Error::EntryPointNotFound)?;

        // Generate a map with names required to write the module, avoiding
        // reserved GLSL keywords and the prefixes the backend itself emits.
        let mut names = crate::FastHashMap::default();
        let mut namer = proc::Namer::default();
        namer.reset(
            module,
            &keywords::RESERVED_KEYWORD_SET,
            proc::CaseInsensitiveKeywordSet::empty(),
            &[
                "gl_",                     // all GL built-in variables
                "_group",                  // all normal bindings
                "_push_constant_binding_", // all push constant bindings
            ],
            &mut names,
        );

        // Build the instance
        let mut this = Self {
            module,
            info,
            out,
            options,
            policies,

            namer,
            features: FeaturesManager::new(),
            names,
            reflection_names_globals: crate::FastHashMap::default(),
            entry_point: &module.entry_points[ep_idx],
            entry_point_idx: ep_idx as u16,
            multiview: pipeline_options.multiview,
            block_id: IdGenerator::default(),
            named_expressions: Default::default(),
            need_bake_expressions: Default::default(),
            continue_ctx: back::continue_forward::ContinueCtx::default(),
            varying: Default::default(),
            clip_distance_count: 0,
        };

        // Find all features required to print this module, so extensions can
        // be emitted before any declarations that need them.
        this.collect_required_features()?;

        Ok(this)
    }
703
704 /// Writes the [`Module`](crate::Module) as glsl to the output
705 ///
706 /// # Notes
707 /// If an error occurs while writing, the output might have been written partially
708 ///
709 /// # Panics
710 /// Might panic if the module is invalid
711 pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
712 // We use `writeln!(self.out)` throughout the write to add newlines
713 // to make the output more readable
714
715 let es = self.options.version.is_es();
716
717 // Write the version (It must be the first thing or it isn't a valid glsl output)
718 writeln!(self.out, "#version {}", self.options.version)?;
719 // Write all the needed extensions
720 //
721 // This used to be the last thing being written as it allowed to search for features while
722 // writing the module saving some loops but some older versions (420 or less) required the
723 // extensions to appear before being used, even though extensions are part of the
724 // preprocessor not the processor ¯\_(ツ)_/¯
725 self.features.write(self.options, &mut self.out)?;
726
727 // glsl es requires a precision to be specified for floats and ints
728 // TODO: Should this be user configurable?
729 if es {
730 writeln!(self.out)?;
731 writeln!(self.out, "precision highp float;")?;
732 writeln!(self.out, "precision highp int;")?;
733 writeln!(self.out)?;
734 }
735
736 if self.entry_point.stage == ShaderStage::Compute {
737 let workgroup_size = self.entry_point.workgroup_size;
738 writeln!(
739 self.out,
740 "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
741 workgroup_size[0], workgroup_size[1], workgroup_size[2]
742 )?;
743 writeln!(self.out)?;
744 }
745
746 if self.entry_point.stage == ShaderStage::Vertex
747 && !self
748 .options
749 .writer_flags
750 .contains(WriterFlags::DRAW_PARAMETERS)
751 && self.features.contains(Features::INSTANCE_INDEX)
752 {
753 writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
754 writeln!(self.out)?;
755 }
756
757 // Enable early depth tests if needed
758 if let Some(early_depth_test) = self.entry_point.early_depth_test {
759 // If early depth test is supported for this version of GLSL
760 if self.options.version.supports_early_depth_test() {
761 match early_depth_test {
762 crate::EarlyDepthTest::Force => {
763 writeln!(self.out, "layout(early_fragment_tests) in;")?;
764 }
765 crate::EarlyDepthTest::Allow { conservative, .. } => {
766 use crate::ConservativeDepth as Cd;
767 let depth = match conservative {
768 Cd::GreaterEqual => "greater",
769 Cd::LessEqual => "less",
770 Cd::Unchanged => "unchanged",
771 };
772 writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
773 }
774 }
775 } else {
776 log::warn!(
777 "Early depth testing is not supported for this version of GLSL: {}",
778 self.options.version
779 );
780 }
781 }
782
783 if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
784 if let Some(multiview) = self.multiview.as_ref() {
785 writeln!(self.out, "layout(num_views = {multiview}) in;")?;
786 writeln!(self.out)?;
787 }
788 }
789
790 // Write struct types.
791 //
792 // This are always ordered because the IR is structured in a way that
793 // you can't make a struct without adding all of its members first.
794 for (handle, ty) in self.module.types.iter() {
795 if let TypeInner::Struct { ref members, .. } = ty.inner {
796 let struct_name = &self.names[&NameKey::Type(handle)];
797
798 // Structures ending with runtime-sized arrays can only be
799 // rendered as shader storage blocks in GLSL, not stand-alone
800 // struct types.
801 if !self.module.types[members.last().unwrap().ty]
802 .inner
803 .is_dynamically_sized(&self.module.types)
804 {
805 write!(self.out, "struct {struct_name} ")?;
806 self.write_struct_body(handle, members)?;
807 writeln!(self.out, ";")?;
808 }
809 }
810 }
811
812 // Write functions for special types.
813 for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
814 match type_key {
815 &crate::PredeclaredType::ModfResult { size, scalar }
816 | &crate::PredeclaredType::FrexpResult { size, scalar } => {
817 let struct_name = &self.names[&NameKey::Type(*struct_ty)];
818 let arg_type_name_owner;
819 let arg_type_name = if let Some(size) = size {
820 arg_type_name_owner = format!(
821 "{}vec{}",
822 if scalar.width == 8 { "d" } else { "" },
823 size as u8
824 );
825 &arg_type_name_owner
826 } else if scalar.width == 8 {
827 "double"
828 } else {
829 "float"
830 };
831
832 let other_type_name_owner;
833 let (defined_func_name, called_func_name, other_type_name) =
834 if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
835 (MODF_FUNCTION, "modf", arg_type_name)
836 } else {
837 let other_type_name = if let Some(size) = size {
838 other_type_name_owner = format!("ivec{}", size as u8);
839 &other_type_name_owner
840 } else {
841 "int"
842 };
843 (FREXP_FUNCTION, "frexp", other_type_name)
844 };
845
846 writeln!(self.out)?;
847 if !self.options.version.supports_frexp_function()
848 && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
849 {
850 writeln!(
851 self.out,
852 "{struct_name} {defined_func_name}({arg_type_name} arg) {{
853 {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
854 {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
855 return {struct_name}(fract, other);
856}}",
857 )?;
858 } else {
859 writeln!(
860 self.out,
861 "{struct_name} {defined_func_name}({arg_type_name} arg) {{
862 {other_type_name} other;
863 {arg_type_name} fract = {called_func_name}(arg, other);
864 return {struct_name}(fract, other);
865}}",
866 )?;
867 }
868 }
869 &crate::PredeclaredType::AtomicCompareExchangeWeakResult(_) => {
870 // Handled by the general struct writing loop earlier.
871 }
872 }
873 }
874
875 // Write all named constants
876 let mut constants = self
877 .module
878 .constants
879 .iter()
880 .filter(|&(_, c)| c.name.is_some())
881 .peekable();
882 while let Some((handle, _)) = constants.next() {
883 self.write_global_constant(handle)?;
884 // Add extra newline for readability on last iteration
885 if constants.peek().is_none() {
886 writeln!(self.out)?;
887 }
888 }
889
890 let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);
891
892 // Write the globals
893 //
894 // Unless explicitly disabled with WriterFlags::INCLUDE_UNUSED_ITEMS,
895 // we filter all globals that aren't used by the selected entry point as they might be
896 // interfere with each other (i.e. two globals with the same location but different with
897 // different classes)
898 let include_unused = self
899 .options
900 .writer_flags
901 .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
902 for (handle, global) in self.module.global_variables.iter() {
903 let is_unused = ep_info[handle].is_empty();
904 if !include_unused && is_unused {
905 continue;
906 }
907
908 match self.module.types[global.ty].inner {
909 // We treat images separately because they might require
910 // writing the storage format
911 TypeInner::Image {
912 mut dim,
913 arrayed,
914 class,
915 } => {
916 // Gather the storage format if needed
917 let storage_format_access = match self.module.types[global.ty].inner {
918 TypeInner::Image {
919 class: crate::ImageClass::Storage { format, access },
920 ..
921 } => Some((format, access)),
922 _ => None,
923 };
924
925 if dim == crate::ImageDimension::D1 && es {
926 dim = crate::ImageDimension::D2
927 }
928
929 // Gether the location if needed
930 let layout_binding = if self.options.version.supports_explicit_locations() {
931 let br = global.binding.as_ref().unwrap();
932 self.options.binding_map.get(br).cloned()
933 } else {
934 None
935 };
936
937 // Write all the layout qualifiers
938 if layout_binding.is_some() || storage_format_access.is_some() {
939 write!(self.out, "layout(")?;
940 if let Some(binding) = layout_binding {
941 write!(self.out, "binding = {binding}")?;
942 }
943 if let Some((format, _)) = storage_format_access {
944 let format_str = glsl_storage_format(format)?;
945 let separator = match layout_binding {
946 Some(_) => ",",
947 None => "",
948 };
949 write!(self.out, "{separator}{format_str}")?;
950 }
951 write!(self.out, ") ")?;
952 }
953
954 if let Some((_, access)) = storage_format_access {
955 self.write_storage_access(access)?;
956 }
957
958 // All images in glsl are `uniform`
959 // The trailing space is important
960 write!(self.out, "uniform ")?;
961
962 // write the type
963 //
964 // This is way we need the leading space because `write_image_type` doesn't add
965 // any spaces at the beginning or end
966 self.write_image_type(dim, arrayed, class)?;
967
968 // Finally write the name and end the global with a `;`
969 // The leading space is important
970 let global_name = self.get_global_name(handle, global);
971 writeln!(self.out, " {global_name};")?;
972 writeln!(self.out)?;
973
974 self.reflection_names_globals.insert(handle, global_name);
975 }
976 // glsl has no concept of samplers so we just ignore it
977 TypeInner::Sampler { .. } => continue,
978 // All other globals are written by `write_global`
979 _ => {
980 self.write_global(handle, global)?;
981 // Add a newline (only for readability)
982 writeln!(self.out)?;
983 }
984 }
985 }
986
987 for arg in self.entry_point.function.arguments.iter() {
988 self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
989 }
990 if let Some(ref result) = self.entry_point.function.result {
991 self.write_varying(result.binding.as_ref(), result.ty, true)?;
992 }
993 writeln!(self.out)?;
994
995 // Write all regular functions
996 for (handle, function) in self.module.functions.iter() {
997 // Check that the function doesn't use globals that aren't supported
998 // by the current entry point
999 if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
1000 continue;
1001 }
1002
1003 let fun_info = &self.info[handle];
1004
1005 // Skip functions that that are not compatible with this entry point's stage.
1006 //
1007 // When validation is enabled, it rejects modules whose entry points try to call
1008 // incompatible functions, so if we got this far, then any functions incompatible
1009 // with our selected entry point must not be used.
1010 //
1011 // When validation is disabled, `fun_info.available_stages` is always just
1012 // `ShaderStages::all()`, so this will write all functions in the module, and
1013 // the downstream GLSL compiler will catch any problems.
1014 if !fun_info.available_stages.contains(ep_info.available_stages) {
1015 continue;
1016 }
1017
1018 // Write the function
1019 self.write_function(back::FunctionType::Function(handle), function, fun_info)?;
1020
1021 writeln!(self.out)?;
1022 }
1023
1024 self.write_function(
1025 back::FunctionType::EntryPoint(self.entry_point_idx),
1026 &self.entry_point.function,
1027 ep_info,
1028 )?;
1029
1030 // Add newline at the end of file
1031 writeln!(self.out)?;
1032
1033 // Collect all reflection info and return it to the user
1034 self.collect_reflection_info()
1035 }
1036
1037 fn write_array_size(
1038 &mut self,
1039 base: Handle<crate::Type>,
1040 size: crate::ArraySize,
1041 ) -> BackendResult {
1042 write!(self.out, "[")?;
1043
1044 // Write the array size
1045 // Writes nothing if `IndexableLength::Dynamic`
1046 match size.resolve(self.module.to_ctx())? {
1047 proc::IndexableLength::Known(size) => {
1048 write!(self.out, "{size}")?;
1049 }
1050 proc::IndexableLength::Dynamic => (),
1051 }
1052
1053 write!(self.out, "]")?;
1054
1055 if let TypeInner::Array {
1056 base: next_base,
1057 size: next_size,
1058 ..
1059 } = self.module.types[base].inner
1060 {
1061 self.write_array_size(next_base, next_size)?;
1062 }
1063
1064 Ok(())
1065 }
1066
    /// Helper method used to write value types
    ///
    /// # Notes
    /// Adds no trailing or leading whitespace
    fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
        match *inner {
            // Scalars are simple we just get the full name from `glsl_scalar`.
            // Atomics and size-`None` value pointers are printed as their
            // underlying scalar type.
            TypeInner::Scalar(scalar)
            | TypeInner::Atomic(scalar)
            | TypeInner::ValuePointer {
                size: None,
                scalar,
                space: _,
            } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
            // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
            TypeInner::Vector { size, scalar }
            | TypeInner::ValuePointer {
                size: Some(size),
                scalar,
                space: _,
            } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
            // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
            // doubles are allowed), `M` is the columns count and `N` is the rows count
            //
            // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
            // extra branch to write matrices this way
            TypeInner::Matrix {
                columns,
                rows,
                scalar,
            } => write!(
                self.out,
                "{}mat{}x{}",
                glsl_scalar(scalar)?.prefix,
                columns as u8,
                rows as u8
            )?,
            // GLSL arrays are written as `type name[size]`
            // Here we only write the size of the array i.e. `[size]`
            // Base `type` and `name` should be written outside
            TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
            // List all remaining variants explicitly instead of using `_`, so
            // that adding a new `TypeInner` variant produces a non-exhaustive
            // match error here rather than silently falling through.
            TypeInner::Pointer { .. }
            | TypeInner::Struct { .. }
            | TypeInner::Image { .. }
            | TypeInner::Sampler { .. }
            | TypeInner::AccelerationStructure { .. }
            | TypeInner::RayQuery { .. }
            | TypeInner::BindingArray { .. } => {
                return Err(Error::Custom(format!("Unable to write type {inner:?}")))
            }
        }

        Ok(())
    }
1123
1124 /// Helper method used to write non image/sampler types
1125 ///
1126 /// # Notes
1127 /// Adds no trailing or leading whitespace
1128 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
1129 match self.module.types[ty].inner {
1130 // glsl has no pointer types so just write types as normal and loads are skipped
1131 TypeInner::Pointer { base, .. } => self.write_type(base),
1132 // glsl structs are written as just the struct name
1133 TypeInner::Struct { .. } => {
1134 // Get the struct name
1135 let name = &self.names[&NameKey::Type(ty)];
1136 write!(self.out, "{name}")?;
1137 Ok(())
1138 }
1139 // glsl array has the size separated from the base type
1140 TypeInner::Array { base, .. } => self.write_type(base),
1141 ref other => self.write_value_type(other),
1142 }
1143 }
1144
1145 /// Helper method to write a image type
1146 ///
1147 /// # Notes
1148 /// Adds no leading or trailing whitespace
1149 fn write_image_type(
1150 &mut self,
1151 dim: crate::ImageDimension,
1152 arrayed: bool,
1153 class: crate::ImageClass,
1154 ) -> BackendResult {
1155 // glsl images consist of four parts the scalar prefix, the image "type", the dimensions
1156 // and modifiers
1157 //
1158 // There exists two image types
1159 // - sampler - for sampled images
1160 // - image - for storage images
1161 //
1162 // There are three possible modifiers that can be used together and must be written in
1163 // this order to be valid
1164 // - MS - used if it's a multisampled image
1165 // - Array - used if it's an image array
1166 // - Shadow - used if it's a depth image
1167 use crate::ImageClass as Ic;
1168 use crate::Scalar as S;
1169 let float = S {
1170 kind: crate::ScalarKind::Float,
1171 width: 4,
1172 };
1173 let (base, scalar, ms, comparison) = match class {
1174 Ic::Sampled { kind, multi: true } => ("sampler", S { kind, width: 4 }, "MS", ""),
1175 Ic::Sampled { kind, multi: false } => ("sampler", S { kind, width: 4 }, "", ""),
1176 Ic::Depth { multi: true } => ("sampler", float, "MS", ""),
1177 Ic::Depth { multi: false } => ("sampler", float, "", "Shadow"),
1178 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
1179 Ic::External => unimplemented!(),
1180 };
1181
1182 let precision = if self.options.version.is_es() {
1183 "highp "
1184 } else {
1185 ""
1186 };
1187
1188 write!(
1189 self.out,
1190 "{}{}{}{}{}{}{}",
1191 precision,
1192 glsl_scalar(scalar)?.prefix,
1193 base,
1194 glsl_dimension(dim),
1195 ms,
1196 if arrayed { "Array" } else { "" },
1197 comparison
1198 )?;
1199
1200 Ok(())
1201 }
1202
1203 /// Helper method used by [Self::write_global] to write just the layout part of
1204 /// a non image/sampler global variable, if applicable.
1205 ///
1206 /// # Notes
1207 ///
1208 /// Adds trailing whitespace if any layout qualifier is written
1209 fn write_global_layout(&mut self, global: &crate::GlobalVariable) -> BackendResult {
1210 // Determine which (if any) explicit memory layout to use, and whether we support it
1211 let layout = match global.space {
1212 crate::AddressSpace::Uniform => {
1213 if !self.options.version.supports_std140_layout() {
1214 return Err(Error::Custom(
1215 "Uniform address space requires std140 layout support".to_string(),
1216 ));
1217 }
1218
1219 Some("std140")
1220 }
1221 crate::AddressSpace::Storage { .. } => {
1222 if !self.options.version.supports_std430_layout() {
1223 return Err(Error::Custom(
1224 "Storage address space requires std430 layout support".to_string(),
1225 ));
1226 }
1227
1228 Some("std430")
1229 }
1230 _ => None,
1231 };
1232
1233 // If our version supports explicit layouts, we can also output the explicit binding
1234 // if we have it
1235 if self.options.version.supports_explicit_locations() {
1236 if let Some(ref br) = global.binding {
1237 match self.options.binding_map.get(br) {
1238 Some(binding) => {
1239 write!(self.out, "layout(")?;
1240
1241 if let Some(layout) = layout {
1242 write!(self.out, "{layout}, ")?;
1243 }
1244
1245 write!(self.out, "binding = {binding}) ")?;
1246
1247 return Ok(());
1248 }
1249 None => {
1250 log::debug!("unassigned binding for {:?}", global.name);
1251 }
1252 }
1253 }
1254 }
1255
1256 // Either no explicit bindings are supported or we didn't have any.
1257 // Write just the memory layout.
1258 if let Some(layout) = layout {
1259 write!(self.out, "layout({layout}) ")?;
1260 }
1261
1262 Ok(())
1263 }
1264
1265 /// Helper method used to write non images/sampler globals
1266 ///
1267 /// # Notes
1268 /// Adds a newline
1269 ///
1270 /// # Panics
1271 /// If the global has type sampler
1272 fn write_global(
1273 &mut self,
1274 handle: Handle<crate::GlobalVariable>,
1275 global: &crate::GlobalVariable,
1276 ) -> BackendResult {
1277 self.write_global_layout(global)?;
1278
1279 if let crate::AddressSpace::Storage { access } = global.space {
1280 self.write_storage_access(access)?;
1281 }
1282
1283 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
1284 write!(self.out, "{storage_qualifier} ")?;
1285 }
1286
1287 match global.space {
1288 crate::AddressSpace::Private => {
1289 self.write_simple_global(handle, global)?;
1290 }
1291 crate::AddressSpace::WorkGroup => {
1292 self.write_simple_global(handle, global)?;
1293 }
1294 crate::AddressSpace::PushConstant => {
1295 self.write_simple_global(handle, global)?;
1296 }
1297 crate::AddressSpace::Uniform => {
1298 self.write_interface_block(handle, global)?;
1299 }
1300 crate::AddressSpace::Storage { .. } => {
1301 self.write_interface_block(handle, global)?;
1302 }
1303 // A global variable in the `Function` address space is a
1304 // contradiction in terms.
1305 crate::AddressSpace::Function => unreachable!(),
1306 // Textures and samplers are handled directly in `Writer::write`.
1307 crate::AddressSpace::Handle => unreachable!(),
1308 }
1309
1310 Ok(())
1311 }
1312
1313 fn write_simple_global(
1314 &mut self,
1315 handle: Handle<crate::GlobalVariable>,
1316 global: &crate::GlobalVariable,
1317 ) -> BackendResult {
1318 self.write_type(global.ty)?;
1319 write!(self.out, " ")?;
1320 self.write_global_name(handle, global)?;
1321
1322 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
1323 self.write_array_size(base, size)?;
1324 }
1325
1326 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
1327 write!(self.out, " = ")?;
1328 if let Some(init) = global.init {
1329 self.write_const_expr(init, &self.module.global_expressions)?;
1330 } else {
1331 self.write_zero_init_value(global.ty)?;
1332 }
1333 }
1334
1335 writeln!(self.out, ";")?;
1336
1337 if let crate::AddressSpace::PushConstant = global.space {
1338 let global_name = self.get_global_name(handle, global);
1339 self.reflection_names_globals.insert(handle, global_name);
1340 }
1341
1342 Ok(())
1343 }
1344
    /// Write an interface block for a single Naga global.
    ///
    /// Write `block_name { members }`. Since `block_name` must be unique
    /// between blocks and structs, we add `_block_ID` where `ID` is a
    /// `IdGenerator` generated number. Write `members` in the same way we write
    /// a struct's members.
    fn write_interface_block(
        &mut self,
        handle: Handle<crate::GlobalVariable>,
        global: &crate::GlobalVariable,
    ) -> BackendResult {
        // Write the block name, it's just the struct name appended with `_block_ID`
        // plus the entry point stage, so the same global used by different
        // stages gets distinct block names.
        let ty_name = &self.names[&NameKey::Type(global.ty)];
        let block_name = format!(
            "{}_block_{}{:?}",
            // avoid double underscores as they are reserved in GLSL
            ty_name.trim_end_matches('_'),
            self.block_id.generate(),
            self.entry_point.stage,
        );
        write!(self.out, "{block_name} ")?;
        // Reflection consumers look the global up by block name, not by the
        // inner variable name.
        self.reflection_names_globals.insert(handle, block_name);

        match self.module.types[global.ty].inner {
            // NOTE(review): `members.last().unwrap()` assumes structs always
            // have at least one member — presumably guaranteed by validation;
            // confirm before relying on this path with unvalidated modules.
            TypeInner::Struct { ref members, .. }
                if self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types) =>
            {
                // Structs with dynamically sized arrays must have their
                // members lifted up as members of the interface block. GLSL
                // can't write such struct types anyway.
                self.write_struct_body(global.ty, members)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
            }
            _ => {
                // A global of any other type is written as the sole member
                // of the interface block. Since the interface block is
                // anonymous, this becomes visible in the global scope.
                write!(self.out, "{{ ")?;
                self.write_type(global.ty)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
                if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
                    self.write_array_size(base, size)?;
                }
                write!(self.out, "; }}")?;
            }
        }

        writeln!(self.out, ";")?;

        Ok(())
    }
1400
    /// Helper method used to find which expressions of a given function require baking
    ///
    /// # Notes
    /// Clears `need_bake_expressions` set before adding to it
    fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
        use crate::Expression;
        self.need_bake_expressions.clear();
        for (fun_handle, expr) in func.expressions.iter() {
            let expr_info = &info[fun_handle];
            // Bake any expression whose reference count reaches its kind's
            // baking threshold.
            let min_ref_count = func.expressions[fun_handle].bake_ref_count();
            if min_ref_count <= expr_info.ref_count {
                self.need_bake_expressions.insert(fun_handle);
            }

            let inner = expr_info.ty.inner_with(&self.module.types);

            // Arguments of certain math functions are re-used by the GLSL we
            // emit for them, so they must be baked even when the expression
            // itself is referenced only once.
            if let Expression::Math {
                fun,
                arg,
                arg1,
                arg2,
                ..
            } = *expr
            {
                match fun {
                    crate::MathFunction::Dot => {
                        // if the expression is a Dot product with integer arguments,
                        // then the arguments need baking as well
                        if let TypeInner::Scalar(crate::Scalar {
                            kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint,
                            ..
                        }) = *inner
                        {
                            self.need_bake_expressions.insert(arg);
                            self.need_bake_expressions.insert(arg1.unwrap());
                        }
                    }
                    crate::MathFunction::Dot4U8Packed | crate::MathFunction::Dot4I8Packed => {
                        self.need_bake_expressions.insert(arg);
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::Pack4xI8
                    | crate::MathFunction::Pack4xU8
                    | crate::MathFunction::Pack4xI8Clamp
                    | crate::MathFunction::Pack4xU8Clamp
                    | crate::MathFunction::Unpack4xI8
                    | crate::MathFunction::Unpack4xU8
                    | crate::MathFunction::QuantizeToF16 => {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8snorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        // We have a fallback if the platform doesn't natively support these
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8unorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16snorm | */
                    crate::MathFunction::Unpack2x16snorm
                        if !self.options.version.supports_pack_unpack_snorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16unorm | */
                    crate::MathFunction::Unpack2x16unorm
                        if !self.options.version.supports_pack_unpack_unorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    crate::MathFunction::ExtractBits => {
                        // Only argument 1 is re-used.
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::InsertBits => {
                        // Only argument 2 is re-used.
                        self.need_bake_expressions.insert(arg2.unwrap());
                    }
                    crate::MathFunction::CountLeadingZeros => {
                        // NOTE(review): signed inputs appear to need `arg`
                        // re-used by the emitted fallback — confirm against
                        // the expression writer.
                        if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
                            self.need_bake_expressions.insert(arg);
                        }
                    }
                    _ => {}
                }
            }
        }

        // Compare-exchange atomics need their comparison value baked as well.
        for statement in func.body.iter() {
            match *statement {
                crate::Statement::Atomic {
                    fun: crate::AtomicFunction::Exchange { compare: Some(cmp) },
                    ..
                } => {
                    self.need_bake_expressions.insert(cmp);
                }
                _ => {}
            }
        }
    }
1506
1507 /// Helper method used to get a name for a global
1508 ///
1509 /// Globals have different naming schemes depending on their binding:
1510 /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
1511 /// - Globals with resource binding are named `_group_X_binding_Y` where `X`
1512 /// is the group and `Y` is the binding
1513 fn get_global_name(
1514 &self,
1515 handle: Handle<crate::GlobalVariable>,
1516 global: &crate::GlobalVariable,
1517 ) -> String {
1518 match (&global.binding, global.space) {
1519 (&Some(ref br), _) => {
1520 format!(
1521 "_group_{}_binding_{}_{}",
1522 br.group,
1523 br.binding,
1524 self.entry_point.stage.to_str()
1525 )
1526 }
1527 (&None, crate::AddressSpace::PushConstant) => {
1528 format!("_push_constant_binding_{}", self.entry_point.stage.to_str())
1529 }
1530 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
1531 }
1532 }
1533
1534 /// Helper method used to write a name for a global without additional heap allocation
1535 fn write_global_name(
1536 &mut self,
1537 handle: Handle<crate::GlobalVariable>,
1538 global: &crate::GlobalVariable,
1539 ) -> BackendResult {
1540 match (&global.binding, global.space) {
1541 (&Some(ref br), _) => write!(
1542 self.out,
1543 "_group_{}_binding_{}_{}",
1544 br.group,
1545 br.binding,
1546 self.entry_point.stage.to_str()
1547 )?,
1548 (&None, crate::AddressSpace::PushConstant) => write!(
1549 self.out,
1550 "_push_constant_binding_{}",
1551 self.entry_point.stage.to_str()
1552 )?,
1553 (&None, _) => write!(
1554 self.out,
1555 "{}",
1556 &self.names[&NameKey::GlobalVariable(handle)]
1557 )?,
1558 }
1559
1560 Ok(())
1561 }
1562
    /// Write a GLSL global that will carry a Naga entry point's argument or return value.
    ///
    /// A Naga entry point's arguments and return value are rendered in GLSL as
    /// variables at global scope with the `in` and `out` storage qualifiers.
    /// The code we generate for `main` loads from all the `in` globals into
    /// appropriately named locals. Before it returns, `main` assigns the
    /// components of its return value into all the `out` globals.
    ///
    /// This function writes a declaration for one such GLSL global,
    /// representing a value passed into or returned from [`self.entry_point`]
    /// that has a [`Location`] binding. The global's name is generated based on
    /// the location index and the shader stages being connected; see
    /// [`VaryingName`]. This means we don't need to know the names of
    /// arguments, just their types and bindings.
    ///
    /// Emit nothing for entry point arguments or return values with [`BuiltIn`]
    /// bindings; `main` will read from or assign to the appropriate GLSL
    /// special variable; these are pre-declared. As an exception, we do declare
    /// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
    /// needed.
    ///
    /// Use `output` together with [`self.entry_point.stage`] to determine which
    /// shader stages are being connected, and choose the `in` or `out` storage
    /// qualifier.
    ///
    /// [`self.entry_point`]: Writer::entry_point
    /// [`self.entry_point.stage`]: crate::EntryPoint::stage
    /// [`Location`]: crate::Binding::Location
    /// [`BuiltIn`]: crate::Binding::BuiltIn
    fn write_varying(
        &mut self,
        binding: Option<&crate::Binding>,
        ty: Handle<crate::Type>,
        output: bool,
    ) -> Result<(), Error> {
        // For a struct, emit a separate global for each member with a binding.
        if let TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
            for member in members {
                self.write_varying(member.binding.as_ref(), member.ty, output)?;
            }
            return Ok(());
        }

        // No binding means there is nothing to declare.
        let binding = match binding {
            None => return Ok(()),
            Some(binding) => binding,
        };

        let (location, interpolation, sampling, blend_src) = match *binding {
            crate::Binding::Location {
                location,
                interpolation,
                sampling,
                blend_src,
            } => (location, interpolation, sampling, blend_src),
            crate::Binding::BuiltIn(built_in) => {
                match built_in {
                    crate::BuiltIn::Position { invariant: true } => {
                        match (self.options.version, self.entry_point.stage) {
                            (
                                Version::Embedded {
                                    version: 300,
                                    is_webgl: true,
                                },
                                ShaderStage::Fragment,
                            ) => {
                                // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
                                // OpenGL ES in general (waiting on confirmation).
                                //
                                // See https://github.com/KhronosGroup/WebGL/issues/3518
                            }
                            _ => {
                                writeln!(
                                    self.out,
                                    "invariant {};",
                                    glsl_built_in(
                                        built_in,
                                        VaryingOptions::from_writer_options(self.options, output)
                                    )
                                )?;
                            }
                        }
                    }
                    crate::BuiltIn::ClipDistance => {
                        // Re-declare `gl_ClipDistance` with number of clip planes.
                        let TypeInner::Array { size, .. } = self.module.types[ty].inner else {
                            unreachable!();
                        };
                        let proc::IndexableLength::Known(size) =
                            size.resolve(self.module.to_ctx())?
                        else {
                            unreachable!();
                        };
                        self.clip_distance_count = size;
                        writeln!(self.out, "out float gl_ClipDistance[{size}];")?;
                    }
                    _ => {}
                }
                // Built-ins (beyond the redeclarations above) map to
                // pre-declared GLSL variables, so nothing more to emit.
                return Ok(());
            }
        };

        // Write the interpolation modifier if needed
        //
        // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
        // shaders' input globals or vertex shaders' output globals.
        let emit_interpolation_and_auxiliary = match self.entry_point.stage {
            ShaderStage::Vertex => output,
            ShaderStage::Fragment => !output,
            ShaderStage::Compute => false,
            ShaderStage::Task | ShaderStage::Mesh => unreachable!(),
        };

        // Write the I/O locations, if allowed
        let io_location = if self.options.version.supports_explicit_locations()
            || !emit_interpolation_and_auxiliary
        {
            if self.options.version.supports_io_locations() {
                if let Some(blend_src) = blend_src {
                    write!(
                        self.out,
                        "layout(location = {location}, index = {blend_src}) "
                    )?;
                } else {
                    write!(self.out, "layout(location = {location}) ")?;
                }
                None
            } else {
                // No explicit location support: record the location so it can
                // be reported through reflection instead.
                Some(VaryingLocation {
                    location,
                    index: blend_src.unwrap_or(0),
                })
            }
        } else {
            None
        };

        // Write the interpolation qualifier.
        if let Some(interp) = interpolation {
            if emit_interpolation_and_auxiliary {
                write!(self.out, "{} ", glsl_interpolation(interp))?;
            }
        }

        // Write the sampling auxiliary qualifier.
        //
        // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
        // immediately before the `in` / `out` qualifier, so we'll just follow that rule
        // here, regardless of the version.
        if let Some(sampling) = sampling {
            if emit_interpolation_and_auxiliary {
                if let Some(qualifier) = glsl_sampling(sampling)? {
                    write!(self.out, "{qualifier} ")?;
                }
            }
        }

        // Write the input/output qualifier.
        write!(self.out, "{} ", if output { "out" } else { "in" })?;

        // Write the type
        // `write_type` adds no leading or trailing spaces
        self.write_type(ty)?;

        // Finally write the global name and end the global with a `;` and a newline
        // Leading space is important
        let vname = VaryingName {
            binding: &crate::Binding::Location {
                location,
                interpolation: None,
                sampling: None,
                blend_src,
            },
            stage: self.entry_point.stage,
            options: VaryingOptions::from_writer_options(self.options, output),
        };
        writeln!(self.out, " {vname};")?;

        if let Some(location) = io_location {
            self.varying.insert(vname.to_string(), location);
        }

        Ok(())
    }
1747
1748 /// Helper method used to write functions (both entry points and regular functions)
1749 ///
1750 /// # Notes
1751 /// Adds a newline
1752 fn write_function(
1753 &mut self,
1754 ty: back::FunctionType,
1755 func: &crate::Function,
1756 info: &valid::FunctionInfo,
1757 ) -> BackendResult {
1758 // Create a function context for the function being written
1759 let ctx = back::FunctionCtx {
1760 ty,
1761 info,
1762 expressions: &func.expressions,
1763 named_expressions: &func.named_expressions,
1764 };
1765
1766 self.named_expressions.clear();
1767 self.update_expressions_to_bake(func, info);
1768
1769 // Write the function header
1770 //
1771 // glsl headers are the same as in c:
1772 // `ret_type name(args)`
1773 // `ret_type` is the return type
1774 // `name` is the function name
1775 // `args` is a comma separated list of `type name`
1776 // | - `type` is the argument type
1777 // | - `name` is the argument name
1778
1779 // Start by writing the return type if any otherwise write void
1780 // This is the only place where `void` is a valid type
1781 // (though it's more a keyword than a type)
1782 if let back::FunctionType::EntryPoint(_) = ctx.ty {
1783 write!(self.out, "void")?;
1784 } else if let Some(ref result) = func.result {
1785 self.write_type(result.ty)?;
1786 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
1787 self.write_array_size(base, size)?
1788 }
1789 } else {
1790 write!(self.out, "void")?;
1791 }
1792
1793 // Write the function name and open parentheses for the argument list
1794 let function_name = match ctx.ty {
1795 back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
1796 back::FunctionType::EntryPoint(_) => "main",
1797 };
1798 write!(self.out, " {function_name}(")?;
1799
1800 // Write the comma separated argument list
1801 //
1802 // We need access to `Self` here so we use the reference passed to the closure as an
1803 // argument instead of capturing as that would cause a borrow checker error
1804 let arguments = match ctx.ty {
1805 back::FunctionType::EntryPoint(_) => &[][..],
1806 back::FunctionType::Function(_) => &func.arguments,
1807 };
1808 let arguments: Vec<_> = arguments
1809 .iter()
1810 .enumerate()
1811 .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
1812 TypeInner::Sampler { .. } => false,
1813 _ => true,
1814 })
1815 .collect();
1816 self.write_slice(&arguments, |this, _, &(i, arg)| {
1817 // Write the argument type
1818 match this.module.types[arg.ty].inner {
1819 // We treat images separately because they might require
1820 // writing the storage format
1821 TypeInner::Image {
1822 dim,
1823 arrayed,
1824 class,
1825 } => {
1826 // Write the storage format if needed
1827 if let TypeInner::Image {
1828 class: crate::ImageClass::Storage { format, .. },
1829 ..
1830 } = this.module.types[arg.ty].inner
1831 {
1832 write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
1833 }
1834
1835 // write the type
1836 //
1837 // This is way we need the leading space because `write_image_type` doesn't add
1838 // any spaces at the beginning or end
1839 this.write_image_type(dim, arrayed, class)?;
1840 }
1841 TypeInner::Pointer { base, .. } => {
1842 // write parameter qualifiers
1843 write!(this.out, "inout ")?;
1844 this.write_type(base)?;
1845 }
1846 // All other types are written by `write_type`
1847 _ => {
1848 this.write_type(arg.ty)?;
1849 }
1850 }
1851
1852 // Write the argument name
1853 // The leading space is important
1854 write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;
1855
1856 // Write array size
1857 match this.module.types[arg.ty].inner {
1858 TypeInner::Array { base, size, .. } => {
1859 this.write_array_size(base, size)?;
1860 }
1861 TypeInner::Pointer { base, .. } => {
1862 if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
1863 this.write_array_size(base, size)?;
1864 }
1865 }
1866 _ => {}
1867 }
1868
1869 Ok(())
1870 })?;
1871
1872 // Close the parentheses and open braces to start the function body
1873 writeln!(self.out, ") {{")?;
1874
1875 if self.options.zero_initialize_workgroup_memory
1876 && ctx.ty.is_compute_entry_point(self.module)
1877 {
1878 self.write_workgroup_variables_initialization(&ctx)?;
1879 }
1880
1881 // Compose the function arguments from globals, in case of an entry point.
1882 if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
1883 let stage = self.module.entry_points[ep_index as usize].stage;
1884 for (index, arg) in func.arguments.iter().enumerate() {
1885 write!(self.out, "{}", back::INDENT)?;
1886 self.write_type(arg.ty)?;
1887 let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
1888 write!(self.out, " {name}")?;
1889 write!(self.out, " = ")?;
1890 match self.module.types[arg.ty].inner {
1891 TypeInner::Struct { ref members, .. } => {
1892 self.write_type(arg.ty)?;
1893 write!(self.out, "(")?;
1894 for (index, member) in members.iter().enumerate() {
1895 let varying_name = VaryingName {
1896 binding: member.binding.as_ref().unwrap(),
1897 stage,
1898 options: VaryingOptions::from_writer_options(self.options, false),
1899 };
1900 if index != 0 {
1901 write!(self.out, ", ")?;
1902 }
1903 write!(self.out, "{varying_name}")?;
1904 }
1905 writeln!(self.out, ");")?;
1906 }
1907 _ => {
1908 let varying_name = VaryingName {
1909 binding: arg.binding.as_ref().unwrap(),
1910 stage,
1911 options: VaryingOptions::from_writer_options(self.options, false),
1912 };
1913 writeln!(self.out, "{varying_name};")?;
1914 }
1915 }
1916 }
1917 }
1918
1919 // Write all function locals
1920 // Locals are `type name (= init)?;` where the init part (including the =) are optional
1921 //
1922 // Always adds a newline
1923 for (handle, local) in func.local_variables.iter() {
1924 // Write indentation (only for readability) and the type
1925 // `write_type` adds no trailing space
1926 write!(self.out, "{}", back::INDENT)?;
1927 self.write_type(local.ty)?;
1928
1929 // Write the local name
1930 // The leading space is important
1931 write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
1932 // Write size for array type
1933 if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
1934 self.write_array_size(base, size)?;
1935 }
1936 // Write the local initializer if needed
1937 if let Some(init) = local.init {
1938 // Put the equal signal only if there's a initializer
1939 // The leading and trailing spaces aren't needed but help with readability
1940 write!(self.out, " = ")?;
1941
1942 // Write the constant
1943 // `write_constant` adds no trailing or leading space/newline
1944 self.write_expr(init, &ctx)?;
1945 } else if is_value_init_supported(self.module, local.ty) {
1946 write!(self.out, " = ")?;
1947 self.write_zero_init_value(local.ty)?;
1948 }
1949
1950 // Finish the local with `;` and add a newline (only for readability)
1951 writeln!(self.out, ";")?
1952 }
1953
1954 // Write the function body (statement list)
1955 for sta in func.body.iter() {
1956 // Write a statement, the indentation should always be 1 when writing the function body
1957 // `write_stmt` adds a newline
1958 self.write_stmt(sta, &ctx, back::Level(1))?;
1959 }
1960
1961 // Close braces and add a newline
1962 writeln!(self.out, "}}")?;
1963
1964 Ok(())
1965 }
1966
1967 fn write_workgroup_variables_initialization(
1968 &mut self,
1969 ctx: &back::FunctionCtx,
1970 ) -> BackendResult {
1971 let mut vars = self
1972 .module
1973 .global_variables
1974 .iter()
1975 .filter(|&(handle, var)| {
1976 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1977 })
1978 .peekable();
1979
1980 if vars.peek().is_some() {
1981 let level = back::Level(1);
1982
1983 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1984
1985 for (handle, var) in vars {
1986 let name = &self.names[&NameKey::GlobalVariable(handle)];
1987 write!(self.out, "{}{} = ", level.next(), name)?;
1988 self.write_zero_init_value(var.ty)?;
1989 writeln!(self.out, ";")?;
1990 }
1991
1992 writeln!(self.out, "{level}}}")?;
1993 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1994 }
1995
1996 Ok(())
1997 }
1998
1999 /// Write a list of comma separated `T` values using a writer function `F`.
2000 ///
2001 /// The writer function `F` receives a mutable reference to `self` that if needed won't cause
2002 /// borrow checker issues (using for example a closure with `self` will cause issues), the
2003 /// second argument is the 0 based index of the element on the list, and the last element is
2004 /// a reference to the element `T` being written
2005 ///
2006 /// # Notes
2007 /// - Adds no newlines or leading/trailing whitespace
2008 /// - The last element won't have a trailing `,`
2009 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
2010 &mut self,
2011 data: &[T],
2012 mut f: F,
2013 ) -> BackendResult {
2014 // Loop through `data` invoking `f` for each element
2015 for (index, item) in data.iter().enumerate() {
2016 if index != 0 {
2017 write!(self.out, ", ")?;
2018 }
2019 f(self, index as u32, item)?;
2020 }
2021
2022 Ok(())
2023 }
2024
2025 /// Helper method used to write global constants
2026 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
2027 write!(self.out, "const ")?;
2028 let constant = &self.module.constants[handle];
2029 self.write_type(constant.ty)?;
2030 let name = &self.names[&NameKey::Constant(handle)];
2031 write!(self.out, " {name}")?;
2032 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
2033 self.write_array_size(base, size)?;
2034 }
2035 write!(self.out, " = ")?;
2036 self.write_const_expr(constant.init, &self.module.global_expressions)?;
2037 writeln!(self.out, ";")?;
2038 Ok(())
2039 }
2040
2041 /// Helper method used to output a dot product as an arithmetic expression
2042 ///
2043 fn write_dot_product(
2044 &mut self,
2045 arg: Handle<crate::Expression>,
2046 arg1: Handle<crate::Expression>,
2047 size: usize,
2048 ctx: &back::FunctionCtx,
2049 ) -> BackendResult {
2050 // Write parentheses around the dot product expression to prevent operators
2051 // with different precedences from applying earlier.
2052 write!(self.out, "(")?;
2053
2054 // Cycle through all the components of the vector
2055 for index in 0..size {
2056 let component = back::COMPONENTS[index];
2057 // Write the addition to the previous product
2058 // This will print an extra '+' at the beginning but that is fine in glsl
2059 write!(self.out, " + ")?;
2060 // Write the first vector expression, this expression is marked to be
2061 // cached so unless it can't be cached (for example, it's a Constant)
2062 // it shouldn't produce large expressions.
2063 self.write_expr(arg, ctx)?;
2064 // Access the current component on the first vector
2065 write!(self.out, ".{component} * ")?;
2066 // Write the second vector expression, this expression is marked to be
2067 // cached so unless it can't be cached (for example, it's a Constant)
2068 // it shouldn't produce large expressions.
2069 self.write_expr(arg1, ctx)?;
2070 // Access the current component on the second vector
2071 write!(self.out, ".{component}")?;
2072 }
2073
2074 write!(self.out, ")")?;
2075 Ok(())
2076 }
2077
2078 /// Helper method used to write structs
2079 ///
2080 /// # Notes
2081 /// Ends in a newline
2082 fn write_struct_body(
2083 &mut self,
2084 handle: Handle<crate::Type>,
2085 members: &[crate::StructMember],
2086 ) -> BackendResult {
2087 // glsl structs are written as in C
2088 // `struct name() { members };`
2089 // | `struct` is a keyword
2090 // | `name` is the struct name
2091 // | `members` is a semicolon separated list of `type name`
2092 // | `type` is the member type
2093 // | `name` is the member name
2094 writeln!(self.out, "{{")?;
2095
2096 for (idx, member) in members.iter().enumerate() {
2097 // The indentation is only for readability
2098 write!(self.out, "{}", back::INDENT)?;
2099
2100 match self.module.types[member.ty].inner {
2101 TypeInner::Array {
2102 base,
2103 size,
2104 stride: _,
2105 } => {
2106 self.write_type(base)?;
2107 write!(
2108 self.out,
2109 " {}",
2110 &self.names[&NameKey::StructMember(handle, idx as u32)]
2111 )?;
2112 // Write [size]
2113 self.write_array_size(base, size)?;
2114 // Newline is important
2115 writeln!(self.out, ";")?;
2116 }
2117 _ => {
2118 // Write the member type
2119 // Adds no trailing space
2120 self.write_type(member.ty)?;
2121
2122 // Write the member name and put a semicolon
2123 // The leading space is important
2124 // All members must have a semicolon even the last one
2125 writeln!(
2126 self.out,
2127 " {};",
2128 &self.names[&NameKey::StructMember(handle, idx as u32)]
2129 )?;
2130 }
2131 }
2132 }
2133
2134 write!(self.out, "}}")?;
2135 Ok(())
2136 }
2137
    /// Helper method used to write statements
    ///
    /// `level` is the indentation at which this statement (and any statements
    /// nested inside it) is written.
    ///
    /// # Notes
    /// Always adds a newline
    fn write_stmt(
        &mut self,
        sta: &crate::Statement,
        ctx: &back::FunctionCtx,
        level: back::Level,
    ) -> BackendResult {
        use crate::Statement;

        match *sta {
            // This is where we can generate intermediate constants for some expression types.
            Statement::Emit(ref range) => {
                for handle in range.clone() {
                    let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
                    let expr_name = if ptr_class.is_some() {
                        // GLSL can't save a pointer-valued expression in a variable,
                        // but we shouldn't ever need to: they should never be named expressions,
                        // and none of the expression types flagged by bake_ref_count can be pointer-valued.
                        None
                    } else if let Some(name) = ctx.named_expressions.get(&handle) {
                        // Front end provides names for all variables at the start of writing.
                        // But we write them to step by step. We need to recache them
                        // Otherwise, we could accidentally write variable name instead of full expression.
                        // Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
                        Some(self.namer.call(name))
                    } else if self.need_bake_expressions.contains(&handle) {
                        Some(Baked(handle).to_string())
                    } else {
                        None
                    };

                    // If we are going to write an `ImageLoad` next and the target image
                    // is sampled and we are using the `Restrict` policy for bounds
                    // checking images we need to write a local holding the clamped lod.
                    if let crate::Expression::ImageLoad {
                        image,
                        level: Some(level_expr),
                        ..
                    } = ctx.expressions[handle]
                    {
                        if let TypeInner::Image {
                            class: crate::ImageClass::Sampled { .. },
                            ..
                        } = *ctx.resolve_type(image, &self.module.types)
                        {
                            if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
                                write!(self.out, "{level}")?;
                                self.write_clamped_lod(ctx, handle, image, level_expr)?
                            }
                        }
                    }

                    // Only expressions that got a name above are materialized as locals.
                    if let Some(name) = expr_name {
                        write!(self.out, "{level}")?;
                        self.write_named_expr(handle, name, handle, ctx)?;
                    }
                }
            }
            // Blocks are simple we just need to write the block statements between braces
            // We could also just print the statements but this is more readable and maps more
            // closely to the IR
            Statement::Block(ref block) => {
                write!(self.out, "{level}")?;
                writeln!(self.out, "{{")?;
                for sta in block.iter() {
                    // Increase the indentation to help with readability
                    self.write_stmt(sta, ctx, level.next())?
                }
                writeln!(self.out, "{level}}}")?
            }
            // Ifs are written as in C:
            // ```
            // if(condition) {
            // accept
            // } else {
            // reject
            // }
            // ```
            Statement::If {
                condition,
                ref accept,
                ref reject,
            } => {
                write!(self.out, "{level}")?;
                write!(self.out, "if (")?;
                self.write_expr(condition, ctx)?;
                writeln!(self.out, ") {{")?;

                for sta in accept {
                    // Increase indentation to help with readability
                    self.write_stmt(sta, ctx, level.next())?;
                }

                // If there are no statements in the reject block we skip writing it
                // This is only for readability
                if !reject.is_empty() {
                    writeln!(self.out, "{level}}} else {{")?;

                    for sta in reject {
                        // Increase indentation to help with readability
                        self.write_stmt(sta, ctx, level.next())?;
                    }
                }

                writeln!(self.out, "{level}}}")?
            }
            // Switch are written as in C:
            // ```
            // switch (selector) {
            // // Fallthrough
            // case label:
            // block
            // // Non fallthrough
            // case label:
            // block
            // break;
            // default:
            // block
            // }
            // ```
            // Where the `default` case happens isn't important but we put it last
            // so that we don't need to print a `break` for it
            Statement::Switch {
                selector,
                ref cases,
            } => {
                let l2 = level.next();
                // Some GLSL consumers may not handle switches with a single
                // body correctly: See wgpu#4514. Write such switch statements
                // as a `do {} while(false);` loop instead.
                //
                // Since doing so may inadvertently capture `continue`
                // statements in the switch body, we must apply continue
                // forwarding. See the `naga::back::continue_forward` module
                // docs for details.
                let one_body = cases
                    .iter()
                    .rev()
                    .skip(1)
                    .all(|case| case.fall_through && case.body.is_empty());
                if one_body {
                    // Unlike HLSL, in GLSL `continue_ctx` only needs to know
                    // about [`Switch`] statements that are being rendered as
                    // `do-while` loops.
                    if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
                        writeln!(self.out, "{level}bool {variable} = false;",)?;
                    };
                    writeln!(self.out, "{level}do {{")?;
                    // Note: Expressions have no side-effects so we don't need to emit selector expression.

                    // Body
                    if let Some(case) = cases.last() {
                        for sta in case.body.iter() {
                            self.write_stmt(sta, ctx, l2)?;
                        }
                    }
                    // End do-while
                    writeln!(self.out, "{level}}} while(false);")?;

                    // Handle any forwarded continue statements.
                    use back::continue_forward::ExitControlFlow;
                    let op = match self.continue_ctx.exit_switch() {
                        ExitControlFlow::None => None,
                        ExitControlFlow::Continue { variable } => Some(("continue", variable)),
                        ExitControlFlow::Break { variable } => Some(("break", variable)),
                    };
                    if let Some((control_flow, variable)) = op {
                        writeln!(self.out, "{level}if ({variable}) {{")?;
                        writeln!(self.out, "{l2}{control_flow};")?;
                        writeln!(self.out, "{level}}}")?;
                    }
                } else {
                    // Start the switch
                    write!(self.out, "{level}")?;
                    write!(self.out, "switch(")?;
                    self.write_expr(selector, ctx)?;
                    writeln!(self.out, ") {{")?;

                    // Write all cases
                    for case in cases {
                        match case.value {
                            crate::SwitchValue::I32(value) => {
                                write!(self.out, "{l2}case {value}:")?
                            }
                            crate::SwitchValue::U32(value) => {
                                write!(self.out, "{l2}case {value}u:")?
                            }
                            crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
                        }

                        // Fallthrough cases with empty bodies share the next case's
                        // braces, so they get no block of their own.
                        let write_block_braces = !(case.fall_through && case.body.is_empty());
                        if write_block_braces {
                            writeln!(self.out, " {{")?;
                        } else {
                            writeln!(self.out)?;
                        }

                        for sta in case.body.iter() {
                            self.write_stmt(sta, ctx, l2.next())?;
                        }

                        // Emit an explicit `break` unless the case falls through or
                        // already ends in a terminator statement.
                        if !case.fall_through && case.body.last().is_none_or(|s| !s.is_terminator())
                        {
                            writeln!(self.out, "{}break;", l2.next())?;
                        }

                        if write_block_braces {
                            writeln!(self.out, "{l2}}}")?;
                        }
                    }

                    writeln!(self.out, "{level}}}")?
                }
            }
            // Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
            // while true loop and appending the continuing block to the body resulting on:
            // ```
            // bool loop_init = true;
            // while(true) {
            // if (!loop_init) { <continuing> }
            // loop_init = false;
            // <body>
            // }
            // ```
            Statement::Loop {
                ref body,
                ref continuing,
                break_if,
            } => {
                self.continue_ctx.enter_loop();
                if !continuing.is_empty() || break_if.is_some() {
                    let gate_name = self.namer.call("loop_init");
                    writeln!(self.out, "{level}bool {gate_name} = true;")?;
                    writeln!(self.out, "{level}while(true) {{")?;
                    let l2 = level.next();
                    let l3 = l2.next();
                    writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
                    for sta in continuing {
                        self.write_stmt(sta, ctx, l3)?;
                    }
                    if let Some(condition) = break_if {
                        write!(self.out, "{l3}if (")?;
                        self.write_expr(condition, ctx)?;
                        writeln!(self.out, ") {{")?;
                        writeln!(self.out, "{}break;", l3.next())?;
                        writeln!(self.out, "{l3}}}")?;
                    }
                    writeln!(self.out, "{l2}}}")?;
                    writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
                } else {
                    writeln!(self.out, "{level}while(true) {{")?;
                }
                for sta in body {
                    self.write_stmt(sta, ctx, level.next())?;
                }
                writeln!(self.out, "{level}}}")?;
                self.continue_ctx.exit_loop();
            }
            // Break, continue and return as written as in C
            // `break;`
            Statement::Break => {
                write!(self.out, "{level}")?;
                writeln!(self.out, "break;")?
            }
            // `continue;`
            Statement::Continue => {
                // Sometimes we must render a `Continue` statement as a `break`.
                // See the docs for the `back::continue_forward` module.
                if let Some(variable) = self.continue_ctx.continue_encountered() {
                    writeln!(self.out, "{level}{variable} = true;",)?;
                    writeln!(self.out, "{level}break;")?
                } else {
                    writeln!(self.out, "{level}continue;")?
                }
            }
            // `return expr;`, `expr` is optional
            Statement::Return { value } => {
                write!(self.out, "{level}")?;
                match ctx.ty {
                    back::FunctionType::Function(_) => {
                        write!(self.out, "return")?;
                        // Write the expression to be returned if needed
                        if let Some(expr) = value {
                            write!(self.out, " ")?;
                            self.write_expr(expr, ctx)?;
                        }
                        writeln!(self.out, ";")?;
                    }
                    back::FunctionType::EntryPoint(ep_index) => {
                        // Entry points don't `return` a value; instead each
                        // result member is assigned to its varying output.
                        let mut has_point_size = false;
                        let ep = &self.module.entry_points[ep_index as usize];
                        if let Some(ref result) = ep.function.result {
                            let value = value.unwrap();
                            match self.module.types[result.ty].inner {
                                TypeInner::Struct { ref members, .. } => {
                                    // A `Compose` result is first stored in a temporary
                                    // struct so each member can be read back out of it.
                                    let temp_struct_name = match ctx.expressions[value] {
                                        crate::Expression::Compose { .. } => {
                                            let return_struct = "_tmp_return";
                                            write!(
                                                self.out,
                                                "{} {} = ",
                                                &self.names[&NameKey::Type(result.ty)],
                                                return_struct
                                            )?;
                                            self.write_expr(value, ctx)?;
                                            writeln!(self.out, ";")?;
                                            write!(self.out, "{level}")?;
                                            Some(return_struct)
                                        }
                                        _ => None,
                                    };

                                    for (index, member) in members.iter().enumerate() {
                                        if let Some(crate::Binding::BuiltIn(
                                            crate::BuiltIn::PointSize,
                                        )) = member.binding
                                        {
                                            has_point_size = true;
                                        }

                                        let varying_name = VaryingName {
                                            binding: member.binding.as_ref().unwrap(),
                                            stage: ep.stage,
                                            options: VaryingOptions::from_writer_options(
                                                self.options,
                                                true,
                                            ),
                                        };
                                        write!(self.out, "{varying_name} = ")?;

                                        if let Some(struct_name) = temp_struct_name {
                                            write!(self.out, "{struct_name}")?;
                                        } else {
                                            self.write_expr(value, ctx)?;
                                        }

                                        // Write field name
                                        writeln!(
                                            self.out,
                                            ".{};",
                                            &self.names
                                                [&NameKey::StructMember(result.ty, index as u32)]
                                        )?;
                                        write!(self.out, "{level}")?;
                                    }
                                }
                                _ => {
                                    let name = VaryingName {
                                        binding: result.binding.as_ref().unwrap(),
                                        stage: ep.stage,
                                        options: VaryingOptions::from_writer_options(
                                            self.options,
                                            true,
                                        ),
                                    };
                                    write!(self.out, "{name} = ")?;
                                    self.write_expr(value, ctx)?;
                                    writeln!(self.out, ";")?;
                                    write!(self.out, "{level}")?;
                                }
                            }
                        }

                        let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
                            == ShaderStage::Vertex;
                        if is_vertex_stage
                            && self
                                .options
                                .writer_flags
                                .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
                        {
                            writeln!(
                                self.out,
                                "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
                            )?;
                            write!(self.out, "{level}")?;
                        }

                        if is_vertex_stage
                            && self
                                .options
                                .writer_flags
                                .contains(WriterFlags::FORCE_POINT_SIZE)
                            && !has_point_size
                        {
                            writeln!(self.out, "gl_PointSize = 1.0;")?;
                            write!(self.out, "{level}")?;
                        }
                        writeln!(self.out, "return;")?;
                    }
                }
            }
            // This is one of the places were glsl adds to the syntax of C in this case the discard
            // keyword which ceases all further processing in a fragment shader, it's called OpKill
            // in spir-v that's why it's called `Statement::Kill`
            Statement::Kill => writeln!(self.out, "{level}discard;")?,
            Statement::ControlBarrier(flags) => {
                self.write_control_barrier(flags, level)?;
            }
            Statement::MemoryBarrier(flags) => {
                self.write_memory_barrier(flags, level)?;
            }
            // Stores in glsl are just variable assignments written as `pointer = value;`
            Statement::Store { pointer, value } => {
                write!(self.out, "{level}")?;
                self.write_expr(pointer, ctx)?;
                write!(self.out, " = ")?;
                self.write_expr(value, ctx)?;
                writeln!(self.out, ";")?
            }
            Statement::WorkGroupUniformLoad { pointer, result } => {
                // GLSL doesn't have pointers, which means that this backend needs to ensure that
                // the actual "loading" is happening between the two barriers.
                // This is done in `Emit` by never emitting a variable name for pointer variables
                self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;

                let result_name = Baked(result).to_string();
                write!(self.out, "{level}")?;
                // Expressions cannot have side effects, so just writing the expression here is fine.
                self.write_named_expr(pointer, result_name, result, ctx)?;

                self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
            }
            // Stores a value into an image.
            Statement::ImageStore {
                image,
                coordinate,
                array_index,
                value,
            } => {
                write!(self.out, "{level}")?;
                self.write_image_store(ctx, image, coordinate, array_index, value)?
            }
            // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
            Statement::Call {
                function,
                ref arguments,
                result,
            } => {
                write!(self.out, "{level}")?;
                // If the call produces a result, declare a local for it first:
                // `type name[size] = call(...)`.
                if let Some(expr) = result {
                    let name = Baked(expr).to_string();
                    let result = self.module.functions[function].result.as_ref().unwrap();
                    self.write_type(result.ty)?;
                    write!(self.out, " {name}")?;
                    if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
                    {
                        self.write_array_size(base, size)?
                    }
                    write!(self.out, " = ")?;
                    self.named_expressions.insert(expr, name);
                }
                write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
                // Sampler arguments are dropped from the call, mirroring how the
                // callee's signature is written (see the argument writer above).
                let arguments: Vec<_> = arguments
                    .iter()
                    .enumerate()
                    .filter_map(|(i, arg)| {
                        let arg_ty = self.module.functions[function].arguments[i].ty;
                        match self.module.types[arg_ty].inner {
                            TypeInner::Sampler { .. } => None,
                            _ => Some(*arg),
                        }
                    })
                    .collect();
                self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
                writeln!(self.out, ");")?
            }
            Statement::Atomic {
                pointer,
                ref fun,
                value,
                result,
            } => {
                write!(self.out, "{level}")?;

                match *fun {
                    crate::AtomicFunction::Exchange {
                        compare: Some(compare_expr),
                    } => {
                        // Compare-exchange: the result is a struct whose
                        // `old_value` field receives `atomicCompSwap`'s return
                        // and whose `exchanged` field records whether the swap
                        // took place (old value equals the comparand).
                        let result_handle = result.expect("CompareExchange must have a result");
                        let res_name = Baked(result_handle).to_string();
                        self.write_type(ctx.info[result_handle].ty.handle().unwrap())?;
                        write!(self.out, " {res_name};")?;
                        write!(self.out, " {res_name}.old_value = atomicCompSwap(")?;
                        self.write_expr(pointer, ctx)?;
                        write!(self.out, ", ")?;
                        self.write_expr(compare_expr, ctx)?;
                        write!(self.out, ", ")?;
                        self.write_expr(value, ctx)?;
                        writeln!(self.out, ");")?;

                        write!(
                            self.out,
                            "{level}{res_name}.exchanged = ({res_name}.old_value == "
                        )?;
                        self.write_expr(compare_expr, ctx)?;
                        writeln!(self.out, ");")?;
                        self.named_expressions.insert(result_handle, res_name);
                    }
                    _ => {
                        if let Some(result) = result {
                            let res_name = Baked(result).to_string();
                            self.write_type(ctx.info[result].ty.handle().unwrap())?;
                            write!(self.out, " {res_name} = ")?;
                            self.named_expressions.insert(result, res_name);
                        }
                        let fun_str = fun.to_glsl();
                        write!(self.out, "atomic{fun_str}(")?;
                        self.write_expr(pointer, ctx)?;
                        write!(self.out, ", ")?;
                        if let crate::AtomicFunction::Subtract = *fun {
                            // Emulate `atomicSub` with `atomicAdd` by negating the value.
                            write!(self.out, "-")?;
                        }
                        self.write_expr(value, ctx)?;
                        writeln!(self.out, ");")?;
                    }
                }
            }
            // Stores a value into an image.
            Statement::ImageAtomic {
                image,
                coordinate,
                array_index,
                fun,
                value,
            } => {
                write!(self.out, "{level}")?;
                self.write_image_atomic(ctx, image, coordinate, array_index, fun, value)?
            }
            // Ray queries are not implemented for GLSL; such statements should
            // have been rejected before reaching the writer.
            Statement::RayQuery { .. } => unreachable!(),
            // `subgroupBallot(pred)`; with no predicate, ballot over `true`.
            Statement::SubgroupBallot { result, predicate } => {
                write!(self.out, "{level}")?;
                let res_name = Baked(result).to_string();
                let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
                self.write_value_type(res_ty)?;
                write!(self.out, " {res_name} = ")?;
                self.named_expressions.insert(result, res_name);

                write!(self.out, "subgroupBallot(")?;
                match predicate {
                    Some(predicate) => self.write_expr(predicate, ctx)?,
                    None => write!(self.out, "true")?,
                }
                writeln!(self.out, ");")?;
            }
            Statement::SubgroupCollectiveOperation {
                op,
                collective_op,
                argument,
                result,
            } => {
                write!(self.out, "{level}")?;
                let res_name = Baked(result).to_string();
                let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
                self.write_value_type(res_ty)?;
                write!(self.out, " {res_name} = ")?;
                self.named_expressions.insert(result, res_name);

                // Map the (collective op, subgroup op) pair to its GLSL builtin.
                match (collective_op, op) {
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
                        write!(self.out, "subgroupAll(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
                        write!(self.out, "subgroupAny(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
                        write!(self.out, "subgroupAdd(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
                        write!(self.out, "subgroupMul(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
                        write!(self.out, "subgroupMax(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
                        write!(self.out, "subgroupMin(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
                        write!(self.out, "subgroupAnd(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
                        write!(self.out, "subgroupOr(")?
                    }
                    (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
                        write!(self.out, "subgroupXor(")?
                    }
                    (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
                        write!(self.out, "subgroupExclusiveAdd(")?
                    }
                    (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
                        write!(self.out, "subgroupExclusiveMul(")?
                    }
                    (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
                        write!(self.out, "subgroupInclusiveAdd(")?
                    }
                    (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
                        write!(self.out, "subgroupInclusiveMul(")?
                    }
                    _ => unimplemented!(),
                }
                self.write_expr(argument, ctx)?;
                writeln!(self.out, ");")?;
            }
            Statement::SubgroupGather {
                mode,
                argument,
                result,
            } => {
                write!(self.out, "{level}")?;
                let res_name = Baked(result).to_string();
                let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
                self.write_value_type(res_ty)?;
                write!(self.out, " {res_name} = ")?;
                self.named_expressions.insert(result, res_name);

                match mode {
                    crate::GatherMode::BroadcastFirst => {
                        write!(self.out, "subgroupBroadcastFirst(")?;
                    }
                    crate::GatherMode::Broadcast(_) => {
                        write!(self.out, "subgroupBroadcast(")?;
                    }
                    crate::GatherMode::Shuffle(_) => {
                        write!(self.out, "subgroupShuffle(")?;
                    }
                    crate::GatherMode::ShuffleDown(_) => {
                        write!(self.out, "subgroupShuffleDown(")?;
                    }
                    crate::GatherMode::ShuffleUp(_) => {
                        write!(self.out, "subgroupShuffleUp(")?;
                    }
                    crate::GatherMode::ShuffleXor(_) => {
                        write!(self.out, "subgroupShuffleXor(")?;
                    }
                    crate::GatherMode::QuadBroadcast(_) => {
                        write!(self.out, "subgroupQuadBroadcast(")?;
                    }
                    crate::GatherMode::QuadSwap(direction) => match direction {
                        crate::Direction::X => {
                            write!(self.out, "subgroupQuadSwapHorizontal(")?;
                        }
                        crate::Direction::Y => {
                            write!(self.out, "subgroupQuadSwapVertical(")?;
                        }
                        crate::Direction::Diagonal => {
                            write!(self.out, "subgroupQuadSwapDiagonal(")?;
                        }
                    },
                }
                self.write_expr(argument, ctx)?;
                // Modes other than `BroadcastFirst` and `QuadSwap` take a
                // second (index) argument.
                match mode {
                    crate::GatherMode::BroadcastFirst => {}
                    crate::GatherMode::Broadcast(index)
                    | crate::GatherMode::Shuffle(index)
                    | crate::GatherMode::ShuffleDown(index)
                    | crate::GatherMode::ShuffleUp(index)
                    | crate::GatherMode::ShuffleXor(index)
                    | crate::GatherMode::QuadBroadcast(index) => {
                        write!(self.out, ", ")?;
                        self.write_expr(index, ctx)?;
                    }
                    crate::GatherMode::QuadSwap(_) => {}
                }
                writeln!(self.out, ");")?;
            }
        }

        Ok(())
    }
2811
    /// Write a const expression.
    ///
    /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
    /// constant expression arena, as GLSL expression.
    ///
    /// # Notes
    /// Adds no newlines or leading/trailing whitespace
    ///
    /// [`Expression`]: crate::Expression
    /// [`Module`]: crate::Module
    fn write_const_expr(
        &mut self,
        expr: Handle<crate::Expression>,
        arena: &crate::Arena<crate::Expression>,
    ) -> BackendResult {
        // Delegate to the shared const/runtime expression writer, passing
        // ourselves back as the subexpression callback so that nested
        // expressions are also written as const expressions.
        self.write_possibly_const_expr(
            expr,
            arena,
            // Resolve a subexpression's type through the writer's `info`.
            |expr| &self.info[expr],
            |writer, expr| writer.write_const_expr(expr, arena),
        )
    }
2834
    /// Write [`Expression`] variants that can occur in both runtime and const expressions.
    ///
    /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
    /// as a GLSL expression. This must be one of the [`Expression`] variants
    /// that is allowed to occur in constant expressions.
    ///
    /// Use `write_expression` to write subexpressions.
    ///
    /// This is the common code for `write_expr`, which handles arbitrary
    /// runtime expressions, and `write_const_expr`, which only handles
    /// const-expressions. Each of those callers passes itself (essentially) as
    /// the `write_expression` callback, so that subexpressions are restricted
    /// to the appropriate variants.
    ///
    /// # Notes
    /// Adds no newlines or leading/trailing whitespace
    ///
    /// [`Expression`]: crate::Expression
    fn write_possibly_const_expr<'w, I, E>(
        &'w mut self,
        expr: Handle<crate::Expression>,
        expressions: &crate::Arena<crate::Expression>,
        info: I,
        write_expression: E,
    ) -> BackendResult
    where
        I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
        E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
    {
        use crate::Expression;

        match expressions[expr] {
            Expression::Literal(literal) => {
                match literal {
                    // Floats are written using `Debug` instead of `Display` because it always appends the
                    // decimal part even if it's zero which is needed for a valid glsl float constant
                    crate::Literal::F64(value) => write!(self.out, "{value:?}LF")?,
                    crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
                    crate::Literal::F16(_) => {
                        return Err(Error::Custom("GLSL has no 16-bit float type".into()));
                    }
                    // Unsigned integers need a `u` at the end
                    //
                    // While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
                    // always write it as the extra branch wouldn't have any benefit in readability
                    crate::Literal::U32(value) => write!(self.out, "{value}u")?,
                    crate::Literal::I32(value) => write!(self.out, "{value}")?,
                    crate::Literal::Bool(value) => write!(self.out, "{value}")?,
                    crate::Literal::I64(_) => {
                        return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
                    }
                    crate::Literal::U64(_) => {
                        return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
                    }
                    crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
                        return Err(Error::Custom(
                            "Abstract types should not appear in IR presented to backends".into(),
                        ));
                    }
                }
            }
            // Named constants are emitted by their (renamed) identifier;
            // anonymous constants are inlined as their initializer expression.
            Expression::Constant(handle) => {
                let constant = &self.module.constants[handle];
                if constant.name.is_some() {
                    write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
                } else {
                    self.write_const_expr(constant.init, &self.module.global_expressions)?;
                }
            }
            Expression::ZeroValue(ty) => {
                self.write_zero_init_value(ty)?;
            }
            // `Compose` becomes a constructor call: `type(comp0, comp1, ...)`.
            Expression::Compose { ty, ref components } => {
                self.write_type(ty)?;

                // Arrays additionally need their size suffix, e.g. `float[3]`.
                if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
                    self.write_array_size(base, size)?;
                }

                write!(self.out, "(")?;
                for (index, component) in components.iter().enumerate() {
                    if index != 0 {
                        write!(self.out, ", ")?;
                    }
                    write_expression(self, *component)?;
                }
                write!(self.out, ")")?
            }
            // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
            Expression::Splat { size: _, value } => {
                let resolved = info(expr).inner_with(&self.module.types);
                self.write_value_type(resolved)?;
                write!(self.out, "(")?;
                write_expression(self, value)?;
                write!(self.out, ")")?
            }
            // NOTE(review): any other variant reaching this point is presumed to
            // be override-related, which this backend does not support.
            _ => {
                return Err(Error::Override);
            }
        }

        Ok(())
    }
2938
2939 /// Helper method to write expressions
2940 ///
2941 /// # Notes
2942 /// Doesn't add any newlines or leading/trailing spaces
2943 fn write_expr(
2944 &mut self,
2945 expr: Handle<crate::Expression>,
2946 ctx: &back::FunctionCtx,
2947 ) -> BackendResult {
2948 use crate::Expression;
2949
2950 if let Some(name) = self.named_expressions.get(&expr) {
2951 write!(self.out, "{name}")?;
2952 return Ok(());
2953 }
2954
2955 match ctx.expressions[expr] {
2956 Expression::Literal(_)
2957 | Expression::Constant(_)
2958 | Expression::ZeroValue(_)
2959 | Expression::Compose { .. }
2960 | Expression::Splat { .. } => {
2961 self.write_possibly_const_expr(
2962 expr,
2963 ctx.expressions,
2964 |expr| &ctx.info[expr].ty,
2965 |writer, expr| writer.write_expr(expr, ctx),
2966 )?;
2967 }
2968 Expression::Override(_) => return Err(Error::Override),
2969 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2970 Expression::Access { base, index } => {
2971 self.write_expr(base, ctx)?;
2972 write!(self.out, "[")?;
2973 self.write_expr(index, ctx)?;
2974 write!(self.out, "]")?
2975 }
2976 // `AccessIndex` is the same as `Access` except that the index is a constant and it can
2977 // be applied to structs, in this case we need to find the name of the field at that
2978 // index and write `base.field_name`
2979 Expression::AccessIndex { base, index } => {
2980 self.write_expr(base, ctx)?;
2981
2982 let base_ty_res = &ctx.info[base].ty;
2983 let mut resolved = base_ty_res.inner_with(&self.module.types);
2984 let base_ty_handle = match *resolved {
2985 TypeInner::Pointer { base, space: _ } => {
2986 resolved = &self.module.types[base].inner;
2987 Some(base)
2988 }
2989 _ => base_ty_res.handle(),
2990 };
2991
2992 match *resolved {
2993 TypeInner::Vector { .. } => {
2994 // Write vector access as a swizzle
2995 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2996 }
2997 TypeInner::Matrix { .. }
2998 | TypeInner::Array { .. }
2999 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
3000 TypeInner::Struct { .. } => {
3001 // This will never panic in case the type is a `Struct`, this is not true
3002 // for other types so we can only check while inside this match arm
3003 let ty = base_ty_handle.unwrap();
3004
3005 write!(
3006 self.out,
3007 ".{}",
3008 &self.names[&NameKey::StructMember(ty, index)]
3009 )?
3010 }
3011 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
3012 }
3013 }
3014 // `Swizzle` adds a few letters behind the dot.
3015 Expression::Swizzle {
3016 size,
3017 vector,
3018 pattern,
3019 } => {
3020 self.write_expr(vector, ctx)?;
3021 write!(self.out, ".")?;
3022 for &sc in pattern[..size as usize].iter() {
3023 self.out.write_char(back::COMPONENTS[sc as usize])?;
3024 }
3025 }
3026 // Function arguments are written as the argument name
3027 Expression::FunctionArgument(pos) => {
3028 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
3029 }
3030 // Global variables need some special work for their name but
3031 // `get_global_name` does the work for us
3032 Expression::GlobalVariable(handle) => {
3033 let global = &self.module.global_variables[handle];
3034 self.write_global_name(handle, global)?
3035 }
            // A local is written as its name
3037 Expression::LocalVariable(handle) => {
3038 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
3039 }
3040 // glsl has no pointers so there's no load operation, just write the pointer expression
3041 Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
3042 // `ImageSample` is a bit complicated compared to the rest of the IR.
3043 //
3044 // First there are three variations depending whether the sample level is explicitly set,
            // if it's automatic or if it's bias:
3046 // `texture(image, coordinate)` - Automatic sample level
3047 // `texture(image, coordinate, bias)` - Bias sample level
3048 // `textureLod(image, coordinate, level)` - Zero or Exact sample level
3049 //
3050 // Furthermore if `depth_ref` is some we need to append it to the coordinate vector
3051 Expression::ImageSample {
3052 image,
3053 sampler: _, //TODO?
3054 gather,
3055 coordinate,
3056 array_index,
3057 offset,
3058 level,
3059 depth_ref,
3060 clamp_to_edge: _,
3061 } => {
3062 let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
3063 TypeInner::Image {
3064 dim,
3065 class,
3066 arrayed,
3067 ..
3068 } => (dim, class, arrayed),
3069 _ => unreachable!(),
3070 };
3071 let mut err = None;
3072 if dim == crate::ImageDimension::Cube {
3073 if offset.is_some() {
3074 err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
3075 }
3076 if arrayed
3077 && matches!(class, crate::ImageClass::Depth { .. })
3078 && matches!(level, crate::SampleLevel::Gradient { .. })
3079 {
3080 err = Some("samplerCubeArrayShadow don't support textureGrad");
3081 }
3082 }
3083 if gather.is_some() && level != crate::SampleLevel::Zero {
3084 err = Some("textureGather doesn't support LOD parameters");
3085 }
3086 if let Some(err) = err {
3087 return Err(Error::Custom(String::from(err)));
3088 }
3089
3090 // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
3091 // unless `GL_EXT_texture_shadow_lod` is present.
3092 // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
3093 let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
3094 || (dim == crate::ImageDimension::D2 && arrayed))
3095 && level == crate::SampleLevel::Zero
3096 && matches!(class, crate::ImageClass::Depth { .. })
3097 && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
3098
3099 // Write the function to be used depending on the sample level
3100 let fun_name = match level {
3101 crate::SampleLevel::Zero if gather.is_some() => "textureGather",
3102 crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
3103 crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
3104 crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
3105 crate::SampleLevel::Gradient { .. } => "textureGrad",
3106 };
3107 let offset_name = match offset {
3108 Some(_) => "Offset",
3109 None => "",
3110 };
3111
3112 write!(self.out, "{fun_name}{offset_name}(")?;
3113
3114 // Write the image that will be used
3115 self.write_expr(image, ctx)?;
3116 // The space here isn't required but it helps with readability
3117 write!(self.out, ", ")?;
3118
3119 // TODO: handle clamp_to_edge
3120 // https://github.com/gfx-rs/wgpu/issues/7791
3121
3122 // We need to get the coordinates vector size to later build a vector that's `size + 1`
3123 // if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
3124 let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
3125 TypeInner::Vector { size, .. } => size as u8,
3126 TypeInner::Scalar { .. } => 1,
3127 _ => unreachable!(),
3128 };
3129
3130 if array_index.is_some() {
3131 coord_dim += 1;
3132 }
3133 let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
3134 if merge_depth_ref {
3135 coord_dim += 1;
3136 }
3137
3138 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
3139 let is_vec = tex_1d_hack || coord_dim != 1;
3140 // Compose a new texture coordinates vector
3141 if is_vec {
3142 write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
3143 }
3144 self.write_expr(coordinate, ctx)?;
3145 if tex_1d_hack {
3146 write!(self.out, ", 0.0")?;
3147 }
3148 if let Some(expr) = array_index {
3149 write!(self.out, ", ")?;
3150 self.write_expr(expr, ctx)?;
3151 }
3152 if merge_depth_ref {
3153 write!(self.out, ", ")?;
3154 self.write_expr(depth_ref.unwrap(), ctx)?;
3155 }
3156 if is_vec {
3157 write!(self.out, ")")?;
3158 }
3159
3160 if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
3161 write!(self.out, ", ")?;
3162 self.write_expr(expr, ctx)?;
3163 }
3164
3165 match level {
3166 // Auto needs no more arguments
3167 crate::SampleLevel::Auto => (),
3168 // Zero needs level set to 0
3169 crate::SampleLevel::Zero => {
3170 if workaround_lod_with_grad {
3171 let vec_dim = match dim {
3172 crate::ImageDimension::Cube => 3,
3173 _ => 2,
3174 };
3175 write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
3176 } else if gather.is_none() {
3177 write!(self.out, ", 0.0")?;
3178 }
3179 }
3180 // Exact and bias require another argument
3181 crate::SampleLevel::Exact(expr) => {
3182 write!(self.out, ", ")?;
3183 self.write_expr(expr, ctx)?;
3184 }
3185 crate::SampleLevel::Bias(_) => {
3186 // This needs to be done after the offset writing
3187 }
3188 crate::SampleLevel::Gradient { x, y } => {
3189 // If we are using sampler2D to replace sampler1D, we also
3190 // need to make sure to use vec2 gradients
3191 if tex_1d_hack {
3192 write!(self.out, ", vec2(")?;
3193 self.write_expr(x, ctx)?;
3194 write!(self.out, ", 0.0)")?;
3195 write!(self.out, ", vec2(")?;
3196 self.write_expr(y, ctx)?;
3197 write!(self.out, ", 0.0)")?;
3198 } else {
3199 write!(self.out, ", ")?;
3200 self.write_expr(x, ctx)?;
3201 write!(self.out, ", ")?;
3202 self.write_expr(y, ctx)?;
3203 }
3204 }
3205 }
3206
3207 if let Some(constant) = offset {
3208 write!(self.out, ", ")?;
3209 if tex_1d_hack {
3210 write!(self.out, "ivec2(")?;
3211 }
3212 self.write_const_expr(constant, ctx.expressions)?;
3213 if tex_1d_hack {
3214 write!(self.out, ", 0)")?;
3215 }
3216 }
3217
3218 // Bias is always the last argument
3219 if let crate::SampleLevel::Bias(expr) = level {
3220 write!(self.out, ", ")?;
3221 self.write_expr(expr, ctx)?;
3222 }
3223
3224 if let (Some(component), None) = (gather, depth_ref) {
3225 write!(self.out, ", {}", component as usize)?;
3226 }
3227
3228 // End the function
3229 write!(self.out, ")")?
3230 }
3231 Expression::ImageLoad {
3232 image,
3233 coordinate,
3234 array_index,
3235 sample,
3236 level,
3237 } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
3238 // Query translates into one of the:
3239 // - textureSize/imageSize
3240 // - textureQueryLevels
3241 // - textureSamples/imageSamples
3242 Expression::ImageQuery { image, query } => {
3243 use crate::ImageClass;
3244
3245 // This will only panic if the module is invalid
3246 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
3247 TypeInner::Image {
3248 dim,
3249 arrayed: _,
3250 class,
3251 } => (dim, class),
3252 _ => unreachable!(),
3253 };
3254 let components = match dim {
3255 crate::ImageDimension::D1 => 1,
3256 crate::ImageDimension::D2 => 2,
3257 crate::ImageDimension::D3 => 3,
3258 crate::ImageDimension::Cube => 2,
3259 };
3260
3261 if let crate::ImageQuery::Size { .. } = query {
3262 match components {
3263 1 => write!(self.out, "uint(")?,
3264 _ => write!(self.out, "uvec{components}(")?,
3265 }
3266 } else {
3267 write!(self.out, "uint(")?;
3268 }
3269
3270 match query {
3271 crate::ImageQuery::Size { level } => {
3272 match class {
3273 ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
3274 write!(self.out, "textureSize(")?;
3275 self.write_expr(image, ctx)?;
3276 if let Some(expr) = level {
3277 let cast_to_int = matches!(
3278 *ctx.resolve_type(expr, &self.module.types),
3279 TypeInner::Scalar(crate::Scalar {
3280 kind: crate::ScalarKind::Uint,
3281 ..
3282 })
3283 );
3284
3285 write!(self.out, ", ")?;
3286
3287 if cast_to_int {
3288 write!(self.out, "int(")?;
3289 }
3290
3291 self.write_expr(expr, ctx)?;
3292
3293 if cast_to_int {
3294 write!(self.out, ")")?;
3295 }
3296 } else if !multi {
3297 // All textureSize calls requires an lod argument
3298 // except for multisampled samplers
3299 write!(self.out, ", 0")?;
3300 }
3301 }
3302 ImageClass::Storage { .. } => {
3303 write!(self.out, "imageSize(")?;
3304 self.write_expr(image, ctx)?;
3305 }
3306 ImageClass::External => unimplemented!(),
3307 }
3308 write!(self.out, ")")?;
3309 if components != 1 || self.options.version.is_es() {
3310 write!(self.out, ".{}", &"xyz"[..components])?;
3311 }
3312 }
3313 crate::ImageQuery::NumLevels => {
3314 write!(self.out, "textureQueryLevels(",)?;
3315 self.write_expr(image, ctx)?;
3316 write!(self.out, ")",)?;
3317 }
3318 crate::ImageQuery::NumLayers => {
3319 let fun_name = match class {
3320 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
3321 ImageClass::Storage { .. } => "imageSize",
3322 ImageClass::External => unimplemented!(),
3323 };
3324 write!(self.out, "{fun_name}(")?;
3325 self.write_expr(image, ctx)?;
3326 // All textureSize calls requires an lod argument
3327 // except for multisampled samplers
3328 if !class.is_multisampled() {
3329 write!(self.out, ", 0")?;
3330 }
3331 write!(self.out, ")")?;
3332 if components != 1 || self.options.version.is_es() {
3333 write!(self.out, ".{}", back::COMPONENTS[components])?;
3334 }
3335 }
3336 crate::ImageQuery::NumSamples => {
3337 let fun_name = match class {
3338 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
3339 "textureSamples"
3340 }
3341 ImageClass::Storage { .. } => "imageSamples",
3342 ImageClass::External => unimplemented!(),
3343 };
3344 write!(self.out, "{fun_name}(")?;
3345 self.write_expr(image, ctx)?;
3346 write!(self.out, ")",)?;
3347 }
3348 }
3349
3350 write!(self.out, ")")?;
3351 }
3352 Expression::Unary { op, expr } => {
3353 let operator_or_fn = match op {
3354 crate::UnaryOperator::Negate => "-",
3355 crate::UnaryOperator::LogicalNot => {
3356 match *ctx.resolve_type(expr, &self.module.types) {
3357 TypeInner::Vector { .. } => "not",
3358 _ => "!",
3359 }
3360 }
3361 crate::UnaryOperator::BitwiseNot => "~",
3362 };
3363 write!(self.out, "{operator_or_fn}(")?;
3364
3365 self.write_expr(expr, ctx)?;
3366
3367 write!(self.out, ")")?
3368 }
3369 // `Binary` we just write `left op right`, except when dealing with
3370 // comparison operations on vectors as they are implemented with
3371 // builtin functions.
3372 // Once again we wrap everything in parentheses to avoid precedence issues
3373 Expression::Binary {
3374 mut op,
3375 left,
3376 right,
3377 } => {
3378 // Holds `Some(function_name)` if the binary operation is
3379 // implemented as a function call
3380 use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
3381
3382 let left_inner = ctx.resolve_type(left, &self.module.types);
3383 let right_inner = ctx.resolve_type(right, &self.module.types);
3384
3385 let function = match (left_inner, right_inner) {
3386 (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
3387 Bo::Less
3388 | Bo::LessEqual
3389 | Bo::Greater
3390 | Bo::GreaterEqual
3391 | Bo::Equal
3392 | Bo::NotEqual => BinaryOperation::VectorCompare,
3393 Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
3394 Bo::And if scalar.kind == Sk::Bool => {
3395 op = crate::BinaryOperator::LogicalAnd;
3396 BinaryOperation::VectorComponentWise
3397 }
3398 Bo::InclusiveOr if scalar.kind == Sk::Bool => {
3399 op = crate::BinaryOperator::LogicalOr;
3400 BinaryOperation::VectorComponentWise
3401 }
3402 _ => BinaryOperation::Other,
3403 },
3404 _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
3405 (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
3406 Bo::Modulo => BinaryOperation::Modulo,
3407 _ => BinaryOperation::Other,
3408 },
3409 (Some(Sk::Bool), Some(Sk::Bool)) => match op {
3410 Bo::InclusiveOr => {
3411 op = crate::BinaryOperator::LogicalOr;
3412 BinaryOperation::Other
3413 }
3414 Bo::And => {
3415 op = crate::BinaryOperator::LogicalAnd;
3416 BinaryOperation::Other
3417 }
3418 _ => BinaryOperation::Other,
3419 },
3420 _ => BinaryOperation::Other,
3421 },
3422 };
3423
3424 match function {
3425 BinaryOperation::VectorCompare => {
3426 let op_str = match op {
3427 Bo::Less => "lessThan(",
3428 Bo::LessEqual => "lessThanEqual(",
3429 Bo::Greater => "greaterThan(",
3430 Bo::GreaterEqual => "greaterThanEqual(",
3431 Bo::Equal => "equal(",
3432 Bo::NotEqual => "notEqual(",
3433 _ => unreachable!(),
3434 };
3435 write!(self.out, "{op_str}")?;
3436 self.write_expr(left, ctx)?;
3437 write!(self.out, ", ")?;
3438 self.write_expr(right, ctx)?;
3439 write!(self.out, ")")?;
3440 }
3441 BinaryOperation::VectorComponentWise => {
3442 self.write_value_type(left_inner)?;
3443 write!(self.out, "(")?;
3444
3445 let size = match *left_inner {
3446 Ti::Vector { size, .. } => size,
3447 _ => unreachable!(),
3448 };
3449
3450 for i in 0..size as usize {
3451 if i != 0 {
3452 write!(self.out, ", ")?;
3453 }
3454
3455 self.write_expr(left, ctx)?;
3456 write!(self.out, ".{}", back::COMPONENTS[i])?;
3457
3458 write!(self.out, " {} ", back::binary_operation_str(op))?;
3459
3460 self.write_expr(right, ctx)?;
3461 write!(self.out, ".{}", back::COMPONENTS[i])?;
3462 }
3463
3464 write!(self.out, ")")?;
3465 }
3466 // TODO: handle undefined behavior of BinaryOperator::Modulo
3467 //
3468 // sint:
3469 // if right == 0 return 0
3470 // if left == min(type_of(left)) && right == -1 return 0
3471 // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
3472 //
3473 // uint:
3474 // if right == 0 return 0
3475 //
3476 // float:
3477 // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
3478 BinaryOperation::Modulo => {
3479 write!(self.out, "(")?;
3480
3481 // write `e1 - e2 * trunc(e1 / e2)`
3482 self.write_expr(left, ctx)?;
3483 write!(self.out, " - ")?;
3484 self.write_expr(right, ctx)?;
3485 write!(self.out, " * ")?;
3486 write!(self.out, "trunc(")?;
3487 self.write_expr(left, ctx)?;
3488 write!(self.out, " / ")?;
3489 self.write_expr(right, ctx)?;
3490 write!(self.out, ")")?;
3491
3492 write!(self.out, ")")?;
3493 }
3494 BinaryOperation::Other => {
3495 write!(self.out, "(")?;
3496
3497 self.write_expr(left, ctx)?;
3498 write!(self.out, " {} ", back::binary_operation_str(op))?;
3499 self.write_expr(right, ctx)?;
3500
3501 write!(self.out, ")")?;
3502 }
3503 }
3504 }
3505 // `Select` is written as `condition ? accept : reject`
3506 // We wrap everything in parentheses to avoid precedence issues
3507 Expression::Select {
3508 condition,
3509 accept,
3510 reject,
3511 } => {
3512 let cond_ty = ctx.resolve_type(condition, &self.module.types);
3513 let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
3514 true
3515 } else {
3516 false
3517 };
3518
3519 // TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
3520 if vec_select {
3521 // Glsl defines that for mix when the condition is a boolean the first element
3522 // is picked if condition is false and the second if condition is true
3523 write!(self.out, "mix(")?;
3524 self.write_expr(reject, ctx)?;
3525 write!(self.out, ", ")?;
3526 self.write_expr(accept, ctx)?;
3527 write!(self.out, ", ")?;
3528 self.write_expr(condition, ctx)?;
3529 } else {
3530 write!(self.out, "(")?;
3531 self.write_expr(condition, ctx)?;
3532 write!(self.out, " ? ")?;
3533 self.write_expr(accept, ctx)?;
3534 write!(self.out, " : ")?;
3535 self.write_expr(reject, ctx)?;
3536 }
3537
3538 write!(self.out, ")")?
3539 }
3540 // `Derivative` is a function call to a glsl provided function
3541 Expression::Derivative { axis, ctrl, expr } => {
3542 use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
3543 let fun_name = if self.options.version.supports_derivative_control() {
3544 match (axis, ctrl) {
3545 (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
3546 (Axis::X, Ctrl::Fine) => "dFdxFine",
3547 (Axis::X, Ctrl::None) => "dFdx",
3548 (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
3549 (Axis::Y, Ctrl::Fine) => "dFdyFine",
3550 (Axis::Y, Ctrl::None) => "dFdy",
3551 (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
3552 (Axis::Width, Ctrl::Fine) => "fwidthFine",
3553 (Axis::Width, Ctrl::None) => "fwidth",
3554 }
3555 } else {
3556 match axis {
3557 Axis::X => "dFdx",
3558 Axis::Y => "dFdy",
3559 Axis::Width => "fwidth",
3560 }
3561 };
3562 write!(self.out, "{fun_name}(")?;
3563 self.write_expr(expr, ctx)?;
3564 write!(self.out, ")")?
3565 }
3566 // `Relational` is a normal function call to some glsl provided functions
3567 Expression::Relational { fun, argument } => {
3568 use crate::RelationalFunction as Rf;
3569
3570 let fun_name = match fun {
3571 Rf::IsInf => "isinf",
3572 Rf::IsNan => "isnan",
3573 Rf::All => "all",
3574 Rf::Any => "any",
3575 };
3576 write!(self.out, "{fun_name}(")?;
3577
3578 self.write_expr(argument, ctx)?;
3579
3580 write!(self.out, ")")?
3581 }
3582 Expression::Math {
3583 fun,
3584 arg,
3585 arg1,
3586 arg2,
3587 arg3,
3588 } => {
3589 use crate::MathFunction as Mf;
3590
3591 let fun_name = match fun {
3592 // comparison
3593 Mf::Abs => "abs",
3594 Mf::Min => "min",
3595 Mf::Max => "max",
3596 Mf::Clamp => {
3597 let scalar_kind = ctx
3598 .resolve_type(arg, &self.module.types)
3599 .scalar_kind()
3600 .unwrap();
3601 match scalar_kind {
3602 crate::ScalarKind::Float => "clamp",
3603 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3604 // instruction to determine the value. This is fine according to the WGSL spec for float
3605 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3606 _ => {
3607 write!(self.out, "min(max(")?;
3608 self.write_expr(arg, ctx)?;
3609 write!(self.out, ", ")?;
3610 self.write_expr(arg1.unwrap(), ctx)?;
3611 write!(self.out, "), ")?;
3612 self.write_expr(arg2.unwrap(), ctx)?;
3613 write!(self.out, ")")?;
3614
3615 return Ok(());
3616 }
3617 }
3618 }
3619 Mf::Saturate => {
3620 write!(self.out, "clamp(")?;
3621
3622 self.write_expr(arg, ctx)?;
3623
3624 match *ctx.resolve_type(arg, &self.module.types) {
3625 TypeInner::Vector { size, .. } => write!(
3626 self.out,
3627 ", vec{}(0.0), vec{0}(1.0)",
3628 common::vector_size_str(size)
3629 )?,
3630 _ => write!(self.out, ", 0.0, 1.0")?,
3631 }
3632
3633 write!(self.out, ")")?;
3634
3635 return Ok(());
3636 }
3637 // trigonometry
3638 Mf::Cos => "cos",
3639 Mf::Cosh => "cosh",
3640 Mf::Sin => "sin",
3641 Mf::Sinh => "sinh",
3642 Mf::Tan => "tan",
3643 Mf::Tanh => "tanh",
3644 Mf::Acos => "acos",
3645 Mf::Asin => "asin",
3646 Mf::Atan => "atan",
3647 Mf::Asinh => "asinh",
3648 Mf::Acosh => "acosh",
3649 Mf::Atanh => "atanh",
3650 Mf::Radians => "radians",
3651 Mf::Degrees => "degrees",
3652 // glsl doesn't have atan2 function
3653 // use two-argument variation of the atan function
3654 Mf::Atan2 => "atan",
3655 // decomposition
3656 Mf::Ceil => "ceil",
3657 Mf::Floor => "floor",
3658 Mf::Round => "roundEven",
3659 Mf::Fract => "fract",
3660 Mf::Trunc => "trunc",
3661 Mf::Modf => MODF_FUNCTION,
3662 Mf::Frexp => FREXP_FUNCTION,
3663 Mf::Ldexp => "ldexp",
3664 // exponent
3665 Mf::Exp => "exp",
3666 Mf::Exp2 => "exp2",
3667 Mf::Log => "log",
3668 Mf::Log2 => "log2",
3669 Mf::Pow => "pow",
3670 // geometry
3671 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3672 TypeInner::Vector {
3673 scalar:
3674 crate::Scalar {
3675 kind: crate::ScalarKind::Float,
3676 ..
3677 },
3678 ..
3679 } => "dot",
3680 TypeInner::Vector { size, .. } => {
3681 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3682 }
3683 _ => unreachable!(
3684 "Correct TypeInner for dot product should be already validated"
3685 ),
3686 },
3687 fun @ (Mf::Dot4I8Packed | Mf::Dot4U8Packed) => {
3688 let conversion = match fun {
3689 Mf::Dot4I8Packed => "int",
3690 Mf::Dot4U8Packed => "",
3691 _ => unreachable!(),
3692 };
3693
3694 let arg1 = arg1.unwrap();
3695
3696 // Write parentheses around the dot product expression to prevent operators
3697 // with different precedences from applying earlier.
3698 write!(self.out, "(")?;
3699 for i in 0..4 {
3700 // Since `bitfieldExtract` only sign extends if the value is signed, we
3701 // need to convert the inputs to `int` in case of `Dot4I8Packed`. For
3702 // `Dot4U8Packed`, the code below only introduces parenthesis around
3703 // each factor, which aren't strictly needed because both operands are
3704 // baked, but which don't hurt either.
3705 write!(self.out, "bitfieldExtract({conversion}(")?;
3706 self.write_expr(arg, ctx)?;
3707 write!(self.out, "), {}, 8)", i * 8)?;
3708
3709 write!(self.out, " * bitfieldExtract({conversion}(")?;
3710 self.write_expr(arg1, ctx)?;
3711 write!(self.out, "), {}, 8)", i * 8)?;
3712
3713 if i != 3 {
3714 write!(self.out, " + ")?;
3715 }
3716 }
3717 write!(self.out, ")")?;
3718
3719 return Ok(());
3720 }
3721 Mf::Outer => "outerProduct",
3722 Mf::Cross => "cross",
3723 Mf::Distance => "distance",
3724 Mf::Length => "length",
3725 Mf::Normalize => "normalize",
3726 Mf::FaceForward => "faceforward",
3727 Mf::Reflect => "reflect",
3728 Mf::Refract => "refract",
3729 // computational
3730 Mf::Sign => "sign",
3731 Mf::Fma => {
3732 if self.options.version.supports_fma_function() {
3733 // Use the fma function when available
3734 "fma"
3735 } else {
3736 // No fma support. Transform the function call into an arithmetic expression
3737 write!(self.out, "(")?;
3738
3739 self.write_expr(arg, ctx)?;
3740 write!(self.out, " * ")?;
3741
3742 let arg1 =
3743 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3744 self.write_expr(arg1, ctx)?;
3745 write!(self.out, " + ")?;
3746
3747 let arg2 =
3748 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3749 self.write_expr(arg2, ctx)?;
3750 write!(self.out, ")")?;
3751
3752 return Ok(());
3753 }
3754 }
3755 Mf::Mix => "mix",
3756 Mf::Step => "step",
3757 Mf::SmoothStep => "smoothstep",
3758 Mf::Sqrt => "sqrt",
3759 Mf::InverseSqrt => "inversesqrt",
3760 Mf::Inverse => "inverse",
3761 Mf::Transpose => "transpose",
3762 Mf::Determinant => "determinant",
3763 Mf::QuantizeToF16 => match *ctx.resolve_type(arg, &self.module.types) {
3764 TypeInner::Scalar { .. } => {
3765 write!(self.out, "unpackHalf2x16(packHalf2x16(vec2(")?;
3766 self.write_expr(arg, ctx)?;
3767 write!(self.out, "))).x")?;
3768 return Ok(());
3769 }
3770 TypeInner::Vector {
3771 size: crate::VectorSize::Bi,
3772 ..
3773 } => {
3774 write!(self.out, "unpackHalf2x16(packHalf2x16(")?;
3775 self.write_expr(arg, ctx)?;
3776 write!(self.out, "))")?;
3777 return Ok(());
3778 }
3779 TypeInner::Vector {
3780 size: crate::VectorSize::Tri,
3781 ..
3782 } => {
3783 write!(self.out, "vec3(unpackHalf2x16(packHalf2x16(")?;
3784 self.write_expr(arg, ctx)?;
3785 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3786 self.write_expr(arg, ctx)?;
3787 write!(self.out, ".zz)).x)")?;
3788 return Ok(());
3789 }
3790 TypeInner::Vector {
3791 size: crate::VectorSize::Quad,
3792 ..
3793 } => {
3794 write!(self.out, "vec4(unpackHalf2x16(packHalf2x16(")?;
3795 self.write_expr(arg, ctx)?;
3796 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3797 self.write_expr(arg, ctx)?;
3798 write!(self.out, ".zw)))")?;
3799 return Ok(());
3800 }
3801 _ => unreachable!(
3802 "Correct TypeInner for QuantizeToF16 should be already validated"
3803 ),
3804 },
3805 // bits
3806 Mf::CountTrailingZeros => {
3807 match *ctx.resolve_type(arg, &self.module.types) {
3808 TypeInner::Vector { size, scalar, .. } => {
3809 let s = common::vector_size_str(size);
3810 if let crate::ScalarKind::Uint = scalar.kind {
3811 write!(self.out, "min(uvec{s}(findLSB(")?;
3812 self.write_expr(arg, ctx)?;
3813 write!(self.out, ")), uvec{s}(32u))")?;
3814 } else {
3815 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3816 self.write_expr(arg, ctx)?;
3817 write!(self.out, ")), uvec{s}(32u)))")?;
3818 }
3819 }
3820 TypeInner::Scalar(scalar) => {
3821 if let crate::ScalarKind::Uint = scalar.kind {
3822 write!(self.out, "min(uint(findLSB(")?;
3823 self.write_expr(arg, ctx)?;
3824 write!(self.out, ")), 32u)")?;
3825 } else {
3826 write!(self.out, "int(min(uint(findLSB(")?;
3827 self.write_expr(arg, ctx)?;
3828 write!(self.out, ")), 32u))")?;
3829 }
3830 }
3831 _ => unreachable!(),
3832 };
3833 return Ok(());
3834 }
3835 Mf::CountLeadingZeros => {
3836 if self.options.version.supports_integer_functions() {
3837 match *ctx.resolve_type(arg, &self.module.types) {
3838 TypeInner::Vector { size, scalar } => {
3839 let s = common::vector_size_str(size);
3840
3841 if let crate::ScalarKind::Uint = scalar.kind {
3842 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3843 self.write_expr(arg, ctx)?;
3844 write!(self.out, "))")?;
3845 } else {
3846 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3847 self.write_expr(arg, ctx)?;
3848 write!(self.out, "), ivec{s}(0), lessThan(")?;
3849 self.write_expr(arg, ctx)?;
3850 write!(self.out, ", ivec{s}(0)))")?;
3851 }
3852 }
3853 TypeInner::Scalar(scalar) => {
3854 if let crate::ScalarKind::Uint = scalar.kind {
3855 write!(self.out, "uint(31 - findMSB(")?;
3856 } else {
3857 write!(self.out, "(")?;
3858 self.write_expr(arg, ctx)?;
3859 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3860 }
3861
3862 self.write_expr(arg, ctx)?;
3863 write!(self.out, "))")?;
3864 }
3865 _ => unreachable!(),
3866 };
3867 } else {
3868 match *ctx.resolve_type(arg, &self.module.types) {
3869 TypeInner::Vector { size, scalar } => {
3870 let s = common::vector_size_str(size);
3871
3872 if let crate::ScalarKind::Uint = scalar.kind {
3873 write!(self.out, "uvec{s}(")?;
3874 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3875 self.write_expr(arg, ctx)?;
3876 write!(self.out, ") + 0.5)))")?;
3877 } else {
3878 write!(self.out, "ivec{s}(")?;
3879 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3880 self.write_expr(arg, ctx)?;
3881 write!(self.out, ") + 0.5)), ")?;
3882 write!(self.out, "vec{s}(0.0), lessThan(")?;
3883 self.write_expr(arg, ctx)?;
3884 write!(self.out, ", ivec{s}(0u))))")?;
3885 }
3886 }
3887 TypeInner::Scalar(scalar) => {
3888 if let crate::ScalarKind::Uint = scalar.kind {
3889 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3890 self.write_expr(arg, ctx)?;
3891 write!(self.out, ") + 0.5)))")?;
3892 } else {
3893 write!(self.out, "(")?;
3894 self.write_expr(arg, ctx)?;
3895 write!(self.out, " < 0 ? 0 : int(")?;
3896 write!(self.out, "31.0 - floor(log2(float(")?;
3897 self.write_expr(arg, ctx)?;
3898 write!(self.out, ") + 0.5))))")?;
3899 }
3900 }
3901 _ => unreachable!(),
3902 };
3903 }
3904
3905 return Ok(());
3906 }
3907 Mf::CountOneBits => "bitCount",
3908 Mf::ReverseBits => "bitfieldReverse",
3909 Mf::ExtractBits => {
3910 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3911 // to first sanitize the offset and count first. If we don't do this, AMD and Intel chips
3912 // will return out-of-spec values if the extracted range is not within the bit width.
3913 //
3914 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3915 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3916 //
3917 // w = sizeof(x) * 8
3918 // o = min(offset, w)
3919 // c = min(count, w - o)
3920 //
3921 // bitfieldExtract(x, o, c)
3922 //
3923 // extract_bits(e, min(offset, w), min(count, w - min(offset, w))))
3924 let scalar_bits = ctx
3925 .resolve_type(arg, &self.module.types)
3926 .scalar_width()
3927 .unwrap()
3928 * 8;
3929
3930 write!(self.out, "bitfieldExtract(")?;
3931 self.write_expr(arg, ctx)?;
3932 write!(self.out, ", int(min(")?;
3933 self.write_expr(arg1.unwrap(), ctx)?;
3934 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3935 self.write_expr(arg2.unwrap(), ctx)?;
3936 write!(self.out, ", {scalar_bits}u - min(")?;
3937 self.write_expr(arg1.unwrap(), ctx)?;
3938 write!(self.out, ", {scalar_bits}u))))")?;
3939
3940 return Ok(());
3941 }
3942 Mf::InsertBits => {
3943 // InsertBits has the same considerations as ExtractBits above
3944 let scalar_bits = ctx
3945 .resolve_type(arg, &self.module.types)
3946 .scalar_width()
3947 .unwrap()
3948 * 8;
3949
3950 write!(self.out, "bitfieldInsert(")?;
3951 self.write_expr(arg, ctx)?;
3952 write!(self.out, ", ")?;
3953 self.write_expr(arg1.unwrap(), ctx)?;
3954 write!(self.out, ", int(min(")?;
3955 self.write_expr(arg2.unwrap(), ctx)?;
3956 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3957 self.write_expr(arg3.unwrap(), ctx)?;
3958 write!(self.out, ", {scalar_bits}u - min(")?;
3959 self.write_expr(arg2.unwrap(), ctx)?;
3960 write!(self.out, ", {scalar_bits}u))))")?;
3961
3962 return Ok(());
3963 }
3964 Mf::FirstTrailingBit => "findLSB",
3965 Mf::FirstLeadingBit => "findMSB",
3966 // data packing
3967 Mf::Pack4x8snorm => {
3968 if self.options.version.supports_pack_unpack_4x8() {
3969 "packSnorm4x8"
3970 } else {
3971 // polyfill should go here. Needs a corresponding entry in `need_bake_expression`
3972 return Err(Error::UnsupportedExternal("packSnorm4x8".into()));
3973 }
3974 }
3975 Mf::Pack4x8unorm => {
3976 if self.options.version.supports_pack_unpack_4x8() {
3977 "packUnorm4x8"
3978 } else {
3979 return Err(Error::UnsupportedExternal("packUnorm4x8".to_owned()));
3980 }
3981 }
3982 Mf::Pack2x16snorm => {
3983 if self.options.version.supports_pack_unpack_snorm_2x16() {
3984 "packSnorm2x16"
3985 } else {
3986 return Err(Error::UnsupportedExternal("packSnorm2x16".to_owned()));
3987 }
3988 }
3989 Mf::Pack2x16unorm => {
3990 if self.options.version.supports_pack_unpack_unorm_2x16() {
3991 "packUnorm2x16"
3992 } else {
3993 return Err(Error::UnsupportedExternal("packUnorm2x16".to_owned()));
3994 }
3995 }
3996 Mf::Pack2x16float => {
3997 if self.options.version.supports_pack_unpack_half_2x16() {
3998 "packHalf2x16"
3999 } else {
4000 return Err(Error::UnsupportedExternal("packHalf2x16".to_owned()));
4001 }
4002 }
4003
4004 fun @ (Mf::Pack4xI8 | Mf::Pack4xU8 | Mf::Pack4xI8Clamp | Mf::Pack4xU8Clamp) => {
4005 let was_signed = matches!(fun, Mf::Pack4xI8 | Mf::Pack4xI8Clamp);
4006 let clamp_bounds = match fun {
4007 Mf::Pack4xI8Clamp => Some(("-128", "127")),
4008 Mf::Pack4xU8Clamp => Some(("0", "255")),
4009 _ => None,
4010 };
4011 let const_suffix = if was_signed { "" } else { "u" };
4012 if was_signed {
4013 write!(self.out, "uint(")?;
4014 }
4015 let write_arg = |this: &mut Self| -> BackendResult {
4016 if let Some((min, max)) = clamp_bounds {
4017 write!(this.out, "clamp(")?;
4018 this.write_expr(arg, ctx)?;
4019 write!(this.out, ", {min}{const_suffix}, {max}{const_suffix})")?;
4020 } else {
4021 this.write_expr(arg, ctx)?;
4022 }
4023 Ok(())
4024 };
4025 write!(self.out, "(")?;
4026 write_arg(self)?;
4027 write!(self.out, "[0] & 0xFF{const_suffix}) | ((")?;
4028 write_arg(self)?;
4029 write!(self.out, "[1] & 0xFF{const_suffix}) << 8) | ((")?;
4030 write_arg(self)?;
4031 write!(self.out, "[2] & 0xFF{const_suffix}) << 16) | ((")?;
4032 write_arg(self)?;
4033 write!(self.out, "[3] & 0xFF{const_suffix}) << 24)")?;
4034 if was_signed {
4035 write!(self.out, ")")?;
4036 }
4037
4038 return Ok(());
4039 }
4040 // data unpacking
4041 Mf::Unpack2x16float => {
4042 if self.options.version.supports_pack_unpack_half_2x16() {
4043 "unpackHalf2x16"
4044 } else {
4045 return Err(Error::UnsupportedExternal("unpackHalf2x16".into()));
4046 }
4047 }
4048 Mf::Unpack2x16snorm => {
4049 if self.options.version.supports_pack_unpack_snorm_2x16() {
4050 "unpackSnorm2x16"
4051 } else {
4052 let scale = 32767;
4053
4054 write!(self.out, "(vec2(ivec2(")?;
4055 self.write_expr(arg, ctx)?;
4056 write!(self.out, " << 16, ")?;
4057 self.write_expr(arg, ctx)?;
4058 write!(self.out, ") >> 16) / {scale}.0)")?;
4059 return Ok(());
4060 }
4061 }
4062 Mf::Unpack2x16unorm => {
4063 if self.options.version.supports_pack_unpack_unorm_2x16() {
4064 "unpackUnorm2x16"
4065 } else {
4066 let scale = 65535;
4067
4068 write!(self.out, "(vec2(")?;
4069 self.write_expr(arg, ctx)?;
4070 write!(self.out, " & 0xFFFFu, ")?;
4071 self.write_expr(arg, ctx)?;
4072 write!(self.out, " >> 16) / {scale}.0)")?;
4073 return Ok(());
4074 }
4075 }
4076 Mf::Unpack4x8snorm => {
4077 if self.options.version.supports_pack_unpack_4x8() {
4078 "unpackSnorm4x8"
4079 } else {
4080 let scale = 127;
4081
4082 write!(self.out, "(vec4(ivec4(")?;
4083 self.write_expr(arg, ctx)?;
4084 write!(self.out, " << 24, ")?;
4085 self.write_expr(arg, ctx)?;
4086 write!(self.out, " << 16, ")?;
4087 self.write_expr(arg, ctx)?;
4088 write!(self.out, " << 8, ")?;
4089 self.write_expr(arg, ctx)?;
4090 write!(self.out, ") >> 24) / {scale}.0)")?;
4091 return Ok(());
4092 }
4093 }
4094 Mf::Unpack4x8unorm => {
4095 if self.options.version.supports_pack_unpack_4x8() {
4096 "unpackUnorm4x8"
4097 } else {
4098 let scale = 255;
4099
4100 write!(self.out, "(vec4(")?;
4101 self.write_expr(arg, ctx)?;
4102 write!(self.out, " & 0xFFu, ")?;
4103 self.write_expr(arg, ctx)?;
4104 write!(self.out, " >> 8 & 0xFFu, ")?;
4105 self.write_expr(arg, ctx)?;
4106 write!(self.out, " >> 16 & 0xFFu, ")?;
4107 self.write_expr(arg, ctx)?;
4108 write!(self.out, " >> 24) / {scale}.0)")?;
4109 return Ok(());
4110 }
4111 }
4112 fun @ (Mf::Unpack4xI8 | Mf::Unpack4xU8) => {
4113 let sign_prefix = match fun {
4114 Mf::Unpack4xI8 => 'i',
4115 Mf::Unpack4xU8 => 'u',
4116 _ => unreachable!(),
4117 };
4118 write!(self.out, "{sign_prefix}vec4(")?;
4119 for i in 0..4 {
4120 write!(self.out, "bitfieldExtract(")?;
4121 // Since bitfieldExtract only sign extends if the value is signed, this
4122 // cast is needed
4123 match fun {
4124 Mf::Unpack4xI8 => {
4125 write!(self.out, "int(")?;
4126 self.write_expr(arg, ctx)?;
4127 write!(self.out, ")")?;
4128 }
4129 Mf::Unpack4xU8 => self.write_expr(arg, ctx)?,
4130 _ => unreachable!(),
4131 };
4132 write!(self.out, ", {}, 8)", i * 8)?;
4133 if i != 3 {
4134 write!(self.out, ", ")?;
4135 }
4136 }
4137 write!(self.out, ")")?;
4138
4139 return Ok(());
4140 }
4141 };
4142
4143 let extract_bits = fun == Mf::ExtractBits;
4144 let insert_bits = fun == Mf::InsertBits;
4145
4146 // Some GLSL functions always return signed integers (like findMSB),
4147 // so they need to be cast to uint if the argument is also an uint.
4148 let ret_might_need_int_to_uint = matches!(
4149 fun,
4150 Mf::FirstTrailingBit | Mf::FirstLeadingBit | Mf::CountOneBits | Mf::Abs
4151 );
4152
4153 // Some GLSL functions only accept signed integers (like abs),
4154 // so they need their argument cast from uint to int.
4155 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
4156
4157 // Check if the argument is an unsigned integer and return the vector size
4158 // in case it's a vector
4159 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
4160 TypeInner::Scalar(crate::Scalar {
4161 kind: crate::ScalarKind::Uint,
4162 ..
4163 }) => Some(None),
4164 TypeInner::Vector {
4165 scalar:
4166 crate::Scalar {
4167 kind: crate::ScalarKind::Uint,
4168 ..
4169 },
4170 size,
4171 } => Some(Some(size)),
4172 _ => None,
4173 };
4174
4175 // Cast to uint if the function needs it
4176 if ret_might_need_int_to_uint {
4177 if let Some(maybe_size) = maybe_uint_size {
4178 match maybe_size {
4179 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
4180 None => write!(self.out, "uint(")?,
4181 }
4182 }
4183 }
4184
4185 write!(self.out, "{fun_name}(")?;
4186
4187 // Cast to int if the function needs it
4188 if arg_might_need_uint_to_int {
4189 if let Some(maybe_size) = maybe_uint_size {
4190 match maybe_size {
4191 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
4192 None => write!(self.out, "int(")?,
4193 }
4194 }
4195 }
4196
4197 self.write_expr(arg, ctx)?;
4198
4199 // Close the cast from uint to int
4200 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
4201 write!(self.out, ")")?
4202 }
4203
4204 if let Some(arg) = arg1 {
4205 write!(self.out, ", ")?;
4206 if extract_bits {
4207 write!(self.out, "int(")?;
4208 self.write_expr(arg, ctx)?;
4209 write!(self.out, ")")?;
4210 } else {
4211 self.write_expr(arg, ctx)?;
4212 }
4213 }
4214 if let Some(arg) = arg2 {
4215 write!(self.out, ", ")?;
4216 if extract_bits || insert_bits {
4217 write!(self.out, "int(")?;
4218 self.write_expr(arg, ctx)?;
4219 write!(self.out, ")")?;
4220 } else {
4221 self.write_expr(arg, ctx)?;
4222 }
4223 }
4224 if let Some(arg) = arg3 {
4225 write!(self.out, ", ")?;
4226 if insert_bits {
4227 write!(self.out, "int(")?;
4228 self.write_expr(arg, ctx)?;
4229 write!(self.out, ")")?;
4230 } else {
4231 self.write_expr(arg, ctx)?;
4232 }
4233 }
4234 write!(self.out, ")")?;
4235
4236 // Close the cast from int to uint
4237 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
4238 write!(self.out, ")")?
4239 }
4240 }
4241 // `As` is always a call.
4242 // If `convert` is true the function name is the type
4243 // Else the function name is one of the glsl provided bitcast functions
4244 Expression::As {
4245 expr,
4246 kind: target_kind,
4247 convert,
4248 } => {
4249 let inner = ctx.resolve_type(expr, &self.module.types);
4250 match convert {
4251 Some(width) => {
4252 // this is similar to `write_type`, but with the target kind
4253 let scalar = glsl_scalar(crate::Scalar {
4254 kind: target_kind,
4255 width,
4256 })?;
4257 match *inner {
4258 TypeInner::Matrix { columns, rows, .. } => write!(
4259 self.out,
4260 "{}mat{}x{}",
4261 scalar.prefix, columns as u8, rows as u8
4262 )?,
4263 TypeInner::Vector { size, .. } => {
4264 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
4265 }
4266 _ => write!(self.out, "{}", scalar.full)?,
4267 }
4268
4269 write!(self.out, "(")?;
4270 self.write_expr(expr, ctx)?;
4271 write!(self.out, ")")?
4272 }
4273 None => {
4274 use crate::ScalarKind as Sk;
4275
4276 let target_vector_type = match *inner {
4277 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
4278 size,
4279 scalar: crate::Scalar {
4280 kind: target_kind,
4281 width: scalar.width,
4282 },
4283 }),
4284 _ => None,
4285 };
4286
4287 let source_kind = inner.scalar_kind().unwrap();
4288
4289 match (source_kind, target_kind, target_vector_type) {
4290 // No conversion needed
4291 (Sk::Sint, Sk::Sint, _)
4292 | (Sk::Uint, Sk::Uint, _)
4293 | (Sk::Float, Sk::Float, _)
4294 | (Sk::Bool, Sk::Bool, _) => {
4295 self.write_expr(expr, ctx)?;
4296 return Ok(());
4297 }
4298
4299 // Cast to/from floats
4300 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
4301 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
4302 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
4303 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
4304
4305 // Cast between vector types
4306 (_, _, Some(vector)) => {
4307 self.write_value_type(&vector)?;
4308 }
4309
4310 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
4311 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
4312 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
4313 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
4314 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
4315 write!(self.out, "bool")?
4316 }
4317
4318 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
4319 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
4320 };
4321
4322 write!(self.out, "(")?;
4323 self.write_expr(expr, ctx)?;
4324 write!(self.out, ")")?;
4325 }
4326 }
4327 }
4328 // These expressions never show up in `Emit`.
4329 Expression::CallResult(_)
4330 | Expression::AtomicResult { .. }
4331 | Expression::RayQueryProceedResult
4332 | Expression::WorkGroupUniformLoadResult { .. }
4333 | Expression::SubgroupOperationResult { .. }
4334 | Expression::SubgroupBallotResult => unreachable!(),
4335 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
4336 Expression::ArrayLength(expr) => {
4337 write!(self.out, "uint(")?;
4338 self.write_expr(expr, ctx)?;
4339 write!(self.out, ".length())")?
4340 }
4341 // not supported yet
4342 Expression::RayQueryGetIntersection { .. }
4343 | Expression::RayQueryVertexPositions { .. } => unreachable!(),
4344 }
4345
4346 Ok(())
4347 }
4348
4349 /// Helper function to write the local holding the clamped lod
4350 fn write_clamped_lod(
4351 &mut self,
4352 ctx: &back::FunctionCtx,
4353 expr: Handle<crate::Expression>,
4354 image: Handle<crate::Expression>,
4355 level_expr: Handle<crate::Expression>,
4356 ) -> Result<(), Error> {
4357 // Define our local and start a call to `clamp`
4358 write!(
4359 self.out,
4360 "int {}{} = clamp(",
4361 Baked(expr),
4362 CLAMPED_LOD_SUFFIX
4363 )?;
4364 // Write the lod that will be clamped
4365 self.write_expr(level_expr, ctx)?;
4366 // Set the min value to 0 and start a call to `textureQueryLevels` to get
4367 // the maximum value
4368 write!(self.out, ", 0, textureQueryLevels(")?;
4369 // Write the target image as an argument to `textureQueryLevels`
4370 self.write_expr(image, ctx)?;
4371 // Close the call to `textureQueryLevels` subtract 1 from it since
4372 // the lod argument is 0 based, close the `clamp` call and end the
4373 // local declaration statement.
4374 writeln!(self.out, ") - 1);")?;
4375
4376 Ok(())
4377 }
4378
4379 // Helper method used to retrieve how many elements a coordinate vector
4380 // for the images operations need.
4381 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
4382 // openGL es doesn't have 1D images so we need workaround it
4383 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
4384 // Get how many components the coordinate vector needs for the dimensions only
4385 let tex_coord_size = match dim {
4386 crate::ImageDimension::D1 => 1,
4387 crate::ImageDimension::D2 => 2,
4388 crate::ImageDimension::D3 => 3,
4389 crate::ImageDimension::Cube => 2,
4390 };
4391 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
4392 // and another 1 if we need to workaround 1D images by making them 2D
4393 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
4394 }
4395
4396 /// Helper method to write the coordinate vector for image operations
4397 fn write_texture_coord(
4398 &mut self,
4399 ctx: &back::FunctionCtx,
4400 vector_size: u8,
4401 coordinate: Handle<crate::Expression>,
4402 array_index: Option<Handle<crate::Expression>>,
4403 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
4404 tex_1d_hack: bool,
4405 ) -> Result<(), Error> {
4406 match array_index {
4407 // If the image needs an array indice we need to add it to the end of our
4408 // coordinate vector, to do so we will use the `ivec(ivec, scalar)`
4409 // constructor notation (NOTE: the inner `ivec` can also be a scalar, this
4410 // is important for 1D arrayed images).
4411 Some(layer_expr) => {
4412 write!(self.out, "ivec{vector_size}(")?;
4413 self.write_expr(coordinate, ctx)?;
4414 write!(self.out, ", ")?;
4415 // If we are replacing sampler1D with sampler2D we also need
4416 // to add another zero to the coordinates vector for the y component
4417 if tex_1d_hack {
4418 write!(self.out, "0, ")?;
4419 }
4420 self.write_expr(layer_expr, ctx)?;
4421 write!(self.out, ")")?;
4422 }
4423 // Otherwise write just the expression (and the 1D hack if needed)
4424 None => {
4425 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
4426 TypeInner::Scalar(crate::Scalar {
4427 kind: crate::ScalarKind::Uint,
4428 ..
4429 }) => Some(None),
4430 TypeInner::Vector {
4431 size,
4432 scalar:
4433 crate::Scalar {
4434 kind: crate::ScalarKind::Uint,
4435 ..
4436 },
4437 } => Some(Some(size as u32)),
4438 _ => None,
4439 };
4440 if tex_1d_hack {
4441 write!(self.out, "ivec2(")?;
4442 } else if uvec_size.is_some() {
4443 match uvec_size {
4444 Some(None) => write!(self.out, "int(")?,
4445 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
4446 _ => {}
4447 }
4448 }
4449 self.write_expr(coordinate, ctx)?;
4450 if tex_1d_hack {
4451 write!(self.out, ", 0)")?;
4452 } else if uvec_size.is_some() {
4453 write!(self.out, ")")?;
4454 }
4455 }
4456 }
4457
4458 Ok(())
4459 }
4460
4461 /// Helper method to write the `ImageStore` statement
4462 fn write_image_store(
4463 &mut self,
4464 ctx: &back::FunctionCtx,
4465 image: Handle<crate::Expression>,
4466 coordinate: Handle<crate::Expression>,
4467 array_index: Option<Handle<crate::Expression>>,
4468 value: Handle<crate::Expression>,
4469 ) -> Result<(), Error> {
4470 use crate::ImageDimension as IDim;
4471
4472 // NOTE: openGL requires that `imageStore`s have no effects when the texel is invalid
4473 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
4474
4475 // This will only panic if the module is invalid
4476 let dim = match *ctx.resolve_type(image, &self.module.types) {
4477 TypeInner::Image { dim, .. } => dim,
4478 _ => unreachable!(),
4479 };
4480
4481 // Begin our call to `imageStore`
4482 write!(self.out, "imageStore(")?;
4483 self.write_expr(image, ctx)?;
4484 // Separate the image argument from the coordinates
4485 write!(self.out, ", ")?;
4486
4487 // openGL es doesn't have 1D images so we need workaround it
4488 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4489 // Write the coordinate vector
4490 self.write_texture_coord(
4491 ctx,
4492 // Get the size of the coordinate vector
4493 self.get_coordinate_vector_size(dim, array_index.is_some()),
4494 coordinate,
4495 array_index,
4496 tex_1d_hack,
4497 )?;
4498
4499 // Separate the coordinate from the value to write and write the expression
4500 // of the value to write.
4501 write!(self.out, ", ")?;
4502 self.write_expr(value, ctx)?;
4503 // End the call to `imageStore` and the statement.
4504 writeln!(self.out, ");")?;
4505
4506 Ok(())
4507 }
4508
4509 /// Helper method to write the `ImageAtomic` statement
4510 fn write_image_atomic(
4511 &mut self,
4512 ctx: &back::FunctionCtx,
4513 image: Handle<crate::Expression>,
4514 coordinate: Handle<crate::Expression>,
4515 array_index: Option<Handle<crate::Expression>>,
4516 fun: crate::AtomicFunction,
4517 value: Handle<crate::Expression>,
4518 ) -> Result<(), Error> {
4519 use crate::ImageDimension as IDim;
4520
4521 // NOTE: openGL requires that `imageAtomic`s have no effects when the texel is invalid
4522 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
4523
4524 // This will only panic if the module is invalid
4525 let dim = match *ctx.resolve_type(image, &self.module.types) {
4526 TypeInner::Image { dim, .. } => dim,
4527 _ => unreachable!(),
4528 };
4529
4530 // Begin our call to `imageAtomic`
4531 let fun_str = fun.to_glsl();
4532 write!(self.out, "imageAtomic{fun_str}(")?;
4533 self.write_expr(image, ctx)?;
4534 // Separate the image argument from the coordinates
4535 write!(self.out, ", ")?;
4536
4537 // openGL es doesn't have 1D images so we need workaround it
4538 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4539 // Write the coordinate vector
4540 self.write_texture_coord(
4541 ctx,
4542 // Get the size of the coordinate vector
4543 self.get_coordinate_vector_size(dim, false),
4544 coordinate,
4545 array_index,
4546 tex_1d_hack,
4547 )?;
4548
4549 // Separate the coordinate from the value to write and write the expression
4550 // of the value to write.
4551 write!(self.out, ", ")?;
4552 self.write_expr(value, ctx)?;
4553 // End the call to `imageAtomic` and the statement.
4554 writeln!(self.out, ");")?;
4555
4556 Ok(())
4557 }
4558
    /// Helper method for writing an `ImageLoad` expression.
    ///
    /// Emits either a `texelFetch` call (sampled images) or an `imageLoad`
    /// call (storage images), wrapped in whatever bounds checking the active
    /// [`proc::BoundsCheckPolicy`] requires:
    ///
    /// - `ReadZeroSkipWrite`: a parenthesized ternary whose condition checks
    ///   lod, sample index, and coordinates, yielding a zero vector on failure.
    /// - `Restrict`: coordinates (and sample index) are clamped into range;
    ///   the clamped lod is read from a local named `Baked(handle)` +
    ///   `CLAMPED_LOD_SUFFIX`, which is expected to have been emitted earlier
    ///   (see `write_clamped_lod`).
    /// - `Unchecked`: the plain call.
    ///
    /// `handle` is the handle of the `ImageLoad` expression itself and is only
    /// used to name that baked clamped-lod local. `sample` is used for
    /// multisampled images and `level` for mipmapped ones; the code below
    /// assumes at most one of them matters at a time (`sample.or(level)`) —
    /// NOTE(review): presumably guaranteed by validation, confirm upstream.
    #[allow(clippy::too_many_arguments)]
    fn write_image_load(
        &mut self,
        handle: Handle<crate::Expression>,
        ctx: &back::FunctionCtx,
        image: Handle<crate::Expression>,
        coordinate: Handle<crate::Expression>,
        array_index: Option<Handle<crate::Expression>>,
        sample: Option<Handle<crate::Expression>>,
        level: Option<Handle<crate::Expression>>,
    ) -> Result<(), Error> {
        use crate::ImageDimension as IDim;

        // `ImageLoad` is a bit complicated.
        // There are two functions one for sampled
        // images another for storage images, the former uses `texelFetch` and the
        // latter uses `imageLoad`.
        //
        // Furthermore we have `level` which is always `Some` for sampled images
        // and `None` for storage images, so we end up with two functions:
        // - `texelFetch(image, coordinate, level)` for sampled images
        // - `imageLoad(image, coordinate)` for storage images
        //
        // Finally we also have to consider bounds checking, for storage images
        // this is easy since openGL requires that invalid texels always return
        // 0, for sampled images we need to either verify that all arguments are
        // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).

        // This will only panic if the module is invalid
        let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
            TypeInner::Image {
                dim,
                arrayed: _,
                class,
            } => (dim, class),
            _ => unreachable!(),
        };

        // Get the name of the function to be used for the load operation
        // and the policy to be used with it.
        let (fun_name, policy) = match class {
            // Sampled images inherit the policy from the user passed policies
            crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
            crate::ImageClass::Storage { .. } => {
                // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
                // "Invalid image loads will return a vector where the value of R, G, and B components
                // is 0 and the value of the A component is undefined."
                //
                // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
                // "Invalid image loads will return zero."
                //
                // So, we only inject bounds checks for ES
                let policy = if self.options.version.is_es() {
                    self.policies.image_load
                } else {
                    proc::BoundsCheckPolicy::Unchecked
                };
                ("imageLoad", policy)
            }
            // TODO: Is there even a function for this?
            crate::ImageClass::Depth { multi: _ } => {
                return Err(Error::Custom(
                    "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
                ))
            }
            crate::ImageClass::External => unimplemented!(),
        };

        // openGL es doesn't have 1D images so we need workaround it
        let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
        // Get the size of the coordinate vector
        let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());

        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // To write the bounds checks for `ReadZeroSkipWrite` we will use a
            // ternary operator since we are in the middle of an expression and
            // need to return a value.
            //
            // NOTE: glsl does short circuit when evaluating logical
            // expressions so we can be sure that after we test a
            // condition it will be true for the next ones

            // Write parentheses around the ternary operator to prevent problems with
            // expressions emitted before or after it having more precedence
            write!(self.out, "(",)?;

            // The lod check needs to precede the size check since we need
            // to use the lod to get the size of the image at that level.
            if let Some(level_expr) = level {
                self.write_expr(level_expr, ctx)?;
                write!(self.out, " < textureQueryLevels(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // Check that the sample arguments doesn't exceed the number of samples
            if let Some(sample_expr) = sample {
                self.write_expr(sample_expr, ctx)?;
                write!(self.out, " < textureSamples(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // We now need to write the size checks for the coordinates and array index
            // first we write the comparison function in case the image is 1D non arrayed
            // (and no 1D to 2D hack was needed) we are comparing scalars so the less than
            // operator will suffice, but otherwise we'll be comparing two vectors so we'll
            // need to use the `lessThan` function but it returns a vector of booleans (one
            // for each comparison) so we need to fold it all in one scalar boolean, since
            // we want all comparisons to pass we use the `all` function which will only
            // return `true` if all the elements of the boolean vector are also `true`.
            //
            // So we'll end with one of the following forms
            // - `coord < textureSize(image, lod)` for 1D images
            // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
            // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
            //    for arrayed images
            // - `all(lessThan(coord, textureSize(image)))` for multi sampled images

            if vector_size != 1 {
                write!(self.out, "all(lessThan(")?;
            }

            // Write the coordinate vector
            self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

            if vector_size != 1 {
                // If we used the `lessThan` function we need to separate the
                // coordinates from the image size.
                write!(self.out, ", ")?;
            } else {
                // If we didn't use it (ie. 1D images) we perform the comparison
                // using the less than operator.
                write!(self.out, " < ")?;
            }

            // Call `textureSize` to get our image size
            write!(self.out, "textureSize(")?;
            self.write_expr(image, ctx)?;
            // `textureSize` uses the lod as a second argument for mipmapped images
            if let Some(level_expr) = level {
                // Separate the image from the lod
                write!(self.out, ", ")?;
                self.write_expr(level_expr, ctx)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            if vector_size != 1 {
                // Close the `all` and `lessThan` calls
                write!(self.out, "))")?;
            }

            // Finally end the condition part of the ternary operator
            write!(self.out, " ? ")?;
        }

        // Begin the call to the function used to load the texel
        write!(self.out, "{fun_name}(")?;
        self.write_expr(image, ctx)?;
        write!(self.out, ", ")?;

        // If we are using `Restrict` bounds checking we need to pass valid texel
        // coordinates, to do so we use the `clamp` function to get a value between
        // 0 and the image size - 1 (indexing begins at 0)
        if let proc::BoundsCheckPolicy::Restrict = policy {
            write!(self.out, "clamp(")?;
        }

        // Write the coordinate vector
        self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

        // If we are using `Restrict` bounds checking we need to write the rest of the
        // clamp we initiated before writing the coordinates.
        if let proc::BoundsCheckPolicy::Restrict = policy {
            // Write the min value 0
            if vector_size == 1 {
                write!(self.out, ", 0")?;
            } else {
                write!(self.out, ", ivec{vector_size}(0)")?;
            }
            // Start the `textureSize` call to use as the max value.
            write!(self.out, ", textureSize(")?;
            self.write_expr(image, ctx)?;
            // If the image is mipmapped we need to add the lod argument to the
            // `textureSize` call, but this needs to be the clamped lod, this should
            // have been generated earlier and put in a local.
            if class.is_mipmapped() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            // Subtract 1 from the `textureSize` call since the coordinates are zero based.
            if vector_size == 1 {
                write!(self.out, " - 1")?;
            } else {
                write!(self.out, " - ivec{vector_size}(1)")?;
            }

            // Close the `clamp` call
            write!(self.out, ")")?;

            // Add the clamped lod (if present) as the second argument to the
            // image load function.
            if level.is_some() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }

            // If a sample argument is needed we need to clamp it between 0 and
            // the number of samples the image has.
            if let Some(sample_expr) = sample {
                write!(self.out, ", clamp(")?;
                self.write_expr(sample_expr, ctx)?;
                // Set the min value to 0 and start the call to `textureSamples`
                write!(self.out, ", 0, textureSamples(")?;
                self.write_expr(image, ctx)?;
                // Close the `textureSamples` call, subtract 1 from it since the sample
                // argument is zero based, and close the `clamp` call
                writeln!(self.out, ") - 1)")?;
            }
        } else if let Some(sample_or_level) = sample.or(level) {
            // GLSL only support SInt on this field while WGSL support also UInt
            let cast_to_int = matches!(
                *ctx.resolve_type(sample_or_level, &self.module.types),
                TypeInner::Scalar(crate::Scalar {
                    kind: crate::ScalarKind::Uint,
                    ..
                })
            );

            // If no bounds checking is need just add the sample or level argument
            // after the coordinates
            write!(self.out, ", ")?;

            if cast_to_int {
                write!(self.out, "int(")?;
            }

            self.write_expr(sample_or_level, ctx)?;

            if cast_to_int {
                write!(self.out, ")")?;
            }
        }

        // Close the image load function.
        write!(self.out, ")")?;

        // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
        // (which is taken if the condition is `true`) with a colon (`:`) and write the
        // second branch which is just a 0 value.
        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // Get the kind of the output value.
            let kind = match class {
                // Only sampled images can reach here since storage images
                // don't need bounds checks and depth images aren't implemented
                crate::ImageClass::Sampled { kind, .. } => kind,
                _ => unreachable!(),
            };

            // End the first branch
            write!(self.out, " : ")?;
            // Write the 0 value
            write!(
                self.out,
                "{}vec4(",
                glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
            )?;
            self.write_zero_init_scalar(kind)?;
            // Close the zero value constructor
            write!(self.out, ")")?;
            // Close the parentheses surrounding our ternary
            write!(self.out, ")")?;
        }

        Ok(())
    }
4840
4841 fn write_named_expr(
4842 &mut self,
4843 handle: Handle<crate::Expression>,
4844 name: String,
4845 // The expression which is being named.
4846 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4847 named: Handle<crate::Expression>,
4848 ctx: &back::FunctionCtx,
4849 ) -> BackendResult {
4850 match ctx.info[named].ty {
4851 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4852 TypeInner::Struct { .. } => {
4853 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4854 write!(self.out, "{ty_name}")?;
4855 }
4856 _ => {
4857 self.write_type(ty_handle)?;
4858 }
4859 },
4860 proc::TypeResolution::Value(ref inner) => {
4861 self.write_value_type(inner)?;
4862 }
4863 }
4864
4865 let resolved = ctx.resolve_type(named, &self.module.types);
4866
4867 write!(self.out, " {name}")?;
4868 if let TypeInner::Array { base, size, .. } = *resolved {
4869 self.write_array_size(base, size)?;
4870 }
4871 write!(self.out, " = ")?;
4872 self.write_expr(handle, ctx)?;
4873 writeln!(self.out, ";")?;
4874 self.named_expressions.insert(named, name);
4875
4876 Ok(())
4877 }
4878
4879 /// Helper function that write string with default zero initialization for supported types
4880 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4881 let inner = &self.module.types[ty].inner;
4882 match *inner {
4883 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4884 self.write_zero_init_scalar(scalar.kind)?;
4885 }
4886 TypeInner::Vector { scalar, .. } => {
4887 self.write_value_type(inner)?;
4888 write!(self.out, "(")?;
4889 self.write_zero_init_scalar(scalar.kind)?;
4890 write!(self.out, ")")?;
4891 }
4892 TypeInner::Matrix { .. } => {
4893 self.write_value_type(inner)?;
4894 write!(self.out, "(")?;
4895 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4896 write!(self.out, ")")?;
4897 }
4898 TypeInner::Array { base, size, .. } => {
4899 let count = match size.resolve(self.module.to_ctx())? {
4900 proc::IndexableLength::Known(count) => count,
4901 proc::IndexableLength::Dynamic => return Ok(()),
4902 };
4903 self.write_type(base)?;
4904 self.write_array_size(base, size)?;
4905 write!(self.out, "(")?;
4906 for _ in 1..count {
4907 self.write_zero_init_value(base)?;
4908 write!(self.out, ", ")?;
4909 }
4910 // write last parameter without comma and space
4911 self.write_zero_init_value(base)?;
4912 write!(self.out, ")")?;
4913 }
4914 TypeInner::Struct { ref members, .. } => {
4915 let name = &self.names[&NameKey::Type(ty)];
4916 write!(self.out, "{name}(")?;
4917 for (index, member) in members.iter().enumerate() {
4918 if index != 0 {
4919 write!(self.out, ", ")?;
4920 }
4921 self.write_zero_init_value(member.ty)?;
4922 }
4923 write!(self.out, ")")?;
4924 }
4925 _ => unreachable!(),
4926 }
4927
4928 Ok(())
4929 }
4930
4931 /// Helper function that write string with zero initialization for scalar
4932 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4933 match kind {
4934 crate::ScalarKind::Bool => write!(self.out, "false")?,
4935 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4936 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4937 crate::ScalarKind::Sint => write!(self.out, "0")?,
4938 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4939 return Err(Error::Custom(
4940 "Abstract types should not appear in IR presented to backends".to_string(),
4941 ))
4942 }
4943 }
4944
4945 Ok(())
4946 }
4947
    /// Issue a control barrier.
    ///
    /// Emits the memory barriers selected by `flags` first, then the
    /// execution barrier `barrier()` itself, each at indentation `level`.
    fn write_control_barrier(
        &mut self,
        flags: crate::Barrier,
        level: back::Level,
    ) -> BackendResult {
        self.write_memory_barrier(flags, level)?;
        writeln!(self.out, "{level}barrier();")?;
        Ok(())
    }
4958
4959 /// Issue a memory barrier.
4960 fn write_memory_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4961 if flags.contains(crate::Barrier::STORAGE) {
4962 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4963 }
4964 if flags.contains(crate::Barrier::WORK_GROUP) {
4965 writeln!(self.out, "{level}memoryBarrierShared();")?;
4966 }
4967 if flags.contains(crate::Barrier::SUB_GROUP) {
4968 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4969 }
4970 if flags.contains(crate::Barrier::TEXTURE) {
4971 writeln!(self.out, "{level}memoryBarrierImage();")?;
4972 }
4973 Ok(())
4974 }
4975
4976 /// Helper function that return the glsl storage access string of [`StorageAccess`](crate::StorageAccess)
4977 ///
4978 /// glsl allows adding both `readonly` and `writeonly` but this means that
4979 /// they can only be used to query information about the resource which isn't what
4980 /// we want here so when storage access is both `LOAD` and `STORE` add no modifiers
4981 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4982 if storage_access.contains(crate::StorageAccess::ATOMIC) {
4983 return Ok(());
4984 }
4985 if !storage_access.contains(crate::StorageAccess::STORE) {
4986 write!(self.out, "readonly ")?;
4987 }
4988 if !storage_access.contains(crate::StorageAccess::LOAD) {
4989 write!(self.out, "writeonly ")?;
4990 }
4991 Ok(())
4992 }
4993
    /// Helper method used to produce the reflection info that's returned to the user.
    ///
    /// Gathers, for the current entry point: texture/sampler pairings,
    /// uniform and storage buffer names, push-constant items, varyings, and
    /// the clip distance count.
    fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
        let info = self.info.get_entry_point(self.entry_point_idx as usize);
        let mut texture_mapping = crate::FastHashMap::default();
        let mut uniforms = crate::FastHashMap::default();

        // Record every (texture, sampler) pairing seen by the entry point.
        // A texture observed with two different samplers is an error
        // (`ImageMultipleSamplers`).
        for sampling in info.sampling_set.iter() {
            let tex_name = self.reflection_names_globals[&sampling.image].clone();

            match texture_mapping.entry(tex_name) {
                hash_map::Entry::Vacant(v) => {
                    v.insert(TextureMapping {
                        texture: sampling.image,
                        sampler: Some(sampling.sampler),
                    });
                }
                hash_map::Entry::Occupied(e) => {
                    if e.get().sampler != Some(sampling.sampler) {
                        log::error!("Conflicting samplers for {}", e.key());
                        return Err(Error::ImageMultipleSamplers);
                    }
                }
            }
        }

        let mut push_constant_info = None;
        for (handle, var) in self.module.global_variables.iter() {
            // Skip globals not used by this entry point.
            if info[handle].is_empty() {
                continue;
            }
            match self.module.types[var.ty].inner {
                TypeInner::Image { .. } => {
                    // Images used without any sampler still get an entry,
                    // with `sampler: None`.
                    let tex_name = self.reflection_names_globals[&handle].clone();
                    match texture_mapping.entry(tex_name) {
                        hash_map::Entry::Vacant(v) => {
                            v.insert(TextureMapping {
                                texture: handle,
                                sampler: None,
                            });
                        }
                        hash_map::Entry::Occupied(_) => {
                            // already used with a sampler, do nothing
                        }
                    }
                }
                _ => match var.space {
                    crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
                        let name = self.reflection_names_globals[&handle].clone();
                        uniforms.insert(handle, name);
                    }
                    crate::AddressSpace::PushConstant => {
                        let name = self.reflection_names_globals[&handle].clone();
                        push_constant_info = Some((name, var.ty));
                    }
                    _ => (),
                },
            }
        }

        let mut push_constant_segments = Vec::new();
        let mut push_constant_items = vec![];

        if let Some((name, ty)) = push_constant_info {
            // We don't have a layouter available to us, so we need to create one.
            //
            // This is potentially a bit wasteful, but the set of types in the program
            // shouldn't be too large.
            let mut layouter = proc::Layouter::default();
            // NOTE(review): this `unwrap` assumes layout computation cannot
            // fail for a module that reached the backend — confirm, or
            // propagate the error instead.
            layouter.update(self.module.to_ctx()).unwrap();

            // We start with the name of the binding itself.
            push_constant_segments.push(name);

            // We then recursively collect all the uniform fields of the push constant.
            self.collect_push_constant_items(
                ty,
                &mut push_constant_segments,
                &layouter,
                &mut 0,
                &mut push_constant_items,
            );
        }

        Ok(ReflectionInfo {
            texture_mapping,
            uniforms,
            // `varying` is moved out; the writer's copy is left empty.
            varying: mem::take(&mut self.varying),
            push_constant_items,
            clip_distance_count: self.clip_distance_count,
        })
    }
5085
    /// Recursively walks the push-constant type `ty`, appending one
    /// [`PushConstantItem`] to `items` for each leaf that maps directly to a
    /// GL uniform (scalar, vector, or matrix).
    ///
    /// `segments` holds the access-path pieces accumulated so far (the
    /// binding name, `[i]` array indices, `.member` accessors); `offset` is
    /// advanced using `layouter`'s size/alignment information as leaves are
    /// visited.
    fn collect_push_constant_items(
        &mut self,
        ty: Handle<crate::Type>,
        segments: &mut Vec<String>,
        layouter: &proc::Layouter,
        offset: &mut u32,
        items: &mut Vec<PushConstantItem>,
    ) {
        // At this point in the recursion, `segments` contains the path
        // needed to access `ty` from the root.

        let layout = &layouter[ty];
        // Align the running offset before placing this type.
        *offset = layout.alignment.round_up(*offset);
        match self.module.types[ty].inner {
            // All these types map directly to GL uniforms.
            TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
                // Build the full name, by combining all current segments.
                let name: String = segments.iter().map(String::as_str).collect();
                items.push(PushConstantItem {
                    access_path: name,
                    offset: *offset,
                    ty,
                });
                *offset += layout.size;
            }
            // Arrays are recursed into.
            TypeInner::Array { base, size, .. } => {
                let crate::ArraySize::Constant(count) = size else {
                    unreachable!("Cannot have dynamic arrays in push constants");
                };

                for i in 0..count.get() {
                    // Add the array accessor and recurse.
                    segments.push(format!("[{i}]"));
                    self.collect_push_constant_items(base, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure the stride is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            TypeInner::Struct { ref members, .. } => {
                for (index, member) in members.iter().enumerate() {
                    // Add struct accessor and recurse.
                    segments.push(format!(
                        ".{}",
                        self.names[&NameKey::StructMember(ty, index as u32)]
                    ));
                    self.collect_push_constant_items(member.ty, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure ending padding is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            _ => unreachable!(),
        }
    }
5144}
5145
/// Structure returned by [`glsl_scalar`]
///
/// It contains both a prefix used in other types and the full type name
struct ScalarString<'a> {
    /// The prefix used to compose other type names (e.g. `u` + `vec4` → `uvec4`)
    prefix: &'a str,
    /// The name of the scalar type itself (e.g. `uint`)
    full: &'a str,
}
5155
5156/// Helper function that returns scalar related strings
5157///
5158/// Check [`ScalarString`] for the information provided
5159///
5160/// # Errors
5161/// If a [`Float`](crate::ScalarKind::Float) with an width that isn't 4 or 8
5162const fn glsl_scalar(scalar: crate::Scalar) -> Result<ScalarString<'static>, Error> {
5163 use crate::ScalarKind as Sk;
5164
5165 Ok(match scalar.kind {
5166 Sk::Sint => ScalarString {
5167 prefix: "i",
5168 full: "int",
5169 },
5170 Sk::Uint => ScalarString {
5171 prefix: "u",
5172 full: "uint",
5173 },
5174 Sk::Float => match scalar.width {
5175 4 => ScalarString {
5176 prefix: "",
5177 full: "float",
5178 },
5179 8 => ScalarString {
5180 prefix: "d",
5181 full: "double",
5182 },
5183 _ => return Err(Error::UnsupportedScalar(scalar)),
5184 },
5185 Sk::Bool => ScalarString {
5186 prefix: "b",
5187 full: "bool",
5188 },
5189 Sk::AbstractInt | Sk::AbstractFloat => {
5190 return Err(Error::UnsupportedScalar(scalar));
5191 }
5192 })
5193}
5194
/// Helper function that returns the glsl variable name for a builtin
///
/// `options` selects between spellings where they differ: input vs output
/// (`gl_FragCoord` vs `gl_Position`, `gl_SampleMaskIn` vs `gl_SampleMask`),
/// WebGL vs native (`ViewIndex`), and draw-parameters support
/// (`InstanceIndex`).
const fn glsl_built_in(built_in: crate::BuiltIn, options: VaryingOptions) -> &'static str {
    use crate::BuiltIn as Bi;

    match built_in {
        Bi::Position { .. } => {
            if options.output {
                "gl_Position"
            } else {
                "gl_FragCoord"
            }
        }
        // WebGL's OVR_multiview2 exposes the view index as an unsigned value.
        Bi::ViewIndex if options.targeting_webgl => "int(gl_ViewID_OVR)",
        Bi::ViewIndex => "gl_ViewIndex",
        // vertex
        Bi::BaseInstance => "uint(gl_BaseInstance)",
        Bi::BaseVertex => "uint(gl_BaseVertex)",
        Bi::ClipDistance => "gl_ClipDistance",
        Bi::CullDistance => "gl_CullDistance",
        Bi::InstanceIndex => {
            if options.draw_parameters {
                "(uint(gl_InstanceID) + uint(gl_BaseInstanceARB))"
            } else {
                // Must match FIRST_INSTANCE_BINDING
                "(uint(gl_InstanceID) + naga_vs_first_instance)"
            }
        }
        Bi::PointSize => "gl_PointSize",
        Bi::VertexIndex => "uint(gl_VertexID)",
        Bi::DrawID => "gl_DrawID",
        // fragment
        Bi::FragDepth => "gl_FragDepth",
        Bi::PointCoord => "gl_PointCoord",
        Bi::FrontFacing => "gl_FrontFacing",
        Bi::PrimitiveIndex => "uint(gl_PrimitiveID)",
        Bi::SampleIndex => "gl_SampleID",
        Bi::SampleMask => {
            if options.output {
                "gl_SampleMask"
            } else {
                "gl_SampleMaskIn"
            }
        }
        // compute
        Bi::GlobalInvocationId => "gl_GlobalInvocationID",
        Bi::LocalInvocationId => "gl_LocalInvocationID",
        Bi::LocalInvocationIndex => "gl_LocalInvocationIndex",
        Bi::WorkGroupId => "gl_WorkGroupID",
        Bi::WorkGroupSize => "gl_WorkGroupSize",
        Bi::NumWorkGroups => "gl_NumWorkGroups",
        // subgroup
        Bi::NumSubgroups => "gl_NumSubgroups",
        Bi::SubgroupId => "gl_SubgroupID",
        Bi::SubgroupSize => "gl_SubgroupSize",
        Bi::SubgroupInvocationId => "gl_SubgroupInvocationID",
    }
}
5252
5253/// Helper function that returns the string corresponding to the address space
5254const fn glsl_storage_qualifier(space: crate::AddressSpace) -> Option<&'static str> {
5255 use crate::AddressSpace as As;
5256
5257 match space {
5258 As::Function => None,
5259 As::Private => None,
5260 As::Storage { .. } => Some("buffer"),
5261 As::Uniform => Some("uniform"),
5262 As::Handle => Some("uniform"),
5263 As::WorkGroup => Some("shared"),
5264 As::PushConstant => Some("uniform"),
5265 }
5266}
5267
5268/// Helper function that returns the string corresponding to the glsl interpolation qualifier
5269const fn glsl_interpolation(interpolation: crate::Interpolation) -> &'static str {
5270 use crate::Interpolation as I;
5271
5272 match interpolation {
5273 I::Perspective => "smooth",
5274 I::Linear => "noperspective",
5275 I::Flat => "flat",
5276 }
5277}
5278
5279/// Return the GLSL auxiliary qualifier for the given sampling value.
5280const fn glsl_sampling(sampling: crate::Sampling) -> BackendResult<Option<&'static str>> {
5281 use crate::Sampling as S;
5282
5283 Ok(match sampling {
5284 S::First => return Err(Error::FirstSamplingNotSupported),
5285 S::Center | S::Either => None,
5286 S::Centroid => Some("centroid"),
5287 S::Sample => Some("sample"),
5288 })
5289}
5290
5291/// Helper function that returns the glsl dimension string of [`ImageDimension`](crate::ImageDimension)
5292const fn glsl_dimension(dim: crate::ImageDimension) -> &'static str {
5293 use crate::ImageDimension as IDim;
5294
5295 match dim {
5296 IDim::D1 => "1D",
5297 IDim::D2 => "2D",
5298 IDim::D3 => "3D",
5299 IDim::Cube => "Cube",
5300 }
5301}
5302
/// Helper function that returns the glsl storage format string of [`StorageFormat`](crate::StorageFormat)
///
/// # Errors
/// Returns [`Error::Custom`] for `Bgra8Unorm`, which has no GLSL format
/// qualifier implemented here.
fn glsl_storage_format(format: crate::StorageFormat) -> Result<&'static str, Error> {
    use crate::StorageFormat as Sf;

    Ok(match format {
        Sf::R8Unorm => "r8",
        Sf::R8Snorm => "r8_snorm",
        Sf::R8Uint => "r8ui",
        Sf::R8Sint => "r8i",
        Sf::R16Uint => "r16ui",
        Sf::R16Sint => "r16i",
        Sf::R16Float => "r16f",
        Sf::Rg8Unorm => "rg8",
        Sf::Rg8Snorm => "rg8_snorm",
        Sf::Rg8Uint => "rg8ui",
        Sf::Rg8Sint => "rg8i",
        Sf::R32Uint => "r32ui",
        Sf::R32Sint => "r32i",
        Sf::R32Float => "r32f",
        Sf::Rg16Uint => "rg16ui",
        Sf::Rg16Sint => "rg16i",
        Sf::Rg16Float => "rg16f",
        Sf::Rgba8Unorm => "rgba8",
        Sf::Rgba8Snorm => "rgba8_snorm",
        Sf::Rgba8Uint => "rgba8ui",
        Sf::Rgba8Sint => "rgba8i",
        Sf::Rgb10a2Uint => "rgb10_a2ui",
        Sf::Rgb10a2Unorm => "rgb10_a2",
        Sf::Rg11b10Ufloat => "r11f_g11f_b10f",
        Sf::R64Uint => "r64ui",
        Sf::Rg32Uint => "rg32ui",
        Sf::Rg32Sint => "rg32i",
        Sf::Rg32Float => "rg32f",
        Sf::Rgba16Uint => "rgba16ui",
        Sf::Rgba16Sint => "rgba16i",
        Sf::Rgba16Float => "rgba16f",
        Sf::Rgba32Uint => "rgba32ui",
        Sf::Rgba32Sint => "rgba32i",
        Sf::Rgba32Float => "rgba32f",
        Sf::R16Unorm => "r16",
        Sf::R16Snorm => "r16_snorm",
        Sf::Rg16Unorm => "rg16",
        Sf::Rg16Snorm => "rg16_snorm",
        Sf::Rgba16Unorm => "rgba16",
        Sf::Rgba16Snorm => "rgba16_snorm",

        Sf::Bgra8Unorm => {
            return Err(Error::Custom(
                "Support format BGRA8 is not implemented".into(),
            ))
        }
    })
}
5356
5357fn is_value_init_supported(module: &crate::Module, ty: Handle<crate::Type>) -> bool {
5358 match module.types[ty].inner {
5359 TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => true,
5360 TypeInner::Array { base, size, .. } => {
5361 size != crate::ArraySize::Dynamic && is_value_init_supported(module, base)
5362 }
5363 TypeInner::Struct { ref members, .. } => members
5364 .iter()
5365 .all(|member| is_value_init_supported(module, member.ty)),
5366 _ => false,
5367 }
5368}