naga/back/glsl/writer.rs
1use super::*;
2
/// Writer responsible for all code generation.
pub struct Writer<'a, W> {
    // Inputs
    /// The module being written.
    pub(in crate::back::glsl) module: &'a crate::Module,
    /// The module analysis.
    pub(in crate::back::glsl) info: &'a valid::ModuleInfo,
    /// The output writer.
    out: W,
    /// User defined configuration to be used.
    pub(in crate::back::glsl) options: &'a Options,
    /// The bound checking policies to be used
    pub(in crate::back::glsl) policies: proc::BoundsCheckPolicies,

    // Internal State
    /// Features manager used to store all the needed features and write them.
    pub(in crate::back::glsl) features: FeaturesManager,
    /// Namer used to generate unique, keyword-free identifiers.
    namer: proc::Namer,
    /// A map with all the names needed for writing the module
    /// (generated by a [`Namer`](crate::proc::Namer)).
    names: crate::FastHashMap<NameKey, String>,
    /// A map with the names of global variables needed for reflections.
    reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// The selected entry point.
    pub(in crate::back::glsl) entry_point: &'a crate::EntryPoint,
    /// The index of the selected entry point.
    pub(in crate::back::glsl) entry_point_idx: proc::EntryPointIndex,
    /// A generator for unique block numbers.
    block_id: IdGenerator,
    /// Set of expressions that have associated temporary variables.
    named_expressions: crate::NamedExpressions,
    /// Set of expressions that need to be baked to avoid unnecessary repetition in output
    need_bake_expressions: back::NeedBakeExpressions,
    /// Information about nesting of loops and switches.
    ///
    /// Used for forwarding continue statements in switches that have been
    /// transformed to `do {} while(false);` loops.
    continue_ctx: back::continue_forward::ContinueCtx,
    /// How many views to render to, if doing multiview rendering.
    pub(in crate::back::glsl) multiview: Option<core::num::NonZeroU32>,
    /// Mapping of varying variables to their location. Needed for reflections.
    varying: crate::FastHashMap<String, VaryingLocation>,
    /// Number of user-defined clip planes. Only non-zero for vertex shaders.
    clip_distance_count: u32,
}
48
49impl<'a, W: Write> Writer<'a, W> {
    /// Creates a new [`Writer`] instance.
    ///
    /// # Errors
    /// - If the version specified is invalid or not supported.
    /// - If the entry point couldn't be found in the module.
    /// - If the version specified doesn't support some used features.
    pub fn new(
        out: W,
        module: &'a crate::Module,
        info: &'a valid::ModuleInfo,
        options: &'a Options,
        pipeline_options: &'a PipelineOptions,
        policies: proc::BoundsCheckPolicies,
    ) -> Result<Self, Error> {
        // Check if the requested version is supported
        if !options.version.is_supported() {
            log::error!("Version {}", options.version);
            return Err(Error::VersionNotSupported);
        }

        // Try to find the entry point and corresponding index
        let ep_idx = module
            .entry_points
            .iter()
            .position(|ep| {
                pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
            })
            .ok_or(Error::EntryPointNotFound)?;

        // Generate a map with names required to write the module, reserving
        // prefixes the backend generates itself so user names can't collide.
        let mut names = crate::FastHashMap::default();
        let mut namer = proc::Namer::default();
        namer.reset(
            module,
            &keywords::RESERVED_KEYWORD_SET,
            proc::KeywordSet::empty(),
            proc::CaseInsensitiveKeywordSet::empty(),
            &[
                "gl_",                  // all GL built-in variables
                "_group",               // all normal bindings
                "_immediates_binding_", // all immediate data bindings
            ],
            &mut names,
        );

        // Build the instance
        let mut this = Self {
            module,
            info,
            out,
            options,
            policies,

            namer,
            features: FeaturesManager::new(),
            names,
            reflection_names_globals: crate::FastHashMap::default(),
            entry_point: &module.entry_points[ep_idx],
            entry_point_idx: ep_idx as u16,
            multiview: pipeline_options.multiview,
            block_id: IdGenerator::default(),
            named_expressions: Default::default(),
            need_bake_expressions: Default::default(),
            continue_ctx: back::continue_forward::ContinueCtx::default(),
            varying: Default::default(),
            clip_distance_count: 0,
        };

        // Find all features required to print this module
        this.collect_required_features()?;

        Ok(this)
    }
123
    /// Writes the [`Module`](crate::Module) as glsl to the output
    ///
    /// # Notes
    /// If an error occurs while writing, the output might have been written partially
    ///
    /// # Panics
    /// Might panic if the module is invalid
    pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
        // We use `writeln!(self.out)` throughout the write to add newlines
        // to make the output more readable

        let es = self.options.version.is_es();

        // Write the version (It must be the first thing or it isn't a valid glsl output)
        writeln!(self.out, "#version {}", self.options.version)?;
        // Write all the needed extensions
        //
        // This used to be the last thing being written as it allowed to search for features while
        // writing the module saving some loops but some older versions (420 or less) required the
        // extensions to appear before being used, even though extensions are part of the
        // preprocessor not the processor ¯\_(ツ)_/¯
        self.features.write(self.options, &mut self.out)?;

        // glsl es requires a precision to be specified for floats and ints
        // TODO: Should this be user configurable?
        if es {
            writeln!(self.out)?;
            writeln!(self.out, "precision highp float;")?;
            writeln!(self.out, "precision highp int;")?;
            writeln!(self.out)?;
        }

        // Compute shaders must declare their workgroup size up front.
        if self.entry_point.stage == ShaderStage::Compute {
            let workgroup_size = self.entry_point.workgroup_size;
            writeln!(
                self.out,
                "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
                workgroup_size[0], workgroup_size[1], workgroup_size[2]
            )?;
            writeln!(self.out)?;
        }

        // Without draw-parameter support the instance index is supplied
        // through a plain uniform instead of a built-in.
        if self.entry_point.stage == ShaderStage::Vertex
            && !self
                .options
                .writer_flags
                .contains(WriterFlags::DRAW_PARAMETERS)
            && self.features.contains(Features::INSTANCE_INDEX)
        {
            writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
            writeln!(self.out)?;
        }

        // Enable early depth tests if needed
        if let Some(early_depth_test) = self.entry_point.early_depth_test {
            // If early depth test is supported for this version of GLSL
            if self.options.version.supports_early_depth_test() {
                match early_depth_test {
                    crate::EarlyDepthTest::Force => {
                        writeln!(self.out, "layout(early_fragment_tests) in;")?;
                    }
                    crate::EarlyDepthTest::Allow { conservative, .. } => {
                        use crate::ConservativeDepth as Cd;
                        let depth = match conservative {
                            Cd::GreaterEqual => "greater",
                            Cd::LessEqual => "less",
                            Cd::Unchanged => "unchanged",
                        };
                        writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
                    }
                }
            } else {
                log::warn!(
                    "Early depth testing is not supported for this version of GLSL: {}",
                    self.options.version
                );
            }
        }

        if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
            if let Some(multiview) = self.multiview.as_ref() {
                writeln!(self.out, "layout(num_views = {multiview}) in;")?;
                writeln!(self.out)?;
            }
        }

        // Write struct types.
        //
        // These are always ordered because the IR is structured in a way that
        // you can't make a struct without adding all of its members first.
        for (handle, ty) in self.module.types.iter() {
            if let TypeInner::Struct { ref members, .. } = ty.inner {
                let struct_name = &self.names[&NameKey::Type(handle)];

                // Structures ending with runtime-sized arrays can only be
                // rendered as shader storage blocks in GLSL, not stand-alone
                // struct types.
                if !self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types)
                {
                    write!(self.out, "struct {struct_name} ")?;
                    self.write_struct_body(handle, members)?;
                    writeln!(self.out, ";")?;
                }
            }
        }

        // Write functions for special types.
        for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
            match type_key {
                &crate::PredeclaredType::ModfResult { size, scalar }
                | &crate::PredeclaredType::FrexpResult { size, scalar } => {
                    let struct_name = &self.names[&NameKey::Type(*struct_ty)];
                    let arg_type_name_owner;
                    let arg_type_name = if let Some(size) = size {
                        arg_type_name_owner = format!(
                            "{}vec{}",
                            if scalar.width == 8 { "d" } else { "" },
                            size as u8
                        );
                        &arg_type_name_owner
                    } else if scalar.width == 8 {
                        "double"
                    } else {
                        "float"
                    };

                    let other_type_name_owner;
                    let (defined_func_name, called_func_name, other_type_name) =
                        if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
                            (MODF_FUNCTION, "modf", arg_type_name)
                        } else {
                            let other_type_name = if let Some(size) = size {
                                other_type_name_owner = format!("ivec{}", size as u8);
                                &other_type_name_owner
                            } else {
                                "int"
                            };
                            (FREXP_FUNCTION, "frexp", other_type_name)
                        };

                    writeln!(self.out)?;
                    // Older versions lack the two-argument `frexp`, so emit an
                    // arithmetic emulation instead of wrapping the built-in.
                    if !self.options.version.supports_frexp_function()
                        && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
                    {
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
    {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
    return {struct_name}(fract, other);
}}",
                        )?;
                    } else {
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other;
    {arg_type_name} fract = {called_func_name}(arg, other);
    return {struct_name}(fract, other);
}}",
                        )?;
                    }
                }
                &crate::PredeclaredType::AtomicCompareExchangeWeakResult(_) => {
                    // Handled by the general struct writing loop earlier.
                }
            }
        }

        // Write all named constants
        let mut constants = self
            .module
            .constants
            .iter()
            .filter(|&(_, c)| c.name.is_some())
            .peekable();
        while let Some((handle, _)) = constants.next() {
            self.write_global_constant(handle)?;
            // Add extra newline for readability on last iteration
            if constants.peek().is_none() {
                writeln!(self.out)?;
            }
        }

        let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);

        // Write the globals
        //
        // Unless explicitly disabled with WriterFlags::INCLUDE_UNUSED_ITEMS,
        // we filter all globals that aren't used by the selected entry point as they might
        // interfere with each other (i.e. two globals with the same location but
        // different classes)
        let include_unused = self
            .options
            .writer_flags
            .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
        for (handle, global) in self.module.global_variables.iter() {
            let is_unused = ep_info[handle].is_empty();
            if !include_unused && is_unused {
                continue;
            }

            match self.module.types[global.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    mut dim,
                    arrayed,
                    class,
                } => {
                    // Gather the storage format if needed
                    let storage_format_access = match self.module.types[global.ty].inner {
                        TypeInner::Image {
                            class: crate::ImageClass::Storage { format, access },
                            ..
                        } => Some((format, access)),
                        _ => None,
                    };

                    if dim == crate::ImageDimension::D1 && es {
                        dim = crate::ImageDimension::D2
                    }

                    // Gather the location if needed
                    let layout_binding = if self.options.version.supports_explicit_locations() {
                        let br = global.binding.as_ref().unwrap();
                        self.options.binding_map.get(br).cloned()
                    } else {
                        None
                    };

                    // Write all the layout qualifiers
                    if layout_binding.is_some() || storage_format_access.is_some() {
                        write!(self.out, "layout(")?;
                        if let Some(binding) = layout_binding {
                            write!(self.out, "binding = {binding}")?;
                        }
                        if let Some((format, _)) = storage_format_access {
                            let format_str = glsl_storage_format(format)?;
                            let separator = match layout_binding {
                                Some(_) => ",",
                                None => "",
                            };
                            write!(self.out, "{separator}{format_str}")?;
                        }
                        write!(self.out, ") ")?;
                    }

                    if let Some((_, access)) = storage_format_access {
                        self.write_storage_access(access)?;
                    }

                    // All images in glsl are `uniform`
                    // The trailing space is important
                    write!(self.out, "uniform ")?;

                    // write the type
                    //
                    // This is why we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    self.write_image_type(dim, arrayed, class)?;

                    // Finally write the name and end the global with a `;`
                    // The leading space is important
                    let global_name = self.get_global_name(handle, global);
                    writeln!(self.out, " {global_name};")?;
                    writeln!(self.out)?;

                    self.reflection_names_globals.insert(handle, global_name);
                }
                // glsl has no concept of samplers so we just ignore it
                TypeInner::Sampler { .. } => continue,
                // All other globals are written by `write_global`
                _ => {
                    self.write_global(handle, global)?;
                    // Add a newline (only for readability)
                    writeln!(self.out)?;
                }
            }
        }

        // Write the in/out varyings for the entry point's arguments and result.
        for arg in self.entry_point.function.arguments.iter() {
            self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
        }
        if let Some(ref result) = self.entry_point.function.result {
            self.write_varying(result.binding.as_ref(), result.ty, true)?;
        }
        writeln!(self.out)?;

        // Write all regular functions
        for (handle, function) in self.module.functions.iter() {
            // Check that the function doesn't use globals that aren't supported
            // by the current entry point
            if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
                continue;
            }

            let fun_info = &self.info[handle];

            // Skip functions that are not compatible with this entry point's stage.
            //
            // When validation is enabled, it rejects modules whose entry points try to call
            // incompatible functions, so if we got this far, then any functions incompatible
            // with our selected entry point must not be used.
            //
            // When validation is disabled, `fun_info.available_stages` is always just
            // `ShaderStages::all()`, so this will write all functions in the module, and
            // the downstream GLSL compiler will catch any problems.
            if !fun_info.available_stages.contains(ep_info.available_stages) {
                continue;
            }

            // Write the function
            self.write_function(back::FunctionType::Function(handle), function, fun_info)?;

            writeln!(self.out)?;
        }

        // Write the entry point itself (rendered as `main`) last.
        self.write_function(
            back::FunctionType::EntryPoint(self.entry_point_idx),
            &self.entry_point.function,
            ep_info,
        )?;

        // Add newline at the end of file
        writeln!(self.out)?;

        // Collect all reflection info and return it to the user
        self.collect_reflection_info()
    }
456
457 fn write_array_size(
458 &mut self,
459 base: Handle<crate::Type>,
460 size: crate::ArraySize,
461 ) -> BackendResult {
462 write!(self.out, "[")?;
463
464 // Write the array size
465 // Writes nothing if `IndexableLength::Dynamic`
466 match size.resolve(self.module.to_ctx())? {
467 proc::IndexableLength::Known(size) => {
468 write!(self.out, "{size}")?;
469 }
470 proc::IndexableLength::Dynamic => (),
471 }
472
473 write!(self.out, "]")?;
474
475 if let TypeInner::Array {
476 base: next_base,
477 size: next_size,
478 ..
479 } = self.module.types[base].inner
480 {
481 self.write_array_size(next_base, next_size)?;
482 }
483
484 Ok(())
485 }
486
487 /// Helper method used to write value types
488 ///
489 /// # Notes
490 /// Adds no trailing or leading whitespace
491 fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
492 match *inner {
493 // Scalars are simple we just get the full name from `glsl_scalar`
494 TypeInner::Scalar(scalar)
495 | TypeInner::Atomic(scalar)
496 | TypeInner::ValuePointer {
497 size: None,
498 scalar,
499 space: _,
500 } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
501 // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
502 TypeInner::Vector { size, scalar }
503 | TypeInner::ValuePointer {
504 size: Some(size),
505 scalar,
506 space: _,
507 } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
508 // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
509 // doubles are allowed), `M` is the columns count and `N` is the rows count
510 //
511 // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
512 // extra branch to write matrices this way
513 TypeInner::Matrix {
514 columns,
515 rows,
516 scalar,
517 } => write!(
518 self.out,
519 "{}mat{}x{}",
520 glsl_scalar(scalar)?.prefix,
521 columns as u8,
522 rows as u8
523 )?,
524 // GLSL arrays are written as `type name[size]`
525 // Here we only write the size of the array i.e. `[size]`
526 // Base `type` and `name` should be written outside
527 TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
528 // Write all variants instead of `_` so that if new variants are added a
529 // no exhaustiveness error is thrown
530 TypeInner::Pointer { .. }
531 | TypeInner::Struct { .. }
532 | TypeInner::Image { .. }
533 | TypeInner::Sampler { .. }
534 | TypeInner::AccelerationStructure { .. }
535 | TypeInner::RayQuery { .. }
536 | TypeInner::BindingArray { .. }
537 | TypeInner::CooperativeMatrix { .. } => {
538 return Err(Error::Custom(format!("Unable to write type {inner:?}")))
539 }
540 }
541
542 Ok(())
543 }
544
545 /// Helper method used to write non image/sampler types
546 ///
547 /// # Notes
548 /// Adds no trailing or leading whitespace
549 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
550 match self.module.types[ty].inner {
551 // glsl has no pointer types so just write types as normal and loads are skipped
552 TypeInner::Pointer { base, .. } => self.write_type(base),
553 // glsl structs are written as just the struct name
554 TypeInner::Struct { .. } => {
555 // Get the struct name
556 let name = &self.names[&NameKey::Type(ty)];
557 write!(self.out, "{name}")?;
558 Ok(())
559 }
560 // glsl array has the size separated from the base type
561 TypeInner::Array { base, .. } => self.write_type(base),
562 ref other => self.write_value_type(other),
563 }
564 }
565
566 /// Helper method to write a image type
567 ///
568 /// # Notes
569 /// Adds no leading or trailing whitespace
570 fn write_image_type(
571 &mut self,
572 dim: crate::ImageDimension,
573 arrayed: bool,
574 class: crate::ImageClass,
575 ) -> BackendResult {
576 // glsl images consist of four parts the scalar prefix, the image "type", the dimensions
577 // and modifiers
578 //
579 // There exists two image types
580 // - sampler - for sampled images
581 // - image - for storage images
582 //
583 // There are three possible modifiers that can be used together and must be written in
584 // this order to be valid
585 // - MS - used if it's a multisampled image
586 // - Array - used if it's an image array
587 // - Shadow - used if it's a depth image
588 use crate::ImageClass as Ic;
589 use crate::Scalar as S;
590 let float = S {
591 kind: crate::ScalarKind::Float,
592 width: 4,
593 };
594 let (base, scalar, ms, comparison) = match class {
595 Ic::Sampled { kind, multi: true } => ("sampler", S { kind, width: 4 }, "MS", ""),
596 Ic::Sampled { kind, multi: false } => ("sampler", S { kind, width: 4 }, "", ""),
597 Ic::Depth { multi: true } => ("sampler", float, "MS", ""),
598 Ic::Depth { multi: false } => ("sampler", float, "", "Shadow"),
599 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
600 Ic::External => unimplemented!(),
601 };
602
603 let precision = if self.options.version.is_es() {
604 "highp "
605 } else {
606 ""
607 };
608
609 write!(
610 self.out,
611 "{}{}{}{}{}{}{}",
612 precision,
613 glsl_scalar(scalar)?.prefix,
614 base,
615 glsl_dimension(dim),
616 ms,
617 if arrayed { "Array" } else { "" },
618 comparison
619 )?;
620
621 Ok(())
622 }
623
624 /// Helper method used by [Self::write_global] to write just the layout part of
625 /// a non image/sampler global variable, if applicable.
626 ///
627 /// # Notes
628 ///
629 /// Adds trailing whitespace if any layout qualifier is written
630 fn write_global_layout(&mut self, global: &crate::GlobalVariable) -> BackendResult {
631 // Determine which (if any) explicit memory layout to use, and whether we support it
632 let layout = match global.space {
633 crate::AddressSpace::Uniform => {
634 if !self.options.version.supports_std140_layout() {
635 return Err(Error::Custom(
636 "Uniform address space requires std140 layout support".to_string(),
637 ));
638 }
639
640 Some("std140")
641 }
642 crate::AddressSpace::Storage { .. } => {
643 if !self.options.version.supports_std430_layout() {
644 return Err(Error::Custom(
645 "Storage address space requires std430 layout support".to_string(),
646 ));
647 }
648
649 Some("std430")
650 }
651 _ => None,
652 };
653
654 // If our version supports explicit layouts, we can also output the explicit binding
655 // if we have it
656 if self.options.version.supports_explicit_locations() {
657 if let Some(ref br) = global.binding {
658 match self.options.binding_map.get(br) {
659 Some(binding) => {
660 write!(self.out, "layout(")?;
661
662 if let Some(layout) = layout {
663 write!(self.out, "{layout}, ")?;
664 }
665
666 write!(self.out, "binding = {binding}) ")?;
667
668 return Ok(());
669 }
670 None => {
671 log::debug!("unassigned binding for {:?}", global.name);
672 }
673 }
674 }
675 }
676
677 // Either no explicit bindings are supported or we didn't have any.
678 // Write just the memory layout.
679 if let Some(layout) = layout {
680 write!(self.out, "layout({layout}) ")?;
681 }
682
683 Ok(())
684 }
685
686 /// Helper method used to write non images/sampler globals
687 ///
688 /// # Notes
689 /// Adds a newline
690 ///
691 /// # Panics
692 /// If the global has type sampler
693 fn write_global(
694 &mut self,
695 handle: Handle<crate::GlobalVariable>,
696 global: &crate::GlobalVariable,
697 ) -> BackendResult {
698 self.write_global_layout(global)?;
699
700 if let crate::AddressSpace::Storage { access } = global.space {
701 self.write_storage_access(access)?;
702 if global
703 .memory_decorations
704 .contains(crate::MemoryDecorations::COHERENT)
705 {
706 write!(self.out, "coherent ")?;
707 }
708 if global
709 .memory_decorations
710 .contains(crate::MemoryDecorations::VOLATILE)
711 {
712 write!(self.out, "volatile ")?;
713 }
714 }
715
716 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
717 write!(self.out, "{storage_qualifier} ")?;
718 }
719
720 match global.space {
721 crate::AddressSpace::Private => {
722 self.write_simple_global(handle, global)?;
723 }
724 crate::AddressSpace::WorkGroup => {
725 self.write_simple_global(handle, global)?;
726 }
727 crate::AddressSpace::Immediate => {
728 self.write_simple_global(handle, global)?;
729 }
730 crate::AddressSpace::Uniform => {
731 self.write_interface_block(handle, global)?;
732 }
733 crate::AddressSpace::Storage { .. } => {
734 self.write_interface_block(handle, global)?;
735 }
736 crate::AddressSpace::TaskPayload => {
737 self.write_interface_block(handle, global)?;
738 }
739 // A global variable in the `Function` address space is a
740 // contradiction in terms.
741 crate::AddressSpace::Function => unreachable!(),
742 // Textures and samplers are handled directly in `Writer::write`.
743 crate::AddressSpace::Handle => unreachable!(),
744 // ray tracing pipelines unsupported
745 crate::AddressSpace::RayPayload | crate::AddressSpace::IncomingRayPayload => {
746 unreachable!()
747 }
748 }
749
750 Ok(())
751 }
752
753 fn write_simple_global(
754 &mut self,
755 handle: Handle<crate::GlobalVariable>,
756 global: &crate::GlobalVariable,
757 ) -> BackendResult {
758 self.write_type(global.ty)?;
759 write!(self.out, " ")?;
760 self.write_global_name(handle, global)?;
761
762 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
763 self.write_array_size(base, size)?;
764 }
765
766 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
767 write!(self.out, " = ")?;
768 if let Some(init) = global.init {
769 self.write_const_expr(init, &self.module.global_expressions)?;
770 } else {
771 self.write_zero_init_value(global.ty)?;
772 }
773 }
774
775 writeln!(self.out, ";")?;
776
777 if let crate::AddressSpace::Immediate = global.space {
778 let global_name = self.get_global_name(handle, global);
779 self.reflection_names_globals.insert(handle, global_name);
780 }
781
782 Ok(())
783 }
784
785 /// Write an interface block for a single Naga global.
786 ///
787 /// Write `block_name { members }`. Since `block_name` must be unique
788 /// between blocks and structs, we add `_block_ID` where `ID` is a
789 /// `IdGenerator` generated number. Write `members` in the same way we write
790 /// a struct's members.
791 fn write_interface_block(
792 &mut self,
793 handle: Handle<crate::GlobalVariable>,
794 global: &crate::GlobalVariable,
795 ) -> BackendResult {
796 // Write the block name, it's just the struct name appended with `_block_ID`
797 let ty_name = &self.names[&NameKey::Type(global.ty)];
798 let block_name = format!(
799 "{}_block_{}{:?}",
800 // avoid double underscores as they are reserved in GLSL
801 ty_name.trim_end_matches('_'),
802 self.block_id.generate(),
803 self.entry_point.stage,
804 );
805 write!(self.out, "{block_name} ")?;
806 self.reflection_names_globals.insert(handle, block_name);
807
808 match self.module.types[global.ty].inner {
809 TypeInner::Struct { ref members, .. }
810 if self.module.types[members.last().unwrap().ty]
811 .inner
812 .is_dynamically_sized(&self.module.types) =>
813 {
814 // Structs with dynamically sized arrays must have their
815 // members lifted up as members of the interface block. GLSL
816 // can't write such struct types anyway.
817 self.write_struct_body(global.ty, members)?;
818 write!(self.out, " ")?;
819 self.write_global_name(handle, global)?;
820 }
821 _ => {
822 // A global of any other type is written as the sole member
823 // of the interface block. Since the interface block is
824 // anonymous, this becomes visible in the global scope.
825 write!(self.out, "{{ ")?;
826 self.write_type(global.ty)?;
827 write!(self.out, " ")?;
828 self.write_global_name(handle, global)?;
829 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
830 self.write_array_size(base, size)?;
831 }
832 write!(self.out, "; }}")?;
833 }
834 }
835
836 writeln!(self.out, ";")?;
837
838 Ok(())
839 }
840
    /// Helper method used to find which expressions of a given function require baking
    ///
    /// # Notes
    /// Clears `need_bake_expressions` set before adding to it
    fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
        use crate::Expression;
        self.need_bake_expressions.clear();
        for (fun_handle, expr) in func.expressions.iter() {
            let expr_info = &info[fun_handle];
            // Expressions referenced at least `bake_ref_count` times get a
            // temporary, so their code isn't duplicated at each use site.
            let min_ref_count = func.expressions[fun_handle].bake_ref_count();
            if min_ref_count <= expr_info.ref_count {
                self.need_bake_expressions.insert(fun_handle);
            }

            let inner = expr_info.ty.inner_with(&self.module.types);

            // Certain math functions additionally need (some of) their
            // arguments baked, independent of the argument's own ref count.
            if let Expression::Math {
                fun,
                arg,
                arg1,
                arg2,
                ..
            } = *expr
            {
                match fun {
                    crate::MathFunction::Dot => {
                        // if the expression is a Dot product with integer arguments,
                        // then the args needs baking as well
                        if let TypeInner::Scalar(crate::Scalar {
                            kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint,
                            ..
                        }) = *inner
                        {
                            self.need_bake_expressions.insert(arg);
                            self.need_bake_expressions.insert(arg1.unwrap());
                        }
                    }
                    crate::MathFunction::Dot4U8Packed | crate::MathFunction::Dot4I8Packed => {
                        self.need_bake_expressions.insert(arg);
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::Pack4xI8
                    | crate::MathFunction::Pack4xU8
                    | crate::MathFunction::Pack4xI8Clamp
                    | crate::MathFunction::Pack4xU8Clamp
                    | crate::MathFunction::Unpack4xI8
                    | crate::MathFunction::Unpack4xU8
                    | crate::MathFunction::QuantizeToF16 => {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8snorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        // We have a fallback if the platform doesn't natively support these
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8unorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16snorm | */
                    crate::MathFunction::Unpack2x16snorm
                        if !self.options.version.supports_pack_unpack_snorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16unorm | */
                    crate::MathFunction::Unpack2x16unorm
                        if !self.options.version.supports_pack_unpack_unorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    crate::MathFunction::ExtractBits => {
                        // Only argument 1 is re-used.
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::InsertBits => {
                        // Only argument 2 is re-used.
                        self.need_bake_expressions.insert(arg2.unwrap());
                    }
                    crate::MathFunction::CountLeadingZeros => {
                        // Only the signed variant needs its argument baked —
                        // presumably its lowering re-uses the argument (TODO confirm).
                        if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
                            self.need_bake_expressions.insert(arg);
                        }
                    }
                    _ => {}
                }
            }
        }

        // Atomic compare-exchange needs its comparison value available as a
        // stable expression, so bake it unconditionally.
        for statement in func.body.iter() {
            match *statement {
                crate::Statement::Atomic {
                    fun: crate::AtomicFunction::Exchange { compare: Some(cmp) },
                    ..
                } => {
                    self.need_bake_expressions.insert(cmp);
                }
                _ => {}
            }
        }
    }
946
947 /// Helper method used to get a name for a global
948 ///
949 /// Globals have different naming schemes depending on their binding:
950 /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
951 /// - Globals with resource binding are named `_group_X_binding_Y` where `X`
952 /// is the group and `Y` is the binding
953 fn get_global_name(
954 &self,
955 handle: Handle<crate::GlobalVariable>,
956 global: &crate::GlobalVariable,
957 ) -> String {
958 match (&global.binding, global.space) {
959 (&Some(ref br), _) => {
960 format!(
961 "_group_{}_binding_{}_{}",
962 br.group,
963 br.binding,
964 self.entry_point.stage.to_str()
965 )
966 }
967 (&None, crate::AddressSpace::Immediate) => {
968 format!("_immediates_binding_{}", self.entry_point.stage.to_str())
969 }
970 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
971 }
972 }
973
974 /// Helper method used to write a name for a global without additional heap allocation
975 fn write_global_name(
976 &mut self,
977 handle: Handle<crate::GlobalVariable>,
978 global: &crate::GlobalVariable,
979 ) -> BackendResult {
980 match (&global.binding, global.space) {
981 (&Some(ref br), _) => write!(
982 self.out,
983 "_group_{}_binding_{}_{}",
984 br.group,
985 br.binding,
986 self.entry_point.stage.to_str()
987 )?,
988 (&None, crate::AddressSpace::Immediate) => write!(
989 self.out,
990 "_immediates_binding_{}",
991 self.entry_point.stage.to_str()
992 )?,
993 (&None, _) => write!(
994 self.out,
995 "{}",
996 &self.names[&NameKey::GlobalVariable(handle)]
997 )?,
998 }
999
1000 Ok(())
1001 }
1002
    /// Write a GLSL global that will carry a Naga entry point's argument or return value.
    ///
    /// A Naga entry point's arguments and return value are rendered in GLSL as
    /// variables at global scope with the `in` and `out` storage qualifiers.
    /// The code we generate for `main` loads from all the `in` globals into
    /// appropriately named locals. Before it returns, `main` assigns the
    /// components of its return value into all the `out` globals.
    ///
    /// This function writes a declaration for one such GLSL global,
    /// representing a value passed into or returned from [`self.entry_point`]
    /// that has a [`Location`] binding. The global's name is generated based on
    /// the location index and the shader stages being connected; see
    /// [`VaryingName`]. This means we don't need to know the names of
    /// arguments, just their types and bindings.
    ///
    /// Emit nothing for entry point arguments or return values with [`BuiltIn`]
    /// bindings; `main` will read from or assign to the appropriate GLSL
    /// special variable; these are pre-declared. As an exception, we do declare
    /// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
    /// needed.
    ///
    /// Use `output` together with [`self.entry_point.stage`] to determine which
    /// shader stages are being connected, and choose the `in` or `out` storage
    /// qualifier.
    ///
    /// [`self.entry_point`]: Writer::entry_point
    /// [`self.entry_point.stage`]: crate::EntryPoint::stage
    /// [`Location`]: crate::Binding::Location
    /// [`BuiltIn`]: crate::Binding::BuiltIn
    fn write_varying(
        &mut self,
        binding: Option<&crate::Binding>,
        ty: Handle<crate::Type>,
        output: bool,
    ) -> Result<(), Error> {
        // For a struct, emit a separate global for each member with a binding.
        if let TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
            for member in members {
                self.write_varying(member.binding.as_ref(), member.ty, output)?;
            }
            return Ok(());
        }

        // A non-struct value without a binding has nothing to declare.
        let binding = match binding {
            None => return Ok(()),
            Some(binding) => binding,
        };

        let (location, interpolation, sampling, blend_src) = match *binding {
            crate::Binding::Location {
                location,
                interpolation,
                sampling,
                blend_src,
                per_primitive: _,
            } => (location, interpolation, sampling, blend_src),
            crate::Binding::BuiltIn(built_in) => {
                // Built-ins map to pre-declared GLSL variables; only the
                // exceptions below need an explicit declaration here.
                match built_in {
                    crate::BuiltIn::Position { invariant: true } => {
                        match (self.options.version, self.entry_point.stage) {
                            (
                                Version::Embedded {
                                    version: 300,
                                    is_webgl: true,
                                },
                                ShaderStage::Fragment,
                            ) => {
                                // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
                                // OpenGL ES in general (waiting on confirmation).
                                //
                                // See https://github.com/KhronosGroup/WebGL/issues/3518
                            }
                            _ => {
                                writeln!(
                                    self.out,
                                    "invariant {};",
                                    glsl_built_in(
                                        built_in,
                                        VaryingOptions::from_writer_options(self.options, output)
                                    )
                                )?;
                            }
                        }
                    }
                    crate::BuiltIn::ClipDistances => {
                        // Re-declare `gl_ClipDistance` with number of clip planes.
                        let TypeInner::Array { size, .. } = self.module.types[ty].inner else {
                            unreachable!();
                        };
                        let proc::IndexableLength::Known(size) =
                            size.resolve(self.module.to_ctx())?
                        else {
                            unreachable!();
                        };
                        self.clip_distance_count = size;
                        writeln!(self.out, "out float gl_ClipDistance[{size}];")?;
                    }
                    _ => {}
                }
                return Ok(());
            }
        };

        // Write the interpolation modifier if needed
        //
        // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
        // shaders' input globals or vertex shaders' output globals.
        let emit_interpolation_and_auxiliary = match self.entry_point.stage {
            ShaderStage::Vertex => output,
            ShaderStage::Fragment => !output,
            ShaderStage::Compute => false,
            // These stages are not emitted by this backend, so reaching this
            // point with one of them is a bug.
            ShaderStage::Task
            | ShaderStage::Mesh
            | ShaderStage::RayGeneration
            | ShaderStage::AnyHit
            | ShaderStage::ClosestHit
            | ShaderStage::Miss => unreachable!(),
        };

        // Write the I/O locations, if allowed
        let io_location = if self.options.version.supports_explicit_locations()
            || !emit_interpolation_and_auxiliary
        {
            if self.options.version.supports_io_locations() {
                if let Some(blend_src) = blend_src {
                    write!(
                        self.out,
                        "layout(location = {location}, index = {blend_src}) "
                    )?;
                } else {
                    write!(self.out, "layout(location = {location}) ")?;
                }
                None
            } else {
                // No `layout(location = ...)` syntax available: remember the
                // location so it can be reported through reflection instead
                // (recorded into `self.varying` below).
                Some(VaryingLocation {
                    location,
                    index: blend_src.unwrap_or(0),
                })
            }
        } else {
            None
        };

        // Write the interpolation qualifier.
        if let Some(interp) = interpolation {
            if emit_interpolation_and_auxiliary {
                write!(self.out, "{} ", glsl_interpolation(interp))?;
            }
        }

        // Write the sampling auxiliary qualifier.
        //
        // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
        // immediately before the `in` / `out` qualifier, so we'll just follow that rule
        // here, regardless of the version.
        if let Some(sampling) = sampling {
            if emit_interpolation_and_auxiliary {
                if let Some(qualifier) = glsl_sampling(sampling)? {
                    write!(self.out, "{qualifier} ")?;
                }
            }
        }

        // Write the input/output qualifier.
        write!(self.out, "{} ", if output { "out" } else { "in" })?;

        // Write the type
        // `write_type` adds no leading or trailing spaces
        self.write_type(ty)?;

        // Finally write the global name and end the global with a `;` and a newline
        // Leading space is important
        let vname = VaryingName {
            binding: &crate::Binding::Location {
                location,
                interpolation: None,
                sampling: None,
                blend_src,
                per_primitive: false,
            },
            stage: self.entry_point.stage,
            options: VaryingOptions::from_writer_options(self.options, output),
        };
        writeln!(self.out, " {vname};")?;

        // Record the location for reflection when it couldn't be written as a
        // `layout(...)` qualifier above.
        if let Some(location) = io_location {
            self.varying.insert(vname.to_string(), location);
        }

        Ok(())
    }
1194
    /// Helper method used to write functions (both entry points and regular functions)
    ///
    /// An entry point is always written as `void main()`: its Naga arguments
    /// are composed from the `in` globals at the top of the body, and its
    /// result is written to `out` globals by the `Return` statement handling.
    ///
    /// # Notes
    /// Adds a newline
    fn write_function(
        &mut self,
        ty: back::FunctionType,
        func: &crate::Function,
        info: &valid::FunctionInfo,
    ) -> BackendResult {
        // Create a function context for the function being written
        let ctx = back::FunctionCtx {
            ty,
            info,
            expressions: &func.expressions,
            named_expressions: &func.named_expressions,
        };

        // Reset per-function state and compute which expressions must be
        // baked into named temporaries for this function.
        self.named_expressions.clear();
        self.update_expressions_to_bake(func, info);

        // Write the function header
        //
        // glsl headers are the same as in c:
        // `ret_type name(args)`
        // `ret_type` is the return type
        // `name` is the function name
        // `args` is a comma separated list of `type name`
        // | - `type` is the argument type
        // | - `name` is the argument name

        // Start by writing the return type if any otherwise write void
        // This is the only place where `void` is a valid type
        // (though it's more a keyword than a type)
        if let back::FunctionType::EntryPoint(_) = ctx.ty {
            write!(self.out, "void")?;
        } else if let Some(ref result) = func.result {
            self.write_type(result.ty)?;
            if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
                self.write_array_size(base, size)?
            }
        } else {
            write!(self.out, "void")?;
        }

        // Write the function name and open parentheses for the argument list
        let function_name = match ctx.ty {
            back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
            back::FunctionType::EntryPoint(_) => "main",
        };
        write!(self.out, " {function_name}(")?;

        // Write the comma separated argument list
        //
        // We need access to `Self` here so we use the reference passed to the closure as an
        // argument instead of capturing as that would cause a borrow checker error
        let arguments = match ctx.ty {
            // Entry points take no GLSL parameters: their Naga arguments are
            // read from `in` globals inside the body instead (see below).
            back::FunctionType::EntryPoint(_) => &[][..],
            back::FunctionType::Function(_) => &func.arguments,
        };
        // Arguments of sampler type are skipped; they are not written as GLSL
        // parameters.
        let arguments: Vec<_> = arguments
            .iter()
            .enumerate()
            .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
                TypeInner::Sampler { .. } => false,
                _ => true,
            })
            .collect();
        self.write_slice(&arguments, |this, _, &(i, arg)| {
            // Write the argument type
            match this.module.types[arg.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    dim,
                    arrayed,
                    class,
                } => {
                    // Write the storage format if needed
                    if let TypeInner::Image {
                        class: crate::ImageClass::Storage { format, .. },
                        ..
                    } = this.module.types[arg.ty].inner
                    {
                        write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
                    }

                    // write the type
                    //
                    // This is why we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    this.write_image_type(dim, arrayed, class)?;
                }
                TypeInner::Pointer { base, .. } => {
                    // write parameter qualifiers
                    // Pointer arguments are passed as `inout` since GLSL has
                    // no pointers.
                    write!(this.out, "inout ")?;
                    this.write_type(base)?;
                }
                // All other types are written by `write_type`
                _ => {
                    this.write_type(arg.ty)?;
                }
            }

            // Write the argument name
            // The leading space is important
            write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;

            // Write array size
            match this.module.types[arg.ty].inner {
                TypeInner::Array { base, size, .. } => {
                    this.write_array_size(base, size)?;
                }
                TypeInner::Pointer { base, .. } => {
                    // `inout` array parameters also need their size written.
                    if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
                        this.write_array_size(base, size)?;
                    }
                }
                _ => {}
            }

            Ok(())
        })?;

        // Close the parentheses and open braces to start the function body
        writeln!(self.out, ") {{")?;

        if self.options.zero_initialize_workgroup_memory
            && ctx.ty.is_compute_like_entry_point(self.module)
        {
            self.write_workgroup_variables_initialization(&ctx)?;
        }

        // Compose the function arguments from globals, in case of an entry point.
        // Each argument is loaded from the varying globals declared by
        // `write_varying`; struct arguments are reassembled member by member
        // with a constructor expression.
        if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
            let stage = self.module.entry_points[ep_index as usize].stage;
            for (index, arg) in func.arguments.iter().enumerate() {
                write!(self.out, "{}", back::INDENT)?;
                self.write_type(arg.ty)?;
                let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
                write!(self.out, " {name}")?;
                write!(self.out, " = ")?;
                match self.module.types[arg.ty].inner {
                    TypeInner::Struct { ref members, .. } => {
                        self.write_type(arg.ty)?;
                        write!(self.out, "(")?;
                        for (index, member) in members.iter().enumerate() {
                            let varying_name = VaryingName {
                                binding: member.binding.as_ref().unwrap(),
                                stage,
                                options: VaryingOptions::from_writer_options(self.options, false),
                            };
                            if index != 0 {
                                write!(self.out, ", ")?;
                            }
                            write!(self.out, "{varying_name}")?;
                        }
                        writeln!(self.out, ");")?;
                    }
                    _ => {
                        let varying_name = VaryingName {
                            binding: arg.binding.as_ref().unwrap(),
                            stage,
                            options: VaryingOptions::from_writer_options(self.options, false),
                        };
                        writeln!(self.out, "{varying_name};")?;
                    }
                }
            }
        }

        // Write all function locals
        // Locals are `type name (= init)?;` where the init part (including the =) are optional
        //
        // Always adds a newline
        for (handle, local) in func.local_variables.iter() {
            // Write indentation (only for readability) and the type
            // `write_type` adds no trailing space
            write!(self.out, "{}", back::INDENT)?;
            self.write_type(local.ty)?;

            // Write the local name
            // The leading space is important
            write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
            // Write size for array type
            if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
                self.write_array_size(base, size)?;
            }
            // Write the local initializer if needed
            if let Some(init) = local.init {
                // Write the equals sign only if there's an initializer
                // The leading and trailing spaces aren't needed but help with readability
                write!(self.out, " = ")?;

                // Write the constant
                // `write_constant` adds no trailing or leading space/newline
                self.write_expr(init, &ctx)?;
            } else if is_value_init_supported(self.module, local.ty) {
                // Zero-initialize locals where possible
                // (see `is_value_init_supported`).
                write!(self.out, " = ")?;
                self.write_zero_init_value(local.ty)?;
            }

            // Finish the local with `;` and add a newline (only for readability)
            writeln!(self.out, ";")?
        }

        // Write the function body (statement list)
        for sta in func.body.iter() {
            // Write a statement, the indentation should always be 1 when writing the function body
            // `write_stmt` adds a newline
            self.write_stmt(sta, &ctx, back::Level(1))?;
        }

        // Close braces and add a newline
        writeln!(self.out, "}}")?;

        Ok(())
    }
1413
1414 fn write_workgroup_variables_initialization(
1415 &mut self,
1416 ctx: &back::FunctionCtx,
1417 ) -> BackendResult {
1418 let mut vars = self
1419 .module
1420 .global_variables
1421 .iter()
1422 .filter(|&(handle, var)| {
1423 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1424 })
1425 .peekable();
1426
1427 if vars.peek().is_some() {
1428 let level = back::Level(1);
1429
1430 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1431
1432 for (handle, var) in vars {
1433 let name = &self.names[&NameKey::GlobalVariable(handle)];
1434 write!(self.out, "{}{} = ", level.next(), name)?;
1435 self.write_zero_init_value(var.ty)?;
1436 writeln!(self.out, ";")?;
1437 }
1438
1439 writeln!(self.out, "{level}}}")?;
1440 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1441 }
1442
1443 Ok(())
1444 }
1445
1446 /// Write a list of comma separated `T` values using a writer function `F`.
1447 ///
1448 /// The writer function `F` receives a mutable reference to `self` that if needed won't cause
1449 /// borrow checker issues (using for example a closure with `self` will cause issues), the
1450 /// second argument is the 0 based index of the element on the list, and the last element is
1451 /// a reference to the element `T` being written
1452 ///
1453 /// # Notes
1454 /// - Adds no newlines or leading/trailing whitespace
1455 /// - The last element won't have a trailing `,`
1456 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
1457 &mut self,
1458 data: &[T],
1459 mut f: F,
1460 ) -> BackendResult {
1461 // Loop through `data` invoking `f` for each element
1462 for (index, item) in data.iter().enumerate() {
1463 if index != 0 {
1464 write!(self.out, ", ")?;
1465 }
1466 f(self, index as u32, item)?;
1467 }
1468
1469 Ok(())
1470 }
1471
1472 /// Helper method used to write global constants
1473 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
1474 write!(self.out, "const ")?;
1475 let constant = &self.module.constants[handle];
1476 self.write_type(constant.ty)?;
1477 let name = &self.names[&NameKey::Constant(handle)];
1478 write!(self.out, " {name}")?;
1479 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
1480 self.write_array_size(base, size)?;
1481 }
1482 write!(self.out, " = ")?;
1483 self.write_const_expr(constant.init, &self.module.global_expressions)?;
1484 writeln!(self.out, ";")?;
1485 Ok(())
1486 }
1487
1488 /// Helper method used to output a dot product as an arithmetic expression
1489 ///
1490 fn write_dot_product(
1491 &mut self,
1492 arg: Handle<crate::Expression>,
1493 arg1: Handle<crate::Expression>,
1494 size: usize,
1495 ctx: &back::FunctionCtx,
1496 ) -> BackendResult {
1497 // Write parentheses around the dot product expression to prevent operators
1498 // with different precedences from applying earlier.
1499 write!(self.out, "(")?;
1500
1501 // Cycle through all the components of the vector
1502 for index in 0..size {
1503 let component = back::COMPONENTS[index];
1504 // Write the addition to the previous product
1505 // This will print an extra '+' at the beginning but that is fine in glsl
1506 write!(self.out, " + ")?;
1507 // Write the first vector expression, this expression is marked to be
1508 // cached so unless it can't be cached (for example, it's a Constant)
1509 // it shouldn't produce large expressions.
1510 self.write_expr(arg, ctx)?;
1511 // Access the current component on the first vector
1512 write!(self.out, ".{component} * ")?;
1513 // Write the second vector expression, this expression is marked to be
1514 // cached so unless it can't be cached (for example, it's a Constant)
1515 // it shouldn't produce large expressions.
1516 self.write_expr(arg1, ctx)?;
1517 // Access the current component on the second vector
1518 write!(self.out, ".{component}")?;
1519 }
1520
1521 write!(self.out, ")")?;
1522 Ok(())
1523 }
1524
1525 /// Helper method used to write structs
1526 ///
1527 /// # Notes
1528 /// Ends in a newline
1529 fn write_struct_body(
1530 &mut self,
1531 handle: Handle<crate::Type>,
1532 members: &[crate::StructMember],
1533 ) -> BackendResult {
1534 // glsl structs are written as in C
1535 // `struct name() { members };`
1536 // | `struct` is a keyword
1537 // | `name` is the struct name
1538 // | `members` is a semicolon separated list of `type name`
1539 // | `type` is the member type
1540 // | `name` is the member name
1541 writeln!(self.out, "{{")?;
1542
1543 for (idx, member) in members.iter().enumerate() {
1544 // The indentation is only for readability
1545 write!(self.out, "{}", back::INDENT)?;
1546
1547 match self.module.types[member.ty].inner {
1548 TypeInner::Array {
1549 base,
1550 size,
1551 stride: _,
1552 } => {
1553 self.write_type(base)?;
1554 write!(
1555 self.out,
1556 " {}",
1557 &self.names[&NameKey::StructMember(handle, idx as u32)]
1558 )?;
1559 // Write [size]
1560 self.write_array_size(base, size)?;
1561 // Newline is important
1562 writeln!(self.out, ";")?;
1563 }
1564 _ => {
1565 // Write the member type
1566 // Adds no trailing space
1567 self.write_type(member.ty)?;
1568
1569 // Write the member name and put a semicolon
1570 // The leading space is important
1571 // All members must have a semicolon even the last one
1572 writeln!(
1573 self.out,
1574 " {};",
1575 &self.names[&NameKey::StructMember(handle, idx as u32)]
1576 )?;
1577 }
1578 }
1579 }
1580
1581 write!(self.out, "}}")?;
1582 Ok(())
1583 }
1584
1585 /// Helper method used to write statements
1586 ///
1587 /// # Notes
1588 /// Always adds a newline
1589 fn write_stmt(
1590 &mut self,
1591 sta: &crate::Statement,
1592 ctx: &back::FunctionCtx,
1593 level: back::Level,
1594 ) -> BackendResult {
1595 use crate::Statement;
1596
1597 match *sta {
1598 // This is where we can generate intermediate constants for some expression types.
1599 Statement::Emit(ref range) => {
1600 for handle in range.clone() {
1601 let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
1602 let expr_name = if ptr_class.is_some() {
1603 // GLSL can't save a pointer-valued expression in a variable,
1604 // but we shouldn't ever need to: they should never be named expressions,
1605 // and none of the expression types flagged by bake_ref_count can be pointer-valued.
1606 None
1607 } else if let Some(name) = ctx.named_expressions.get(&handle) {
1608 // Front end provides names for all variables at the start of writing.
1609 // But we write them to step by step. We need to recache them
1610 // Otherwise, we could accidentally write variable name instead of full expression.
1611 // Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
1612 Some(self.namer.call(name))
1613 } else if self.need_bake_expressions.contains(&handle) {
1614 Some(Baked(handle).to_string())
1615 } else {
1616 None
1617 };
1618
1619 // If we are going to write an `ImageLoad` next and the target image
1620 // is sampled and we are using the `Restrict` policy for bounds
1621 // checking images we need to write a local holding the clamped lod.
1622 if let crate::Expression::ImageLoad {
1623 image,
1624 level: Some(level_expr),
1625 ..
1626 } = ctx.expressions[handle]
1627 {
1628 if let TypeInner::Image {
1629 class: crate::ImageClass::Sampled { .. },
1630 ..
1631 } = *ctx.resolve_type(image, &self.module.types)
1632 {
1633 if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
1634 write!(self.out, "{level}")?;
1635 self.write_clamped_lod(ctx, handle, image, level_expr)?
1636 }
1637 }
1638 }
1639
1640 if let Some(name) = expr_name {
1641 write!(self.out, "{level}")?;
1642 self.write_named_expr(handle, name, handle, ctx)?;
1643 }
1644 }
1645 }
1646 // Blocks are simple we just need to write the block statements between braces
1647 // We could also just print the statements but this is more readable and maps more
1648 // closely to the IR
1649 Statement::Block(ref block) => {
1650 write!(self.out, "{level}")?;
1651 writeln!(self.out, "{{")?;
1652 for sta in block.iter() {
1653 // Increase the indentation to help with readability
1654 self.write_stmt(sta, ctx, level.next())?
1655 }
1656 writeln!(self.out, "{level}}}")?
1657 }
1658 // Ifs are written as in C:
1659 // ```
1660 // if(condition) {
1661 // accept
1662 // } else {
1663 // reject
1664 // }
1665 // ```
1666 Statement::If {
1667 condition,
1668 ref accept,
1669 ref reject,
1670 } => {
1671 write!(self.out, "{level}")?;
1672 write!(self.out, "if (")?;
1673 self.write_expr(condition, ctx)?;
1674 writeln!(self.out, ") {{")?;
1675
1676 for sta in accept {
1677 // Increase indentation to help with readability
1678 self.write_stmt(sta, ctx, level.next())?;
1679 }
1680
1681 // If there are no statements in the reject block we skip writing it
1682 // This is only for readability
1683 if !reject.is_empty() {
1684 writeln!(self.out, "{level}}} else {{")?;
1685
1686 for sta in reject {
1687 // Increase indentation to help with readability
1688 self.write_stmt(sta, ctx, level.next())?;
1689 }
1690 }
1691
1692 writeln!(self.out, "{level}}}")?
1693 }
1694 // Switch are written as in C:
1695 // ```
1696 // switch (selector) {
1697 // // Fallthrough
1698 // case label:
1699 // block
1700 // // Non fallthrough
1701 // case label:
1702 // block
1703 // break;
1704 // default:
1705 // block
1706 // }
1707 // ```
1708 // Where the `default` case happens isn't important but we put it last
1709 // so that we don't need to print a `break` for it
1710 Statement::Switch {
1711 selector,
1712 ref cases,
1713 } => {
1714 let l2 = level.next();
1715 // Some GLSL consumers may not handle switches with a single
1716 // body correctly: See wgpu#4514. Write such switch statements
1717 // as a `do {} while(false);` loop instead.
1718 //
1719 // Since doing so may inadvertently capture `continue`
1720 // statements in the switch body, we must apply continue
1721 // forwarding. See the `naga::back::continue_forward` module
1722 // docs for details.
1723 let one_body = cases
1724 .iter()
1725 .rev()
1726 .skip(1)
1727 .all(|case| case.fall_through && case.body.is_empty());
1728 if one_body {
1729 // Unlike HLSL, in GLSL `continue_ctx` only needs to know
1730 // about [`Switch`] statements that are being rendered as
1731 // `do-while` loops.
1732 if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
1733 writeln!(self.out, "{level}bool {variable} = false;",)?;
1734 };
1735 writeln!(self.out, "{level}do {{")?;
1736 // Note: Expressions have no side-effects so we don't need to emit selector expression.
1737
1738 // Body
1739 if let Some(case) = cases.last() {
1740 for sta in case.body.iter() {
1741 self.write_stmt(sta, ctx, l2)?;
1742 }
1743 }
1744 // End do-while
1745 writeln!(self.out, "{level}}} while(false);")?;
1746
1747 // Handle any forwarded continue statements.
1748 use back::continue_forward::ExitControlFlow;
1749 let op = match self.continue_ctx.exit_switch() {
1750 ExitControlFlow::None => None,
1751 ExitControlFlow::Continue { variable } => Some(("continue", variable)),
1752 ExitControlFlow::Break { variable } => Some(("break", variable)),
1753 };
1754 if let Some((control_flow, variable)) = op {
1755 writeln!(self.out, "{level}if ({variable}) {{")?;
1756 writeln!(self.out, "{l2}{control_flow};")?;
1757 writeln!(self.out, "{level}}}")?;
1758 }
1759 } else {
1760 // Start the switch
1761 write!(self.out, "{level}")?;
1762 write!(self.out, "switch(")?;
1763 self.write_expr(selector, ctx)?;
1764 writeln!(self.out, ") {{")?;
1765
1766 // Write all cases
1767 for case in cases {
1768 match case.value {
1769 crate::SwitchValue::I32(value) => {
1770 write!(self.out, "{l2}case {value}:")?
1771 }
1772 crate::SwitchValue::U32(value) => {
1773 write!(self.out, "{l2}case {value}u:")?
1774 }
1775 crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
1776 }
1777
1778 let write_block_braces = !(case.fall_through && case.body.is_empty());
1779 if write_block_braces {
1780 writeln!(self.out, " {{")?;
1781 } else {
1782 writeln!(self.out)?;
1783 }
1784
1785 for sta in case.body.iter() {
1786 self.write_stmt(sta, ctx, l2.next())?;
1787 }
1788
1789 if !case.fall_through && case.body.last().is_none_or(|s| !s.is_terminator())
1790 {
1791 writeln!(self.out, "{}break;", l2.next())?;
1792 }
1793
1794 if write_block_braces {
1795 writeln!(self.out, "{l2}}}")?;
1796 }
1797 }
1798
1799 writeln!(self.out, "{level}}}")?
1800 }
1801 }
1802 // Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
1803 // while true loop and appending the continuing block to the body resulting on:
1804 // ```
1805 // bool loop_init = true;
1806 // while(true) {
1807 // if (!loop_init) { <continuing> }
1808 // loop_init = false;
1809 // <body>
1810 // }
1811 // ```
1812 Statement::Loop {
1813 ref body,
1814 ref continuing,
1815 break_if,
1816 } => {
1817 self.continue_ctx.enter_loop();
1818 if !continuing.is_empty() || break_if.is_some() {
1819 let gate_name = self.namer.call("loop_init");
1820 writeln!(self.out, "{level}bool {gate_name} = true;")?;
1821 writeln!(self.out, "{level}while(true) {{")?;
1822 let l2 = level.next();
1823 let l3 = l2.next();
1824 writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
1825 for sta in continuing {
1826 self.write_stmt(sta, ctx, l3)?;
1827 }
1828 if let Some(condition) = break_if {
1829 write!(self.out, "{l3}if (")?;
1830 self.write_expr(condition, ctx)?;
1831 writeln!(self.out, ") {{")?;
1832 writeln!(self.out, "{}break;", l3.next())?;
1833 writeln!(self.out, "{l3}}}")?;
1834 }
1835 writeln!(self.out, "{l2}}}")?;
1836 writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
1837 } else {
1838 writeln!(self.out, "{level}while(true) {{")?;
1839 }
1840 for sta in body {
1841 self.write_stmt(sta, ctx, level.next())?;
1842 }
1843 writeln!(self.out, "{level}}}")?;
1844 self.continue_ctx.exit_loop();
1845 }
1846 // Break, continue and return as written as in C
1847 // `break;`
1848 Statement::Break => {
1849 write!(self.out, "{level}")?;
1850 writeln!(self.out, "break;")?
1851 }
1852 // `continue;`
1853 Statement::Continue => {
1854 // Sometimes we must render a `Continue` statement as a `break`.
1855 // See the docs for the `back::continue_forward` module.
1856 if let Some(variable) = self.continue_ctx.continue_encountered() {
1857 writeln!(self.out, "{level}{variable} = true;",)?;
1858 writeln!(self.out, "{level}break;")?
1859 } else {
1860 writeln!(self.out, "{level}continue;")?
1861 }
1862 }
1863 // `return expr;`, `expr` is optional
1864 Statement::Return { value } => {
1865 write!(self.out, "{level}")?;
1866 match ctx.ty {
1867 back::FunctionType::Function(_) => {
1868 write!(self.out, "return")?;
1869 // Write the expression to be returned if needed
1870 if let Some(expr) = value {
1871 write!(self.out, " ")?;
1872 self.write_expr(expr, ctx)?;
1873 }
1874 writeln!(self.out, ";")?;
1875 }
1876 back::FunctionType::EntryPoint(ep_index) => {
1877 let mut has_point_size = false;
1878 let ep = &self.module.entry_points[ep_index as usize];
1879 if let Some(ref result) = ep.function.result {
1880 let value = value.unwrap();
1881 match self.module.types[result.ty].inner {
1882 TypeInner::Struct { ref members, .. } => {
1883 let temp_struct_name = match ctx.expressions[value] {
1884 crate::Expression::Compose { .. } => {
1885 let return_struct = "_tmp_return";
1886 write!(
1887 self.out,
1888 "{} {} = ",
1889 &self.names[&NameKey::Type(result.ty)],
1890 return_struct
1891 )?;
1892 self.write_expr(value, ctx)?;
1893 writeln!(self.out, ";")?;
1894 write!(self.out, "{level}")?;
1895 Some(return_struct)
1896 }
1897 _ => None,
1898 };
1899
1900 for (index, member) in members.iter().enumerate() {
1901 if let Some(crate::Binding::BuiltIn(
1902 crate::BuiltIn::PointSize,
1903 )) = member.binding
1904 {
1905 has_point_size = true;
1906 }
1907
1908 let varying_name = VaryingName {
1909 binding: member.binding.as_ref().unwrap(),
1910 stage: ep.stage,
1911 options: VaryingOptions::from_writer_options(
1912 self.options,
1913 true,
1914 ),
1915 };
1916 write!(self.out, "{varying_name} = ")?;
1917
1918 if let Some(struct_name) = temp_struct_name {
1919 write!(self.out, "{struct_name}")?;
1920 } else {
1921 self.write_expr(value, ctx)?;
1922 }
1923
1924 // Write field name
1925 writeln!(
1926 self.out,
1927 ".{};",
1928 &self.names
1929 [&NameKey::StructMember(result.ty, index as u32)]
1930 )?;
1931 write!(self.out, "{level}")?;
1932 }
1933 }
1934 _ => {
1935 let name = VaryingName {
1936 binding: result.binding.as_ref().unwrap(),
1937 stage: ep.stage,
1938 options: VaryingOptions::from_writer_options(
1939 self.options,
1940 true,
1941 ),
1942 };
1943 write!(self.out, "{name} = ")?;
1944 self.write_expr(value, ctx)?;
1945 writeln!(self.out, ";")?;
1946 write!(self.out, "{level}")?;
1947 }
1948 }
1949 }
1950
1951 let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
1952 == ShaderStage::Vertex;
1953 if is_vertex_stage
1954 && self
1955 .options
1956 .writer_flags
1957 .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
1958 {
1959 writeln!(
1960 self.out,
1961 "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
1962 )?;
1963 write!(self.out, "{level}")?;
1964 }
1965
1966 if is_vertex_stage
1967 && self
1968 .options
1969 .writer_flags
1970 .contains(WriterFlags::FORCE_POINT_SIZE)
1971 && !has_point_size
1972 {
1973 writeln!(self.out, "gl_PointSize = 1.0;")?;
1974 write!(self.out, "{level}")?;
1975 }
1976 writeln!(self.out, "return;")?;
1977 }
1978 }
1979 }
1980 // This is one of the places were glsl adds to the syntax of C in this case the discard
1981 // keyword which ceases all further processing in a fragment shader, it's called OpKill
1982 // in spir-v that's why it's called `Statement::Kill`
1983 Statement::Kill => writeln!(self.out, "{level}discard;")?,
1984 Statement::ControlBarrier(flags) => {
1985 self.write_control_barrier(flags, level)?;
1986 }
1987 Statement::MemoryBarrier(flags) => {
1988 self.write_memory_barrier(flags, level)?;
1989 }
1990 // Stores in glsl are just variable assignments written as `pointer = value;`
1991 Statement::Store { pointer, value } => {
1992 write!(self.out, "{level}")?;
1993 let is_atomic_pointer = ctx
1994 .resolve_type(pointer, &self.module.types)
1995 .is_atomic_pointer(&self.module.types);
1996 if is_atomic_pointer {
1997 write!(self.out, "atomicExchange(")?;
1998 self.write_expr(pointer, ctx)?;
1999 write!(self.out, ", ")?;
2000 self.write_expr(value, ctx)?;
2001 writeln!(self.out, ");")?
2002 } else {
2003 self.write_expr(pointer, ctx)?;
2004 write!(self.out, " = ")?;
2005 self.write_expr(value, ctx)?;
2006 writeln!(self.out, ";")?
2007 }
2008 }
2009 Statement::WorkGroupUniformLoad { pointer, result } => {
2010 // GLSL doesn't have pointers, which means that this backend needs to ensure that
2011 // the actual "loading" is happening between the two barriers.
2012 // This is done in `Emit` by never emitting a variable name for pointer variables
2013 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
2014
2015 let result_name = Baked(result).to_string();
2016 write!(self.out, "{level}")?;
2017 // Expressions cannot have side effects, so just writing the expression here is fine.
2018 self.write_named_expr(pointer, result_name, result, ctx)?;
2019
2020 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
2021 }
2022 // Stores a value into an image.
2023 Statement::ImageStore {
2024 image,
2025 coordinate,
2026 array_index,
2027 value,
2028 } => {
2029 write!(self.out, "{level}")?;
2030 self.write_image_store(ctx, image, coordinate, array_index, value)?
2031 }
2032 // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
2033 Statement::Call {
2034 function,
2035 ref arguments,
2036 result,
2037 } => {
2038 write!(self.out, "{level}")?;
2039 if let Some(expr) = result {
2040 let name = Baked(expr).to_string();
2041 let result = self.module.functions[function].result.as_ref().unwrap();
2042 self.write_type(result.ty)?;
2043 write!(self.out, " {name}")?;
2044 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
2045 {
2046 self.write_array_size(base, size)?
2047 }
2048 write!(self.out, " = ")?;
2049 self.named_expressions.insert(expr, name);
2050 }
2051 write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
2052 let arguments: Vec<_> = arguments
2053 .iter()
2054 .enumerate()
2055 .filter_map(|(i, arg)| {
2056 let arg_ty = self.module.functions[function].arguments[i].ty;
2057 match self.module.types[arg_ty].inner {
2058 TypeInner::Sampler { .. } => None,
2059 _ => Some(*arg),
2060 }
2061 })
2062 .collect();
2063 self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
2064 writeln!(self.out, ");")?
2065 }
2066 Statement::Atomic {
2067 pointer,
2068 ref fun,
2069 value,
2070 result,
2071 } => {
2072 write!(self.out, "{level}")?;
2073
2074 match *fun {
2075 crate::AtomicFunction::Exchange {
2076 compare: Some(compare_expr),
2077 } => {
2078 let result_handle = result.expect("CompareExchange must have a result");
2079 let res_name = Baked(result_handle).to_string();
2080 self.write_type(ctx.info[result_handle].ty.handle().unwrap())?;
2081 write!(self.out, " {res_name};")?;
2082 write!(self.out, " {res_name}.old_value = atomicCompSwap(")?;
2083 self.write_expr(pointer, ctx)?;
2084 write!(self.out, ", ")?;
2085 self.write_expr(compare_expr, ctx)?;
2086 write!(self.out, ", ")?;
2087 self.write_expr(value, ctx)?;
2088 writeln!(self.out, ");")?;
2089
2090 write!(
2091 self.out,
2092 "{level}{res_name}.exchanged = ({res_name}.old_value == "
2093 )?;
2094 self.write_expr(compare_expr, ctx)?;
2095 writeln!(self.out, ");")?;
2096 self.named_expressions.insert(result_handle, res_name);
2097 }
2098 _ => {
2099 if let Some(result) = result {
2100 let res_name = Baked(result).to_string();
2101 self.write_type(ctx.info[result].ty.handle().unwrap())?;
2102 write!(self.out, " {res_name} = ")?;
2103 self.named_expressions.insert(result, res_name);
2104 }
2105 let fun_str = fun.to_glsl();
2106 write!(self.out, "atomic{fun_str}(")?;
2107 self.write_expr(pointer, ctx)?;
2108 write!(self.out, ", ")?;
2109 if let crate::AtomicFunction::Subtract = *fun {
2110 // Emulate `atomicSub` with `atomicAdd` by negating the value.
2111 write!(self.out, "-")?;
2112 }
2113 self.write_expr(value, ctx)?;
2114 writeln!(self.out, ");")?;
2115 }
2116 }
2117 }
2118 // Stores a value into an image.
2119 Statement::ImageAtomic {
2120 image,
2121 coordinate,
2122 array_index,
2123 fun,
2124 value,
2125 } => {
2126 write!(self.out, "{level}")?;
2127 self.write_image_atomic(ctx, image, coordinate, array_index, fun, value)?
2128 }
2129 Statement::RayQuery { .. } => unreachable!(),
2130 Statement::SubgroupBallot { result, predicate } => {
2131 write!(self.out, "{level}")?;
2132 let res_name = Baked(result).to_string();
2133 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2134 self.write_value_type(res_ty)?;
2135 write!(self.out, " {res_name} = ")?;
2136 self.named_expressions.insert(result, res_name);
2137
2138 write!(self.out, "subgroupBallot(")?;
2139 match predicate {
2140 Some(predicate) => self.write_expr(predicate, ctx)?,
2141 None => write!(self.out, "true")?,
2142 }
2143 writeln!(self.out, ");")?;
2144 }
2145 Statement::SubgroupCollectiveOperation {
2146 op,
2147 collective_op,
2148 argument,
2149 result,
2150 } => {
2151 write!(self.out, "{level}")?;
2152 let res_name = Baked(result).to_string();
2153 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2154 self.write_value_type(res_ty)?;
2155 write!(self.out, " {res_name} = ")?;
2156 self.named_expressions.insert(result, res_name);
2157
2158 match (collective_op, op) {
2159 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
2160 write!(self.out, "subgroupAll(")?
2161 }
2162 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
2163 write!(self.out, "subgroupAny(")?
2164 }
2165 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
2166 write!(self.out, "subgroupAdd(")?
2167 }
2168 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
2169 write!(self.out, "subgroupMul(")?
2170 }
2171 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
2172 write!(self.out, "subgroupMax(")?
2173 }
2174 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
2175 write!(self.out, "subgroupMin(")?
2176 }
2177 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
2178 write!(self.out, "subgroupAnd(")?
2179 }
2180 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
2181 write!(self.out, "subgroupOr(")?
2182 }
2183 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
2184 write!(self.out, "subgroupXor(")?
2185 }
2186 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
2187 write!(self.out, "subgroupExclusiveAdd(")?
2188 }
2189 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
2190 write!(self.out, "subgroupExclusiveMul(")?
2191 }
2192 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
2193 write!(self.out, "subgroupInclusiveAdd(")?
2194 }
2195 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
2196 write!(self.out, "subgroupInclusiveMul(")?
2197 }
2198 _ => unimplemented!(),
2199 }
2200 self.write_expr(argument, ctx)?;
2201 writeln!(self.out, ");")?;
2202 }
2203 Statement::SubgroupGather {
2204 mode,
2205 argument,
2206 result,
2207 } => {
2208 write!(self.out, "{level}")?;
2209 let res_name = Baked(result).to_string();
2210 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2211 self.write_value_type(res_ty)?;
2212 write!(self.out, " {res_name} = ")?;
2213 self.named_expressions.insert(result, res_name);
2214
2215 match mode {
2216 crate::GatherMode::BroadcastFirst => {
2217 write!(self.out, "subgroupBroadcastFirst(")?;
2218 }
2219 crate::GatherMode::Broadcast(_) => {
2220 write!(self.out, "subgroupBroadcast(")?;
2221 }
2222 crate::GatherMode::Shuffle(_) => {
2223 write!(self.out, "subgroupShuffle(")?;
2224 }
2225 crate::GatherMode::ShuffleDown(_) => {
2226 write!(self.out, "subgroupShuffleDown(")?;
2227 }
2228 crate::GatherMode::ShuffleUp(_) => {
2229 write!(self.out, "subgroupShuffleUp(")?;
2230 }
2231 crate::GatherMode::ShuffleXor(_) => {
2232 write!(self.out, "subgroupShuffleXor(")?;
2233 }
2234 crate::GatherMode::QuadBroadcast(_) => {
2235 write!(self.out, "subgroupQuadBroadcast(")?;
2236 }
2237 crate::GatherMode::QuadSwap(direction) => match direction {
2238 crate::Direction::X => {
2239 write!(self.out, "subgroupQuadSwapHorizontal(")?;
2240 }
2241 crate::Direction::Y => {
2242 write!(self.out, "subgroupQuadSwapVertical(")?;
2243 }
2244 crate::Direction::Diagonal => {
2245 write!(self.out, "subgroupQuadSwapDiagonal(")?;
2246 }
2247 },
2248 }
2249 self.write_expr(argument, ctx)?;
2250 match mode {
2251 crate::GatherMode::BroadcastFirst => {}
2252 crate::GatherMode::Broadcast(index)
2253 | crate::GatherMode::Shuffle(index)
2254 | crate::GatherMode::ShuffleDown(index)
2255 | crate::GatherMode::ShuffleUp(index)
2256 | crate::GatherMode::ShuffleXor(index)
2257 | crate::GatherMode::QuadBroadcast(index) => {
2258 write!(self.out, ", ")?;
2259 self.write_expr(index, ctx)?;
2260 }
2261 crate::GatherMode::QuadSwap(_) => {}
2262 }
2263 writeln!(self.out, ");")?;
2264 }
2265 Statement::CooperativeStore { .. } => unimplemented!(),
2266 Statement::RayPipelineFunction(_) => unimplemented!(),
2267 }
2268
2269 Ok(())
2270 }
2271
    /// Write a const expression.
    ///
    /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
    /// constant expression arena, as GLSL expression.
    ///
    /// This is a thin wrapper over [`Self::write_possibly_const_expr`]: it
    /// supplies the module-level expression info for type resolution and
    /// recurses into itself for subexpressions, so every subexpression is
    /// restricted to the const-expression variants as well.
    ///
    /// # Notes
    /// Adds no newlines or leading/trailing whitespace
    ///
    /// [`Expression`]: crate::Expression
    /// [`Module`]: crate::Module
    fn write_const_expr(
        &mut self,
        expr: Handle<crate::Expression>,
        arena: &crate::Arena<crate::Expression>,
    ) -> BackendResult {
        self.write_possibly_const_expr(
            expr,
            arena,
            // Const expressions are typed by the module-wide info map.
            |expr| &self.info[expr],
            // Subexpressions of a const expression are const expressions too.
            |writer, expr| writer.write_const_expr(expr, arena),
        )
    }
2294
2295 /// Write [`Expression`] variants that can occur in both runtime and const expressions.
2296 ///
2297 /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
2298 /// as as GLSL expression. This must be one of the [`Expression`] variants
2299 /// that is allowed to occur in constant expressions.
2300 ///
2301 /// Use `write_expression` to write subexpressions.
2302 ///
2303 /// This is the common code for `write_expr`, which handles arbitrary
2304 /// runtime expressions, and `write_const_expr`, which only handles
2305 /// const-expressions. Each of those callers passes itself (essentially) as
2306 /// the `write_expression` callback, so that subexpressions are restricted
2307 /// to the appropriate variants.
2308 ///
2309 /// # Notes
2310 /// Adds no newlines or leading/trailing whitespace
2311 ///
2312 /// [`Expression`]: crate::Expression
2313 fn write_possibly_const_expr<'w, I, E>(
2314 &'w mut self,
2315 expr: Handle<crate::Expression>,
2316 expressions: &crate::Arena<crate::Expression>,
2317 info: I,
2318 write_expression: E,
2319 ) -> BackendResult
2320 where
2321 I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
2322 E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
2323 {
2324 use crate::Expression;
2325
2326 match expressions[expr] {
2327 Expression::Literal(literal) => {
2328 match literal {
2329 // Floats are written using `Debug` instead of `Display` because it always appends the
2330 // decimal part even it's zero which is needed for a valid glsl float constant
2331 crate::Literal::F64(value) => write!(self.out, "{value:?}LF")?,
2332 crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
2333 crate::Literal::F16(_) => {
2334 return Err(Error::Custom("GLSL has no 16-bit float type".into()));
2335 }
2336 // Unsigned integers need a `u` at the end
2337 //
2338 // While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
2339 // always write it as the extra branch wouldn't have any benefit in readability
2340 crate::Literal::U16(value) => write!(self.out, "uint16_t({value})")?,
2341 crate::Literal::I16(value) => write!(self.out, "int16_t({value})")?,
2342 crate::Literal::U32(value) => write!(self.out, "{value}u")?,
2343 crate::Literal::I32(value) => write!(self.out, "{value}")?,
2344 crate::Literal::Bool(value) => write!(self.out, "{value}")?,
2345 crate::Literal::I64(_) => {
2346 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2347 }
2348 crate::Literal::U64(_) => {
2349 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2350 }
2351 crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
2352 return Err(Error::Custom(
2353 "Abstract types should not appear in IR presented to backends".into(),
2354 ));
2355 }
2356 }
2357 }
2358 Expression::Constant(handle) => {
2359 let constant = &self.module.constants[handle];
2360 if constant.name.is_some() {
2361 write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
2362 } else {
2363 self.write_const_expr(constant.init, &self.module.global_expressions)?;
2364 }
2365 }
2366 Expression::ZeroValue(ty) => {
2367 self.write_zero_init_value(ty)?;
2368 }
2369 Expression::Compose { ty, ref components } => {
2370 self.write_type(ty)?;
2371
2372 if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
2373 self.write_array_size(base, size)?;
2374 }
2375
2376 write!(self.out, "(")?;
2377 for (index, component) in components.iter().enumerate() {
2378 if index != 0 {
2379 write!(self.out, ", ")?;
2380 }
2381 write_expression(self, *component)?;
2382 }
2383 write!(self.out, ")")?
2384 }
2385 // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
2386 Expression::Splat { size: _, value } => {
2387 let resolved = info(expr).inner_with(&self.module.types);
2388 self.write_value_type(resolved)?;
2389 write!(self.out, "(")?;
2390 write_expression(self, value)?;
2391 write!(self.out, ")")?
2392 }
2393 _ => {
2394 return Err(Error::Override);
2395 }
2396 }
2397
2398 Ok(())
2399 }
2400
2401 /// Helper method to write expressions
2402 ///
2403 /// # Notes
2404 /// Doesn't add any newlines or leading/trailing spaces
2405 fn write_expr(
2406 &mut self,
2407 expr: Handle<crate::Expression>,
2408 ctx: &back::FunctionCtx,
2409 ) -> BackendResult {
2410 use crate::Expression;
2411
2412 if let Some(name) = self.named_expressions.get(&expr) {
2413 write!(self.out, "{name}")?;
2414 return Ok(());
2415 }
2416
2417 match ctx.expressions[expr] {
2418 Expression::Literal(_)
2419 | Expression::Constant(_)
2420 | Expression::ZeroValue(_)
2421 | Expression::Compose { .. }
2422 | Expression::Splat { .. } => {
2423 self.write_possibly_const_expr(
2424 expr,
2425 ctx.expressions,
2426 |expr| &ctx.info[expr].ty,
2427 |writer, expr| writer.write_expr(expr, ctx),
2428 )?;
2429 }
2430 Expression::Override(_) => return Err(Error::Override),
2431 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2432 Expression::Access { base, index } => {
2433 self.write_expr(base, ctx)?;
2434 write!(self.out, "[")?;
2435 self.write_expr(index, ctx)?;
2436 write!(self.out, "]")?
2437 }
2438 // `AccessIndex` is the same as `Access` except that the index is a constant and it can
2439 // be applied to structs, in this case we need to find the name of the field at that
2440 // index and write `base.field_name`
2441 Expression::AccessIndex { base, index } => {
2442 self.write_expr(base, ctx)?;
2443
2444 let base_ty_res = &ctx.info[base].ty;
2445 let mut resolved = base_ty_res.inner_with(&self.module.types);
2446 let base_ty_handle = match *resolved {
2447 TypeInner::Pointer { base, space: _ } => {
2448 resolved = &self.module.types[base].inner;
2449 Some(base)
2450 }
2451 _ => base_ty_res.handle(),
2452 };
2453
2454 match *resolved {
2455 TypeInner::Vector { .. } => {
2456 // Write vector access as a swizzle
2457 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2458 }
2459 TypeInner::Matrix { .. }
2460 | TypeInner::Array { .. }
2461 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
2462 TypeInner::Struct { .. } => {
2463 // This will never panic in case the type is a `Struct`, this is not true
2464 // for other types so we can only check while inside this match arm
2465 let ty = base_ty_handle.unwrap();
2466
2467 write!(
2468 self.out,
2469 ".{}",
2470 &self.names[&NameKey::StructMember(ty, index)]
2471 )?
2472 }
2473 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
2474 }
2475 }
2476 // `Swizzle` adds a few letters behind the dot.
2477 Expression::Swizzle {
2478 size,
2479 vector,
2480 pattern,
2481 } => {
2482 self.write_expr(vector, ctx)?;
2483 write!(self.out, ".")?;
2484 for &sc in pattern[..size as usize].iter() {
2485 self.out.write_char(back::COMPONENTS[sc as usize])?;
2486 }
2487 }
2488 // Function arguments are written as the argument name
2489 Expression::FunctionArgument(pos) => {
2490 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
2491 }
2492 // Global variables need some special work for their name but
2493 // `get_global_name` does the work for us
2494 Expression::GlobalVariable(handle) => {
2495 let global = &self.module.global_variables[handle];
2496 self.write_global_name(handle, global)?
2497 }
2498 // A local is written as it's name
2499 Expression::LocalVariable(handle) => {
2500 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
2501 }
2502 // glsl has no pointers so there's no load operation, just write the pointer expression
2503 Expression::Load { pointer } => {
2504 let ty_inner = ctx.resolve_type(pointer, &self.module.types);
2505 if ty_inner.is_atomic_pointer(&self.module.types) {
2506 let mut suffix = "";
2507 if let TypeInner::Pointer { base, .. } = *ty_inner {
2508 if let TypeInner::Atomic(scalar) = self.module.types[base].inner {
2509 suffix = match (scalar.kind, scalar.width) {
2510 (crate::ScalarKind::Uint, 8) => "ul",
2511 (crate::ScalarKind::Sint, 8) => "l",
2512 (crate::ScalarKind::Uint, _) => "u",
2513 _ => "",
2514 };
2515 }
2516 }
2517 write!(self.out, "atomicOr(")?;
2518 self.write_expr(pointer, ctx)?;
2519 write!(self.out, ", 0{})", suffix)?
2520 } else {
2521 self.write_expr(pointer, ctx)?
2522 }
2523 }
2524 // `ImageSample` is a bit complicated compared to the rest of the IR.
2525 //
2526 // First there are three variations depending whether the sample level is explicitly set,
2527 // if it's automatic or it it's bias:
2528 // `texture(image, coordinate)` - Automatic sample level
2529 // `texture(image, coordinate, bias)` - Bias sample level
2530 // `textureLod(image, coordinate, level)` - Zero or Exact sample level
2531 //
2532 // Furthermore if `depth_ref` is some we need to append it to the coordinate vector
2533 Expression::ImageSample {
2534 image,
2535 sampler: _, //TODO?
2536 gather,
2537 coordinate,
2538 array_index,
2539 offset,
2540 level,
2541 depth_ref,
2542 clamp_to_edge: _,
2543 } => {
2544 let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
2545 TypeInner::Image {
2546 dim,
2547 class,
2548 arrayed,
2549 ..
2550 } => (dim, class, arrayed),
2551 _ => unreachable!(),
2552 };
2553 let mut err = None;
2554 if dim == crate::ImageDimension::Cube {
2555 if offset.is_some() {
2556 err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
2557 }
2558 if arrayed
2559 && matches!(class, crate::ImageClass::Depth { .. })
2560 && matches!(level, crate::SampleLevel::Gradient { .. })
2561 {
2562 err = Some("samplerCubeArrayShadow don't support textureGrad");
2563 }
2564 }
2565 if gather.is_some() && level != crate::SampleLevel::Zero {
2566 err = Some("textureGather doesn't support LOD parameters");
2567 }
2568 if let Some(err) = err {
2569 return Err(Error::Custom(String::from(err)));
2570 }
2571
2572 // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
2573 // unless `GL_EXT_texture_shadow_lod` is present.
2574 // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
2575 let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
2576 || (dim == crate::ImageDimension::D2 && arrayed))
2577 && level == crate::SampleLevel::Zero
2578 && matches!(class, crate::ImageClass::Depth { .. })
2579 && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
2580
2581 // Write the function to be used depending on the sample level
2582 let fun_name = match level {
2583 crate::SampleLevel::Zero if gather.is_some() => "textureGather",
2584 crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
2585 crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
2586 crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
2587 crate::SampleLevel::Gradient { .. } => "textureGrad",
2588 };
2589 let offset_name = match offset {
2590 Some(_) => "Offset",
2591 None => "",
2592 };
2593
2594 write!(self.out, "{fun_name}{offset_name}(")?;
2595
2596 // Write the image that will be used
2597 self.write_expr(image, ctx)?;
2598 // The space here isn't required but it helps with readability
2599 write!(self.out, ", ")?;
2600
2601 // TODO: handle clamp_to_edge
2602 // https://github.com/gfx-rs/wgpu/issues/7791
2603
2604 // We need to get the coordinates vector size to later build a vector that's `size + 1`
2605 // if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
2606 let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
2607 TypeInner::Vector { size, .. } => size as u8,
2608 TypeInner::Scalar { .. } => 1,
2609 _ => unreachable!(),
2610 };
2611
2612 if array_index.is_some() {
2613 coord_dim += 1;
2614 }
2615 let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
2616 if merge_depth_ref {
2617 coord_dim += 1;
2618 }
2619
2620 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
2621 let is_vec = tex_1d_hack || coord_dim != 1;
2622 // Compose a new texture coordinates vector
2623 if is_vec {
2624 write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
2625 }
2626 self.write_expr(coordinate, ctx)?;
2627 if tex_1d_hack {
2628 write!(self.out, ", 0.0")?;
2629 }
2630 if let Some(expr) = array_index {
2631 write!(self.out, ", ")?;
2632 self.write_expr(expr, ctx)?;
2633 }
2634 if merge_depth_ref {
2635 write!(self.out, ", ")?;
2636 self.write_expr(depth_ref.unwrap(), ctx)?;
2637 }
2638 if is_vec {
2639 write!(self.out, ")")?;
2640 }
2641
2642 if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
2643 write!(self.out, ", ")?;
2644 self.write_expr(expr, ctx)?;
2645 }
2646
2647 match level {
2648 // Auto needs no more arguments
2649 crate::SampleLevel::Auto => (),
2650 // Zero needs level set to 0
2651 crate::SampleLevel::Zero => {
2652 if workaround_lod_with_grad {
2653 let vec_dim = match dim {
2654 crate::ImageDimension::Cube => 3,
2655 _ => 2,
2656 };
2657 write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
2658 } else if gather.is_none() {
2659 write!(self.out, ", 0.0")?;
2660 }
2661 }
2662 // Exact and bias require another argument
2663 crate::SampleLevel::Exact(expr) => {
2664 write!(self.out, ", ")?;
2665 self.write_expr(expr, ctx)?;
2666 }
2667 crate::SampleLevel::Bias(_) => {
2668 // This needs to be done after the offset writing
2669 }
2670 crate::SampleLevel::Gradient { x, y } => {
2671 // If we are using sampler2D to replace sampler1D, we also
2672 // need to make sure to use vec2 gradients
2673 if tex_1d_hack {
2674 write!(self.out, ", vec2(")?;
2675 self.write_expr(x, ctx)?;
2676 write!(self.out, ", 0.0)")?;
2677 write!(self.out, ", vec2(")?;
2678 self.write_expr(y, ctx)?;
2679 write!(self.out, ", 0.0)")?;
2680 } else {
2681 write!(self.out, ", ")?;
2682 self.write_expr(x, ctx)?;
2683 write!(self.out, ", ")?;
2684 self.write_expr(y, ctx)?;
2685 }
2686 }
2687 }
2688
2689 if let Some(constant) = offset {
2690 write!(self.out, ", ")?;
2691 if tex_1d_hack {
2692 write!(self.out, "ivec2(")?;
2693 }
2694 self.write_const_expr(constant, ctx.expressions)?;
2695 if tex_1d_hack {
2696 write!(self.out, ", 0)")?;
2697 }
2698 }
2699
2700 // Bias is always the last argument
2701 if let crate::SampleLevel::Bias(expr) = level {
2702 write!(self.out, ", ")?;
2703 self.write_expr(expr, ctx)?;
2704 }
2705
2706 if let (Some(component), None) = (gather, depth_ref) {
2707 write!(self.out, ", {}", component as usize)?;
2708 }
2709
2710 // End the function
2711 write!(self.out, ")")?
2712 }
2713 Expression::ImageLoad {
2714 image,
2715 coordinate,
2716 array_index,
2717 sample,
2718 level,
2719 } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
2720 // Query translates into one of the:
2721 // - textureSize/imageSize
2722 // - textureQueryLevels
2723 // - textureSamples/imageSamples
2724 Expression::ImageQuery { image, query } => {
2725 use crate::ImageClass;
2726
2727 // This will only panic if the module is invalid
2728 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
2729 TypeInner::Image {
2730 dim,
2731 arrayed: _,
2732 class,
2733 } => (dim, class),
2734 _ => unreachable!(),
2735 };
2736 let components = match dim {
2737 crate::ImageDimension::D1 => 1,
2738 crate::ImageDimension::D2 => 2,
2739 crate::ImageDimension::D3 => 3,
2740 crate::ImageDimension::Cube => 2,
2741 };
2742
2743 if let crate::ImageQuery::Size { .. } = query {
2744 match components {
2745 1 => write!(self.out, "uint(")?,
2746 _ => write!(self.out, "uvec{components}(")?,
2747 }
2748 } else {
2749 write!(self.out, "uint(")?;
2750 }
2751
2752 match query {
2753 crate::ImageQuery::Size { level } => {
2754 match class {
2755 ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
2756 write!(self.out, "textureSize(")?;
2757 self.write_expr(image, ctx)?;
2758 if let Some(expr) = level {
2759 let cast_to_int = matches!(
2760 *ctx.resolve_type(expr, &self.module.types),
2761 TypeInner::Scalar(crate::Scalar {
2762 kind: crate::ScalarKind::Uint,
2763 ..
2764 })
2765 );
2766
2767 write!(self.out, ", ")?;
2768
2769 if cast_to_int {
2770 write!(self.out, "int(")?;
2771 }
2772
2773 self.write_expr(expr, ctx)?;
2774
2775 if cast_to_int {
2776 write!(self.out, ")")?;
2777 }
2778 } else if !multi {
2779 // All textureSize calls requires an lod argument
2780 // except for multisampled samplers
2781 write!(self.out, ", 0")?;
2782 }
2783 }
2784 ImageClass::Storage { .. } => {
2785 write!(self.out, "imageSize(")?;
2786 self.write_expr(image, ctx)?;
2787 }
2788 ImageClass::External => unimplemented!(),
2789 }
2790 write!(self.out, ")")?;
2791 if components != 1 || self.options.version.is_es() {
2792 write!(self.out, ".{}", &"xyz"[..components])?;
2793 }
2794 }
2795 crate::ImageQuery::NumLevels => {
2796 write!(self.out, "textureQueryLevels(",)?;
2797 self.write_expr(image, ctx)?;
2798 write!(self.out, ")",)?;
2799 }
2800 crate::ImageQuery::NumLayers => {
2801 let fun_name = match class {
2802 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
2803 ImageClass::Storage { .. } => "imageSize",
2804 ImageClass::External => unimplemented!(),
2805 };
2806 write!(self.out, "{fun_name}(")?;
2807 self.write_expr(image, ctx)?;
2808 // All textureSize calls requires an lod argument
2809 // except for multisampled samplers
2810 if !class.is_multisampled() {
2811 write!(self.out, ", 0")?;
2812 }
2813 write!(self.out, ")")?;
2814 if components != 1 || self.options.version.is_es() {
2815 write!(self.out, ".{}", back::COMPONENTS[components])?;
2816 }
2817 }
2818 crate::ImageQuery::NumSamples => {
2819 let fun_name = match class {
2820 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
2821 "textureSamples"
2822 }
2823 ImageClass::Storage { .. } => "imageSamples",
2824 ImageClass::External => unimplemented!(),
2825 };
2826 write!(self.out, "{fun_name}(")?;
2827 self.write_expr(image, ctx)?;
2828 write!(self.out, ")",)?;
2829 }
2830 }
2831
2832 write!(self.out, ")")?;
2833 }
2834 Expression::Unary { op, expr } => {
2835 let operator_or_fn = match op {
2836 crate::UnaryOperator::Negate => "-",
2837 crate::UnaryOperator::LogicalNot => {
2838 match *ctx.resolve_type(expr, &self.module.types) {
2839 TypeInner::Vector { .. } => "not",
2840 _ => "!",
2841 }
2842 }
2843 crate::UnaryOperator::BitwiseNot => "~",
2844 };
2845 write!(self.out, "{operator_or_fn}(")?;
2846
2847 self.write_expr(expr, ctx)?;
2848
2849 write!(self.out, ")")?
2850 }
2851 // `Binary` we just write `left op right`, except when dealing with
2852 // comparison operations on vectors as they are implemented with
2853 // builtin functions.
2854 // Once again we wrap everything in parentheses to avoid precedence issues
2855 Expression::Binary {
2856 mut op,
2857 left,
2858 right,
2859 } => {
2860 // Holds `Some(function_name)` if the binary operation is
2861 // implemented as a function call
2862 use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
2863
2864 let left_inner = ctx.resolve_type(left, &self.module.types);
2865 let right_inner = ctx.resolve_type(right, &self.module.types);
2866
2867 let function = match (left_inner, right_inner) {
2868 (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
2869 Bo::Less
2870 | Bo::LessEqual
2871 | Bo::Greater
2872 | Bo::GreaterEqual
2873 | Bo::Equal
2874 | Bo::NotEqual => BinaryOperation::VectorCompare,
2875 Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
2876 Bo::And if scalar.kind == Sk::Bool => {
2877 op = crate::BinaryOperator::LogicalAnd;
2878 BinaryOperation::VectorComponentWise
2879 }
2880 Bo::InclusiveOr if scalar.kind == Sk::Bool => {
2881 op = crate::BinaryOperator::LogicalOr;
2882 BinaryOperation::VectorComponentWise
2883 }
2884 _ => BinaryOperation::Other,
2885 },
2886 _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
2887 (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
2888 Bo::Modulo => BinaryOperation::Modulo,
2889 _ => BinaryOperation::Other,
2890 },
2891 (Some(Sk::Bool), Some(Sk::Bool)) => match op {
2892 Bo::InclusiveOr => {
2893 op = crate::BinaryOperator::LogicalOr;
2894 BinaryOperation::Other
2895 }
2896 Bo::And => {
2897 op = crate::BinaryOperator::LogicalAnd;
2898 BinaryOperation::Other
2899 }
2900 _ => BinaryOperation::Other,
2901 },
2902 _ => BinaryOperation::Other,
2903 },
2904 };
2905
2906 match function {
2907 BinaryOperation::VectorCompare => {
2908 let op_str = match op {
2909 Bo::Less => "lessThan(",
2910 Bo::LessEqual => "lessThanEqual(",
2911 Bo::Greater => "greaterThan(",
2912 Bo::GreaterEqual => "greaterThanEqual(",
2913 Bo::Equal => "equal(",
2914 Bo::NotEqual => "notEqual(",
2915 _ => unreachable!(),
2916 };
2917 write!(self.out, "{op_str}")?;
2918 self.write_expr(left, ctx)?;
2919 write!(self.out, ", ")?;
2920 self.write_expr(right, ctx)?;
2921 write!(self.out, ")")?;
2922 }
2923 BinaryOperation::VectorComponentWise => {
2924 self.write_value_type(left_inner)?;
2925 write!(self.out, "(")?;
2926
2927 let size = match *left_inner {
2928 Ti::Vector { size, .. } => size,
2929 _ => unreachable!(),
2930 };
2931
2932 for i in 0..size as usize {
2933 if i != 0 {
2934 write!(self.out, ", ")?;
2935 }
2936
2937 self.write_expr(left, ctx)?;
2938 write!(self.out, ".{}", back::COMPONENTS[i])?;
2939
2940 write!(self.out, " {} ", back::binary_operation_str(op))?;
2941
2942 self.write_expr(right, ctx)?;
2943 write!(self.out, ".{}", back::COMPONENTS[i])?;
2944 }
2945
2946 write!(self.out, ")")?;
2947 }
2948 // TODO: handle undefined behavior of BinaryOperator::Modulo
2949 //
2950 // sint:
2951 // if right == 0 return 0
2952 // if left == min(type_of(left)) && right == -1 return 0
2953 // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
2954 //
2955 // uint:
2956 // if right == 0 return 0
2957 //
2958 // float:
2959 // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
2960 BinaryOperation::Modulo => {
2961 write!(self.out, "(")?;
2962
2963 // write `e1 - e2 * trunc(e1 / e2)`
2964 self.write_expr(left, ctx)?;
2965 write!(self.out, " - ")?;
2966 self.write_expr(right, ctx)?;
2967 write!(self.out, " * ")?;
2968 write!(self.out, "trunc(")?;
2969 self.write_expr(left, ctx)?;
2970 write!(self.out, " / ")?;
2971 self.write_expr(right, ctx)?;
2972 write!(self.out, ")")?;
2973
2974 write!(self.out, ")")?;
2975 }
2976 BinaryOperation::Other => {
2977 write!(self.out, "(")?;
2978
2979 self.write_expr(left, ctx)?;
2980 write!(self.out, " {} ", back::binary_operation_str(op))?;
2981 self.write_expr(right, ctx)?;
2982
2983 write!(self.out, ")")?;
2984 }
2985 }
2986 }
2987 // `Select` is written as `condition ? accept : reject`
2988 // We wrap everything in parentheses to avoid precedence issues
2989 Expression::Select {
2990 condition,
2991 accept,
2992 reject,
2993 } => {
2994 let cond_ty = ctx.resolve_type(condition, &self.module.types);
2995 let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
2996 true
2997 } else {
2998 false
2999 };
3000
3001 // TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
3002 if vec_select {
3003 // Glsl defines that for mix when the condition is a boolean the first element
3004 // is picked if condition is false and the second if condition is true
3005 write!(self.out, "mix(")?;
3006 self.write_expr(reject, ctx)?;
3007 write!(self.out, ", ")?;
3008 self.write_expr(accept, ctx)?;
3009 write!(self.out, ", ")?;
3010 self.write_expr(condition, ctx)?;
3011 } else {
3012 write!(self.out, "(")?;
3013 self.write_expr(condition, ctx)?;
3014 write!(self.out, " ? ")?;
3015 self.write_expr(accept, ctx)?;
3016 write!(self.out, " : ")?;
3017 self.write_expr(reject, ctx)?;
3018 }
3019
3020 write!(self.out, ")")?
3021 }
3022 // `Derivative` is a function call to a glsl provided function
3023 Expression::Derivative { axis, ctrl, expr } => {
3024 use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
3025 let fun_name = if self.options.version.supports_derivative_control() {
3026 match (axis, ctrl) {
3027 (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
3028 (Axis::X, Ctrl::Fine) => "dFdxFine",
3029 (Axis::X, Ctrl::None) => "dFdx",
3030 (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
3031 (Axis::Y, Ctrl::Fine) => "dFdyFine",
3032 (Axis::Y, Ctrl::None) => "dFdy",
3033 (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
3034 (Axis::Width, Ctrl::Fine) => "fwidthFine",
3035 (Axis::Width, Ctrl::None) => "fwidth",
3036 }
3037 } else {
3038 match axis {
3039 Axis::X => "dFdx",
3040 Axis::Y => "dFdy",
3041 Axis::Width => "fwidth",
3042 }
3043 };
3044 write!(self.out, "{fun_name}(")?;
3045 self.write_expr(expr, ctx)?;
3046 write!(self.out, ")")?
3047 }
3048 // `Relational` is a normal function call to some glsl provided functions
3049 Expression::Relational { fun, argument } => {
3050 use crate::RelationalFunction as Rf;
3051
3052 let fun_name = match fun {
3053 Rf::IsInf => "isinf",
3054 Rf::IsNan => "isnan",
3055 Rf::All => "all",
3056 Rf::Any => "any",
3057 };
3058 write!(self.out, "{fun_name}(")?;
3059
3060 self.write_expr(argument, ctx)?;
3061
3062 write!(self.out, ")")?
3063 }
3064 Expression::Math {
3065 fun,
3066 arg,
3067 arg1,
3068 arg2,
3069 arg3,
3070 } => {
3071 use crate::MathFunction as Mf;
3072
3073 let fun_name = match fun {
3074 // comparison
3075 Mf::Abs => "abs",
3076 Mf::Min => "min",
3077 Mf::Max => "max",
3078 Mf::Clamp => {
3079 let scalar_kind = ctx
3080 .resolve_type(arg, &self.module.types)
3081 .scalar_kind()
3082 .unwrap();
3083 match scalar_kind {
3084 crate::ScalarKind::Float => "clamp",
3085 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3086 // instruction to determine the value. This is fine according to the WGSL spec for float
3087 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3088 _ => {
3089 write!(self.out, "min(max(")?;
3090 self.write_expr(arg, ctx)?;
3091 write!(self.out, ", ")?;
3092 self.write_expr(arg1.unwrap(), ctx)?;
3093 write!(self.out, "), ")?;
3094 self.write_expr(arg2.unwrap(), ctx)?;
3095 write!(self.out, ")")?;
3096
3097 return Ok(());
3098 }
3099 }
3100 }
3101 Mf::Saturate => {
3102 write!(self.out, "clamp(")?;
3103
3104 self.write_expr(arg, ctx)?;
3105
3106 match *ctx.resolve_type(arg, &self.module.types) {
3107 TypeInner::Vector { size, .. } => write!(
3108 self.out,
3109 ", vec{}(0.0), vec{0}(1.0)",
3110 common::vector_size_str(size)
3111 )?,
3112 _ => write!(self.out, ", 0.0, 1.0")?,
3113 }
3114
3115 write!(self.out, ")")?;
3116
3117 return Ok(());
3118 }
3119 // trigonometry
3120 Mf::Cos => "cos",
3121 Mf::Cosh => "cosh",
3122 Mf::Sin => "sin",
3123 Mf::Sinh => "sinh",
3124 Mf::Tan => "tan",
3125 Mf::Tanh => "tanh",
3126 Mf::Acos => "acos",
3127 Mf::Asin => "asin",
3128 Mf::Atan => "atan",
3129 Mf::Asinh => "asinh",
3130 Mf::Acosh => "acosh",
3131 Mf::Atanh => "atanh",
3132 Mf::Radians => "radians",
3133 Mf::Degrees => "degrees",
3134 // glsl doesn't have atan2 function
3135 // use two-argument variation of the atan function
3136 Mf::Atan2 => "atan",
3137 // decomposition
3138 Mf::Ceil => "ceil",
3139 Mf::Floor => "floor",
3140 Mf::Round => "roundEven",
3141 Mf::Fract => "fract",
3142 Mf::Trunc => "trunc",
3143 Mf::Modf => MODF_FUNCTION,
3144 Mf::Frexp => FREXP_FUNCTION,
3145 Mf::Ldexp => "ldexp",
3146 // exponent
3147 Mf::Exp => "exp",
3148 Mf::Exp2 => "exp2",
3149 Mf::Log => "log",
3150 Mf::Log2 => "log2",
3151 Mf::Pow => "pow",
3152 // geometry
3153 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3154 TypeInner::Vector {
3155 scalar:
3156 crate::Scalar {
3157 kind: crate::ScalarKind::Float,
3158 ..
3159 },
3160 ..
3161 } => "dot",
3162 TypeInner::Vector { size, .. } => {
3163 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3164 }
3165 _ => unreachable!(
3166 "Correct TypeInner for dot product should be already validated"
3167 ),
3168 },
3169 fun @ (Mf::Dot4I8Packed | Mf::Dot4U8Packed) => {
3170 let conversion = match fun {
3171 Mf::Dot4I8Packed => "int",
3172 Mf::Dot4U8Packed => "",
3173 _ => unreachable!(),
3174 };
3175
3176 let arg1 = arg1.unwrap();
3177
3178 // Write parentheses around the dot product expression to prevent operators
3179 // with different precedences from applying earlier.
3180 write!(self.out, "(")?;
3181 for i in 0..4 {
3182 // Since `bitfieldExtract` only sign extends if the value is signed, we
3183 // need to convert the inputs to `int` in case of `Dot4I8Packed`. For
3184 // `Dot4U8Packed`, the code below only introduces parenthesis around
3185 // each factor, which aren't strictly needed because both operands are
3186 // baked, but which don't hurt either.
3187 write!(self.out, "bitfieldExtract({conversion}(")?;
3188 self.write_expr(arg, ctx)?;
3189 write!(self.out, "), {}, 8)", i * 8)?;
3190
3191 write!(self.out, " * bitfieldExtract({conversion}(")?;
3192 self.write_expr(arg1, ctx)?;
3193 write!(self.out, "), {}, 8)", i * 8)?;
3194
3195 if i != 3 {
3196 write!(self.out, " + ")?;
3197 }
3198 }
3199 write!(self.out, ")")?;
3200
3201 return Ok(());
3202 }
3203 Mf::Outer => "outerProduct",
3204 Mf::Cross => "cross",
3205 Mf::Distance => "distance",
3206 Mf::Length => "length",
3207 Mf::Normalize => "normalize",
3208 Mf::FaceForward => "faceforward",
3209 Mf::Reflect => "reflect",
3210 Mf::Refract => "refract",
3211 // computational
3212 Mf::Sign => "sign",
3213 Mf::Fma => {
3214 if self.options.version.supports_fma_function() {
3215 // Use the fma function when available
3216 "fma"
3217 } else {
3218 // No fma support. Transform the function call into an arithmetic expression
3219 write!(self.out, "(")?;
3220
3221 self.write_expr(arg, ctx)?;
3222 write!(self.out, " * ")?;
3223
3224 let arg1 =
3225 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3226 self.write_expr(arg1, ctx)?;
3227 write!(self.out, " + ")?;
3228
3229 let arg2 =
3230 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3231 self.write_expr(arg2, ctx)?;
3232 write!(self.out, ")")?;
3233
3234 return Ok(());
3235 }
3236 }
3237 Mf::Mix => "mix",
3238 Mf::Step => "step",
3239 Mf::SmoothStep => "smoothstep",
3240 Mf::Sqrt => "sqrt",
3241 Mf::InverseSqrt => "inversesqrt",
3242 Mf::Inverse => "inverse",
3243 Mf::Transpose => "transpose",
3244 Mf::Determinant => "determinant",
3245 Mf::QuantizeToF16 => match *ctx.resolve_type(arg, &self.module.types) {
3246 TypeInner::Scalar { .. } => {
3247 write!(self.out, "unpackHalf2x16(packHalf2x16(vec2(")?;
3248 self.write_expr(arg, ctx)?;
3249 write!(self.out, "))).x")?;
3250 return Ok(());
3251 }
3252 TypeInner::Vector {
3253 size: crate::VectorSize::Bi,
3254 ..
3255 } => {
3256 write!(self.out, "unpackHalf2x16(packHalf2x16(")?;
3257 self.write_expr(arg, ctx)?;
3258 write!(self.out, "))")?;
3259 return Ok(());
3260 }
3261 TypeInner::Vector {
3262 size: crate::VectorSize::Tri,
3263 ..
3264 } => {
3265 write!(self.out, "vec3(unpackHalf2x16(packHalf2x16(")?;
3266 self.write_expr(arg, ctx)?;
3267 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3268 self.write_expr(arg, ctx)?;
3269 write!(self.out, ".zz)).x)")?;
3270 return Ok(());
3271 }
3272 TypeInner::Vector {
3273 size: crate::VectorSize::Quad,
3274 ..
3275 } => {
3276 write!(self.out, "vec4(unpackHalf2x16(packHalf2x16(")?;
3277 self.write_expr(arg, ctx)?;
3278 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3279 self.write_expr(arg, ctx)?;
3280 write!(self.out, ".zw)))")?;
3281 return Ok(());
3282 }
3283 _ => unreachable!(
3284 "Correct TypeInner for QuantizeToF16 should be already validated"
3285 ),
3286 },
3287 // bits
3288 Mf::CountTrailingZeros => {
3289 match *ctx.resolve_type(arg, &self.module.types) {
3290 TypeInner::Vector { size, scalar, .. } => {
3291 let s = common::vector_size_str(size);
3292 if let crate::ScalarKind::Uint = scalar.kind {
3293 write!(self.out, "min(uvec{s}(findLSB(")?;
3294 self.write_expr(arg, ctx)?;
3295 write!(self.out, ")), uvec{s}(32u))")?;
3296 } else {
3297 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3298 self.write_expr(arg, ctx)?;
3299 write!(self.out, ")), uvec{s}(32u)))")?;
3300 }
3301 }
3302 TypeInner::Scalar(scalar) => {
3303 if let crate::ScalarKind::Uint = scalar.kind {
3304 write!(self.out, "min(uint(findLSB(")?;
3305 self.write_expr(arg, ctx)?;
3306 write!(self.out, ")), 32u)")?;
3307 } else {
3308 write!(self.out, "int(min(uint(findLSB(")?;
3309 self.write_expr(arg, ctx)?;
3310 write!(self.out, ")), 32u))")?;
3311 }
3312 }
3313 _ => unreachable!(),
3314 };
3315 return Ok(());
3316 }
3317 Mf::CountLeadingZeros => {
3318 if self.options.version.supports_integer_functions() {
3319 match *ctx.resolve_type(arg, &self.module.types) {
3320 TypeInner::Vector { size, scalar } => {
3321 let s = common::vector_size_str(size);
3322
3323 if let crate::ScalarKind::Uint = scalar.kind {
3324 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3325 self.write_expr(arg, ctx)?;
3326 write!(self.out, "))")?;
3327 } else {
3328 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3329 self.write_expr(arg, ctx)?;
3330 write!(self.out, "), ivec{s}(0), lessThan(")?;
3331 self.write_expr(arg, ctx)?;
3332 write!(self.out, ", ivec{s}(0)))")?;
3333 }
3334 }
3335 TypeInner::Scalar(scalar) => {
3336 if let crate::ScalarKind::Uint = scalar.kind {
3337 write!(self.out, "uint(31 - findMSB(")?;
3338 } else {
3339 write!(self.out, "(")?;
3340 self.write_expr(arg, ctx)?;
3341 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3342 }
3343
3344 self.write_expr(arg, ctx)?;
3345 write!(self.out, "))")?;
3346 }
3347 _ => unreachable!(),
3348 };
3349 } else {
3350 match *ctx.resolve_type(arg, &self.module.types) {
3351 TypeInner::Vector { size, scalar } => {
3352 let s = common::vector_size_str(size);
3353
3354 if let crate::ScalarKind::Uint = scalar.kind {
3355 write!(self.out, "uvec{s}(")?;
3356 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3357 self.write_expr(arg, ctx)?;
3358 write!(self.out, ") + 0.5)))")?;
3359 } else {
3360 write!(self.out, "ivec{s}(")?;
3361 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3362 self.write_expr(arg, ctx)?;
3363 write!(self.out, ") + 0.5)), ")?;
3364 write!(self.out, "vec{s}(0.0), lessThan(")?;
3365 self.write_expr(arg, ctx)?;
3366 write!(self.out, ", ivec{s}(0u))))")?;
3367 }
3368 }
3369 TypeInner::Scalar(scalar) => {
3370 if let crate::ScalarKind::Uint = scalar.kind {
3371 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3372 self.write_expr(arg, ctx)?;
3373 write!(self.out, ") + 0.5)))")?;
3374 } else {
3375 write!(self.out, "(")?;
3376 self.write_expr(arg, ctx)?;
3377 write!(self.out, " < 0 ? 0 : int(")?;
3378 write!(self.out, "31.0 - floor(log2(float(")?;
3379 self.write_expr(arg, ctx)?;
3380 write!(self.out, ") + 0.5))))")?;
3381 }
3382 }
3383 _ => unreachable!(),
3384 };
3385 }
3386
3387 return Ok(());
3388 }
3389 Mf::CountOneBits => "bitCount",
3390 Mf::ReverseBits => "bitfieldReverse",
3391 Mf::ExtractBits => {
3392 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3393 // to first sanitize the offset and count first. If we don't do this, AMD and Intel chips
3394 // will return out-of-spec values if the extracted range is not within the bit width.
3395 //
3396 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3397 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3398 //
3399 // w = sizeof(x) * 8
3400 // o = min(offset, w)
3401 // c = min(count, w - o)
3402 //
3403 // bitfieldExtract(x, o, c)
3404 //
3405 // extract_bits(e, min(offset, w), min(count, w - min(offset, w))))
3406 let scalar_bits = ctx
3407 .resolve_type(arg, &self.module.types)
3408 .scalar_width()
3409 .unwrap()
3410 * 8;
3411
3412 write!(self.out, "bitfieldExtract(")?;
3413 self.write_expr(arg, ctx)?;
3414 write!(self.out, ", int(min(")?;
3415 self.write_expr(arg1.unwrap(), ctx)?;
3416 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3417 self.write_expr(arg2.unwrap(), ctx)?;
3418 write!(self.out, ", {scalar_bits}u - min(")?;
3419 self.write_expr(arg1.unwrap(), ctx)?;
3420 write!(self.out, ", {scalar_bits}u))))")?;
3421
3422 return Ok(());
3423 }
3424 Mf::InsertBits => {
3425 // InsertBits has the same considerations as ExtractBits above
3426 let scalar_bits = ctx
3427 .resolve_type(arg, &self.module.types)
3428 .scalar_width()
3429 .unwrap()
3430 * 8;
3431
3432 write!(self.out, "bitfieldInsert(")?;
3433 self.write_expr(arg, ctx)?;
3434 write!(self.out, ", ")?;
3435 self.write_expr(arg1.unwrap(), ctx)?;
3436 write!(self.out, ", int(min(")?;
3437 self.write_expr(arg2.unwrap(), ctx)?;
3438 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3439 self.write_expr(arg3.unwrap(), ctx)?;
3440 write!(self.out, ", {scalar_bits}u - min(")?;
3441 self.write_expr(arg2.unwrap(), ctx)?;
3442 write!(self.out, ", {scalar_bits}u))))")?;
3443
3444 return Ok(());
3445 }
3446 Mf::FirstTrailingBit => "findLSB",
3447 Mf::FirstLeadingBit => "findMSB",
3448 // data packing
3449 Mf::Pack4x8snorm => {
3450 if self.options.version.supports_pack_unpack_4x8() {
3451 "packSnorm4x8"
3452 } else {
3453 // polyfill should go here. Needs a corresponding entry in `need_bake_expression`
3454 return Err(Error::UnsupportedExternal("packSnorm4x8".into()));
3455 }
3456 }
3457 Mf::Pack4x8unorm => {
3458 if self.options.version.supports_pack_unpack_4x8() {
3459 "packUnorm4x8"
3460 } else {
3461 return Err(Error::UnsupportedExternal("packUnorm4x8".to_owned()));
3462 }
3463 }
3464 Mf::Pack2x16snorm => {
3465 if self.options.version.supports_pack_unpack_snorm_2x16() {
3466 "packSnorm2x16"
3467 } else {
3468 return Err(Error::UnsupportedExternal("packSnorm2x16".to_owned()));
3469 }
3470 }
3471 Mf::Pack2x16unorm => {
3472 if self.options.version.supports_pack_unpack_unorm_2x16() {
3473 "packUnorm2x16"
3474 } else {
3475 return Err(Error::UnsupportedExternal("packUnorm2x16".to_owned()));
3476 }
3477 }
3478 Mf::Pack2x16float => {
3479 if self.options.version.supports_pack_unpack_half_2x16() {
3480 "packHalf2x16"
3481 } else {
3482 return Err(Error::UnsupportedExternal("packHalf2x16".to_owned()));
3483 }
3484 }
3485
3486 fun @ (Mf::Pack4xI8 | Mf::Pack4xU8 | Mf::Pack4xI8Clamp | Mf::Pack4xU8Clamp) => {
3487 let was_signed = matches!(fun, Mf::Pack4xI8 | Mf::Pack4xI8Clamp);
3488 let clamp_bounds = match fun {
3489 Mf::Pack4xI8Clamp => Some(("-128", "127")),
3490 Mf::Pack4xU8Clamp => Some(("0", "255")),
3491 _ => None,
3492 };
3493 let const_suffix = if was_signed { "" } else { "u" };
3494 if was_signed {
3495 write!(self.out, "uint(")?;
3496 }
3497 let write_arg = |this: &mut Self| -> BackendResult {
3498 if let Some((min, max)) = clamp_bounds {
3499 write!(this.out, "clamp(")?;
3500 this.write_expr(arg, ctx)?;
3501 write!(this.out, ", {min}{const_suffix}, {max}{const_suffix})")?;
3502 } else {
3503 this.write_expr(arg, ctx)?;
3504 }
3505 Ok(())
3506 };
3507 write!(self.out, "(")?;
3508 write_arg(self)?;
3509 write!(self.out, "[0] & 0xFF{const_suffix}) | ((")?;
3510 write_arg(self)?;
3511 write!(self.out, "[1] & 0xFF{const_suffix}) << 8) | ((")?;
3512 write_arg(self)?;
3513 write!(self.out, "[2] & 0xFF{const_suffix}) << 16) | ((")?;
3514 write_arg(self)?;
3515 write!(self.out, "[3] & 0xFF{const_suffix}) << 24)")?;
3516 if was_signed {
3517 write!(self.out, ")")?;
3518 }
3519
3520 return Ok(());
3521 }
3522 // data unpacking
3523 Mf::Unpack2x16float => {
3524 if self.options.version.supports_pack_unpack_half_2x16() {
3525 "unpackHalf2x16"
3526 } else {
3527 return Err(Error::UnsupportedExternal("unpackHalf2x16".into()));
3528 }
3529 }
3530 Mf::Unpack2x16snorm => {
3531 if self.options.version.supports_pack_unpack_snorm_2x16() {
3532 "unpackSnorm2x16"
3533 } else {
3534 let scale = 32767;
3535
3536 write!(self.out, "(vec2(ivec2(")?;
3537 self.write_expr(arg, ctx)?;
3538 write!(self.out, " << 16, ")?;
3539 self.write_expr(arg, ctx)?;
3540 write!(self.out, ") >> 16) / {scale}.0)")?;
3541 return Ok(());
3542 }
3543 }
3544 Mf::Unpack2x16unorm => {
3545 if self.options.version.supports_pack_unpack_unorm_2x16() {
3546 "unpackUnorm2x16"
3547 } else {
3548 let scale = 65535;
3549
3550 write!(self.out, "(vec2(")?;
3551 self.write_expr(arg, ctx)?;
3552 write!(self.out, " & 0xFFFFu, ")?;
3553 self.write_expr(arg, ctx)?;
3554 write!(self.out, " >> 16) / {scale}.0)")?;
3555 return Ok(());
3556 }
3557 }
3558 Mf::Unpack4x8snorm => {
3559 if self.options.version.supports_pack_unpack_4x8() {
3560 "unpackSnorm4x8"
3561 } else {
3562 let scale = 127;
3563
3564 write!(self.out, "(vec4(ivec4(")?;
3565 self.write_expr(arg, ctx)?;
3566 write!(self.out, " << 24, ")?;
3567 self.write_expr(arg, ctx)?;
3568 write!(self.out, " << 16, ")?;
3569 self.write_expr(arg, ctx)?;
3570 write!(self.out, " << 8, ")?;
3571 self.write_expr(arg, ctx)?;
3572 write!(self.out, ") >> 24) / {scale}.0)")?;
3573 return Ok(());
3574 }
3575 }
3576 Mf::Unpack4x8unorm => {
3577 if self.options.version.supports_pack_unpack_4x8() {
3578 "unpackUnorm4x8"
3579 } else {
3580 let scale = 255;
3581
3582 write!(self.out, "(vec4(")?;
3583 self.write_expr(arg, ctx)?;
3584 write!(self.out, " & 0xFFu, ")?;
3585 self.write_expr(arg, ctx)?;
3586 write!(self.out, " >> 8 & 0xFFu, ")?;
3587 self.write_expr(arg, ctx)?;
3588 write!(self.out, " >> 16 & 0xFFu, ")?;
3589 self.write_expr(arg, ctx)?;
3590 write!(self.out, " >> 24) / {scale}.0)")?;
3591 return Ok(());
3592 }
3593 }
3594 fun @ (Mf::Unpack4xI8 | Mf::Unpack4xU8) => {
3595 let sign_prefix = match fun {
3596 Mf::Unpack4xI8 => 'i',
3597 Mf::Unpack4xU8 => 'u',
3598 _ => unreachable!(),
3599 };
3600 write!(self.out, "{sign_prefix}vec4(")?;
3601 for i in 0..4 {
3602 write!(self.out, "bitfieldExtract(")?;
3603 // Since bitfieldExtract only sign extends if the value is signed, this
3604 // cast is needed
3605 match fun {
3606 Mf::Unpack4xI8 => {
3607 write!(self.out, "int(")?;
3608 self.write_expr(arg, ctx)?;
3609 write!(self.out, ")")?;
3610 }
3611 Mf::Unpack4xU8 => self.write_expr(arg, ctx)?,
3612 _ => unreachable!(),
3613 };
3614 write!(self.out, ", {}, 8)", i * 8)?;
3615 if i != 3 {
3616 write!(self.out, ", ")?;
3617 }
3618 }
3619 write!(self.out, ")")?;
3620
3621 return Ok(());
3622 }
3623 };
3624
3625 let extract_bits = fun == Mf::ExtractBits;
3626 let insert_bits = fun == Mf::InsertBits;
3627
3628 // Some GLSL functions always return signed integers (like findMSB),
3629 // so they need to be cast to uint if the argument is also an uint.
3630 let ret_might_need_int_to_uint = matches!(
3631 fun,
3632 Mf::FirstTrailingBit | Mf::FirstLeadingBit | Mf::CountOneBits | Mf::Abs
3633 );
3634
3635 // Some GLSL functions only accept signed integers (like abs),
3636 // so they need their argument cast from uint to int.
3637 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
3638
3639 // Check if the argument is an unsigned integer and return the vector size
3640 // in case it's a vector
3641 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
3642 TypeInner::Scalar(crate::Scalar {
3643 kind: crate::ScalarKind::Uint,
3644 ..
3645 }) => Some(None),
3646 TypeInner::Vector {
3647 scalar:
3648 crate::Scalar {
3649 kind: crate::ScalarKind::Uint,
3650 ..
3651 },
3652 size,
3653 } => Some(Some(size)),
3654 _ => None,
3655 };
3656
3657 // Cast to uint if the function needs it
3658 if ret_might_need_int_to_uint {
3659 if let Some(maybe_size) = maybe_uint_size {
3660 match maybe_size {
3661 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
3662 None => write!(self.out, "uint(")?,
3663 }
3664 }
3665 }
3666
3667 write!(self.out, "{fun_name}(")?;
3668
3669 // Cast to int if the function needs it
3670 if arg_might_need_uint_to_int {
3671 if let Some(maybe_size) = maybe_uint_size {
3672 match maybe_size {
3673 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
3674 None => write!(self.out, "int(")?,
3675 }
3676 }
3677 }
3678
3679 self.write_expr(arg, ctx)?;
3680
3681 // Close the cast from uint to int
3682 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
3683 write!(self.out, ")")?
3684 }
3685
3686 if let Some(arg) = arg1 {
3687 write!(self.out, ", ")?;
3688 if extract_bits {
3689 write!(self.out, "int(")?;
3690 self.write_expr(arg, ctx)?;
3691 write!(self.out, ")")?;
3692 } else {
3693 self.write_expr(arg, ctx)?;
3694 }
3695 }
3696 if let Some(arg) = arg2 {
3697 write!(self.out, ", ")?;
3698 if extract_bits || insert_bits {
3699 write!(self.out, "int(")?;
3700 self.write_expr(arg, ctx)?;
3701 write!(self.out, ")")?;
3702 } else {
3703 self.write_expr(arg, ctx)?;
3704 }
3705 }
3706 if let Some(arg) = arg3 {
3707 write!(self.out, ", ")?;
3708 if insert_bits {
3709 write!(self.out, "int(")?;
3710 self.write_expr(arg, ctx)?;
3711 write!(self.out, ")")?;
3712 } else {
3713 self.write_expr(arg, ctx)?;
3714 }
3715 }
3716 write!(self.out, ")")?;
3717
3718 // Close the cast from int to uint
3719 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
3720 write!(self.out, ")")?
3721 }
3722 }
3723 // `As` is always a call.
3724 // If `convert` is true the function name is the type
3725 // Else the function name is one of the glsl provided bitcast functions
3726 Expression::As {
3727 expr,
3728 kind: target_kind,
3729 convert,
3730 } => {
3731 let inner = ctx.resolve_type(expr, &self.module.types);
3732 match convert {
3733 Some(width) => {
3734 // this is similar to `write_type`, but with the target kind
3735 let scalar = glsl_scalar(crate::Scalar {
3736 kind: target_kind,
3737 width,
3738 })?;
3739 match *inner {
3740 TypeInner::Matrix { columns, rows, .. } => write!(
3741 self.out,
3742 "{}mat{}x{}",
3743 scalar.prefix, columns as u8, rows as u8
3744 )?,
3745 TypeInner::Vector { size, .. } => {
3746 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
3747 }
3748 _ => write!(self.out, "{}", scalar.full)?,
3749 }
3750
3751 write!(self.out, "(")?;
3752 self.write_expr(expr, ctx)?;
3753 write!(self.out, ")")?
3754 }
3755 None => {
3756 use crate::ScalarKind as Sk;
3757
3758 let target_vector_type = match *inner {
3759 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
3760 size,
3761 scalar: crate::Scalar {
3762 kind: target_kind,
3763 width: scalar.width,
3764 },
3765 }),
3766 _ => None,
3767 };
3768
3769 let source_kind = inner.scalar_kind().unwrap();
3770
3771 match (source_kind, target_kind, target_vector_type) {
3772 // No conversion needed
3773 (Sk::Sint, Sk::Sint, _)
3774 | (Sk::Uint, Sk::Uint, _)
3775 | (Sk::Float, Sk::Float, _)
3776 | (Sk::Bool, Sk::Bool, _) => {
3777 self.write_expr(expr, ctx)?;
3778 return Ok(());
3779 }
3780
3781 // Cast to/from floats
3782 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
3783 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
3784 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
3785 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
3786
3787 // Cast between vector types
3788 (_, _, Some(vector)) => {
3789 self.write_value_type(&vector)?;
3790 }
3791
3792 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
3793 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
3794 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
3795 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
3796 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
3797 write!(self.out, "bool")?
3798 }
3799
3800 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
3801 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
3802 };
3803
3804 write!(self.out, "(")?;
3805 self.write_expr(expr, ctx)?;
3806 write!(self.out, ")")?;
3807 }
3808 }
3809 }
3810 // These expressions never show up in `Emit`.
3811 Expression::CallResult(_)
3812 | Expression::AtomicResult { .. }
3813 | Expression::RayQueryProceedResult
3814 | Expression::WorkGroupUniformLoadResult { .. }
3815 | Expression::SubgroupOperationResult { .. }
3816 | Expression::SubgroupBallotResult => unreachable!(),
3817 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
3818 Expression::ArrayLength(expr) => {
3819 write!(self.out, "uint(")?;
3820 self.write_expr(expr, ctx)?;
3821 write!(self.out, ".length())")?
3822 }
3823 // not supported yet
3824 Expression::RayQueryGetIntersection { .. }
3825 | Expression::RayQueryVertexPositions { .. }
3826 | Expression::CooperativeLoad { .. }
3827 | Expression::CooperativeMultiplyAdd { .. } => unreachable!(),
3828 }
3829
3830 Ok(())
3831 }
3832
3833 /// Helper function to write the local holding the clamped lod
3834 fn write_clamped_lod(
3835 &mut self,
3836 ctx: &back::FunctionCtx,
3837 expr: Handle<crate::Expression>,
3838 image: Handle<crate::Expression>,
3839 level_expr: Handle<crate::Expression>,
3840 ) -> Result<(), Error> {
3841 // Define our local and start a call to `clamp`
3842 write!(
3843 self.out,
3844 "int {}{} = clamp(",
3845 Baked(expr),
3846 CLAMPED_LOD_SUFFIX
3847 )?;
3848 // Write the lod that will be clamped
3849 self.write_expr(level_expr, ctx)?;
3850 // Set the min value to 0 and start a call to `textureQueryLevels` to get
3851 // the maximum value
3852 write!(self.out, ", 0, textureQueryLevels(")?;
3853 // Write the target image as an argument to `textureQueryLevels`
3854 self.write_expr(image, ctx)?;
3855 // Close the call to `textureQueryLevels` subtract 1 from it since
3856 // the lod argument is 0 based, close the `clamp` call and end the
3857 // local declaration statement.
3858 writeln!(self.out, ") - 1);")?;
3859
3860 Ok(())
3861 }
3862
3863 // Helper method used to retrieve how many elements a coordinate vector
3864 // for the images operations need.
3865 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
3866 // openGL es doesn't have 1D images so we need workaround it
3867 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
3868 // Get how many components the coordinate vector needs for the dimensions only
3869 let tex_coord_size = match dim {
3870 crate::ImageDimension::D1 => 1,
3871 crate::ImageDimension::D2 => 2,
3872 crate::ImageDimension::D3 => 3,
3873 crate::ImageDimension::Cube => 2,
3874 };
3875 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
3876 // and another 1 if we need to workaround 1D images by making them 2D
3877 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
3878 }
3879
3880 /// Helper method to write the coordinate vector for image operations
3881 fn write_texture_coord(
3882 &mut self,
3883 ctx: &back::FunctionCtx,
3884 vector_size: u8,
3885 coordinate: Handle<crate::Expression>,
3886 array_index: Option<Handle<crate::Expression>>,
3887 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
3888 tex_1d_hack: bool,
3889 ) -> Result<(), Error> {
3890 match array_index {
3891 // If the image needs an array indice we need to add it to the end of our
3892 // coordinate vector, to do so we will use the `ivec(ivec, scalar)`
3893 // constructor notation (NOTE: the inner `ivec` can also be a scalar, this
3894 // is important for 1D arrayed images).
3895 Some(layer_expr) => {
3896 write!(self.out, "ivec{vector_size}(")?;
3897 self.write_expr(coordinate, ctx)?;
3898 write!(self.out, ", ")?;
3899 // If we are replacing sampler1D with sampler2D we also need
3900 // to add another zero to the coordinates vector for the y component
3901 if tex_1d_hack {
3902 write!(self.out, "0, ")?;
3903 }
3904 self.write_expr(layer_expr, ctx)?;
3905 write!(self.out, ")")?;
3906 }
3907 // Otherwise write just the expression (and the 1D hack if needed)
3908 None => {
3909 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
3910 TypeInner::Scalar(crate::Scalar {
3911 kind: crate::ScalarKind::Uint,
3912 ..
3913 }) => Some(None),
3914 TypeInner::Vector {
3915 size,
3916 scalar:
3917 crate::Scalar {
3918 kind: crate::ScalarKind::Uint,
3919 ..
3920 },
3921 } => Some(Some(size as u32)),
3922 _ => None,
3923 };
3924 if tex_1d_hack {
3925 write!(self.out, "ivec2(")?;
3926 } else if uvec_size.is_some() {
3927 match uvec_size {
3928 Some(None) => write!(self.out, "int(")?,
3929 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
3930 _ => {}
3931 }
3932 }
3933 self.write_expr(coordinate, ctx)?;
3934 if tex_1d_hack {
3935 write!(self.out, ", 0)")?;
3936 } else if uvec_size.is_some() {
3937 write!(self.out, ")")?;
3938 }
3939 }
3940 }
3941
3942 Ok(())
3943 }
3944
3945 /// Helper method to write the `ImageStore` statement
3946 fn write_image_store(
3947 &mut self,
3948 ctx: &back::FunctionCtx,
3949 image: Handle<crate::Expression>,
3950 coordinate: Handle<crate::Expression>,
3951 array_index: Option<Handle<crate::Expression>>,
3952 value: Handle<crate::Expression>,
3953 ) -> Result<(), Error> {
3954 use crate::ImageDimension as IDim;
3955
3956 // NOTE: openGL requires that `imageStore`s have no effects when the texel is invalid
3957 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
3958
3959 // This will only panic if the module is invalid
3960 let dim = match *ctx.resolve_type(image, &self.module.types) {
3961 TypeInner::Image { dim, .. } => dim,
3962 _ => unreachable!(),
3963 };
3964
3965 // Begin our call to `imageStore`
3966 write!(self.out, "imageStore(")?;
3967 self.write_expr(image, ctx)?;
3968 // Separate the image argument from the coordinates
3969 write!(self.out, ", ")?;
3970
3971 // openGL es doesn't have 1D images so we need workaround it
3972 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
3973 // Write the coordinate vector
3974 self.write_texture_coord(
3975 ctx,
3976 // Get the size of the coordinate vector
3977 self.get_coordinate_vector_size(dim, array_index.is_some()),
3978 coordinate,
3979 array_index,
3980 tex_1d_hack,
3981 )?;
3982
3983 // Separate the coordinate from the value to write and write the expression
3984 // of the value to write.
3985 write!(self.out, ", ")?;
3986 self.write_expr(value, ctx)?;
3987 // End the call to `imageStore` and the statement.
3988 writeln!(self.out, ");")?;
3989
3990 Ok(())
3991 }
3992
3993 /// Helper method to write the `ImageAtomic` statement
3994 fn write_image_atomic(
3995 &mut self,
3996 ctx: &back::FunctionCtx,
3997 image: Handle<crate::Expression>,
3998 coordinate: Handle<crate::Expression>,
3999 array_index: Option<Handle<crate::Expression>>,
4000 fun: crate::AtomicFunction,
4001 value: Handle<crate::Expression>,
4002 ) -> Result<(), Error> {
4003 use crate::ImageDimension as IDim;
4004
4005 // NOTE: openGL requires that `imageAtomic`s have no effects when the texel is invalid
4006 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
4007
4008 // This will only panic if the module is invalid
4009 let dim = match *ctx.resolve_type(image, &self.module.types) {
4010 TypeInner::Image { dim, .. } => dim,
4011 _ => unreachable!(),
4012 };
4013
4014 // Begin our call to `imageAtomic`
4015 let fun_str = fun.to_glsl();
4016 write!(self.out, "imageAtomic{fun_str}(")?;
4017 self.write_expr(image, ctx)?;
4018 // Separate the image argument from the coordinates
4019 write!(self.out, ", ")?;
4020
4021 // openGL es doesn't have 1D images so we need workaround it
4022 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4023 // Write the coordinate vector
4024 self.write_texture_coord(
4025 ctx,
4026 // Get the size of the coordinate vector
4027 self.get_coordinate_vector_size(dim, false),
4028 coordinate,
4029 array_index,
4030 tex_1d_hack,
4031 )?;
4032
4033 // Separate the coordinate from the value to write and write the expression
4034 // of the value to write.
4035 write!(self.out, ", ")?;
4036 self.write_expr(value, ctx)?;
4037 // End the call to `imageAtomic` and the statement.
4038 writeln!(self.out, ");")?;
4039
4040 Ok(())
4041 }
4042
4043 /// Helper method for writing an `ImageLoad` expression.
4044 #[allow(clippy::too_many_arguments)]
4045 fn write_image_load(
4046 &mut self,
4047 handle: Handle<crate::Expression>,
4048 ctx: &back::FunctionCtx,
4049 image: Handle<crate::Expression>,
4050 coordinate: Handle<crate::Expression>,
4051 array_index: Option<Handle<crate::Expression>>,
4052 sample: Option<Handle<crate::Expression>>,
4053 level: Option<Handle<crate::Expression>>,
4054 ) -> Result<(), Error> {
4055 use crate::ImageDimension as IDim;
4056
4057 // `ImageLoad` is a bit complicated.
4058 // There are two functions one for sampled
4059 // images another for storage images, the former uses `texelFetch` and the
4060 // latter uses `imageLoad`.
4061 //
4062 // Furthermore we have `level` which is always `Some` for sampled images
4063 // and `None` for storage images, so we end up with two functions:
4064 // - `texelFetch(image, coordinate, level)` for sampled images
4065 // - `imageLoad(image, coordinate)` for storage images
4066 //
4067 // Finally we also have to consider bounds checking, for storage images
4068 // this is easy since openGL requires that invalid texels always return
4069 // 0, for sampled images we need to either verify that all arguments are
4070 // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).
4071
4072 // This will only panic if the module is invalid
4073 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
4074 TypeInner::Image {
4075 dim,
4076 arrayed: _,
4077 class,
4078 } => (dim, class),
4079 _ => unreachable!(),
4080 };
4081
4082 // Get the name of the function to be used for the load operation
4083 // and the policy to be used with it.
4084 let (fun_name, policy) = match class {
4085 // Sampled images inherit the policy from the user passed policies
4086 crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
4087 crate::ImageClass::Storage { .. } => {
4088 // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
4089 // "Invalid image loads will return a vector where the value of R, G, and B components
4090 // is 0 and the value of the A component is undefined."
4091 //
4092 // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
4093 // "Invalid image loads will return zero."
4094 //
4095 // So, we only inject bounds checks for ES
4096 let policy = if self.options.version.is_es() {
4097 self.policies.image_load
4098 } else {
4099 proc::BoundsCheckPolicy::Unchecked
4100 };
4101 ("imageLoad", policy)
4102 }
4103 // TODO: Is there even a function for this?
4104 crate::ImageClass::Depth { multi: _ } => {
4105 return Err(Error::Custom(
4106 "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
4107 ))
4108 }
4109 crate::ImageClass::External => unimplemented!(),
4110 };
4111
4112 // openGL es doesn't have 1D images so we need workaround it
4113 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4114 // Get the size of the coordinate vector
4115 let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());
4116
4117 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4118 // To write the bounds checks for `ReadZeroSkipWrite` we will use a
4119 // ternary operator since we are in the middle of an expression and
4120 // need to return a value.
4121 //
4122 // NOTE: glsl does short circuit when evaluating logical
4123 // expressions so we can be sure that after we test a
4124 // condition it will be true for the next ones
4125
4126 // Write parentheses around the ternary operator to prevent problems with
4127 // expressions emitted before or after it having more precedence
4128 write!(self.out, "(",)?;
4129
4130 // The lod check needs to precede the size check since we need
4131 // to use the lod to get the size of the image at that level.
4132 if let Some(level_expr) = level {
4133 self.write_expr(level_expr, ctx)?;
4134 write!(self.out, " < textureQueryLevels(",)?;
4135 self.write_expr(image, ctx)?;
4136 // Chain the next check
4137 write!(self.out, ") && ")?;
4138 }
4139
4140 // Check that the sample arguments doesn't exceed the number of samples
4141 if let Some(sample_expr) = sample {
4142 self.write_expr(sample_expr, ctx)?;
4143 write!(self.out, " < textureSamples(",)?;
4144 self.write_expr(image, ctx)?;
4145 // Chain the next check
4146 write!(self.out, ") && ")?;
4147 }
4148
4149 // We now need to write the size checks for the coordinates and array index
4150 // first we write the comparison function in case the image is 1D non arrayed
4151 // (and no 1D to 2D hack was needed) we are comparing scalars so the less than
4152 // operator will suffice, but otherwise we'll be comparing two vectors so we'll
4153 // need to use the `lessThan` function but it returns a vector of booleans (one
4154 // for each comparison) so we need to fold it all in one scalar boolean, since
4155 // we want all comparisons to pass we use the `all` function which will only
4156 // return `true` if all the elements of the boolean vector are also `true`.
4157 //
4158 // So we'll end with one of the following forms
4159 // - `coord < textureSize(image, lod)` for 1D images
4160 // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
4161 // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
4162 // for arrayed images
4163 // - `all(lessThan(coord, textureSize(image)))` for multi sampled images
4164
4165 if vector_size != 1 {
4166 write!(self.out, "all(lessThan(")?;
4167 }
4168
4169 // Write the coordinate vector
4170 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4171
4172 if vector_size != 1 {
4173 // If we used the `lessThan` function we need to separate the
4174 // coordinates from the image size.
4175 write!(self.out, ", ")?;
4176 } else {
4177 // If we didn't use it (ie. 1D images) we perform the comparison
4178 // using the less than operator.
4179 write!(self.out, " < ")?;
4180 }
4181
4182 // Call `textureSize` to get our image size
4183 write!(self.out, "textureSize(")?;
4184 self.write_expr(image, ctx)?;
4185 // `textureSize` uses the lod as a second argument for mipmapped images
4186 if let Some(level_expr) = level {
4187 // Separate the image from the lod
4188 write!(self.out, ", ")?;
4189 self.write_expr(level_expr, ctx)?;
4190 }
4191 // Close the `textureSize` call
4192 write!(self.out, ")")?;
4193
4194 if vector_size != 1 {
4195 // Close the `all` and `lessThan` calls
4196 write!(self.out, "))")?;
4197 }
4198
4199 // Finally end the condition part of the ternary operator
4200 write!(self.out, " ? ")?;
4201 }
4202
4203 // Begin the call to the function used to load the texel
4204 write!(self.out, "{fun_name}(")?;
4205 self.write_expr(image, ctx)?;
4206 write!(self.out, ", ")?;
4207
4208 // If we are using `Restrict` bounds checking we need to pass valid texel
4209 // coordinates, to do so we use the `clamp` function to get a value between
4210 // 0 and the image size - 1 (indexing begins at 0)
4211 if let proc::BoundsCheckPolicy::Restrict = policy {
4212 write!(self.out, "clamp(")?;
4213 }
4214
4215 // Write the coordinate vector
4216 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4217
4218 // If we are using `Restrict` bounds checking we need to write the rest of the
4219 // clamp we initiated before writing the coordinates.
4220 if let proc::BoundsCheckPolicy::Restrict = policy {
4221 // Write the min value 0
4222 if vector_size == 1 {
4223 write!(self.out, ", 0")?;
4224 } else {
4225 write!(self.out, ", ivec{vector_size}(0)")?;
4226 }
4227 // Start the `textureSize` call to use as the max value.
4228 write!(self.out, ", textureSize(")?;
4229 self.write_expr(image, ctx)?;
4230 // If the image is mipmapped we need to add the lod argument to the
4231 // `textureSize` call, but this needs to be the clamped lod, this should
4232 // have been generated earlier and put in a local.
4233 if class.is_mipmapped() {
4234 write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
4235 }
4236 // Close the `textureSize` call
4237 write!(self.out, ")")?;
4238
4239 // Subtract 1 from the `textureSize` call since the coordinates are zero based.
4240 if vector_size == 1 {
4241 write!(self.out, " - 1")?;
4242 } else {
4243 write!(self.out, " - ivec{vector_size}(1)")?;
4244 }
4245
4246 // Close the `clamp` call
4247 write!(self.out, ")")?;
4248
4249 // Add the clamped lod (if present) as the second argument to the
4250 // image load function.
4251 if level.is_some() {
4252 write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
4253 }
4254
4255 // If a sample argument is needed we need to clamp it between 0 and
4256 // the number of samples the image has.
4257 if let Some(sample_expr) = sample {
4258 write!(self.out, ", clamp(")?;
4259 self.write_expr(sample_expr, ctx)?;
4260 // Set the min value to 0 and start the call to `textureSamples`
4261 write!(self.out, ", 0, textureSamples(")?;
4262 self.write_expr(image, ctx)?;
4263 // Close the `textureSamples` call, subtract 1 from it since the sample
4264 // argument is zero based, and close the `clamp` call
4265 writeln!(self.out, ") - 1)")?;
4266 }
4267 } else if let Some(sample_or_level) = sample.or(level) {
4268 // GLSL only support SInt on this field while WGSL support also UInt
4269 let cast_to_int = matches!(
4270 *ctx.resolve_type(sample_or_level, &self.module.types),
4271 TypeInner::Scalar(crate::Scalar {
4272 kind: crate::ScalarKind::Uint,
4273 ..
4274 })
4275 );
4276
4277 // If no bounds checking is need just add the sample or level argument
4278 // after the coordinates
4279 write!(self.out, ", ")?;
4280
4281 if cast_to_int {
4282 write!(self.out, "int(")?;
4283 }
4284
4285 self.write_expr(sample_or_level, ctx)?;
4286
4287 if cast_to_int {
4288 write!(self.out, ")")?;
4289 }
4290 }
4291
4292 // Close the image load function.
4293 write!(self.out, ")")?;
4294
4295 // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
4296 // (which is taken if the condition is `true`) with a colon (`:`) and write the
4297 // second branch which is just a 0 value.
4298 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4299 // Get the kind of the output value.
4300 let kind = match class {
4301 // Only sampled images can reach here since storage images
4302 // don't need bounds checks and depth images aren't implemented
4303 crate::ImageClass::Sampled { kind, .. } => kind,
4304 _ => unreachable!(),
4305 };
4306
4307 // End the first branch
4308 write!(self.out, " : ")?;
4309 // Write the 0 value
4310 write!(
4311 self.out,
4312 "{}vec4(",
4313 glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
4314 )?;
4315 self.write_zero_init_scalar(kind)?;
4316 // Close the zero value constructor
4317 write!(self.out, ")")?;
4318 // Close the parentheses surrounding our ternary
4319 write!(self.out, ")")?;
4320 }
4321
4322 Ok(())
4323 }
4324
4325 fn write_named_expr(
4326 &mut self,
4327 handle: Handle<crate::Expression>,
4328 name: String,
4329 // The expression which is being named.
4330 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4331 named: Handle<crate::Expression>,
4332 ctx: &back::FunctionCtx,
4333 ) -> BackendResult {
4334 match ctx.info[named].ty {
4335 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4336 TypeInner::Struct { .. } => {
4337 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4338 write!(self.out, "{ty_name}")?;
4339 }
4340 _ => {
4341 self.write_type(ty_handle)?;
4342 }
4343 },
4344 proc::TypeResolution::Value(ref inner) => {
4345 self.write_value_type(inner)?;
4346 }
4347 }
4348
4349 let resolved = ctx.resolve_type(named, &self.module.types);
4350
4351 write!(self.out, " {name}")?;
4352 if let TypeInner::Array { base, size, .. } = *resolved {
4353 self.write_array_size(base, size)?;
4354 }
4355 write!(self.out, " = ")?;
4356 self.write_expr(handle, ctx)?;
4357 writeln!(self.out, ";")?;
4358 self.named_expressions.insert(named, name);
4359
4360 Ok(())
4361 }
4362
4363 /// Helper function that write string with default zero initialization for supported types
4364 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4365 let inner = &self.module.types[ty].inner;
4366 match *inner {
4367 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4368 self.write_zero_init_scalar(scalar.kind)?;
4369 }
4370 TypeInner::Vector { scalar, .. } => {
4371 self.write_value_type(inner)?;
4372 write!(self.out, "(")?;
4373 self.write_zero_init_scalar(scalar.kind)?;
4374 write!(self.out, ")")?;
4375 }
4376 TypeInner::Matrix { .. } => {
4377 self.write_value_type(inner)?;
4378 write!(self.out, "(")?;
4379 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4380 write!(self.out, ")")?;
4381 }
4382 TypeInner::Array { base, size, .. } => {
4383 let count = match size.resolve(self.module.to_ctx())? {
4384 proc::IndexableLength::Known(count) => count,
4385 proc::IndexableLength::Dynamic => return Ok(()),
4386 };
4387 self.write_type(base)?;
4388 self.write_array_size(base, size)?;
4389 write!(self.out, "(")?;
4390 for _ in 1..count {
4391 self.write_zero_init_value(base)?;
4392 write!(self.out, ", ")?;
4393 }
4394 // write last parameter without comma and space
4395 self.write_zero_init_value(base)?;
4396 write!(self.out, ")")?;
4397 }
4398 TypeInner::Struct { ref members, .. } => {
4399 let name = &self.names[&NameKey::Type(ty)];
4400 write!(self.out, "{name}(")?;
4401 for (index, member) in members.iter().enumerate() {
4402 if index != 0 {
4403 write!(self.out, ", ")?;
4404 }
4405 self.write_zero_init_value(member.ty)?;
4406 }
4407 write!(self.out, ")")?;
4408 }
4409 _ => unreachable!(),
4410 }
4411
4412 Ok(())
4413 }
4414
4415 /// Helper function that write string with zero initialization for scalar
4416 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4417 match kind {
4418 crate::ScalarKind::Bool => write!(self.out, "false")?,
4419 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4420 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4421 crate::ScalarKind::Sint => write!(self.out, "0")?,
4422 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4423 return Err(Error::Custom(
4424 "Abstract types should not appear in IR presented to backends".to_string(),
4425 ))
4426 }
4427 }
4428
4429 Ok(())
4430 }
4431
    /// Issue a control barrier.
    ///
    /// Emits the memory barriers selected by `flags` first, then the GLSL
    /// execution barrier `barrier()`, all indented at `level`.
    fn write_control_barrier(
        &mut self,
        flags: crate::Barrier,
        level: back::Level,
    ) -> BackendResult {
        // Flush the relevant memory scopes before synchronizing execution.
        self.write_memory_barrier(flags, level)?;
        writeln!(self.out, "{level}barrier();")?;
        Ok(())
    }
4442
4443 /// Issue a memory barrier.
4444 fn write_memory_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4445 if flags.contains(crate::Barrier::STORAGE) {
4446 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4447 }
4448 if flags.contains(crate::Barrier::WORK_GROUP) {
4449 writeln!(self.out, "{level}memoryBarrierShared();")?;
4450 }
4451 if flags.contains(crate::Barrier::SUB_GROUP) {
4452 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4453 }
4454 if flags.contains(crate::Barrier::TEXTURE) {
4455 writeln!(self.out, "{level}memoryBarrierImage();")?;
4456 }
4457 Ok(())
4458 }
4459
4460 /// Helper function that return the glsl storage access string of [`StorageAccess`](crate::StorageAccess)
4461 ///
4462 /// glsl allows adding both `readonly` and `writeonly` but this means that
4463 /// they can only be used to query information about the resource which isn't what
4464 /// we want here so when storage access is both `LOAD` and `STORE` add no modifiers
4465 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4466 if storage_access.contains(crate::StorageAccess::ATOMIC) {
4467 return Ok(());
4468 }
4469 if !storage_access.contains(crate::StorageAccess::STORE) {
4470 write!(self.out, "readonly ")?;
4471 }
4472 if !storage_access.contains(crate::StorageAccess::LOAD) {
4473 write!(self.out, "writeonly ")?;
4474 }
4475 Ok(())
4476 }
4477
    /// Helper method used to produce the reflection info that's returned to the user
    ///
    /// Gathers, for the selected entry point: texture/sampler pairings,
    /// uniform and storage global names, immediate-data items, varyings
    /// (moved out of `self.varying`), and the clip distance count.
    fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
        let info = self.info.get_entry_point(self.entry_point_idx as usize);
        let mut texture_mapping = crate::FastHashMap::default();
        let mut uniforms = crate::FastHashMap::default();

        // Record every (texture, sampler) pair the entry point samples with.
        // A texture sampled with two different samplers cannot be represented
        // in this map, so that case is reported as an error.
        for sampling in info.sampling_set.iter() {
            let tex_name = self.reflection_names_globals[&sampling.image].clone();

            match texture_mapping.entry(tex_name) {
                hash_map::Entry::Vacant(v) => {
                    v.insert(TextureMapping {
                        texture: sampling.image,
                        sampler: Some(sampling.sampler),
                    });
                }
                hash_map::Entry::Occupied(e) => {
                    if e.get().sampler != Some(sampling.sampler) {
                        log::error!("Conflicting samplers for {}", e.key());
                        return Err(Error::ImageMultipleSamplers);
                    }
                }
            }
        }

        // Walk the globals actually used by the entry point: images that were
        // never sampled get a sampler-less mapping entry, uniform/storage
        // buffers are collected by name, and the immediate-data global (at
        // most one is expected here) is remembered for expansion below.
        let mut immediates_info = None;
        for (handle, var) in self.module.global_variables.iter() {
            if info[handle].is_empty() {
                // Global is unused by this entry point; skip it.
                continue;
            }
            match self.module.types[var.ty].inner {
                TypeInner::Image { .. } => {
                    let tex_name = self.reflection_names_globals[&handle].clone();
                    match texture_mapping.entry(tex_name) {
                        hash_map::Entry::Vacant(v) => {
                            v.insert(TextureMapping {
                                texture: handle,
                                sampler: None,
                            });
                        }
                        hash_map::Entry::Occupied(_) => {
                            // already used with a sampler, do nothing
                        }
                    }
                }
                _ => match var.space {
                    crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
                        let name = self.reflection_names_globals[&handle].clone();
                        uniforms.insert(handle, name);
                    }
                    crate::AddressSpace::Immediate => {
                        let name = self.reflection_names_globals[&handle].clone();
                        immediates_info = Some((name, var.ty));
                    }
                    _ => (),
                },
            }
        }

        let mut immediates_segments = Vec::new();
        let mut immediates_items = vec![];

        if let Some((name, ty)) = immediates_info {
            // We don't have a layouter available to us, so we need to create one.
            //
            // This is potentially a bit wasteful, but the set of types in the program
            // shouldn't be too large.
            let mut layouter = proc::Layouter::default();
            layouter.update(self.module.to_ctx()).unwrap();

            // We start with the name of the binding itself.
            immediates_segments.push(name);

            // We then recursively collect all the uniform fields of the immediate data.
            self.collect_immediates_items(
                ty,
                &mut immediates_segments,
                &layouter,
                &mut 0,
                &mut immediates_items,
            );
        }

        // `mem::take` moves the varyings out, leaving `self.varying` empty.
        Ok(ReflectionInfo {
            texture_mapping,
            uniforms,
            varying: mem::take(&mut self.varying),
            immediates_items,
            clip_distance_count: self.clip_distance_count,
        })
    }
4569
    /// Recursively flatten the immediate-data type `ty` into [`ImmediateItem`]s.
    ///
    /// `segments` holds the access-path pieces (binding name, `.member`,
    /// `[i]`) accumulated so far; `offset` is the running byte offset, kept
    /// aligned via `layouter` as the recursion walks the type.
    fn collect_immediates_items(
        &mut self,
        ty: Handle<crate::Type>,
        segments: &mut Vec<String>,
        layouter: &proc::Layouter,
        offset: &mut u32,
        items: &mut Vec<ImmediateItem>,
    ) {
        // At this point in the recursion, `segments` contains the path
        // needed to access `ty` from the root.

        // Align the running offset before placing this type.
        let layout = &layouter[ty];
        *offset = layout.alignment.round_up(*offset);
        match self.module.types[ty].inner {
            // All these types map directly to GL uniforms.
            TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
                // Build the full name, by combining all current segments.
                let name: String = segments.iter().map(String::as_str).collect();
                items.push(ImmediateItem {
                    access_path: name,
                    offset: *offset,
                    ty,
                });
                // Advance past this leaf item.
                *offset += layout.size;
            }
            // Arrays are recursed into.
            TypeInner::Array { base, size, .. } => {
                let crate::ArraySize::Constant(count) = size else {
                    unreachable!("Cannot have dynamic arrays in immediates");
                };

                for i in 0..count.get() {
                    // Add the array accessor and recurse.
                    segments.push(format!("[{i}]"));
                    self.collect_immediates_items(base, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure the stride is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            TypeInner::Struct { ref members, .. } => {
                for (index, member) in members.iter().enumerate() {
                    // Add struct accessor and recurse.
                    segments.push(format!(
                        ".{}",
                        self.names[&NameKey::StructMember(ty, index as u32)]
                    ));
                    self.collect_immediates_items(member.ty, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure ending padding is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            // Other types (images, pointers, …) cannot appear in immediates.
            _ => unreachable!(),
        }
    }
4628}