naga/back/glsl/writer.rs
1use super::*;
2
/// Writer responsible for all code generation.
pub struct Writer<'a, W> {
    // Inputs
    /// The module being written.
    pub(in crate::back::glsl) module: &'a crate::Module,
    /// The module analysis.
    pub(in crate::back::glsl) info: &'a valid::ModuleInfo,
    /// The output writer.
    out: W,
    /// User defined configuration to be used.
    pub(in crate::back::glsl) options: &'a Options,
    /// The bound checking policies to be used
    pub(in crate::back::glsl) policies: proc::BoundsCheckPolicies,

    // Internal State
    /// Features manager used to store all the needed features and write them.
    pub(in crate::back::glsl) features: FeaturesManager,
    /// Name generator used to produce the identifiers stored in [`names`].
    ///
    /// [`names`]: Writer::names
    namer: proc::Namer,
    /// A map with all the names needed for writing the module
    /// (generated by a [`Namer`](crate::proc::Namer)).
    names: crate::FastHashMap<NameKey, String>,
    /// A map with the names of global variables needed for reflections.
    reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// The selected entry point.
    pub(in crate::back::glsl) entry_point: &'a crate::EntryPoint,
    /// The index of the selected entry point.
    pub(in crate::back::glsl) entry_point_idx: proc::EntryPointIndex,
    /// A generator for unique block numbers.
    block_id: IdGenerator,
    /// Set of expressions that have associated temporary variables.
    named_expressions: crate::NamedExpressions,
    /// Set of expressions that need to be baked to avoid unnecessary repetition in output
    need_bake_expressions: back::NeedBakeExpressions,
    /// Information about nesting of loops and switches.
    ///
    /// Used for forwarding continue statements in switches that have been
    /// transformed to `do {} while(false);` loops.
    continue_ctx: back::continue_forward::ContinueCtx,
    /// How many views to render to, if doing multiview rendering.
    pub(in crate::back::glsl) multiview: Option<core::num::NonZeroU32>,
    /// Mapping of varying variables to their location. Needed for reflections.
    varying: crate::FastHashMap<String, VaryingLocation>,
    /// Number of user-defined clip planes. Only non-zero for vertex shaders.
    clip_distance_count: u32,
}
48
49impl<'a, W: Write> Writer<'a, W> {
    /// Creates a new [`Writer`] instance.
    ///
    /// # Errors
    /// - If the version specified is invalid or not supported.
    /// - If the entry point couldn't be found in the module.
    /// - If the version specified doesn't support some used features.
    pub fn new(
        out: W,
        module: &'a crate::Module,
        info: &'a valid::ModuleInfo,
        options: &'a Options,
        pipeline_options: &'a PipelineOptions,
        policies: proc::BoundsCheckPolicies,
    ) -> Result<Self, Error> {
        // Check if the requested version is supported
        if !options.version.is_supported() {
            log::error!("Version {}", options.version);
            return Err(Error::VersionNotSupported);
        }

        // Try to find the entry point and corresponding index
        let ep_idx = module
            .entry_points
            .iter()
            .position(|ep| {
                pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
            })
            .ok_or(Error::EntryPointNotFound)?;

        // Generate a map with names required to write the module
        let mut names = crate::FastHashMap::default();
        let mut namer = proc::Namer::default();
        namer.reset(
            module,
            &keywords::RESERVED_KEYWORD_SET,
            proc::CaseInsensitiveKeywordSet::empty(),
            &[
                "gl_", // all GL built-in variables
                "_group", // all normal bindings
                "_immediates_binding_", // all immediate data bindings
            ],
            &mut names,
        );

        // Build the instance
        let mut this = Self {
            module,
            info,
            out,
            options,
            policies,

            namer,
            features: FeaturesManager::new(),
            names,
            reflection_names_globals: crate::FastHashMap::default(),
            entry_point: &module.entry_points[ep_idx],
            entry_point_idx: ep_idx as u16,
            multiview: pipeline_options.multiview,
            block_id: IdGenerator::default(),
            named_expressions: Default::default(),
            need_bake_expressions: Default::default(),
            continue_ctx: back::continue_forward::ContinueCtx::default(),
            varying: Default::default(),
            clip_distance_count: 0,
        };

        // Find all features required to print this module
        this.collect_required_features()?;

        Ok(this)
    }
122
    /// Writes the [`Module`](crate::Module) as glsl to the output
    ///
    /// # Notes
    /// If an error occurs while writing, the output might have been written partially
    ///
    /// # Panics
    /// Might panic if the module is invalid
    pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
        // We use `writeln!(self.out)` throughout the write to add newlines
        // to make the output more readable

        let es = self.options.version.is_es();

        // Write the version (It must be the first thing or it isn't a valid glsl output)
        writeln!(self.out, "#version {}", self.options.version)?;
        // Write all the needed extensions
        //
        // This used to be the last thing being written as it allowed to search for features while
        // writing the module saving some loops but some older versions (420 or less) required the
        // extensions to appear before being used, even though extensions are part of the
        // preprocessor not the processor ¯\_(ツ)_/¯
        self.features.write(self.options, &mut self.out)?;

        // glsl es requires a precision to be specified for floats and ints
        // TODO: Should this be user configurable?
        if es {
            writeln!(self.out)?;
            writeln!(self.out, "precision highp float;")?;
            writeln!(self.out, "precision highp int;")?;
            writeln!(self.out)?;
        }

        if self.entry_point.stage == ShaderStage::Compute {
            let workgroup_size = self.entry_point.workgroup_size;
            writeln!(
                self.out,
                "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
                workgroup_size[0], workgroup_size[1], workgroup_size[2]
            )?;
            writeln!(self.out)?;
        }

        if self.entry_point.stage == ShaderStage::Vertex
            && !self
                .options
                .writer_flags
                .contains(WriterFlags::DRAW_PARAMETERS)
            && self.features.contains(Features::INSTANCE_INDEX)
        {
            writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
            writeln!(self.out)?;
        }

        // Enable early depth tests if needed
        if let Some(early_depth_test) = self.entry_point.early_depth_test {
            // If early depth test is supported for this version of GLSL
            if self.options.version.supports_early_depth_test() {
                match early_depth_test {
                    crate::EarlyDepthTest::Force => {
                        writeln!(self.out, "layout(early_fragment_tests) in;")?;
                    }
                    crate::EarlyDepthTest::Allow { conservative, .. } => {
                        use crate::ConservativeDepth as Cd;
                        let depth = match conservative {
                            Cd::GreaterEqual => "greater",
                            Cd::LessEqual => "less",
                            Cd::Unchanged => "unchanged",
                        };
                        writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
                    }
                }
            } else {
                log::warn!(
                    "Early depth testing is not supported for this version of GLSL: {}",
                    self.options.version
                );
            }
        }

        if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
            if let Some(multiview) = self.multiview.as_ref() {
                writeln!(self.out, "layout(num_views = {multiview}) in;")?;
                writeln!(self.out)?;
            }
        }

        // Write struct types.
        //
        // These are always ordered because the IR is structured in a way that
        // you can't make a struct without adding all of its members first.
        for (handle, ty) in self.module.types.iter() {
            if let TypeInner::Struct { ref members, .. } = ty.inner {
                let struct_name = &self.names[&NameKey::Type(handle)];

                // Structures ending with runtime-sized arrays can only be
                // rendered as shader storage blocks in GLSL, not stand-alone
                // struct types.
                if !self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types)
                {
                    write!(self.out, "struct {struct_name} ")?;
                    self.write_struct_body(handle, members)?;
                    writeln!(self.out, ";")?;
                }
            }
        }

        // Write functions for special types.
        for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
            match type_key {
                &crate::PredeclaredType::ModfResult { size, scalar }
                | &crate::PredeclaredType::FrexpResult { size, scalar } => {
                    let struct_name = &self.names[&NameKey::Type(*struct_ty)];
                    let arg_type_name_owner;
                    let arg_type_name = if let Some(size) = size {
                        arg_type_name_owner = format!(
                            "{}vec{}",
                            if scalar.width == 8 { "d" } else { "" },
                            size as u8
                        );
                        &arg_type_name_owner
                    } else if scalar.width == 8 {
                        "double"
                    } else {
                        "float"
                    };

                    let other_type_name_owner;
                    let (defined_func_name, called_func_name, other_type_name) =
                        if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
                            (MODF_FUNCTION, "modf", arg_type_name)
                        } else {
                            let other_type_name = if let Some(size) = size {
                                other_type_name_owner = format!("ivec{}", size as u8);
                                &other_type_name_owner
                            } else {
                                "int"
                            };
                            (FREXP_FUNCTION, "frexp", other_type_name)
                        };

                    writeln!(self.out)?;
                    if !self.options.version.supports_frexp_function()
                        && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
                    {
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
    {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
    return {struct_name}(fract, other);
}}",
                        )?;
                    } else {
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other;
    {arg_type_name} fract = {called_func_name}(arg, other);
    return {struct_name}(fract, other);
}}",
                        )?;
                    }
                }
                &crate::PredeclaredType::AtomicCompareExchangeWeakResult(_) => {
                    // Handled by the general struct writing loop earlier.
                }
            }
        }

        // Write all named constants
        let mut constants = self
            .module
            .constants
            .iter()
            .filter(|&(_, c)| c.name.is_some())
            .peekable();
        while let Some((handle, _)) = constants.next() {
            self.write_global_constant(handle)?;
            // Add extra newline for readability on last iteration
            if constants.peek().is_none() {
                writeln!(self.out)?;
            }
        }

        let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);

        // Write the globals
        //
        // Unless explicitly disabled with WriterFlags::INCLUDE_UNUSED_ITEMS,
        // we filter all globals that aren't used by the selected entry point as they might
        // interfere with each other (i.e. two globals with the same location but
        // different classes)
        let include_unused = self
            .options
            .writer_flags
            .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
        for (handle, global) in self.module.global_variables.iter() {
            let is_unused = ep_info[handle].is_empty();
            if !include_unused && is_unused {
                continue;
            }

            match self.module.types[global.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    mut dim,
                    arrayed,
                    class,
                } => {
                    // Gather the storage format if needed
                    let storage_format_access = match self.module.types[global.ty].inner {
                        TypeInner::Image {
                            class: crate::ImageClass::Storage { format, access },
                            ..
                        } => Some((format, access)),
                        _ => None,
                    };

                    if dim == crate::ImageDimension::D1 && es {
                        dim = crate::ImageDimension::D2
                    }

                    // Gather the location if needed
                    let layout_binding = if self.options.version.supports_explicit_locations() {
                        let br = global.binding.as_ref().unwrap();
                        self.options.binding_map.get(br).cloned()
                    } else {
                        None
                    };

                    // Write all the layout qualifiers
                    if layout_binding.is_some() || storage_format_access.is_some() {
                        write!(self.out, "layout(")?;
                        if let Some(binding) = layout_binding {
                            write!(self.out, "binding = {binding}")?;
                        }
                        if let Some((format, _)) = storage_format_access {
                            let format_str = glsl_storage_format(format)?;
                            let separator = match layout_binding {
                                Some(_) => ",",
                                None => "",
                            };
                            write!(self.out, "{separator}{format_str}")?;
                        }
                        write!(self.out, ") ")?;
                    }

                    if let Some((_, access)) = storage_format_access {
                        self.write_storage_access(access)?;
                    }

                    // All images in glsl are `uniform`
                    // The trailing space is important
                    write!(self.out, "uniform ")?;

                    // write the type
                    //
                    // This is why we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    self.write_image_type(dim, arrayed, class)?;

                    // Finally write the name and end the global with a `;`
                    // The leading space is important
                    let global_name = self.get_global_name(handle, global);
                    writeln!(self.out, " {global_name};")?;
                    writeln!(self.out)?;

                    self.reflection_names_globals.insert(handle, global_name);
                }
                // glsl has no concept of samplers so we just ignore it
                TypeInner::Sampler { .. } => continue,
                // All other globals are written by `write_global`
                _ => {
                    self.write_global(handle, global)?;
                    // Add a newline (only for readability)
                    writeln!(self.out)?;
                }
            }
        }

        for arg in self.entry_point.function.arguments.iter() {
            self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
        }
        if let Some(ref result) = self.entry_point.function.result {
            self.write_varying(result.binding.as_ref(), result.ty, true)?;
        }
        writeln!(self.out)?;

        // Write all regular functions
        for (handle, function) in self.module.functions.iter() {
            // Check that the function doesn't use globals that aren't supported
            // by the current entry point
            if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
                continue;
            }

            let fun_info = &self.info[handle];

            // Skip functions that are not compatible with this entry point's stage.
            //
            // When validation is enabled, it rejects modules whose entry points try to call
            // incompatible functions, so if we got this far, then any functions incompatible
            // with our selected entry point must not be used.
            //
            // When validation is disabled, `fun_info.available_stages` is always just
            // `ShaderStages::all()`, so this will write all functions in the module, and
            // the downstream GLSL compiler will catch any problems.
            if !fun_info.available_stages.contains(ep_info.available_stages) {
                continue;
            }

            // Write the function
            self.write_function(back::FunctionType::Function(handle), function, fun_info)?;

            writeln!(self.out)?;
        }

        self.write_function(
            back::FunctionType::EntryPoint(self.entry_point_idx),
            &self.entry_point.function,
            ep_info,
        )?;

        // Add newline at the end of file
        writeln!(self.out)?;

        // Collect all reflection info and return it to the user
        self.collect_reflection_info()
    }
455
456 fn write_array_size(
457 &mut self,
458 base: Handle<crate::Type>,
459 size: crate::ArraySize,
460 ) -> BackendResult {
461 write!(self.out, "[")?;
462
463 // Write the array size
464 // Writes nothing if `IndexableLength::Dynamic`
465 match size.resolve(self.module.to_ctx())? {
466 proc::IndexableLength::Known(size) => {
467 write!(self.out, "{size}")?;
468 }
469 proc::IndexableLength::Dynamic => (),
470 }
471
472 write!(self.out, "]")?;
473
474 if let TypeInner::Array {
475 base: next_base,
476 size: next_size,
477 ..
478 } = self.module.types[base].inner
479 {
480 self.write_array_size(next_base, next_size)?;
481 }
482
483 Ok(())
484 }
485
    /// Helper method used to write value types
    ///
    /// # Notes
    /// Adds no trailing or leading whitespace
    fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
        match *inner {
            // Scalars are simple we just get the full name from `glsl_scalar`
            TypeInner::Scalar(scalar)
            | TypeInner::Atomic(scalar)
            | TypeInner::ValuePointer {
                size: None,
                scalar,
                space: _,
            } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
            // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
            TypeInner::Vector { size, scalar }
            | TypeInner::ValuePointer {
                size: Some(size),
                scalar,
                space: _,
            } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
            // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
            // doubles are allowed), `M` is the columns count and `N` is the rows count
            //
            // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
            // extra branch to write matrices this way
            TypeInner::Matrix {
                columns,
                rows,
                scalar,
            } => write!(
                self.out,
                "{}mat{}x{}",
                glsl_scalar(scalar)?.prefix,
                columns as u8,
                rows as u8
            )?,
            // GLSL arrays are written as `type name[size]`
            // Here we only write the size of the array i.e. `[size]`
            // Base `type` and `name` should be written outside
            TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
            // List every remaining variant instead of using `_` so that adding
            // a new `TypeInner` variant produces a non-exhaustiveness error
            // here at compile time
            TypeInner::Pointer { .. }
            | TypeInner::Struct { .. }
            | TypeInner::Image { .. }
            | TypeInner::Sampler { .. }
            | TypeInner::AccelerationStructure { .. }
            | TypeInner::RayQuery { .. }
            | TypeInner::BindingArray { .. } => {
                return Err(Error::Custom(format!("Unable to write type {inner:?}")))
            }
        }

        Ok(())
    }
542
543 /// Helper method used to write non image/sampler types
544 ///
545 /// # Notes
546 /// Adds no trailing or leading whitespace
547 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
548 match self.module.types[ty].inner {
549 // glsl has no pointer types so just write types as normal and loads are skipped
550 TypeInner::Pointer { base, .. } => self.write_type(base),
551 // glsl structs are written as just the struct name
552 TypeInner::Struct { .. } => {
553 // Get the struct name
554 let name = &self.names[&NameKey::Type(ty)];
555 write!(self.out, "{name}")?;
556 Ok(())
557 }
558 // glsl array has the size separated from the base type
559 TypeInner::Array { base, .. } => self.write_type(base),
560 ref other => self.write_value_type(other),
561 }
562 }
563
564 /// Helper method to write a image type
565 ///
566 /// # Notes
567 /// Adds no leading or trailing whitespace
568 fn write_image_type(
569 &mut self,
570 dim: crate::ImageDimension,
571 arrayed: bool,
572 class: crate::ImageClass,
573 ) -> BackendResult {
574 // glsl images consist of four parts the scalar prefix, the image "type", the dimensions
575 // and modifiers
576 //
577 // There exists two image types
578 // - sampler - for sampled images
579 // - image - for storage images
580 //
581 // There are three possible modifiers that can be used together and must be written in
582 // this order to be valid
583 // - MS - used if it's a multisampled image
584 // - Array - used if it's an image array
585 // - Shadow - used if it's a depth image
586 use crate::ImageClass as Ic;
587 use crate::Scalar as S;
588 let float = S {
589 kind: crate::ScalarKind::Float,
590 width: 4,
591 };
592 let (base, scalar, ms, comparison) = match class {
593 Ic::Sampled { kind, multi: true } => ("sampler", S { kind, width: 4 }, "MS", ""),
594 Ic::Sampled { kind, multi: false } => ("sampler", S { kind, width: 4 }, "", ""),
595 Ic::Depth { multi: true } => ("sampler", float, "MS", ""),
596 Ic::Depth { multi: false } => ("sampler", float, "", "Shadow"),
597 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
598 Ic::External => unimplemented!(),
599 };
600
601 let precision = if self.options.version.is_es() {
602 "highp "
603 } else {
604 ""
605 };
606
607 write!(
608 self.out,
609 "{}{}{}{}{}{}{}",
610 precision,
611 glsl_scalar(scalar)?.prefix,
612 base,
613 glsl_dimension(dim),
614 ms,
615 if arrayed { "Array" } else { "" },
616 comparison
617 )?;
618
619 Ok(())
620 }
621
622 /// Helper method used by [Self::write_global] to write just the layout part of
623 /// a non image/sampler global variable, if applicable.
624 ///
625 /// # Notes
626 ///
627 /// Adds trailing whitespace if any layout qualifier is written
628 fn write_global_layout(&mut self, global: &crate::GlobalVariable) -> BackendResult {
629 // Determine which (if any) explicit memory layout to use, and whether we support it
630 let layout = match global.space {
631 crate::AddressSpace::Uniform => {
632 if !self.options.version.supports_std140_layout() {
633 return Err(Error::Custom(
634 "Uniform address space requires std140 layout support".to_string(),
635 ));
636 }
637
638 Some("std140")
639 }
640 crate::AddressSpace::Storage { .. } => {
641 if !self.options.version.supports_std430_layout() {
642 return Err(Error::Custom(
643 "Storage address space requires std430 layout support".to_string(),
644 ));
645 }
646
647 Some("std430")
648 }
649 _ => None,
650 };
651
652 // If our version supports explicit layouts, we can also output the explicit binding
653 // if we have it
654 if self.options.version.supports_explicit_locations() {
655 if let Some(ref br) = global.binding {
656 match self.options.binding_map.get(br) {
657 Some(binding) => {
658 write!(self.out, "layout(")?;
659
660 if let Some(layout) = layout {
661 write!(self.out, "{layout}, ")?;
662 }
663
664 write!(self.out, "binding = {binding}) ")?;
665
666 return Ok(());
667 }
668 None => {
669 log::debug!("unassigned binding for {:?}", global.name);
670 }
671 }
672 }
673 }
674
675 // Either no explicit bindings are supported or we didn't have any.
676 // Write just the memory layout.
677 if let Some(layout) = layout {
678 write!(self.out, "layout({layout}) ")?;
679 }
680
681 Ok(())
682 }
683
684 /// Helper method used to write non images/sampler globals
685 ///
686 /// # Notes
687 /// Adds a newline
688 ///
689 /// # Panics
690 /// If the global has type sampler
691 fn write_global(
692 &mut self,
693 handle: Handle<crate::GlobalVariable>,
694 global: &crate::GlobalVariable,
695 ) -> BackendResult {
696 self.write_global_layout(global)?;
697
698 if let crate::AddressSpace::Storage { access } = global.space {
699 self.write_storage_access(access)?;
700 }
701
702 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
703 write!(self.out, "{storage_qualifier} ")?;
704 }
705
706 match global.space {
707 crate::AddressSpace::Private => {
708 self.write_simple_global(handle, global)?;
709 }
710 crate::AddressSpace::WorkGroup => {
711 self.write_simple_global(handle, global)?;
712 }
713 crate::AddressSpace::Immediate => {
714 self.write_simple_global(handle, global)?;
715 }
716 crate::AddressSpace::Uniform => {
717 self.write_interface_block(handle, global)?;
718 }
719 crate::AddressSpace::Storage { .. } => {
720 self.write_interface_block(handle, global)?;
721 }
722 crate::AddressSpace::TaskPayload => {
723 self.write_interface_block(handle, global)?;
724 }
725 // A global variable in the `Function` address space is a
726 // contradiction in terms.
727 crate::AddressSpace::Function => unreachable!(),
728 // Textures and samplers are handled directly in `Writer::write`.
729 crate::AddressSpace::Handle => unreachable!(),
730 }
731
732 Ok(())
733 }
734
735 fn write_simple_global(
736 &mut self,
737 handle: Handle<crate::GlobalVariable>,
738 global: &crate::GlobalVariable,
739 ) -> BackendResult {
740 self.write_type(global.ty)?;
741 write!(self.out, " ")?;
742 self.write_global_name(handle, global)?;
743
744 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
745 self.write_array_size(base, size)?;
746 }
747
748 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
749 write!(self.out, " = ")?;
750 if let Some(init) = global.init {
751 self.write_const_expr(init, &self.module.global_expressions)?;
752 } else {
753 self.write_zero_init_value(global.ty)?;
754 }
755 }
756
757 writeln!(self.out, ";")?;
758
759 if let crate::AddressSpace::Immediate = global.space {
760 let global_name = self.get_global_name(handle, global);
761 self.reflection_names_globals.insert(handle, global_name);
762 }
763
764 Ok(())
765 }
766
    /// Write an interface block for a single Naga global.
    ///
    /// Write `block_name { members }`. Since `block_name` must be unique
    /// between blocks and structs, we add `_block_ID` where `ID` is a
    /// `IdGenerator` generated number. Write `members` in the same way we write
    /// a struct's members.
    fn write_interface_block(
        &mut self,
        handle: Handle<crate::GlobalVariable>,
        global: &crate::GlobalVariable,
    ) -> BackendResult {
        // Write the block name: the struct name appended with `_block_ID`
        // plus the entry point stage (Debug-formatted, no separator)
        let ty_name = &self.names[&NameKey::Type(global.ty)];
        let block_name = format!(
            "{}_block_{}{:?}",
            // avoid double underscores as they are reserved in GLSL
            ty_name.trim_end_matches('_'),
            self.block_id.generate(),
            self.entry_point.stage,
        );
        write!(self.out, "{block_name} ")?;
        self.reflection_names_globals.insert(handle, block_name);

        match self.module.types[global.ty].inner {
            TypeInner::Struct { ref members, .. }
                if self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types) =>
            {
                // Structs with dynamically sized arrays must have their
                // members lifted up as members of the interface block. GLSL
                // can't write such struct types anyway.
                self.write_struct_body(global.ty, members)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
            }
            _ => {
                // A global of any other type is written as the sole member
                // of the interface block. Since the interface block is
                // anonymous, this becomes visible in the global scope.
                write!(self.out, "{{ ")?;
                self.write_type(global.ty)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
                if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
                    self.write_array_size(base, size)?;
                }
                write!(self.out, "; }}")?;
            }
        }

        writeln!(self.out, ";")?;

        Ok(())
    }
822
    /// Helper method used to find which expressions of a given function require baking
    ///
    /// # Notes
    /// Clears `need_bake_expressions` set before adding to it
    fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
        use crate::Expression;
        self.need_bake_expressions.clear();
        for (fun_handle, expr) in func.expressions.iter() {
            let expr_info = &info[fun_handle];
            // Bake expressions whose reference count reaches the expression's
            // baking threshold.
            let min_ref_count = func.expressions[fun_handle].bake_ref_count();
            if min_ref_count <= expr_info.ref_count {
                self.need_bake_expressions.insert(fun_handle);
            }

            let inner = expr_info.ty.inner_with(&self.module.types);

            // Certain math functions re-use their arguments in the emitted
            // GLSL, so those arguments must be baked as well.
            if let Expression::Math {
                fun,
                arg,
                arg1,
                arg2,
                ..
            } = *expr
            {
                match fun {
                    crate::MathFunction::Dot => {
                        // if the expression is a Dot product with integer arguments,
                        // then the args needs baking as well
                        if let TypeInner::Scalar(crate::Scalar {
                            kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint,
                            ..
                        }) = *inner
                        {
                            self.need_bake_expressions.insert(arg);
                            self.need_bake_expressions.insert(arg1.unwrap());
                        }
                    }
                    crate::MathFunction::Dot4U8Packed | crate::MathFunction::Dot4I8Packed => {
                        self.need_bake_expressions.insert(arg);
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::Pack4xI8
                    | crate::MathFunction::Pack4xU8
                    | crate::MathFunction::Pack4xI8Clamp
                    | crate::MathFunction::Pack4xU8Clamp
                    | crate::MathFunction::Unpack4xI8
                    | crate::MathFunction::Unpack4xU8
                    | crate::MathFunction::QuantizeToF16 => {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8snorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        // We have a fallback if the platform doesn't natively support these
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8unorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16snorm | */
                    crate::MathFunction::Unpack2x16snorm
                        if !self.options.version.supports_pack_unpack_snorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16unorm | */
                    crate::MathFunction::Unpack2x16unorm
                        if !self.options.version.supports_pack_unpack_unorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    crate::MathFunction::ExtractBits => {
                        // Only argument 1 is re-used.
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::InsertBits => {
                        // Only argument 2 is re-used.
                        self.need_bake_expressions.insert(arg2.unwrap());
                    }
                    crate::MathFunction::CountLeadingZeros => {
                        if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
                            self.need_bake_expressions.insert(arg);
                        }
                    }
                    _ => {}
                }
            }
        }

        // Comparison values of atomic compare-exchange statements are also baked.
        for statement in func.body.iter() {
            match *statement {
                crate::Statement::Atomic {
                    fun: crate::AtomicFunction::Exchange { compare: Some(cmp) },
                    ..
                } => {
                    self.need_bake_expressions.insert(cmp);
                }
                _ => {}
            }
        }
    }
928
929 /// Helper method used to get a name for a global
930 ///
931 /// Globals have different naming schemes depending on their binding:
932 /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
933 /// - Globals with resource binding are named `_group_X_binding_Y` where `X`
934 /// is the group and `Y` is the binding
935 fn get_global_name(
936 &self,
937 handle: Handle<crate::GlobalVariable>,
938 global: &crate::GlobalVariable,
939 ) -> String {
940 match (&global.binding, global.space) {
941 (&Some(ref br), _) => {
942 format!(
943 "_group_{}_binding_{}_{}",
944 br.group,
945 br.binding,
946 self.entry_point.stage.to_str()
947 )
948 }
949 (&None, crate::AddressSpace::Immediate) => {
950 format!("_immediates_binding_{}", self.entry_point.stage.to_str())
951 }
952 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
953 }
954 }
955
956 /// Helper method used to write a name for a global without additional heap allocation
957 fn write_global_name(
958 &mut self,
959 handle: Handle<crate::GlobalVariable>,
960 global: &crate::GlobalVariable,
961 ) -> BackendResult {
962 match (&global.binding, global.space) {
963 (&Some(ref br), _) => write!(
964 self.out,
965 "_group_{}_binding_{}_{}",
966 br.group,
967 br.binding,
968 self.entry_point.stage.to_str()
969 )?,
970 (&None, crate::AddressSpace::Immediate) => write!(
971 self.out,
972 "_immediates_binding_{}",
973 self.entry_point.stage.to_str()
974 )?,
975 (&None, _) => write!(
976 self.out,
977 "{}",
978 &self.names[&NameKey::GlobalVariable(handle)]
979 )?,
980 }
981
982 Ok(())
983 }
984
/// Write a GLSL global that will carry a Naga entry point's argument or return value.
///
/// A Naga entry point's arguments and return value are rendered in GLSL as
/// variables at global scope with the `in` and `out` storage qualifiers.
/// The code we generate for `main` loads from all the `in` globals into
/// appropriately named locals. Before it returns, `main` assigns the
/// components of its return value into all the `out` globals.
///
/// This function writes a declaration for one such GLSL global,
/// representing a value passed into or returned from [`self.entry_point`]
/// that has a [`Location`] binding. The global's name is generated based on
/// the location index and the shader stages being connected; see
/// [`VaryingName`]. This means we don't need to know the names of
/// arguments, just their types and bindings.
///
/// Emit nothing for entry point arguments or return values with [`BuiltIn`]
/// bindings; `main` will read from or assign to the appropriate GLSL
/// special variable; these are pre-declared. As an exception, we do declare
/// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
/// needed.
///
/// Use `output` together with [`self.entry_point.stage`] to determine which
/// shader stages are being connected, and choose the `in` or `out` storage
/// qualifier.
///
/// [`self.entry_point`]: Writer::entry_point
/// [`self.entry_point.stage`]: crate::EntryPoint::stage
/// [`Location`]: crate::Binding::Location
/// [`BuiltIn`]: crate::Binding::BuiltIn
fn write_varying(
    &mut self,
    binding: Option<&crate::Binding>,
    ty: Handle<crate::Type>,
    output: bool,
) -> Result<(), Error> {
    // For a struct, emit a separate global for each member with a binding.
    if let TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
        for member in members {
            self.write_varying(member.binding.as_ref(), member.ty, output)?;
        }
        return Ok(());
    }

    // A non-struct value without a binding is not connected to the other
    // stage; there is nothing to declare for it.
    let binding = match binding {
        None => return Ok(()),
        Some(binding) => binding,
    };

    // Built-in bindings are handled (and returned from) inside this match;
    // only `Location` bindings fall through to the declaration below.
    let (location, interpolation, sampling, blend_src) = match *binding {
        crate::Binding::Location {
            location,
            interpolation,
            sampling,
            blend_src,
            per_primitive: _,
        } => (location, interpolation, sampling, blend_src),
        crate::Binding::BuiltIn(built_in) => {
            match built_in {
                crate::BuiltIn::Position { invariant: true } => {
                    match (self.options.version, self.entry_point.stage) {
                        (
                            Version::Embedded {
                                version: 300,
                                is_webgl: true,
                            },
                            ShaderStage::Fragment,
                        ) => {
                            // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
                            // OpenGL ES in general (waiting on confirmation).
                            //
                            // See https://github.com/KhronosGroup/WebGL/issues/3518
                        }
                        _ => {
                            writeln!(
                                self.out,
                                "invariant {};",
                                glsl_built_in(
                                    built_in,
                                    VaryingOptions::from_writer_options(self.options, output)
                                )
                            )?;
                        }
                    }
                }
                crate::BuiltIn::ClipDistance => {
                    // Re-declare `gl_ClipDistance` with number of clip planes.
                    let TypeInner::Array { size, .. } = self.module.types[ty].inner else {
                        unreachable!();
                    };
                    let proc::IndexableLength::Known(size) =
                        size.resolve(self.module.to_ctx())?
                    else {
                        unreachable!();
                    };
                    self.clip_distance_count = size;
                    writeln!(self.out, "out float gl_ClipDistance[{size}];")?;
                }
                _ => {}
            }
            return Ok(());
        }
    };

    // Write the interpolation modifier if needed
    //
    // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
    // shaders' input globals or vertex shaders' output globals.
    let emit_interpolation_and_auxiliary = match self.entry_point.stage {
        ShaderStage::Vertex => output,
        ShaderStage::Fragment => !output,
        ShaderStage::Compute => false,
        ShaderStage::Task | ShaderStage::Mesh => unreachable!(),
    };

    // Write the I/O locations, if allowed.
    //
    // When explicit locations can't be written, the location is returned in
    // `io_location` instead, so it can be stored in `self.varying` below and
    // reported through reflection.
    let io_location = if self.options.version.supports_explicit_locations()
        || !emit_interpolation_and_auxiliary
    {
        if self.options.version.supports_io_locations() {
            if let Some(blend_src) = blend_src {
                write!(
                    self.out,
                    "layout(location = {location}, index = {blend_src}) "
                )?;
            } else {
                write!(self.out, "layout(location = {location}) ")?;
            }
            None
        } else {
            Some(VaryingLocation {
                location,
                index: blend_src.unwrap_or(0),
            })
        }
    } else {
        None
    };

    // Write the interpolation qualifier.
    if let Some(interp) = interpolation {
        if emit_interpolation_and_auxiliary {
            write!(self.out, "{} ", glsl_interpolation(interp))?;
        }
    }

    // Write the sampling auxiliary qualifier.
    //
    // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
    // immediately before the `in` / `out` qualifier, so we'll just follow that rule
    // here, regardless of the version.
    if let Some(sampling) = sampling {
        if emit_interpolation_and_auxiliary {
            if let Some(qualifier) = glsl_sampling(sampling)? {
                write!(self.out, "{qualifier} ")?;
            }
        }
    }

    // Write the input/output qualifier.
    write!(self.out, "{} ", if output { "out" } else { "in" })?;

    // Write the type
    // `write_type` adds no leading or trailing spaces
    self.write_type(ty)?;

    // Finally write the global name and end the global with a `;` and a newline
    // Leading space is important
    let vname = VaryingName {
        binding: &crate::Binding::Location {
            location,
            interpolation: None,
            sampling: None,
            blend_src,
            per_primitive: false,
        },
        stage: self.entry_point.stage,
        options: VaryingOptions::from_writer_options(self.options, output),
    };
    writeln!(self.out, " {vname};")?;

    // Record the implicit location for reflection (see `Writer::varying`).
    if let Some(location) = io_location {
        self.varying.insert(vname.to_string(), location);
    }

    Ok(())
}
1171
/// Helper method used to write functions (both entry points and regular functions)
///
/// Entry points are written as `void main(...)`: their Naga arguments are
/// not written as GLSL parameters but are instead composed inside the body
/// from the corresponding `in` varyings (see the entry-point section below).
///
/// # Notes
/// Adds a newline
fn write_function(
    &mut self,
    ty: back::FunctionType,
    func: &crate::Function,
    info: &valid::FunctionInfo,
) -> BackendResult {
    // Create a function context for the function being written
    let ctx = back::FunctionCtx {
        ty,
        info,
        expressions: &func.expressions,
        named_expressions: &func.named_expressions,
    };

    // Reset per-function naming state and compute which expressions must be
    // baked into named temporaries for this function.
    self.named_expressions.clear();
    self.update_expressions_to_bake(func, info);

    // Write the function header
    //
    // glsl headers are the same as in c:
    // `ret_type name(args)`
    // `ret_type` is the return type
    // `name` is the function name
    // `args` is a comma separated list of `type name`
    // | - `type` is the argument type
    // | - `name` is the argument name

    // Start by writing the return type if any otherwise write void
    // This is the only place where `void` is a valid type
    // (though it's more a keyword than a type)
    if let back::FunctionType::EntryPoint(_) = ctx.ty {
        write!(self.out, "void")?;
    } else if let Some(ref result) = func.result {
        self.write_type(result.ty)?;
        if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
            self.write_array_size(base, size)?
        }
    } else {
        write!(self.out, "void")?;
    }

    // Write the function name and open parentheses for the argument list
    let function_name = match ctx.ty {
        back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
        back::FunctionType::EntryPoint(_) => "main",
    };
    write!(self.out, " {function_name}(")?;

    // Write the comma separated argument list
    //
    // We need access to `Self` here so we use the reference passed to the closure as an
    // argument instead of capturing as that would cause a borrow checker error
    //
    // Sampler arguments are filtered out and never written as parameters;
    // the `Statement::Call` handling in `write_stmt` applies the matching
    // filter when passing call arguments.
    let arguments = match ctx.ty {
        back::FunctionType::EntryPoint(_) => &[][..],
        back::FunctionType::Function(_) => &func.arguments,
    };
    let arguments: Vec<_> = arguments
        .iter()
        .enumerate()
        .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
            TypeInner::Sampler { .. } => false,
            _ => true,
        })
        .collect();
    self.write_slice(&arguments, |this, _, &(i, arg)| {
        // Write the argument type
        match this.module.types[arg.ty].inner {
            // We treat images separately because they might require
            // writing the storage format
            TypeInner::Image {
                dim,
                arrayed,
                class,
            } => {
                // Write the storage format if needed
                if let TypeInner::Image {
                    class: crate::ImageClass::Storage { format, .. },
                    ..
                } = this.module.types[arg.ty].inner
                {
                    write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
                }

                // write the type
                //
                // This is why we need the leading space because `write_image_type` doesn't add
                // any spaces at the beginning or end
                this.write_image_type(dim, arrayed, class)?;
            }
            TypeInner::Pointer { base, .. } => {
                // write parameter qualifiers
                // Pointers become `inout` parameters of the pointee type.
                write!(this.out, "inout ")?;
                this.write_type(base)?;
            }
            // All other types are written by `write_type`
            _ => {
                this.write_type(arg.ty)?;
            }
        }

        // Write the argument name
        // The leading space is important
        write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;

        // Write array size
        match this.module.types[arg.ty].inner {
            TypeInner::Array { base, size, .. } => {
                this.write_array_size(base, size)?;
            }
            TypeInner::Pointer { base, .. } => {
                if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
                    this.write_array_size(base, size)?;
                }
            }
            _ => {}
        }

        Ok(())
    })?;

    // Close the parentheses and open braces to start the function body
    writeln!(self.out, ") {{")?;

    if self.options.zero_initialize_workgroup_memory
        && ctx.ty.is_compute_like_entry_point(self.module)
    {
        self.write_workgroup_variables_initialization(&ctx)?;
    }

    // Compose the function arguments from globals, in case of an entry point.
    //
    // Struct arguments are composed member-by-member from the corresponding
    // `in` varyings via a constructor call; other arguments are assigned
    // from a single varying.
    if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
        let stage = self.module.entry_points[ep_index as usize].stage;
        for (index, arg) in func.arguments.iter().enumerate() {
            write!(self.out, "{}", back::INDENT)?;
            self.write_type(arg.ty)?;
            let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
            write!(self.out, " {name}")?;
            write!(self.out, " = ")?;
            match self.module.types[arg.ty].inner {
                TypeInner::Struct { ref members, .. } => {
                    self.write_type(arg.ty)?;
                    write!(self.out, "(")?;
                    for (index, member) in members.iter().enumerate() {
                        let varying_name = VaryingName {
                            binding: member.binding.as_ref().unwrap(),
                            stage,
                            options: VaryingOptions::from_writer_options(self.options, false),
                        };
                        if index != 0 {
                            write!(self.out, ", ")?;
                        }
                        write!(self.out, "{varying_name}")?;
                    }
                    writeln!(self.out, ");")?;
                }
                _ => {
                    let varying_name = VaryingName {
                        binding: arg.binding.as_ref().unwrap(),
                        stage,
                        options: VaryingOptions::from_writer_options(self.options, false),
                    };
                    writeln!(self.out, "{varying_name};")?;
                }
            }
        }
    }

    // Write all function locals
    // Locals are `type name (= init)?;` where the init part (including the =) are optional
    //
    // Always adds a newline
    for (handle, local) in func.local_variables.iter() {
        // Write indentation (only for readability) and the type
        // `write_type` adds no trailing space
        write!(self.out, "{}", back::INDENT)?;
        self.write_type(local.ty)?;

        // Write the local name
        // The leading space is important
        write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
        // Write size for array type
        if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
            self.write_array_size(base, size)?;
        }
        // Write the local initializer if needed
        if let Some(init) = local.init {
            // Put the equal signal only if there's a initializer
            // The leading and trailing spaces aren't needed but help with readability
            write!(self.out, " = ")?;

            // Write the constant
            // `write_constant` adds no trailing or leading space/newline
            self.write_expr(init, &ctx)?;
        } else if is_value_init_supported(self.module, local.ty) {
            // No explicit initializer: zero-initialize when the type allows it.
            write!(self.out, " = ")?;
            self.write_zero_init_value(local.ty)?;
        }

        // Finish the local with `;` and add a newline (only for readability)
        writeln!(self.out, ";")?
    }

    // Write the function body (statement list)
    for sta in func.body.iter() {
        // Write a statement, the indentation should always be 1 when writing the function body
        // `write_stmt` adds a newline
        self.write_stmt(sta, &ctx, back::Level(1))?;
    }

    // Close braces and add a newline
    writeln!(self.out, "}}")?;

    Ok(())
}
1390
1391 fn write_workgroup_variables_initialization(
1392 &mut self,
1393 ctx: &back::FunctionCtx,
1394 ) -> BackendResult {
1395 let mut vars = self
1396 .module
1397 .global_variables
1398 .iter()
1399 .filter(|&(handle, var)| {
1400 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1401 })
1402 .peekable();
1403
1404 if vars.peek().is_some() {
1405 let level = back::Level(1);
1406
1407 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1408
1409 for (handle, var) in vars {
1410 let name = &self.names[&NameKey::GlobalVariable(handle)];
1411 write!(self.out, "{}{} = ", level.next(), name)?;
1412 self.write_zero_init_value(var.ty)?;
1413 writeln!(self.out, ";")?;
1414 }
1415
1416 writeln!(self.out, "{level}}}")?;
1417 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1418 }
1419
1420 Ok(())
1421 }
1422
1423 /// Write a list of comma separated `T` values using a writer function `F`.
1424 ///
1425 /// The writer function `F` receives a mutable reference to `self` that if needed won't cause
1426 /// borrow checker issues (using for example a closure with `self` will cause issues), the
1427 /// second argument is the 0 based index of the element on the list, and the last element is
1428 /// a reference to the element `T` being written
1429 ///
1430 /// # Notes
1431 /// - Adds no newlines or leading/trailing whitespace
1432 /// - The last element won't have a trailing `,`
1433 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
1434 &mut self,
1435 data: &[T],
1436 mut f: F,
1437 ) -> BackendResult {
1438 // Loop through `data` invoking `f` for each element
1439 for (index, item) in data.iter().enumerate() {
1440 if index != 0 {
1441 write!(self.out, ", ")?;
1442 }
1443 f(self, index as u32, item)?;
1444 }
1445
1446 Ok(())
1447 }
1448
1449 /// Helper method used to write global constants
1450 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
1451 write!(self.out, "const ")?;
1452 let constant = &self.module.constants[handle];
1453 self.write_type(constant.ty)?;
1454 let name = &self.names[&NameKey::Constant(handle)];
1455 write!(self.out, " {name}")?;
1456 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
1457 self.write_array_size(base, size)?;
1458 }
1459 write!(self.out, " = ")?;
1460 self.write_const_expr(constant.init, &self.module.global_expressions)?;
1461 writeln!(self.out, ";")?;
1462 Ok(())
1463 }
1464
1465 /// Helper method used to output a dot product as an arithmetic expression
1466 ///
1467 fn write_dot_product(
1468 &mut self,
1469 arg: Handle<crate::Expression>,
1470 arg1: Handle<crate::Expression>,
1471 size: usize,
1472 ctx: &back::FunctionCtx,
1473 ) -> BackendResult {
1474 // Write parentheses around the dot product expression to prevent operators
1475 // with different precedences from applying earlier.
1476 write!(self.out, "(")?;
1477
1478 // Cycle through all the components of the vector
1479 for index in 0..size {
1480 let component = back::COMPONENTS[index];
1481 // Write the addition to the previous product
1482 // This will print an extra '+' at the beginning but that is fine in glsl
1483 write!(self.out, " + ")?;
1484 // Write the first vector expression, this expression is marked to be
1485 // cached so unless it can't be cached (for example, it's a Constant)
1486 // it shouldn't produce large expressions.
1487 self.write_expr(arg, ctx)?;
1488 // Access the current component on the first vector
1489 write!(self.out, ".{component} * ")?;
1490 // Write the second vector expression, this expression is marked to be
1491 // cached so unless it can't be cached (for example, it's a Constant)
1492 // it shouldn't produce large expressions.
1493 self.write_expr(arg1, ctx)?;
1494 // Access the current component on the second vector
1495 write!(self.out, ".{component}")?;
1496 }
1497
1498 write!(self.out, ")")?;
1499 Ok(())
1500 }
1501
1502 /// Helper method used to write structs
1503 ///
1504 /// # Notes
1505 /// Ends in a newline
1506 fn write_struct_body(
1507 &mut self,
1508 handle: Handle<crate::Type>,
1509 members: &[crate::StructMember],
1510 ) -> BackendResult {
1511 // glsl structs are written as in C
1512 // `struct name() { members };`
1513 // | `struct` is a keyword
1514 // | `name` is the struct name
1515 // | `members` is a semicolon separated list of `type name`
1516 // | `type` is the member type
1517 // | `name` is the member name
1518 writeln!(self.out, "{{")?;
1519
1520 for (idx, member) in members.iter().enumerate() {
1521 // The indentation is only for readability
1522 write!(self.out, "{}", back::INDENT)?;
1523
1524 match self.module.types[member.ty].inner {
1525 TypeInner::Array {
1526 base,
1527 size,
1528 stride: _,
1529 } => {
1530 self.write_type(base)?;
1531 write!(
1532 self.out,
1533 " {}",
1534 &self.names[&NameKey::StructMember(handle, idx as u32)]
1535 )?;
1536 // Write [size]
1537 self.write_array_size(base, size)?;
1538 // Newline is important
1539 writeln!(self.out, ";")?;
1540 }
1541 _ => {
1542 // Write the member type
1543 // Adds no trailing space
1544 self.write_type(member.ty)?;
1545
1546 // Write the member name and put a semicolon
1547 // The leading space is important
1548 // All members must have a semicolon even the last one
1549 writeln!(
1550 self.out,
1551 " {};",
1552 &self.names[&NameKey::StructMember(handle, idx as u32)]
1553 )?;
1554 }
1555 }
1556 }
1557
1558 write!(self.out, "}}")?;
1559 Ok(())
1560 }
1561
1562 /// Helper method used to write statements
1563 ///
1564 /// # Notes
1565 /// Always adds a newline
1566 fn write_stmt(
1567 &mut self,
1568 sta: &crate::Statement,
1569 ctx: &back::FunctionCtx,
1570 level: back::Level,
1571 ) -> BackendResult {
1572 use crate::Statement;
1573
1574 match *sta {
1575 // This is where we can generate intermediate constants for some expression types.
1576 Statement::Emit(ref range) => {
1577 for handle in range.clone() {
1578 let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
1579 let expr_name = if ptr_class.is_some() {
1580 // GLSL can't save a pointer-valued expression in a variable,
1581 // but we shouldn't ever need to: they should never be named expressions,
1582 // and none of the expression types flagged by bake_ref_count can be pointer-valued.
1583 None
1584 } else if let Some(name) = ctx.named_expressions.get(&handle) {
1585 // Front end provides names for all variables at the start of writing.
1586 // But we write them to step by step. We need to recache them
1587 // Otherwise, we could accidentally write variable name instead of full expression.
1588 // Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
1589 Some(self.namer.call(name))
1590 } else if self.need_bake_expressions.contains(&handle) {
1591 Some(Baked(handle).to_string())
1592 } else {
1593 None
1594 };
1595
1596 // If we are going to write an `ImageLoad` next and the target image
1597 // is sampled and we are using the `Restrict` policy for bounds
1598 // checking images we need to write a local holding the clamped lod.
1599 if let crate::Expression::ImageLoad {
1600 image,
1601 level: Some(level_expr),
1602 ..
1603 } = ctx.expressions[handle]
1604 {
1605 if let TypeInner::Image {
1606 class: crate::ImageClass::Sampled { .. },
1607 ..
1608 } = *ctx.resolve_type(image, &self.module.types)
1609 {
1610 if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
1611 write!(self.out, "{level}")?;
1612 self.write_clamped_lod(ctx, handle, image, level_expr)?
1613 }
1614 }
1615 }
1616
1617 if let Some(name) = expr_name {
1618 write!(self.out, "{level}")?;
1619 self.write_named_expr(handle, name, handle, ctx)?;
1620 }
1621 }
1622 }
1623 // Blocks are simple we just need to write the block statements between braces
1624 // We could also just print the statements but this is more readable and maps more
1625 // closely to the IR
1626 Statement::Block(ref block) => {
1627 write!(self.out, "{level}")?;
1628 writeln!(self.out, "{{")?;
1629 for sta in block.iter() {
1630 // Increase the indentation to help with readability
1631 self.write_stmt(sta, ctx, level.next())?
1632 }
1633 writeln!(self.out, "{level}}}")?
1634 }
1635 // Ifs are written as in C:
1636 // ```
1637 // if(condition) {
1638 // accept
1639 // } else {
1640 // reject
1641 // }
1642 // ```
1643 Statement::If {
1644 condition,
1645 ref accept,
1646 ref reject,
1647 } => {
1648 write!(self.out, "{level}")?;
1649 write!(self.out, "if (")?;
1650 self.write_expr(condition, ctx)?;
1651 writeln!(self.out, ") {{")?;
1652
1653 for sta in accept {
1654 // Increase indentation to help with readability
1655 self.write_stmt(sta, ctx, level.next())?;
1656 }
1657
1658 // If there are no statements in the reject block we skip writing it
1659 // This is only for readability
1660 if !reject.is_empty() {
1661 writeln!(self.out, "{level}}} else {{")?;
1662
1663 for sta in reject {
1664 // Increase indentation to help with readability
1665 self.write_stmt(sta, ctx, level.next())?;
1666 }
1667 }
1668
1669 writeln!(self.out, "{level}}}")?
1670 }
1671 // Switch are written as in C:
1672 // ```
1673 // switch (selector) {
1674 // // Fallthrough
1675 // case label:
1676 // block
1677 // // Non fallthrough
1678 // case label:
1679 // block
1680 // break;
1681 // default:
1682 // block
1683 // }
1684 // ```
1685 // Where the `default` case happens isn't important but we put it last
1686 // so that we don't need to print a `break` for it
1687 Statement::Switch {
1688 selector,
1689 ref cases,
1690 } => {
1691 let l2 = level.next();
1692 // Some GLSL consumers may not handle switches with a single
1693 // body correctly: See wgpu#4514. Write such switch statements
1694 // as a `do {} while(false);` loop instead.
1695 //
1696 // Since doing so may inadvertently capture `continue`
1697 // statements in the switch body, we must apply continue
1698 // forwarding. See the `naga::back::continue_forward` module
1699 // docs for details.
1700 let one_body = cases
1701 .iter()
1702 .rev()
1703 .skip(1)
1704 .all(|case| case.fall_through && case.body.is_empty());
1705 if one_body {
1706 // Unlike HLSL, in GLSL `continue_ctx` only needs to know
1707 // about [`Switch`] statements that are being rendered as
1708 // `do-while` loops.
1709 if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
1710 writeln!(self.out, "{level}bool {variable} = false;",)?;
1711 };
1712 writeln!(self.out, "{level}do {{")?;
1713 // Note: Expressions have no side-effects so we don't need to emit selector expression.
1714
1715 // Body
1716 if let Some(case) = cases.last() {
1717 for sta in case.body.iter() {
1718 self.write_stmt(sta, ctx, l2)?;
1719 }
1720 }
1721 // End do-while
1722 writeln!(self.out, "{level}}} while(false);")?;
1723
1724 // Handle any forwarded continue statements.
1725 use back::continue_forward::ExitControlFlow;
1726 let op = match self.continue_ctx.exit_switch() {
1727 ExitControlFlow::None => None,
1728 ExitControlFlow::Continue { variable } => Some(("continue", variable)),
1729 ExitControlFlow::Break { variable } => Some(("break", variable)),
1730 };
1731 if let Some((control_flow, variable)) = op {
1732 writeln!(self.out, "{level}if ({variable}) {{")?;
1733 writeln!(self.out, "{l2}{control_flow};")?;
1734 writeln!(self.out, "{level}}}")?;
1735 }
1736 } else {
1737 // Start the switch
1738 write!(self.out, "{level}")?;
1739 write!(self.out, "switch(")?;
1740 self.write_expr(selector, ctx)?;
1741 writeln!(self.out, ") {{")?;
1742
1743 // Write all cases
1744 for case in cases {
1745 match case.value {
1746 crate::SwitchValue::I32(value) => {
1747 write!(self.out, "{l2}case {value}:")?
1748 }
1749 crate::SwitchValue::U32(value) => {
1750 write!(self.out, "{l2}case {value}u:")?
1751 }
1752 crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
1753 }
1754
1755 let write_block_braces = !(case.fall_through && case.body.is_empty());
1756 if write_block_braces {
1757 writeln!(self.out, " {{")?;
1758 } else {
1759 writeln!(self.out)?;
1760 }
1761
1762 for sta in case.body.iter() {
1763 self.write_stmt(sta, ctx, l2.next())?;
1764 }
1765
1766 if !case.fall_through && case.body.last().is_none_or(|s| !s.is_terminator())
1767 {
1768 writeln!(self.out, "{}break;", l2.next())?;
1769 }
1770
1771 if write_block_braces {
1772 writeln!(self.out, "{l2}}}")?;
1773 }
1774 }
1775
1776 writeln!(self.out, "{level}}}")?
1777 }
1778 }
1779 // Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
1780 // while true loop and appending the continuing block to the body resulting on:
1781 // ```
1782 // bool loop_init = true;
1783 // while(true) {
1784 // if (!loop_init) { <continuing> }
1785 // loop_init = false;
1786 // <body>
1787 // }
1788 // ```
1789 Statement::Loop {
1790 ref body,
1791 ref continuing,
1792 break_if,
1793 } => {
1794 self.continue_ctx.enter_loop();
1795 if !continuing.is_empty() || break_if.is_some() {
1796 let gate_name = self.namer.call("loop_init");
1797 writeln!(self.out, "{level}bool {gate_name} = true;")?;
1798 writeln!(self.out, "{level}while(true) {{")?;
1799 let l2 = level.next();
1800 let l3 = l2.next();
1801 writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
1802 for sta in continuing {
1803 self.write_stmt(sta, ctx, l3)?;
1804 }
1805 if let Some(condition) = break_if {
1806 write!(self.out, "{l3}if (")?;
1807 self.write_expr(condition, ctx)?;
1808 writeln!(self.out, ") {{")?;
1809 writeln!(self.out, "{}break;", l3.next())?;
1810 writeln!(self.out, "{l3}}}")?;
1811 }
1812 writeln!(self.out, "{l2}}}")?;
1813 writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
1814 } else {
1815 writeln!(self.out, "{level}while(true) {{")?;
1816 }
1817 for sta in body {
1818 self.write_stmt(sta, ctx, level.next())?;
1819 }
1820 writeln!(self.out, "{level}}}")?;
1821 self.continue_ctx.exit_loop();
1822 }
1823 // Break, continue and return as written as in C
1824 // `break;`
1825 Statement::Break => {
1826 write!(self.out, "{level}")?;
1827 writeln!(self.out, "break;")?
1828 }
1829 // `continue;`
1830 Statement::Continue => {
1831 // Sometimes we must render a `Continue` statement as a `break`.
1832 // See the docs for the `back::continue_forward` module.
1833 if let Some(variable) = self.continue_ctx.continue_encountered() {
1834 writeln!(self.out, "{level}{variable} = true;",)?;
1835 writeln!(self.out, "{level}break;")?
1836 } else {
1837 writeln!(self.out, "{level}continue;")?
1838 }
1839 }
1840 // `return expr;`, `expr` is optional
1841 Statement::Return { value } => {
1842 write!(self.out, "{level}")?;
1843 match ctx.ty {
1844 back::FunctionType::Function(_) => {
1845 write!(self.out, "return")?;
1846 // Write the expression to be returned if needed
1847 if let Some(expr) = value {
1848 write!(self.out, " ")?;
1849 self.write_expr(expr, ctx)?;
1850 }
1851 writeln!(self.out, ";")?;
1852 }
1853 back::FunctionType::EntryPoint(ep_index) => {
1854 let mut has_point_size = false;
1855 let ep = &self.module.entry_points[ep_index as usize];
1856 if let Some(ref result) = ep.function.result {
1857 let value = value.unwrap();
1858 match self.module.types[result.ty].inner {
1859 TypeInner::Struct { ref members, .. } => {
1860 let temp_struct_name = match ctx.expressions[value] {
1861 crate::Expression::Compose { .. } => {
1862 let return_struct = "_tmp_return";
1863 write!(
1864 self.out,
1865 "{} {} = ",
1866 &self.names[&NameKey::Type(result.ty)],
1867 return_struct
1868 )?;
1869 self.write_expr(value, ctx)?;
1870 writeln!(self.out, ";")?;
1871 write!(self.out, "{level}")?;
1872 Some(return_struct)
1873 }
1874 _ => None,
1875 };
1876
1877 for (index, member) in members.iter().enumerate() {
1878 if let Some(crate::Binding::BuiltIn(
1879 crate::BuiltIn::PointSize,
1880 )) = member.binding
1881 {
1882 has_point_size = true;
1883 }
1884
1885 let varying_name = VaryingName {
1886 binding: member.binding.as_ref().unwrap(),
1887 stage: ep.stage,
1888 options: VaryingOptions::from_writer_options(
1889 self.options,
1890 true,
1891 ),
1892 };
1893 write!(self.out, "{varying_name} = ")?;
1894
1895 if let Some(struct_name) = temp_struct_name {
1896 write!(self.out, "{struct_name}")?;
1897 } else {
1898 self.write_expr(value, ctx)?;
1899 }
1900
1901 // Write field name
1902 writeln!(
1903 self.out,
1904 ".{};",
1905 &self.names
1906 [&NameKey::StructMember(result.ty, index as u32)]
1907 )?;
1908 write!(self.out, "{level}")?;
1909 }
1910 }
1911 _ => {
1912 let name = VaryingName {
1913 binding: result.binding.as_ref().unwrap(),
1914 stage: ep.stage,
1915 options: VaryingOptions::from_writer_options(
1916 self.options,
1917 true,
1918 ),
1919 };
1920 write!(self.out, "{name} = ")?;
1921 self.write_expr(value, ctx)?;
1922 writeln!(self.out, ";")?;
1923 write!(self.out, "{level}")?;
1924 }
1925 }
1926 }
1927
1928 let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
1929 == ShaderStage::Vertex;
1930 if is_vertex_stage
1931 && self
1932 .options
1933 .writer_flags
1934 .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
1935 {
1936 writeln!(
1937 self.out,
1938 "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
1939 )?;
1940 write!(self.out, "{level}")?;
1941 }
1942
1943 if is_vertex_stage
1944 && self
1945 .options
1946 .writer_flags
1947 .contains(WriterFlags::FORCE_POINT_SIZE)
1948 && !has_point_size
1949 {
1950 writeln!(self.out, "gl_PointSize = 1.0;")?;
1951 write!(self.out, "{level}")?;
1952 }
1953 writeln!(self.out, "return;")?;
1954 }
1955 }
1956 }
1957 // This is one of the places were glsl adds to the syntax of C in this case the discard
1958 // keyword which ceases all further processing in a fragment shader, it's called OpKill
1959 // in spir-v that's why it's called `Statement::Kill`
1960 Statement::Kill => writeln!(self.out, "{level}discard;")?,
1961 Statement::ControlBarrier(flags) => {
1962 self.write_control_barrier(flags, level)?;
1963 }
1964 Statement::MemoryBarrier(flags) => {
1965 self.write_memory_barrier(flags, level)?;
1966 }
1967 // Stores in glsl are just variable assignments written as `pointer = value;`
1968 Statement::Store { pointer, value } => {
1969 write!(self.out, "{level}")?;
1970 self.write_expr(pointer, ctx)?;
1971 write!(self.out, " = ")?;
1972 self.write_expr(value, ctx)?;
1973 writeln!(self.out, ";")?
1974 }
1975 Statement::WorkGroupUniformLoad { pointer, result } => {
1976 // GLSL doesn't have pointers, which means that this backend needs to ensure that
1977 // the actual "loading" is happening between the two barriers.
1978 // This is done in `Emit` by never emitting a variable name for pointer variables
1979 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1980
1981 let result_name = Baked(result).to_string();
1982 write!(self.out, "{level}")?;
1983 // Expressions cannot have side effects, so just writing the expression here is fine.
1984 self.write_named_expr(pointer, result_name, result, ctx)?;
1985
1986 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1987 }
1988 // Stores a value into an image.
1989 Statement::ImageStore {
1990 image,
1991 coordinate,
1992 array_index,
1993 value,
1994 } => {
1995 write!(self.out, "{level}")?;
1996 self.write_image_store(ctx, image, coordinate, array_index, value)?
1997 }
1998 // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
1999 Statement::Call {
2000 function,
2001 ref arguments,
2002 result,
2003 } => {
2004 write!(self.out, "{level}")?;
2005 if let Some(expr) = result {
2006 let name = Baked(expr).to_string();
2007 let result = self.module.functions[function].result.as_ref().unwrap();
2008 self.write_type(result.ty)?;
2009 write!(self.out, " {name}")?;
2010 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
2011 {
2012 self.write_array_size(base, size)?
2013 }
2014 write!(self.out, " = ")?;
2015 self.named_expressions.insert(expr, name);
2016 }
2017 write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
2018 let arguments: Vec<_> = arguments
2019 .iter()
2020 .enumerate()
2021 .filter_map(|(i, arg)| {
2022 let arg_ty = self.module.functions[function].arguments[i].ty;
2023 match self.module.types[arg_ty].inner {
2024 TypeInner::Sampler { .. } => None,
2025 _ => Some(*arg),
2026 }
2027 })
2028 .collect();
2029 self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
2030 writeln!(self.out, ");")?
2031 }
2032 Statement::Atomic {
2033 pointer,
2034 ref fun,
2035 value,
2036 result,
2037 } => {
2038 write!(self.out, "{level}")?;
2039
2040 match *fun {
2041 crate::AtomicFunction::Exchange {
2042 compare: Some(compare_expr),
2043 } => {
2044 let result_handle = result.expect("CompareExchange must have a result");
2045 let res_name = Baked(result_handle).to_string();
2046 self.write_type(ctx.info[result_handle].ty.handle().unwrap())?;
2047 write!(self.out, " {res_name};")?;
2048 write!(self.out, " {res_name}.old_value = atomicCompSwap(")?;
2049 self.write_expr(pointer, ctx)?;
2050 write!(self.out, ", ")?;
2051 self.write_expr(compare_expr, ctx)?;
2052 write!(self.out, ", ")?;
2053 self.write_expr(value, ctx)?;
2054 writeln!(self.out, ");")?;
2055
2056 write!(
2057 self.out,
2058 "{level}{res_name}.exchanged = ({res_name}.old_value == "
2059 )?;
2060 self.write_expr(compare_expr, ctx)?;
2061 writeln!(self.out, ");")?;
2062 self.named_expressions.insert(result_handle, res_name);
2063 }
2064 _ => {
2065 if let Some(result) = result {
2066 let res_name = Baked(result).to_string();
2067 self.write_type(ctx.info[result].ty.handle().unwrap())?;
2068 write!(self.out, " {res_name} = ")?;
2069 self.named_expressions.insert(result, res_name);
2070 }
2071 let fun_str = fun.to_glsl();
2072 write!(self.out, "atomic{fun_str}(")?;
2073 self.write_expr(pointer, ctx)?;
2074 write!(self.out, ", ")?;
2075 if let crate::AtomicFunction::Subtract = *fun {
2076 // Emulate `atomicSub` with `atomicAdd` by negating the value.
2077 write!(self.out, "-")?;
2078 }
2079 self.write_expr(value, ctx)?;
2080 writeln!(self.out, ");")?;
2081 }
2082 }
2083 }
2084 // Stores a value into an image.
2085 Statement::ImageAtomic {
2086 image,
2087 coordinate,
2088 array_index,
2089 fun,
2090 value,
2091 } => {
2092 write!(self.out, "{level}")?;
2093 self.write_image_atomic(ctx, image, coordinate, array_index, fun, value)?
2094 }
2095 Statement::RayQuery { .. } => unreachable!(),
2096 Statement::SubgroupBallot { result, predicate } => {
2097 write!(self.out, "{level}")?;
2098 let res_name = Baked(result).to_string();
2099 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2100 self.write_value_type(res_ty)?;
2101 write!(self.out, " {res_name} = ")?;
2102 self.named_expressions.insert(result, res_name);
2103
2104 write!(self.out, "subgroupBallot(")?;
2105 match predicate {
2106 Some(predicate) => self.write_expr(predicate, ctx)?,
2107 None => write!(self.out, "true")?,
2108 }
2109 writeln!(self.out, ");")?;
2110 }
2111 Statement::SubgroupCollectiveOperation {
2112 op,
2113 collective_op,
2114 argument,
2115 result,
2116 } => {
2117 write!(self.out, "{level}")?;
2118 let res_name = Baked(result).to_string();
2119 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2120 self.write_value_type(res_ty)?;
2121 write!(self.out, " {res_name} = ")?;
2122 self.named_expressions.insert(result, res_name);
2123
2124 match (collective_op, op) {
2125 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
2126 write!(self.out, "subgroupAll(")?
2127 }
2128 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
2129 write!(self.out, "subgroupAny(")?
2130 }
2131 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
2132 write!(self.out, "subgroupAdd(")?
2133 }
2134 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
2135 write!(self.out, "subgroupMul(")?
2136 }
2137 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
2138 write!(self.out, "subgroupMax(")?
2139 }
2140 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
2141 write!(self.out, "subgroupMin(")?
2142 }
2143 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
2144 write!(self.out, "subgroupAnd(")?
2145 }
2146 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
2147 write!(self.out, "subgroupOr(")?
2148 }
2149 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
2150 write!(self.out, "subgroupXor(")?
2151 }
2152 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
2153 write!(self.out, "subgroupExclusiveAdd(")?
2154 }
2155 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
2156 write!(self.out, "subgroupExclusiveMul(")?
2157 }
2158 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
2159 write!(self.out, "subgroupInclusiveAdd(")?
2160 }
2161 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
2162 write!(self.out, "subgroupInclusiveMul(")?
2163 }
2164 _ => unimplemented!(),
2165 }
2166 self.write_expr(argument, ctx)?;
2167 writeln!(self.out, ");")?;
2168 }
2169 Statement::SubgroupGather {
2170 mode,
2171 argument,
2172 result,
2173 } => {
2174 write!(self.out, "{level}")?;
2175 let res_name = Baked(result).to_string();
2176 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2177 self.write_value_type(res_ty)?;
2178 write!(self.out, " {res_name} = ")?;
2179 self.named_expressions.insert(result, res_name);
2180
2181 match mode {
2182 crate::GatherMode::BroadcastFirst => {
2183 write!(self.out, "subgroupBroadcastFirst(")?;
2184 }
2185 crate::GatherMode::Broadcast(_) => {
2186 write!(self.out, "subgroupBroadcast(")?;
2187 }
2188 crate::GatherMode::Shuffle(_) => {
2189 write!(self.out, "subgroupShuffle(")?;
2190 }
2191 crate::GatherMode::ShuffleDown(_) => {
2192 write!(self.out, "subgroupShuffleDown(")?;
2193 }
2194 crate::GatherMode::ShuffleUp(_) => {
2195 write!(self.out, "subgroupShuffleUp(")?;
2196 }
2197 crate::GatherMode::ShuffleXor(_) => {
2198 write!(self.out, "subgroupShuffleXor(")?;
2199 }
2200 crate::GatherMode::QuadBroadcast(_) => {
2201 write!(self.out, "subgroupQuadBroadcast(")?;
2202 }
2203 crate::GatherMode::QuadSwap(direction) => match direction {
2204 crate::Direction::X => {
2205 write!(self.out, "subgroupQuadSwapHorizontal(")?;
2206 }
2207 crate::Direction::Y => {
2208 write!(self.out, "subgroupQuadSwapVertical(")?;
2209 }
2210 crate::Direction::Diagonal => {
2211 write!(self.out, "subgroupQuadSwapDiagonal(")?;
2212 }
2213 },
2214 }
2215 self.write_expr(argument, ctx)?;
2216 match mode {
2217 crate::GatherMode::BroadcastFirst => {}
2218 crate::GatherMode::Broadcast(index)
2219 | crate::GatherMode::Shuffle(index)
2220 | crate::GatherMode::ShuffleDown(index)
2221 | crate::GatherMode::ShuffleUp(index)
2222 | crate::GatherMode::ShuffleXor(index)
2223 | crate::GatherMode::QuadBroadcast(index) => {
2224 write!(self.out, ", ")?;
2225 self.write_expr(index, ctx)?;
2226 }
2227 crate::GatherMode::QuadSwap(_) => {}
2228 }
2229 writeln!(self.out, ");")?;
2230 }
2231 }
2232
2233 Ok(())
2234 }
2235
2236 /// Write a const expression.
2237 ///
2238 /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
2239 /// constant expression arena, as GLSL expression.
2240 ///
2241 /// # Notes
2242 /// Adds no newlines or leading/trailing whitespace
2243 ///
2244 /// [`Expression`]: crate::Expression
2245 /// [`Module`]: crate::Module
2246 fn write_const_expr(
2247 &mut self,
2248 expr: Handle<crate::Expression>,
2249 arena: &crate::Arena<crate::Expression>,
2250 ) -> BackendResult {
2251 self.write_possibly_const_expr(
2252 expr,
2253 arena,
2254 |expr| &self.info[expr],
2255 |writer, expr| writer.write_const_expr(expr, arena),
2256 )
2257 }
2258
2259 /// Write [`Expression`] variants that can occur in both runtime and const expressions.
2260 ///
2261 /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
2262 /// as as GLSL expression. This must be one of the [`Expression`] variants
2263 /// that is allowed to occur in constant expressions.
2264 ///
2265 /// Use `write_expression` to write subexpressions.
2266 ///
2267 /// This is the common code for `write_expr`, which handles arbitrary
2268 /// runtime expressions, and `write_const_expr`, which only handles
2269 /// const-expressions. Each of those callers passes itself (essentially) as
2270 /// the `write_expression` callback, so that subexpressions are restricted
2271 /// to the appropriate variants.
2272 ///
2273 /// # Notes
2274 /// Adds no newlines or leading/trailing whitespace
2275 ///
2276 /// [`Expression`]: crate::Expression
2277 fn write_possibly_const_expr<'w, I, E>(
2278 &'w mut self,
2279 expr: Handle<crate::Expression>,
2280 expressions: &crate::Arena<crate::Expression>,
2281 info: I,
2282 write_expression: E,
2283 ) -> BackendResult
2284 where
2285 I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
2286 E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
2287 {
2288 use crate::Expression;
2289
2290 match expressions[expr] {
2291 Expression::Literal(literal) => {
2292 match literal {
2293 // Floats are written using `Debug` instead of `Display` because it always appends the
2294 // decimal part even it's zero which is needed for a valid glsl float constant
2295 crate::Literal::F64(value) => write!(self.out, "{value:?}LF")?,
2296 crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
2297 crate::Literal::F16(_) => {
2298 return Err(Error::Custom("GLSL has no 16-bit float type".into()));
2299 }
2300 // Unsigned integers need a `u` at the end
2301 //
2302 // While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
2303 // always write it as the extra branch wouldn't have any benefit in readability
2304 crate::Literal::U32(value) => write!(self.out, "{value}u")?,
2305 crate::Literal::I32(value) => write!(self.out, "{value}")?,
2306 crate::Literal::Bool(value) => write!(self.out, "{value}")?,
2307 crate::Literal::I64(_) => {
2308 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2309 }
2310 crate::Literal::U64(_) => {
2311 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2312 }
2313 crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
2314 return Err(Error::Custom(
2315 "Abstract types should not appear in IR presented to backends".into(),
2316 ));
2317 }
2318 }
2319 }
2320 Expression::Constant(handle) => {
2321 let constant = &self.module.constants[handle];
2322 if constant.name.is_some() {
2323 write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
2324 } else {
2325 self.write_const_expr(constant.init, &self.module.global_expressions)?;
2326 }
2327 }
2328 Expression::ZeroValue(ty) => {
2329 self.write_zero_init_value(ty)?;
2330 }
2331 Expression::Compose { ty, ref components } => {
2332 self.write_type(ty)?;
2333
2334 if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
2335 self.write_array_size(base, size)?;
2336 }
2337
2338 write!(self.out, "(")?;
2339 for (index, component) in components.iter().enumerate() {
2340 if index != 0 {
2341 write!(self.out, ", ")?;
2342 }
2343 write_expression(self, *component)?;
2344 }
2345 write!(self.out, ")")?
2346 }
2347 // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
2348 Expression::Splat { size: _, value } => {
2349 let resolved = info(expr).inner_with(&self.module.types);
2350 self.write_value_type(resolved)?;
2351 write!(self.out, "(")?;
2352 write_expression(self, value)?;
2353 write!(self.out, ")")?
2354 }
2355 _ => {
2356 return Err(Error::Override);
2357 }
2358 }
2359
2360 Ok(())
2361 }
2362
2363 /// Helper method to write expressions
2364 ///
2365 /// # Notes
2366 /// Doesn't add any newlines or leading/trailing spaces
2367 fn write_expr(
2368 &mut self,
2369 expr: Handle<crate::Expression>,
2370 ctx: &back::FunctionCtx,
2371 ) -> BackendResult {
2372 use crate::Expression;
2373
2374 if let Some(name) = self.named_expressions.get(&expr) {
2375 write!(self.out, "{name}")?;
2376 return Ok(());
2377 }
2378
2379 match ctx.expressions[expr] {
2380 Expression::Literal(_)
2381 | Expression::Constant(_)
2382 | Expression::ZeroValue(_)
2383 | Expression::Compose { .. }
2384 | Expression::Splat { .. } => {
2385 self.write_possibly_const_expr(
2386 expr,
2387 ctx.expressions,
2388 |expr| &ctx.info[expr].ty,
2389 |writer, expr| writer.write_expr(expr, ctx),
2390 )?;
2391 }
2392 Expression::Override(_) => return Err(Error::Override),
2393 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2394 Expression::Access { base, index } => {
2395 self.write_expr(base, ctx)?;
2396 write!(self.out, "[")?;
2397 self.write_expr(index, ctx)?;
2398 write!(self.out, "]")?
2399 }
2400 // `AccessIndex` is the same as `Access` except that the index is a constant and it can
2401 // be applied to structs, in this case we need to find the name of the field at that
2402 // index and write `base.field_name`
2403 Expression::AccessIndex { base, index } => {
2404 self.write_expr(base, ctx)?;
2405
2406 let base_ty_res = &ctx.info[base].ty;
2407 let mut resolved = base_ty_res.inner_with(&self.module.types);
2408 let base_ty_handle = match *resolved {
2409 TypeInner::Pointer { base, space: _ } => {
2410 resolved = &self.module.types[base].inner;
2411 Some(base)
2412 }
2413 _ => base_ty_res.handle(),
2414 };
2415
2416 match *resolved {
2417 TypeInner::Vector { .. } => {
2418 // Write vector access as a swizzle
2419 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2420 }
2421 TypeInner::Matrix { .. }
2422 | TypeInner::Array { .. }
2423 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
2424 TypeInner::Struct { .. } => {
2425 // This will never panic in case the type is a `Struct`, this is not true
2426 // for other types so we can only check while inside this match arm
2427 let ty = base_ty_handle.unwrap();
2428
2429 write!(
2430 self.out,
2431 ".{}",
2432 &self.names[&NameKey::StructMember(ty, index)]
2433 )?
2434 }
2435 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
2436 }
2437 }
2438 // `Swizzle` adds a few letters behind the dot.
2439 Expression::Swizzle {
2440 size,
2441 vector,
2442 pattern,
2443 } => {
2444 self.write_expr(vector, ctx)?;
2445 write!(self.out, ".")?;
2446 for &sc in pattern[..size as usize].iter() {
2447 self.out.write_char(back::COMPONENTS[sc as usize])?;
2448 }
2449 }
2450 // Function arguments are written as the argument name
2451 Expression::FunctionArgument(pos) => {
2452 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
2453 }
2454 // Global variables need some special work for their name but
2455 // `get_global_name` does the work for us
2456 Expression::GlobalVariable(handle) => {
2457 let global = &self.module.global_variables[handle];
2458 self.write_global_name(handle, global)?
2459 }
2460 // A local is written as it's name
2461 Expression::LocalVariable(handle) => {
2462 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
2463 }
2464 // glsl has no pointers so there's no load operation, just write the pointer expression
2465 Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
2466 // `ImageSample` is a bit complicated compared to the rest of the IR.
2467 //
2468 // First there are three variations depending whether the sample level is explicitly set,
2469 // if it's automatic or it it's bias:
2470 // `texture(image, coordinate)` - Automatic sample level
2471 // `texture(image, coordinate, bias)` - Bias sample level
2472 // `textureLod(image, coordinate, level)` - Zero or Exact sample level
2473 //
2474 // Furthermore if `depth_ref` is some we need to append it to the coordinate vector
2475 Expression::ImageSample {
2476 image,
2477 sampler: _, //TODO?
2478 gather,
2479 coordinate,
2480 array_index,
2481 offset,
2482 level,
2483 depth_ref,
2484 clamp_to_edge: _,
2485 } => {
2486 let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
2487 TypeInner::Image {
2488 dim,
2489 class,
2490 arrayed,
2491 ..
2492 } => (dim, class, arrayed),
2493 _ => unreachable!(),
2494 };
2495 let mut err = None;
2496 if dim == crate::ImageDimension::Cube {
2497 if offset.is_some() {
2498 err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
2499 }
2500 if arrayed
2501 && matches!(class, crate::ImageClass::Depth { .. })
2502 && matches!(level, crate::SampleLevel::Gradient { .. })
2503 {
2504 err = Some("samplerCubeArrayShadow don't support textureGrad");
2505 }
2506 }
2507 if gather.is_some() && level != crate::SampleLevel::Zero {
2508 err = Some("textureGather doesn't support LOD parameters");
2509 }
2510 if let Some(err) = err {
2511 return Err(Error::Custom(String::from(err)));
2512 }
2513
2514 // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
2515 // unless `GL_EXT_texture_shadow_lod` is present.
2516 // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
2517 let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
2518 || (dim == crate::ImageDimension::D2 && arrayed))
2519 && level == crate::SampleLevel::Zero
2520 && matches!(class, crate::ImageClass::Depth { .. })
2521 && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
2522
2523 // Write the function to be used depending on the sample level
2524 let fun_name = match level {
2525 crate::SampleLevel::Zero if gather.is_some() => "textureGather",
2526 crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
2527 crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
2528 crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
2529 crate::SampleLevel::Gradient { .. } => "textureGrad",
2530 };
2531 let offset_name = match offset {
2532 Some(_) => "Offset",
2533 None => "",
2534 };
2535
2536 write!(self.out, "{fun_name}{offset_name}(")?;
2537
2538 // Write the image that will be used
2539 self.write_expr(image, ctx)?;
2540 // The space here isn't required but it helps with readability
2541 write!(self.out, ", ")?;
2542
2543 // TODO: handle clamp_to_edge
2544 // https://github.com/gfx-rs/wgpu/issues/7791
2545
2546 // We need to get the coordinates vector size to later build a vector that's `size + 1`
2547 // if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
2548 let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
2549 TypeInner::Vector { size, .. } => size as u8,
2550 TypeInner::Scalar { .. } => 1,
2551 _ => unreachable!(),
2552 };
2553
2554 if array_index.is_some() {
2555 coord_dim += 1;
2556 }
2557 let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
2558 if merge_depth_ref {
2559 coord_dim += 1;
2560 }
2561
2562 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
2563 let is_vec = tex_1d_hack || coord_dim != 1;
2564 // Compose a new texture coordinates vector
2565 if is_vec {
2566 write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
2567 }
2568 self.write_expr(coordinate, ctx)?;
2569 if tex_1d_hack {
2570 write!(self.out, ", 0.0")?;
2571 }
2572 if let Some(expr) = array_index {
2573 write!(self.out, ", ")?;
2574 self.write_expr(expr, ctx)?;
2575 }
2576 if merge_depth_ref {
2577 write!(self.out, ", ")?;
2578 self.write_expr(depth_ref.unwrap(), ctx)?;
2579 }
2580 if is_vec {
2581 write!(self.out, ")")?;
2582 }
2583
2584 if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
2585 write!(self.out, ", ")?;
2586 self.write_expr(expr, ctx)?;
2587 }
2588
2589 match level {
2590 // Auto needs no more arguments
2591 crate::SampleLevel::Auto => (),
2592 // Zero needs level set to 0
2593 crate::SampleLevel::Zero => {
2594 if workaround_lod_with_grad {
2595 let vec_dim = match dim {
2596 crate::ImageDimension::Cube => 3,
2597 _ => 2,
2598 };
2599 write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
2600 } else if gather.is_none() {
2601 write!(self.out, ", 0.0")?;
2602 }
2603 }
2604 // Exact and bias require another argument
2605 crate::SampleLevel::Exact(expr) => {
2606 write!(self.out, ", ")?;
2607 self.write_expr(expr, ctx)?;
2608 }
2609 crate::SampleLevel::Bias(_) => {
2610 // This needs to be done after the offset writing
2611 }
2612 crate::SampleLevel::Gradient { x, y } => {
2613 // If we are using sampler2D to replace sampler1D, we also
2614 // need to make sure to use vec2 gradients
2615 if tex_1d_hack {
2616 write!(self.out, ", vec2(")?;
2617 self.write_expr(x, ctx)?;
2618 write!(self.out, ", 0.0)")?;
2619 write!(self.out, ", vec2(")?;
2620 self.write_expr(y, ctx)?;
2621 write!(self.out, ", 0.0)")?;
2622 } else {
2623 write!(self.out, ", ")?;
2624 self.write_expr(x, ctx)?;
2625 write!(self.out, ", ")?;
2626 self.write_expr(y, ctx)?;
2627 }
2628 }
2629 }
2630
2631 if let Some(constant) = offset {
2632 write!(self.out, ", ")?;
2633 if tex_1d_hack {
2634 write!(self.out, "ivec2(")?;
2635 }
2636 self.write_const_expr(constant, ctx.expressions)?;
2637 if tex_1d_hack {
2638 write!(self.out, ", 0)")?;
2639 }
2640 }
2641
2642 // Bias is always the last argument
2643 if let crate::SampleLevel::Bias(expr) = level {
2644 write!(self.out, ", ")?;
2645 self.write_expr(expr, ctx)?;
2646 }
2647
2648 if let (Some(component), None) = (gather, depth_ref) {
2649 write!(self.out, ", {}", component as usize)?;
2650 }
2651
2652 // End the function
2653 write!(self.out, ")")?
2654 }
2655 Expression::ImageLoad {
2656 image,
2657 coordinate,
2658 array_index,
2659 sample,
2660 level,
2661 } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
2662 // Query translates into one of the:
2663 // - textureSize/imageSize
2664 // - textureQueryLevels
2665 // - textureSamples/imageSamples
2666 Expression::ImageQuery { image, query } => {
2667 use crate::ImageClass;
2668
2669 // This will only panic if the module is invalid
2670 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
2671 TypeInner::Image {
2672 dim,
2673 arrayed: _,
2674 class,
2675 } => (dim, class),
2676 _ => unreachable!(),
2677 };
2678 let components = match dim {
2679 crate::ImageDimension::D1 => 1,
2680 crate::ImageDimension::D2 => 2,
2681 crate::ImageDimension::D3 => 3,
2682 crate::ImageDimension::Cube => 2,
2683 };
2684
2685 if let crate::ImageQuery::Size { .. } = query {
2686 match components {
2687 1 => write!(self.out, "uint(")?,
2688 _ => write!(self.out, "uvec{components}(")?,
2689 }
2690 } else {
2691 write!(self.out, "uint(")?;
2692 }
2693
2694 match query {
2695 crate::ImageQuery::Size { level } => {
2696 match class {
2697 ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
2698 write!(self.out, "textureSize(")?;
2699 self.write_expr(image, ctx)?;
2700 if let Some(expr) = level {
2701 let cast_to_int = matches!(
2702 *ctx.resolve_type(expr, &self.module.types),
2703 TypeInner::Scalar(crate::Scalar {
2704 kind: crate::ScalarKind::Uint,
2705 ..
2706 })
2707 );
2708
2709 write!(self.out, ", ")?;
2710
2711 if cast_to_int {
2712 write!(self.out, "int(")?;
2713 }
2714
2715 self.write_expr(expr, ctx)?;
2716
2717 if cast_to_int {
2718 write!(self.out, ")")?;
2719 }
2720 } else if !multi {
2721 // All textureSize calls requires an lod argument
2722 // except for multisampled samplers
2723 write!(self.out, ", 0")?;
2724 }
2725 }
2726 ImageClass::Storage { .. } => {
2727 write!(self.out, "imageSize(")?;
2728 self.write_expr(image, ctx)?;
2729 }
2730 ImageClass::External => unimplemented!(),
2731 }
2732 write!(self.out, ")")?;
2733 if components != 1 || self.options.version.is_es() {
2734 write!(self.out, ".{}", &"xyz"[..components])?;
2735 }
2736 }
2737 crate::ImageQuery::NumLevels => {
2738 write!(self.out, "textureQueryLevels(",)?;
2739 self.write_expr(image, ctx)?;
2740 write!(self.out, ")",)?;
2741 }
2742 crate::ImageQuery::NumLayers => {
2743 let fun_name = match class {
2744 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
2745 ImageClass::Storage { .. } => "imageSize",
2746 ImageClass::External => unimplemented!(),
2747 };
2748 write!(self.out, "{fun_name}(")?;
2749 self.write_expr(image, ctx)?;
2750 // All textureSize calls requires an lod argument
2751 // except for multisampled samplers
2752 if !class.is_multisampled() {
2753 write!(self.out, ", 0")?;
2754 }
2755 write!(self.out, ")")?;
2756 if components != 1 || self.options.version.is_es() {
2757 write!(self.out, ".{}", back::COMPONENTS[components])?;
2758 }
2759 }
2760 crate::ImageQuery::NumSamples => {
2761 let fun_name = match class {
2762 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
2763 "textureSamples"
2764 }
2765 ImageClass::Storage { .. } => "imageSamples",
2766 ImageClass::External => unimplemented!(),
2767 };
2768 write!(self.out, "{fun_name}(")?;
2769 self.write_expr(image, ctx)?;
2770 write!(self.out, ")",)?;
2771 }
2772 }
2773
2774 write!(self.out, ")")?;
2775 }
2776 Expression::Unary { op, expr } => {
2777 let operator_or_fn = match op {
2778 crate::UnaryOperator::Negate => "-",
2779 crate::UnaryOperator::LogicalNot => {
2780 match *ctx.resolve_type(expr, &self.module.types) {
2781 TypeInner::Vector { .. } => "not",
2782 _ => "!",
2783 }
2784 }
2785 crate::UnaryOperator::BitwiseNot => "~",
2786 };
2787 write!(self.out, "{operator_or_fn}(")?;
2788
2789 self.write_expr(expr, ctx)?;
2790
2791 write!(self.out, ")")?
2792 }
2793 // `Binary` we just write `left op right`, except when dealing with
2794 // comparison operations on vectors as they are implemented with
2795 // builtin functions.
2796 // Once again we wrap everything in parentheses to avoid precedence issues
2797 Expression::Binary {
2798 mut op,
2799 left,
2800 right,
2801 } => {
2802 // Holds `Some(function_name)` if the binary operation is
2803 // implemented as a function call
2804 use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
2805
2806 let left_inner = ctx.resolve_type(left, &self.module.types);
2807 let right_inner = ctx.resolve_type(right, &self.module.types);
2808
2809 let function = match (left_inner, right_inner) {
2810 (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
2811 Bo::Less
2812 | Bo::LessEqual
2813 | Bo::Greater
2814 | Bo::GreaterEqual
2815 | Bo::Equal
2816 | Bo::NotEqual => BinaryOperation::VectorCompare,
2817 Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
2818 Bo::And if scalar.kind == Sk::Bool => {
2819 op = crate::BinaryOperator::LogicalAnd;
2820 BinaryOperation::VectorComponentWise
2821 }
2822 Bo::InclusiveOr if scalar.kind == Sk::Bool => {
2823 op = crate::BinaryOperator::LogicalOr;
2824 BinaryOperation::VectorComponentWise
2825 }
2826 _ => BinaryOperation::Other,
2827 },
2828 _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
2829 (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
2830 Bo::Modulo => BinaryOperation::Modulo,
2831 _ => BinaryOperation::Other,
2832 },
2833 (Some(Sk::Bool), Some(Sk::Bool)) => match op {
2834 Bo::InclusiveOr => {
2835 op = crate::BinaryOperator::LogicalOr;
2836 BinaryOperation::Other
2837 }
2838 Bo::And => {
2839 op = crate::BinaryOperator::LogicalAnd;
2840 BinaryOperation::Other
2841 }
2842 _ => BinaryOperation::Other,
2843 },
2844 _ => BinaryOperation::Other,
2845 },
2846 };
2847
2848 match function {
2849 BinaryOperation::VectorCompare => {
2850 let op_str = match op {
2851 Bo::Less => "lessThan(",
2852 Bo::LessEqual => "lessThanEqual(",
2853 Bo::Greater => "greaterThan(",
2854 Bo::GreaterEqual => "greaterThanEqual(",
2855 Bo::Equal => "equal(",
2856 Bo::NotEqual => "notEqual(",
2857 _ => unreachable!(),
2858 };
2859 write!(self.out, "{op_str}")?;
2860 self.write_expr(left, ctx)?;
2861 write!(self.out, ", ")?;
2862 self.write_expr(right, ctx)?;
2863 write!(self.out, ")")?;
2864 }
2865 BinaryOperation::VectorComponentWise => {
2866 self.write_value_type(left_inner)?;
2867 write!(self.out, "(")?;
2868
2869 let size = match *left_inner {
2870 Ti::Vector { size, .. } => size,
2871 _ => unreachable!(),
2872 };
2873
2874 for i in 0..size as usize {
2875 if i != 0 {
2876 write!(self.out, ", ")?;
2877 }
2878
2879 self.write_expr(left, ctx)?;
2880 write!(self.out, ".{}", back::COMPONENTS[i])?;
2881
2882 write!(self.out, " {} ", back::binary_operation_str(op))?;
2883
2884 self.write_expr(right, ctx)?;
2885 write!(self.out, ".{}", back::COMPONENTS[i])?;
2886 }
2887
2888 write!(self.out, ")")?;
2889 }
2890 // TODO: handle undefined behavior of BinaryOperator::Modulo
2891 //
2892 // sint:
2893 // if right == 0 return 0
2894 // if left == min(type_of(left)) && right == -1 return 0
2895 // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
2896 //
2897 // uint:
2898 // if right == 0 return 0
2899 //
2900 // float:
2901 // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
2902 BinaryOperation::Modulo => {
2903 write!(self.out, "(")?;
2904
2905 // write `e1 - e2 * trunc(e1 / e2)`
2906 self.write_expr(left, ctx)?;
2907 write!(self.out, " - ")?;
2908 self.write_expr(right, ctx)?;
2909 write!(self.out, " * ")?;
2910 write!(self.out, "trunc(")?;
2911 self.write_expr(left, ctx)?;
2912 write!(self.out, " / ")?;
2913 self.write_expr(right, ctx)?;
2914 write!(self.out, ")")?;
2915
2916 write!(self.out, ")")?;
2917 }
2918 BinaryOperation::Other => {
2919 write!(self.out, "(")?;
2920
2921 self.write_expr(left, ctx)?;
2922 write!(self.out, " {} ", back::binary_operation_str(op))?;
2923 self.write_expr(right, ctx)?;
2924
2925 write!(self.out, ")")?;
2926 }
2927 }
2928 }
2929 // `Select` is written as `condition ? accept : reject`
2930 // We wrap everything in parentheses to avoid precedence issues
2931 Expression::Select {
2932 condition,
2933 accept,
2934 reject,
2935 } => {
2936 let cond_ty = ctx.resolve_type(condition, &self.module.types);
2937 let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
2938 true
2939 } else {
2940 false
2941 };
2942
2943 // TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
2944 if vec_select {
2945 // Glsl defines that for mix when the condition is a boolean the first element
2946 // is picked if condition is false and the second if condition is true
2947 write!(self.out, "mix(")?;
2948 self.write_expr(reject, ctx)?;
2949 write!(self.out, ", ")?;
2950 self.write_expr(accept, ctx)?;
2951 write!(self.out, ", ")?;
2952 self.write_expr(condition, ctx)?;
2953 } else {
2954 write!(self.out, "(")?;
2955 self.write_expr(condition, ctx)?;
2956 write!(self.out, " ? ")?;
2957 self.write_expr(accept, ctx)?;
2958 write!(self.out, " : ")?;
2959 self.write_expr(reject, ctx)?;
2960 }
2961
2962 write!(self.out, ")")?
2963 }
2964 // `Derivative` is a function call to a glsl provided function
2965 Expression::Derivative { axis, ctrl, expr } => {
2966 use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
2967 let fun_name = if self.options.version.supports_derivative_control() {
2968 match (axis, ctrl) {
2969 (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
2970 (Axis::X, Ctrl::Fine) => "dFdxFine",
2971 (Axis::X, Ctrl::None) => "dFdx",
2972 (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
2973 (Axis::Y, Ctrl::Fine) => "dFdyFine",
2974 (Axis::Y, Ctrl::None) => "dFdy",
2975 (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
2976 (Axis::Width, Ctrl::Fine) => "fwidthFine",
2977 (Axis::Width, Ctrl::None) => "fwidth",
2978 }
2979 } else {
2980 match axis {
2981 Axis::X => "dFdx",
2982 Axis::Y => "dFdy",
2983 Axis::Width => "fwidth",
2984 }
2985 };
2986 write!(self.out, "{fun_name}(")?;
2987 self.write_expr(expr, ctx)?;
2988 write!(self.out, ")")?
2989 }
2990 // `Relational` is a normal function call to some glsl provided functions
2991 Expression::Relational { fun, argument } => {
2992 use crate::RelationalFunction as Rf;
2993
2994 let fun_name = match fun {
2995 Rf::IsInf => "isinf",
2996 Rf::IsNan => "isnan",
2997 Rf::All => "all",
2998 Rf::Any => "any",
2999 };
3000 write!(self.out, "{fun_name}(")?;
3001
3002 self.write_expr(argument, ctx)?;
3003
3004 write!(self.out, ")")?
3005 }
3006 Expression::Math {
3007 fun,
3008 arg,
3009 arg1,
3010 arg2,
3011 arg3,
3012 } => {
3013 use crate::MathFunction as Mf;
3014
3015 let fun_name = match fun {
3016 // comparison
3017 Mf::Abs => "abs",
3018 Mf::Min => "min",
3019 Mf::Max => "max",
3020 Mf::Clamp => {
3021 let scalar_kind = ctx
3022 .resolve_type(arg, &self.module.types)
3023 .scalar_kind()
3024 .unwrap();
3025 match scalar_kind {
3026 crate::ScalarKind::Float => "clamp",
3027 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3028 // instruction to determine the value. This is fine according to the WGSL spec for float
3029 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3030 _ => {
3031 write!(self.out, "min(max(")?;
3032 self.write_expr(arg, ctx)?;
3033 write!(self.out, ", ")?;
3034 self.write_expr(arg1.unwrap(), ctx)?;
3035 write!(self.out, "), ")?;
3036 self.write_expr(arg2.unwrap(), ctx)?;
3037 write!(self.out, ")")?;
3038
3039 return Ok(());
3040 }
3041 }
3042 }
3043 Mf::Saturate => {
3044 write!(self.out, "clamp(")?;
3045
3046 self.write_expr(arg, ctx)?;
3047
3048 match *ctx.resolve_type(arg, &self.module.types) {
3049 TypeInner::Vector { size, .. } => write!(
3050 self.out,
3051 ", vec{}(0.0), vec{0}(1.0)",
3052 common::vector_size_str(size)
3053 )?,
3054 _ => write!(self.out, ", 0.0, 1.0")?,
3055 }
3056
3057 write!(self.out, ")")?;
3058
3059 return Ok(());
3060 }
3061 // trigonometry
3062 Mf::Cos => "cos",
3063 Mf::Cosh => "cosh",
3064 Mf::Sin => "sin",
3065 Mf::Sinh => "sinh",
3066 Mf::Tan => "tan",
3067 Mf::Tanh => "tanh",
3068 Mf::Acos => "acos",
3069 Mf::Asin => "asin",
3070 Mf::Atan => "atan",
3071 Mf::Asinh => "asinh",
3072 Mf::Acosh => "acosh",
3073 Mf::Atanh => "atanh",
3074 Mf::Radians => "radians",
3075 Mf::Degrees => "degrees",
3076 // glsl doesn't have atan2 function
3077 // use two-argument variation of the atan function
3078 Mf::Atan2 => "atan",
3079 // decomposition
3080 Mf::Ceil => "ceil",
3081 Mf::Floor => "floor",
3082 Mf::Round => "roundEven",
3083 Mf::Fract => "fract",
3084 Mf::Trunc => "trunc",
3085 Mf::Modf => MODF_FUNCTION,
3086 Mf::Frexp => FREXP_FUNCTION,
3087 Mf::Ldexp => "ldexp",
3088 // exponent
3089 Mf::Exp => "exp",
3090 Mf::Exp2 => "exp2",
3091 Mf::Log => "log",
3092 Mf::Log2 => "log2",
3093 Mf::Pow => "pow",
3094 // geometry
3095 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3096 TypeInner::Vector {
3097 scalar:
3098 crate::Scalar {
3099 kind: crate::ScalarKind::Float,
3100 ..
3101 },
3102 ..
3103 } => "dot",
3104 TypeInner::Vector { size, .. } => {
3105 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3106 }
3107 _ => unreachable!(
3108 "Correct TypeInner for dot product should be already validated"
3109 ),
3110 },
3111 fun @ (Mf::Dot4I8Packed | Mf::Dot4U8Packed) => {
3112 let conversion = match fun {
3113 Mf::Dot4I8Packed => "int",
3114 Mf::Dot4U8Packed => "",
3115 _ => unreachable!(),
3116 };
3117
3118 let arg1 = arg1.unwrap();
3119
3120 // Write parentheses around the dot product expression to prevent operators
3121 // with different precedences from applying earlier.
3122 write!(self.out, "(")?;
3123 for i in 0..4 {
3124 // Since `bitfieldExtract` only sign extends if the value is signed, we
3125 // need to convert the inputs to `int` in case of `Dot4I8Packed`. For
3126 // `Dot4U8Packed`, the code below only introduces parenthesis around
3127 // each factor, which aren't strictly needed because both operands are
3128 // baked, but which don't hurt either.
3129 write!(self.out, "bitfieldExtract({conversion}(")?;
3130 self.write_expr(arg, ctx)?;
3131 write!(self.out, "), {}, 8)", i * 8)?;
3132
3133 write!(self.out, " * bitfieldExtract({conversion}(")?;
3134 self.write_expr(arg1, ctx)?;
3135 write!(self.out, "), {}, 8)", i * 8)?;
3136
3137 if i != 3 {
3138 write!(self.out, " + ")?;
3139 }
3140 }
3141 write!(self.out, ")")?;
3142
3143 return Ok(());
3144 }
3145 Mf::Outer => "outerProduct",
3146 Mf::Cross => "cross",
3147 Mf::Distance => "distance",
3148 Mf::Length => "length",
3149 Mf::Normalize => "normalize",
3150 Mf::FaceForward => "faceforward",
3151 Mf::Reflect => "reflect",
3152 Mf::Refract => "refract",
3153 // computational
3154 Mf::Sign => "sign",
3155 Mf::Fma => {
3156 if self.options.version.supports_fma_function() {
3157 // Use the fma function when available
3158 "fma"
3159 } else {
3160 // No fma support. Transform the function call into an arithmetic expression
3161 write!(self.out, "(")?;
3162
3163 self.write_expr(arg, ctx)?;
3164 write!(self.out, " * ")?;
3165
3166 let arg1 =
3167 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3168 self.write_expr(arg1, ctx)?;
3169 write!(self.out, " + ")?;
3170
3171 let arg2 =
3172 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3173 self.write_expr(arg2, ctx)?;
3174 write!(self.out, ")")?;
3175
3176 return Ok(());
3177 }
3178 }
3179 Mf::Mix => "mix",
3180 Mf::Step => "step",
3181 Mf::SmoothStep => "smoothstep",
3182 Mf::Sqrt => "sqrt",
3183 Mf::InverseSqrt => "inversesqrt",
3184 Mf::Inverse => "inverse",
3185 Mf::Transpose => "transpose",
3186 Mf::Determinant => "determinant",
3187 Mf::QuantizeToF16 => match *ctx.resolve_type(arg, &self.module.types) {
3188 TypeInner::Scalar { .. } => {
3189 write!(self.out, "unpackHalf2x16(packHalf2x16(vec2(")?;
3190 self.write_expr(arg, ctx)?;
3191 write!(self.out, "))).x")?;
3192 return Ok(());
3193 }
3194 TypeInner::Vector {
3195 size: crate::VectorSize::Bi,
3196 ..
3197 } => {
3198 write!(self.out, "unpackHalf2x16(packHalf2x16(")?;
3199 self.write_expr(arg, ctx)?;
3200 write!(self.out, "))")?;
3201 return Ok(());
3202 }
3203 TypeInner::Vector {
3204 size: crate::VectorSize::Tri,
3205 ..
3206 } => {
3207 write!(self.out, "vec3(unpackHalf2x16(packHalf2x16(")?;
3208 self.write_expr(arg, ctx)?;
3209 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3210 self.write_expr(arg, ctx)?;
3211 write!(self.out, ".zz)).x)")?;
3212 return Ok(());
3213 }
3214 TypeInner::Vector {
3215 size: crate::VectorSize::Quad,
3216 ..
3217 } => {
3218 write!(self.out, "vec4(unpackHalf2x16(packHalf2x16(")?;
3219 self.write_expr(arg, ctx)?;
3220 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3221 self.write_expr(arg, ctx)?;
3222 write!(self.out, ".zw)))")?;
3223 return Ok(());
3224 }
3225 _ => unreachable!(
3226 "Correct TypeInner for QuantizeToF16 should be already validated"
3227 ),
3228 },
3229 // bits
3230 Mf::CountTrailingZeros => {
3231 match *ctx.resolve_type(arg, &self.module.types) {
3232 TypeInner::Vector { size, scalar, .. } => {
3233 let s = common::vector_size_str(size);
3234 if let crate::ScalarKind::Uint = scalar.kind {
3235 write!(self.out, "min(uvec{s}(findLSB(")?;
3236 self.write_expr(arg, ctx)?;
3237 write!(self.out, ")), uvec{s}(32u))")?;
3238 } else {
3239 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3240 self.write_expr(arg, ctx)?;
3241 write!(self.out, ")), uvec{s}(32u)))")?;
3242 }
3243 }
3244 TypeInner::Scalar(scalar) => {
3245 if let crate::ScalarKind::Uint = scalar.kind {
3246 write!(self.out, "min(uint(findLSB(")?;
3247 self.write_expr(arg, ctx)?;
3248 write!(self.out, ")), 32u)")?;
3249 } else {
3250 write!(self.out, "int(min(uint(findLSB(")?;
3251 self.write_expr(arg, ctx)?;
3252 write!(self.out, ")), 32u))")?;
3253 }
3254 }
3255 _ => unreachable!(),
3256 };
3257 return Ok(());
3258 }
3259 Mf::CountLeadingZeros => {
3260 if self.options.version.supports_integer_functions() {
3261 match *ctx.resolve_type(arg, &self.module.types) {
3262 TypeInner::Vector { size, scalar } => {
3263 let s = common::vector_size_str(size);
3264
3265 if let crate::ScalarKind::Uint = scalar.kind {
3266 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3267 self.write_expr(arg, ctx)?;
3268 write!(self.out, "))")?;
3269 } else {
3270 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3271 self.write_expr(arg, ctx)?;
3272 write!(self.out, "), ivec{s}(0), lessThan(")?;
3273 self.write_expr(arg, ctx)?;
3274 write!(self.out, ", ivec{s}(0)))")?;
3275 }
3276 }
3277 TypeInner::Scalar(scalar) => {
3278 if let crate::ScalarKind::Uint = scalar.kind {
3279 write!(self.out, "uint(31 - findMSB(")?;
3280 } else {
3281 write!(self.out, "(")?;
3282 self.write_expr(arg, ctx)?;
3283 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3284 }
3285
3286 self.write_expr(arg, ctx)?;
3287 write!(self.out, "))")?;
3288 }
3289 _ => unreachable!(),
3290 };
3291 } else {
3292 match *ctx.resolve_type(arg, &self.module.types) {
3293 TypeInner::Vector { size, scalar } => {
3294 let s = common::vector_size_str(size);
3295
3296 if let crate::ScalarKind::Uint = scalar.kind {
3297 write!(self.out, "uvec{s}(")?;
3298 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3299 self.write_expr(arg, ctx)?;
3300 write!(self.out, ") + 0.5)))")?;
3301 } else {
3302 write!(self.out, "ivec{s}(")?;
3303 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3304 self.write_expr(arg, ctx)?;
3305 write!(self.out, ") + 0.5)), ")?;
3306 write!(self.out, "vec{s}(0.0), lessThan(")?;
3307 self.write_expr(arg, ctx)?;
3308 write!(self.out, ", ivec{s}(0u))))")?;
3309 }
3310 }
3311 TypeInner::Scalar(scalar) => {
3312 if let crate::ScalarKind::Uint = scalar.kind {
3313 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3314 self.write_expr(arg, ctx)?;
3315 write!(self.out, ") + 0.5)))")?;
3316 } else {
3317 write!(self.out, "(")?;
3318 self.write_expr(arg, ctx)?;
3319 write!(self.out, " < 0 ? 0 : int(")?;
3320 write!(self.out, "31.0 - floor(log2(float(")?;
3321 self.write_expr(arg, ctx)?;
3322 write!(self.out, ") + 0.5))))")?;
3323 }
3324 }
3325 _ => unreachable!(),
3326 };
3327 }
3328
3329 return Ok(());
3330 }
3331 Mf::CountOneBits => "bitCount",
3332 Mf::ReverseBits => "bitfieldReverse",
3333 Mf::ExtractBits => {
3334 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3335 // to first sanitize the offset and count first. If we don't do this, AMD and Intel chips
3336 // will return out-of-spec values if the extracted range is not within the bit width.
3337 //
3338 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3339 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3340 //
3341 // w = sizeof(x) * 8
3342 // o = min(offset, w)
3343 // c = min(count, w - o)
3344 //
3345 // bitfieldExtract(x, o, c)
3346 //
3347 // extract_bits(e, min(offset, w), min(count, w - min(offset, w))))
3348 let scalar_bits = ctx
3349 .resolve_type(arg, &self.module.types)
3350 .scalar_width()
3351 .unwrap()
3352 * 8;
3353
3354 write!(self.out, "bitfieldExtract(")?;
3355 self.write_expr(arg, ctx)?;
3356 write!(self.out, ", int(min(")?;
3357 self.write_expr(arg1.unwrap(), ctx)?;
3358 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3359 self.write_expr(arg2.unwrap(), ctx)?;
3360 write!(self.out, ", {scalar_bits}u - min(")?;
3361 self.write_expr(arg1.unwrap(), ctx)?;
3362 write!(self.out, ", {scalar_bits}u))))")?;
3363
3364 return Ok(());
3365 }
3366 Mf::InsertBits => {
3367 // InsertBits has the same considerations as ExtractBits above
3368 let scalar_bits = ctx
3369 .resolve_type(arg, &self.module.types)
3370 .scalar_width()
3371 .unwrap()
3372 * 8;
3373
3374 write!(self.out, "bitfieldInsert(")?;
3375 self.write_expr(arg, ctx)?;
3376 write!(self.out, ", ")?;
3377 self.write_expr(arg1.unwrap(), ctx)?;
3378 write!(self.out, ", int(min(")?;
3379 self.write_expr(arg2.unwrap(), ctx)?;
3380 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3381 self.write_expr(arg3.unwrap(), ctx)?;
3382 write!(self.out, ", {scalar_bits}u - min(")?;
3383 self.write_expr(arg2.unwrap(), ctx)?;
3384 write!(self.out, ", {scalar_bits}u))))")?;
3385
3386 return Ok(());
3387 }
3388 Mf::FirstTrailingBit => "findLSB",
3389 Mf::FirstLeadingBit => "findMSB",
3390 // data packing
3391 Mf::Pack4x8snorm => {
3392 if self.options.version.supports_pack_unpack_4x8() {
3393 "packSnorm4x8"
3394 } else {
3395 // polyfill should go here. Needs a corresponding entry in `need_bake_expression`
3396 return Err(Error::UnsupportedExternal("packSnorm4x8".into()));
3397 }
3398 }
3399 Mf::Pack4x8unorm => {
3400 if self.options.version.supports_pack_unpack_4x8() {
3401 "packUnorm4x8"
3402 } else {
3403 return Err(Error::UnsupportedExternal("packUnorm4x8".to_owned()));
3404 }
3405 }
3406 Mf::Pack2x16snorm => {
3407 if self.options.version.supports_pack_unpack_snorm_2x16() {
3408 "packSnorm2x16"
3409 } else {
3410 return Err(Error::UnsupportedExternal("packSnorm2x16".to_owned()));
3411 }
3412 }
3413 Mf::Pack2x16unorm => {
3414 if self.options.version.supports_pack_unpack_unorm_2x16() {
3415 "packUnorm2x16"
3416 } else {
3417 return Err(Error::UnsupportedExternal("packUnorm2x16".to_owned()));
3418 }
3419 }
3420 Mf::Pack2x16float => {
3421 if self.options.version.supports_pack_unpack_half_2x16() {
3422 "packHalf2x16"
3423 } else {
3424 return Err(Error::UnsupportedExternal("packHalf2x16".to_owned()));
3425 }
3426 }
3427
3428 fun @ (Mf::Pack4xI8 | Mf::Pack4xU8 | Mf::Pack4xI8Clamp | Mf::Pack4xU8Clamp) => {
3429 let was_signed = matches!(fun, Mf::Pack4xI8 | Mf::Pack4xI8Clamp);
3430 let clamp_bounds = match fun {
3431 Mf::Pack4xI8Clamp => Some(("-128", "127")),
3432 Mf::Pack4xU8Clamp => Some(("0", "255")),
3433 _ => None,
3434 };
3435 let const_suffix = if was_signed { "" } else { "u" };
3436 if was_signed {
3437 write!(self.out, "uint(")?;
3438 }
3439 let write_arg = |this: &mut Self| -> BackendResult {
3440 if let Some((min, max)) = clamp_bounds {
3441 write!(this.out, "clamp(")?;
3442 this.write_expr(arg, ctx)?;
3443 write!(this.out, ", {min}{const_suffix}, {max}{const_suffix})")?;
3444 } else {
3445 this.write_expr(arg, ctx)?;
3446 }
3447 Ok(())
3448 };
3449 write!(self.out, "(")?;
3450 write_arg(self)?;
3451 write!(self.out, "[0] & 0xFF{const_suffix}) | ((")?;
3452 write_arg(self)?;
3453 write!(self.out, "[1] & 0xFF{const_suffix}) << 8) | ((")?;
3454 write_arg(self)?;
3455 write!(self.out, "[2] & 0xFF{const_suffix}) << 16) | ((")?;
3456 write_arg(self)?;
3457 write!(self.out, "[3] & 0xFF{const_suffix}) << 24)")?;
3458 if was_signed {
3459 write!(self.out, ")")?;
3460 }
3461
3462 return Ok(());
3463 }
3464 // data unpacking
3465 Mf::Unpack2x16float => {
3466 if self.options.version.supports_pack_unpack_half_2x16() {
3467 "unpackHalf2x16"
3468 } else {
3469 return Err(Error::UnsupportedExternal("unpackHalf2x16".into()));
3470 }
3471 }
3472 Mf::Unpack2x16snorm => {
3473 if self.options.version.supports_pack_unpack_snorm_2x16() {
3474 "unpackSnorm2x16"
3475 } else {
3476 let scale = 32767;
3477
3478 write!(self.out, "(vec2(ivec2(")?;
3479 self.write_expr(arg, ctx)?;
3480 write!(self.out, " << 16, ")?;
3481 self.write_expr(arg, ctx)?;
3482 write!(self.out, ") >> 16) / {scale}.0)")?;
3483 return Ok(());
3484 }
3485 }
3486 Mf::Unpack2x16unorm => {
3487 if self.options.version.supports_pack_unpack_unorm_2x16() {
3488 "unpackUnorm2x16"
3489 } else {
3490 let scale = 65535;
3491
3492 write!(self.out, "(vec2(")?;
3493 self.write_expr(arg, ctx)?;
3494 write!(self.out, " & 0xFFFFu, ")?;
3495 self.write_expr(arg, ctx)?;
3496 write!(self.out, " >> 16) / {scale}.0)")?;
3497 return Ok(());
3498 }
3499 }
3500 Mf::Unpack4x8snorm => {
3501 if self.options.version.supports_pack_unpack_4x8() {
3502 "unpackSnorm4x8"
3503 } else {
3504 let scale = 127;
3505
3506 write!(self.out, "(vec4(ivec4(")?;
3507 self.write_expr(arg, ctx)?;
3508 write!(self.out, " << 24, ")?;
3509 self.write_expr(arg, ctx)?;
3510 write!(self.out, " << 16, ")?;
3511 self.write_expr(arg, ctx)?;
3512 write!(self.out, " << 8, ")?;
3513 self.write_expr(arg, ctx)?;
3514 write!(self.out, ") >> 24) / {scale}.0)")?;
3515 return Ok(());
3516 }
3517 }
3518 Mf::Unpack4x8unorm => {
3519 if self.options.version.supports_pack_unpack_4x8() {
3520 "unpackUnorm4x8"
3521 } else {
3522 let scale = 255;
3523
3524 write!(self.out, "(vec4(")?;
3525 self.write_expr(arg, ctx)?;
3526 write!(self.out, " & 0xFFu, ")?;
3527 self.write_expr(arg, ctx)?;
3528 write!(self.out, " >> 8 & 0xFFu, ")?;
3529 self.write_expr(arg, ctx)?;
3530 write!(self.out, " >> 16 & 0xFFu, ")?;
3531 self.write_expr(arg, ctx)?;
3532 write!(self.out, " >> 24) / {scale}.0)")?;
3533 return Ok(());
3534 }
3535 }
3536 fun @ (Mf::Unpack4xI8 | Mf::Unpack4xU8) => {
3537 let sign_prefix = match fun {
3538 Mf::Unpack4xI8 => 'i',
3539 Mf::Unpack4xU8 => 'u',
3540 _ => unreachable!(),
3541 };
3542 write!(self.out, "{sign_prefix}vec4(")?;
3543 for i in 0..4 {
3544 write!(self.out, "bitfieldExtract(")?;
3545 // Since bitfieldExtract only sign extends if the value is signed, this
3546 // cast is needed
3547 match fun {
3548 Mf::Unpack4xI8 => {
3549 write!(self.out, "int(")?;
3550 self.write_expr(arg, ctx)?;
3551 write!(self.out, ")")?;
3552 }
3553 Mf::Unpack4xU8 => self.write_expr(arg, ctx)?,
3554 _ => unreachable!(),
3555 };
3556 write!(self.out, ", {}, 8)", i * 8)?;
3557 if i != 3 {
3558 write!(self.out, ", ")?;
3559 }
3560 }
3561 write!(self.out, ")")?;
3562
3563 return Ok(());
3564 }
3565 };
3566
3567 let extract_bits = fun == Mf::ExtractBits;
3568 let insert_bits = fun == Mf::InsertBits;
3569
3570 // Some GLSL functions always return signed integers (like findMSB),
3571 // so they need to be cast to uint if the argument is also an uint.
3572 let ret_might_need_int_to_uint = matches!(
3573 fun,
3574 Mf::FirstTrailingBit | Mf::FirstLeadingBit | Mf::CountOneBits | Mf::Abs
3575 );
3576
3577 // Some GLSL functions only accept signed integers (like abs),
3578 // so they need their argument cast from uint to int.
3579 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
3580
3581 // Check if the argument is an unsigned integer and return the vector size
3582 // in case it's a vector
3583 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
3584 TypeInner::Scalar(crate::Scalar {
3585 kind: crate::ScalarKind::Uint,
3586 ..
3587 }) => Some(None),
3588 TypeInner::Vector {
3589 scalar:
3590 crate::Scalar {
3591 kind: crate::ScalarKind::Uint,
3592 ..
3593 },
3594 size,
3595 } => Some(Some(size)),
3596 _ => None,
3597 };
3598
3599 // Cast to uint if the function needs it
3600 if ret_might_need_int_to_uint {
3601 if let Some(maybe_size) = maybe_uint_size {
3602 match maybe_size {
3603 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
3604 None => write!(self.out, "uint(")?,
3605 }
3606 }
3607 }
3608
3609 write!(self.out, "{fun_name}(")?;
3610
3611 // Cast to int if the function needs it
3612 if arg_might_need_uint_to_int {
3613 if let Some(maybe_size) = maybe_uint_size {
3614 match maybe_size {
3615 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
3616 None => write!(self.out, "int(")?,
3617 }
3618 }
3619 }
3620
3621 self.write_expr(arg, ctx)?;
3622
3623 // Close the cast from uint to int
3624 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
3625 write!(self.out, ")")?
3626 }
3627
3628 if let Some(arg) = arg1 {
3629 write!(self.out, ", ")?;
3630 if extract_bits {
3631 write!(self.out, "int(")?;
3632 self.write_expr(arg, ctx)?;
3633 write!(self.out, ")")?;
3634 } else {
3635 self.write_expr(arg, ctx)?;
3636 }
3637 }
3638 if let Some(arg) = arg2 {
3639 write!(self.out, ", ")?;
3640 if extract_bits || insert_bits {
3641 write!(self.out, "int(")?;
3642 self.write_expr(arg, ctx)?;
3643 write!(self.out, ")")?;
3644 } else {
3645 self.write_expr(arg, ctx)?;
3646 }
3647 }
3648 if let Some(arg) = arg3 {
3649 write!(self.out, ", ")?;
3650 if insert_bits {
3651 write!(self.out, "int(")?;
3652 self.write_expr(arg, ctx)?;
3653 write!(self.out, ")")?;
3654 } else {
3655 self.write_expr(arg, ctx)?;
3656 }
3657 }
3658 write!(self.out, ")")?;
3659
3660 // Close the cast from int to uint
3661 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
3662 write!(self.out, ")")?
3663 }
3664 }
3665 // `As` is always a call.
3666 // If `convert` is true the function name is the type
3667 // Else the function name is one of the glsl provided bitcast functions
3668 Expression::As {
3669 expr,
3670 kind: target_kind,
3671 convert,
3672 } => {
3673 let inner = ctx.resolve_type(expr, &self.module.types);
3674 match convert {
3675 Some(width) => {
3676 // this is similar to `write_type`, but with the target kind
3677 let scalar = glsl_scalar(crate::Scalar {
3678 kind: target_kind,
3679 width,
3680 })?;
3681 match *inner {
3682 TypeInner::Matrix { columns, rows, .. } => write!(
3683 self.out,
3684 "{}mat{}x{}",
3685 scalar.prefix, columns as u8, rows as u8
3686 )?,
3687 TypeInner::Vector { size, .. } => {
3688 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
3689 }
3690 _ => write!(self.out, "{}", scalar.full)?,
3691 }
3692
3693 write!(self.out, "(")?;
3694 self.write_expr(expr, ctx)?;
3695 write!(self.out, ")")?
3696 }
3697 None => {
3698 use crate::ScalarKind as Sk;
3699
3700 let target_vector_type = match *inner {
3701 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
3702 size,
3703 scalar: crate::Scalar {
3704 kind: target_kind,
3705 width: scalar.width,
3706 },
3707 }),
3708 _ => None,
3709 };
3710
3711 let source_kind = inner.scalar_kind().unwrap();
3712
3713 match (source_kind, target_kind, target_vector_type) {
3714 // No conversion needed
3715 (Sk::Sint, Sk::Sint, _)
3716 | (Sk::Uint, Sk::Uint, _)
3717 | (Sk::Float, Sk::Float, _)
3718 | (Sk::Bool, Sk::Bool, _) => {
3719 self.write_expr(expr, ctx)?;
3720 return Ok(());
3721 }
3722
3723 // Cast to/from floats
3724 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
3725 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
3726 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
3727 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
3728
3729 // Cast between vector types
3730 (_, _, Some(vector)) => {
3731 self.write_value_type(&vector)?;
3732 }
3733
3734 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
3735 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
3736 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
3737 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
3738 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
3739 write!(self.out, "bool")?
3740 }
3741
3742 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
3743 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
3744 };
3745
3746 write!(self.out, "(")?;
3747 self.write_expr(expr, ctx)?;
3748 write!(self.out, ")")?;
3749 }
3750 }
3751 }
3752 // These expressions never show up in `Emit`.
3753 Expression::CallResult(_)
3754 | Expression::AtomicResult { .. }
3755 | Expression::RayQueryProceedResult
3756 | Expression::WorkGroupUniformLoadResult { .. }
3757 | Expression::SubgroupOperationResult { .. }
3758 | Expression::SubgroupBallotResult => unreachable!(),
3759 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
3760 Expression::ArrayLength(expr) => {
3761 write!(self.out, "uint(")?;
3762 self.write_expr(expr, ctx)?;
3763 write!(self.out, ".length())")?
3764 }
3765 // not supported yet
3766 Expression::RayQueryGetIntersection { .. }
3767 | Expression::RayQueryVertexPositions { .. } => unreachable!(),
3768 }
3769
3770 Ok(())
3771 }
3772
    /// Helper function to write the local holding the clamped lod
    ///
    /// Emits a statement of the form
    /// `int <Baked(expr)><CLAMPED_LOD_SUFFIX> = clamp(<level_expr>, 0, textureQueryLevels(<image>) - 1);`
    /// so that a later texture access can reuse a lod value that is
    /// guaranteed to be within the valid mip range of `image`.
    fn write_clamped_lod(
        &mut self,
        ctx: &back::FunctionCtx,
        expr: Handle<crate::Expression>,
        image: Handle<crate::Expression>,
        level_expr: Handle<crate::Expression>,
    ) -> Result<(), Error> {
        // Define our local (named after `expr` plus the clamped-lod suffix)
        // and start a call to `clamp`
        write!(
            self.out,
            "int {}{} = clamp(",
            Baked(expr),
            CLAMPED_LOD_SUFFIX
        )?;
        // Write the lod that will be clamped
        self.write_expr(level_expr, ctx)?;
        // Set the min value to 0 and start a call to `textureQueryLevels` to get
        // the maximum value
        write!(self.out, ", 0, textureQueryLevels(")?;
        // Write the target image as an argument to `textureQueryLevels`
        self.write_expr(image, ctx)?;
        // Close the call to `textureQueryLevels`, subtract 1 from it since
        // the lod argument is 0 based, close the `clamp` call and end the
        // local declaration statement.
        writeln!(self.out, ") - 1);")?;

        Ok(())
    }
3802
3803 // Helper method used to retrieve how many elements a coordinate vector
3804 // for the images operations need.
3805 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
3806 // openGL es doesn't have 1D images so we need workaround it
3807 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
3808 // Get how many components the coordinate vector needs for the dimensions only
3809 let tex_coord_size = match dim {
3810 crate::ImageDimension::D1 => 1,
3811 crate::ImageDimension::D2 => 2,
3812 crate::ImageDimension::D3 => 3,
3813 crate::ImageDimension::Cube => 2,
3814 };
3815 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
3816 // and another 1 if we need to workaround 1D images by making them 2D
3817 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
3818 }
3819
3820 /// Helper method to write the coordinate vector for image operations
3821 fn write_texture_coord(
3822 &mut self,
3823 ctx: &back::FunctionCtx,
3824 vector_size: u8,
3825 coordinate: Handle<crate::Expression>,
3826 array_index: Option<Handle<crate::Expression>>,
3827 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
3828 tex_1d_hack: bool,
3829 ) -> Result<(), Error> {
3830 match array_index {
3831 // If the image needs an array indice we need to add it to the end of our
3832 // coordinate vector, to do so we will use the `ivec(ivec, scalar)`
3833 // constructor notation (NOTE: the inner `ivec` can also be a scalar, this
3834 // is important for 1D arrayed images).
3835 Some(layer_expr) => {
3836 write!(self.out, "ivec{vector_size}(")?;
3837 self.write_expr(coordinate, ctx)?;
3838 write!(self.out, ", ")?;
3839 // If we are replacing sampler1D with sampler2D we also need
3840 // to add another zero to the coordinates vector for the y component
3841 if tex_1d_hack {
3842 write!(self.out, "0, ")?;
3843 }
3844 self.write_expr(layer_expr, ctx)?;
3845 write!(self.out, ")")?;
3846 }
3847 // Otherwise write just the expression (and the 1D hack if needed)
3848 None => {
3849 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
3850 TypeInner::Scalar(crate::Scalar {
3851 kind: crate::ScalarKind::Uint,
3852 ..
3853 }) => Some(None),
3854 TypeInner::Vector {
3855 size,
3856 scalar:
3857 crate::Scalar {
3858 kind: crate::ScalarKind::Uint,
3859 ..
3860 },
3861 } => Some(Some(size as u32)),
3862 _ => None,
3863 };
3864 if tex_1d_hack {
3865 write!(self.out, "ivec2(")?;
3866 } else if uvec_size.is_some() {
3867 match uvec_size {
3868 Some(None) => write!(self.out, "int(")?,
3869 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
3870 _ => {}
3871 }
3872 }
3873 self.write_expr(coordinate, ctx)?;
3874 if tex_1d_hack {
3875 write!(self.out, ", 0)")?;
3876 } else if uvec_size.is_some() {
3877 write!(self.out, ")")?;
3878 }
3879 }
3880 }
3881
3882 Ok(())
3883 }
3884
3885 /// Helper method to write the `ImageStore` statement
3886 fn write_image_store(
3887 &mut self,
3888 ctx: &back::FunctionCtx,
3889 image: Handle<crate::Expression>,
3890 coordinate: Handle<crate::Expression>,
3891 array_index: Option<Handle<crate::Expression>>,
3892 value: Handle<crate::Expression>,
3893 ) -> Result<(), Error> {
3894 use crate::ImageDimension as IDim;
3895
3896 // NOTE: openGL requires that `imageStore`s have no effects when the texel is invalid
3897 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
3898
3899 // This will only panic if the module is invalid
3900 let dim = match *ctx.resolve_type(image, &self.module.types) {
3901 TypeInner::Image { dim, .. } => dim,
3902 _ => unreachable!(),
3903 };
3904
3905 // Begin our call to `imageStore`
3906 write!(self.out, "imageStore(")?;
3907 self.write_expr(image, ctx)?;
3908 // Separate the image argument from the coordinates
3909 write!(self.out, ", ")?;
3910
3911 // openGL es doesn't have 1D images so we need workaround it
3912 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
3913 // Write the coordinate vector
3914 self.write_texture_coord(
3915 ctx,
3916 // Get the size of the coordinate vector
3917 self.get_coordinate_vector_size(dim, array_index.is_some()),
3918 coordinate,
3919 array_index,
3920 tex_1d_hack,
3921 )?;
3922
3923 // Separate the coordinate from the value to write and write the expression
3924 // of the value to write.
3925 write!(self.out, ", ")?;
3926 self.write_expr(value, ctx)?;
3927 // End the call to `imageStore` and the statement.
3928 writeln!(self.out, ");")?;
3929
3930 Ok(())
3931 }
3932
3933 /// Helper method to write the `ImageAtomic` statement
3934 fn write_image_atomic(
3935 &mut self,
3936 ctx: &back::FunctionCtx,
3937 image: Handle<crate::Expression>,
3938 coordinate: Handle<crate::Expression>,
3939 array_index: Option<Handle<crate::Expression>>,
3940 fun: crate::AtomicFunction,
3941 value: Handle<crate::Expression>,
3942 ) -> Result<(), Error> {
3943 use crate::ImageDimension as IDim;
3944
3945 // NOTE: openGL requires that `imageAtomic`s have no effects when the texel is invalid
3946 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
3947
3948 // This will only panic if the module is invalid
3949 let dim = match *ctx.resolve_type(image, &self.module.types) {
3950 TypeInner::Image { dim, .. } => dim,
3951 _ => unreachable!(),
3952 };
3953
3954 // Begin our call to `imageAtomic`
3955 let fun_str = fun.to_glsl();
3956 write!(self.out, "imageAtomic{fun_str}(")?;
3957 self.write_expr(image, ctx)?;
3958 // Separate the image argument from the coordinates
3959 write!(self.out, ", ")?;
3960
3961 // openGL es doesn't have 1D images so we need workaround it
3962 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
3963 // Write the coordinate vector
3964 self.write_texture_coord(
3965 ctx,
3966 // Get the size of the coordinate vector
3967 self.get_coordinate_vector_size(dim, false),
3968 coordinate,
3969 array_index,
3970 tex_1d_hack,
3971 )?;
3972
3973 // Separate the coordinate from the value to write and write the expression
3974 // of the value to write.
3975 write!(self.out, ", ")?;
3976 self.write_expr(value, ctx)?;
3977 // End the call to `imageAtomic` and the statement.
3978 writeln!(self.out, ");")?;
3979
3980 Ok(())
3981 }
3982
3983 /// Helper method for writing an `ImageLoad` expression.
3984 #[allow(clippy::too_many_arguments)]
3985 fn write_image_load(
3986 &mut self,
3987 handle: Handle<crate::Expression>,
3988 ctx: &back::FunctionCtx,
3989 image: Handle<crate::Expression>,
3990 coordinate: Handle<crate::Expression>,
3991 array_index: Option<Handle<crate::Expression>>,
3992 sample: Option<Handle<crate::Expression>>,
3993 level: Option<Handle<crate::Expression>>,
3994 ) -> Result<(), Error> {
3995 use crate::ImageDimension as IDim;
3996
3997 // `ImageLoad` is a bit complicated.
3998 // There are two functions one for sampled
3999 // images another for storage images, the former uses `texelFetch` and the
4000 // latter uses `imageLoad`.
4001 //
4002 // Furthermore we have `level` which is always `Some` for sampled images
4003 // and `None` for storage images, so we end up with two functions:
4004 // - `texelFetch(image, coordinate, level)` for sampled images
4005 // - `imageLoad(image, coordinate)` for storage images
4006 //
4007 // Finally we also have to consider bounds checking, for storage images
4008 // this is easy since openGL requires that invalid texels always return
4009 // 0, for sampled images we need to either verify that all arguments are
4010 // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).
4011
4012 // This will only panic if the module is invalid
4013 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
4014 TypeInner::Image {
4015 dim,
4016 arrayed: _,
4017 class,
4018 } => (dim, class),
4019 _ => unreachable!(),
4020 };
4021
4022 // Get the name of the function to be used for the load operation
4023 // and the policy to be used with it.
4024 let (fun_name, policy) = match class {
4025 // Sampled images inherit the policy from the user passed policies
4026 crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
4027 crate::ImageClass::Storage { .. } => {
4028 // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
4029 // "Invalid image loads will return a vector where the value of R, G, and B components
4030 // is 0 and the value of the A component is undefined."
4031 //
4032 // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
4033 // "Invalid image loads will return zero."
4034 //
4035 // So, we only inject bounds checks for ES
4036 let policy = if self.options.version.is_es() {
4037 self.policies.image_load
4038 } else {
4039 proc::BoundsCheckPolicy::Unchecked
4040 };
4041 ("imageLoad", policy)
4042 }
4043 // TODO: Is there even a function for this?
4044 crate::ImageClass::Depth { multi: _ } => {
4045 return Err(Error::Custom(
4046 "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
4047 ))
4048 }
4049 crate::ImageClass::External => unimplemented!(),
4050 };
4051
4052 // openGL es doesn't have 1D images so we need workaround it
4053 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
4054 // Get the size of the coordinate vector
4055 let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());
4056
4057 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4058 // To write the bounds checks for `ReadZeroSkipWrite` we will use a
4059 // ternary operator since we are in the middle of an expression and
4060 // need to return a value.
4061 //
4062 // NOTE: glsl does short circuit when evaluating logical
4063 // expressions so we can be sure that after we test a
4064 // condition it will be true for the next ones
4065
4066 // Write parentheses around the ternary operator to prevent problems with
4067 // expressions emitted before or after it having more precedence
4068 write!(self.out, "(",)?;
4069
4070 // The lod check needs to precede the size check since we need
4071 // to use the lod to get the size of the image at that level.
4072 if let Some(level_expr) = level {
4073 self.write_expr(level_expr, ctx)?;
4074 write!(self.out, " < textureQueryLevels(",)?;
4075 self.write_expr(image, ctx)?;
4076 // Chain the next check
4077 write!(self.out, ") && ")?;
4078 }
4079
4080 // Check that the sample arguments doesn't exceed the number of samples
4081 if let Some(sample_expr) = sample {
4082 self.write_expr(sample_expr, ctx)?;
4083 write!(self.out, " < textureSamples(",)?;
4084 self.write_expr(image, ctx)?;
4085 // Chain the next check
4086 write!(self.out, ") && ")?;
4087 }
4088
4089 // We now need to write the size checks for the coordinates and array index
4090 // first we write the comparison function in case the image is 1D non arrayed
4091 // (and no 1D to 2D hack was needed) we are comparing scalars so the less than
4092 // operator will suffice, but otherwise we'll be comparing two vectors so we'll
4093 // need to use the `lessThan` function but it returns a vector of booleans (one
4094 // for each comparison) so we need to fold it all in one scalar boolean, since
4095 // we want all comparisons to pass we use the `all` function which will only
4096 // return `true` if all the elements of the boolean vector are also `true`.
4097 //
4098 // So we'll end with one of the following forms
4099 // - `coord < textureSize(image, lod)` for 1D images
4100 // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
4101 // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
4102 // for arrayed images
4103 // - `all(lessThan(coord, textureSize(image)))` for multi sampled images
4104
4105 if vector_size != 1 {
4106 write!(self.out, "all(lessThan(")?;
4107 }
4108
4109 // Write the coordinate vector
4110 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4111
4112 if vector_size != 1 {
4113 // If we used the `lessThan` function we need to separate the
4114 // coordinates from the image size.
4115 write!(self.out, ", ")?;
4116 } else {
4117 // If we didn't use it (ie. 1D images) we perform the comparison
4118 // using the less than operator.
4119 write!(self.out, " < ")?;
4120 }
4121
4122 // Call `textureSize` to get our image size
4123 write!(self.out, "textureSize(")?;
4124 self.write_expr(image, ctx)?;
4125 // `textureSize` uses the lod as a second argument for mipmapped images
4126 if let Some(level_expr) = level {
4127 // Separate the image from the lod
4128 write!(self.out, ", ")?;
4129 self.write_expr(level_expr, ctx)?;
4130 }
4131 // Close the `textureSize` call
4132 write!(self.out, ")")?;
4133
4134 if vector_size != 1 {
4135 // Close the `all` and `lessThan` calls
4136 write!(self.out, "))")?;
4137 }
4138
4139 // Finally end the condition part of the ternary operator
4140 write!(self.out, " ? ")?;
4141 }
4142
4143 // Begin the call to the function used to load the texel
4144 write!(self.out, "{fun_name}(")?;
4145 self.write_expr(image, ctx)?;
4146 write!(self.out, ", ")?;
4147
4148 // If we are using `Restrict` bounds checking we need to pass valid texel
4149 // coordinates, to do so we use the `clamp` function to get a value between
4150 // 0 and the image size - 1 (indexing begins at 0)
4151 if let proc::BoundsCheckPolicy::Restrict = policy {
4152 write!(self.out, "clamp(")?;
4153 }
4154
4155 // Write the coordinate vector
4156 self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;
4157
4158 // If we are using `Restrict` bounds checking we need to write the rest of the
4159 // clamp we initiated before writing the coordinates.
4160 if let proc::BoundsCheckPolicy::Restrict = policy {
4161 // Write the min value 0
4162 if vector_size == 1 {
4163 write!(self.out, ", 0")?;
4164 } else {
4165 write!(self.out, ", ivec{vector_size}(0)")?;
4166 }
4167 // Start the `textureSize` call to use as the max value.
4168 write!(self.out, ", textureSize(")?;
4169 self.write_expr(image, ctx)?;
4170 // If the image is mipmapped we need to add the lod argument to the
4171 // `textureSize` call, but this needs to be the clamped lod, this should
4172 // have been generated earlier and put in a local.
4173 if class.is_mipmapped() {
4174 write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
4175 }
4176 // Close the `textureSize` call
4177 write!(self.out, ")")?;
4178
4179 // Subtract 1 from the `textureSize` call since the coordinates are zero based.
4180 if vector_size == 1 {
4181 write!(self.out, " - 1")?;
4182 } else {
4183 write!(self.out, " - ivec{vector_size}(1)")?;
4184 }
4185
4186 // Close the `clamp` call
4187 write!(self.out, ")")?;
4188
4189 // Add the clamped lod (if present) as the second argument to the
4190 // image load function.
4191 if level.is_some() {
4192 write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
4193 }
4194
4195 // If a sample argument is needed we need to clamp it between 0 and
4196 // the number of samples the image has.
4197 if let Some(sample_expr) = sample {
4198 write!(self.out, ", clamp(")?;
4199 self.write_expr(sample_expr, ctx)?;
4200 // Set the min value to 0 and start the call to `textureSamples`
4201 write!(self.out, ", 0, textureSamples(")?;
4202 self.write_expr(image, ctx)?;
4203 // Close the `textureSamples` call, subtract 1 from it since the sample
4204 // argument is zero based, and close the `clamp` call
4205 writeln!(self.out, ") - 1)")?;
4206 }
4207 } else if let Some(sample_or_level) = sample.or(level) {
4208 // GLSL only support SInt on this field while WGSL support also UInt
4209 let cast_to_int = matches!(
4210 *ctx.resolve_type(sample_or_level, &self.module.types),
4211 TypeInner::Scalar(crate::Scalar {
4212 kind: crate::ScalarKind::Uint,
4213 ..
4214 })
4215 );
4216
4217 // If no bounds checking is need just add the sample or level argument
4218 // after the coordinates
4219 write!(self.out, ", ")?;
4220
4221 if cast_to_int {
4222 write!(self.out, "int(")?;
4223 }
4224
4225 self.write_expr(sample_or_level, ctx)?;
4226
4227 if cast_to_int {
4228 write!(self.out, ")")?;
4229 }
4230 }
4231
4232 // Close the image load function.
4233 write!(self.out, ")")?;
4234
4235 // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
4236 // (which is taken if the condition is `true`) with a colon (`:`) and write the
4237 // second branch which is just a 0 value.
4238 if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
4239 // Get the kind of the output value.
4240 let kind = match class {
4241 // Only sampled images can reach here since storage images
4242 // don't need bounds checks and depth images aren't implemented
4243 crate::ImageClass::Sampled { kind, .. } => kind,
4244 _ => unreachable!(),
4245 };
4246
4247 // End the first branch
4248 write!(self.out, " : ")?;
4249 // Write the 0 value
4250 write!(
4251 self.out,
4252 "{}vec4(",
4253 glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
4254 )?;
4255 self.write_zero_init_scalar(kind)?;
4256 // Close the zero value constructor
4257 write!(self.out, ")")?;
4258 // Close the parentheses surrounding our ternary
4259 write!(self.out, ")")?;
4260 }
4261
4262 Ok(())
4263 }
4264
4265 fn write_named_expr(
4266 &mut self,
4267 handle: Handle<crate::Expression>,
4268 name: String,
4269 // The expression which is being named.
4270 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4271 named: Handle<crate::Expression>,
4272 ctx: &back::FunctionCtx,
4273 ) -> BackendResult {
4274 match ctx.info[named].ty {
4275 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4276 TypeInner::Struct { .. } => {
4277 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4278 write!(self.out, "{ty_name}")?;
4279 }
4280 _ => {
4281 self.write_type(ty_handle)?;
4282 }
4283 },
4284 proc::TypeResolution::Value(ref inner) => {
4285 self.write_value_type(inner)?;
4286 }
4287 }
4288
4289 let resolved = ctx.resolve_type(named, &self.module.types);
4290
4291 write!(self.out, " {name}")?;
4292 if let TypeInner::Array { base, size, .. } = *resolved {
4293 self.write_array_size(base, size)?;
4294 }
4295 write!(self.out, " = ")?;
4296 self.write_expr(handle, ctx)?;
4297 writeln!(self.out, ";")?;
4298 self.named_expressions.insert(named, name);
4299
4300 Ok(())
4301 }
4302
4303 /// Helper function that write string with default zero initialization for supported types
4304 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4305 let inner = &self.module.types[ty].inner;
4306 match *inner {
4307 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4308 self.write_zero_init_scalar(scalar.kind)?;
4309 }
4310 TypeInner::Vector { scalar, .. } => {
4311 self.write_value_type(inner)?;
4312 write!(self.out, "(")?;
4313 self.write_zero_init_scalar(scalar.kind)?;
4314 write!(self.out, ")")?;
4315 }
4316 TypeInner::Matrix { .. } => {
4317 self.write_value_type(inner)?;
4318 write!(self.out, "(")?;
4319 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4320 write!(self.out, ")")?;
4321 }
4322 TypeInner::Array { base, size, .. } => {
4323 let count = match size.resolve(self.module.to_ctx())? {
4324 proc::IndexableLength::Known(count) => count,
4325 proc::IndexableLength::Dynamic => return Ok(()),
4326 };
4327 self.write_type(base)?;
4328 self.write_array_size(base, size)?;
4329 write!(self.out, "(")?;
4330 for _ in 1..count {
4331 self.write_zero_init_value(base)?;
4332 write!(self.out, ", ")?;
4333 }
4334 // write last parameter without comma and space
4335 self.write_zero_init_value(base)?;
4336 write!(self.out, ")")?;
4337 }
4338 TypeInner::Struct { ref members, .. } => {
4339 let name = &self.names[&NameKey::Type(ty)];
4340 write!(self.out, "{name}(")?;
4341 for (index, member) in members.iter().enumerate() {
4342 if index != 0 {
4343 write!(self.out, ", ")?;
4344 }
4345 self.write_zero_init_value(member.ty)?;
4346 }
4347 write!(self.out, ")")?;
4348 }
4349 _ => unreachable!(),
4350 }
4351
4352 Ok(())
4353 }
4354
4355 /// Helper function that write string with zero initialization for scalar
4356 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4357 match kind {
4358 crate::ScalarKind::Bool => write!(self.out, "false")?,
4359 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4360 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4361 crate::ScalarKind::Sint => write!(self.out, "0")?,
4362 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4363 return Err(Error::Custom(
4364 "Abstract types should not appear in IR presented to backends".to_string(),
4365 ))
4366 }
4367 }
4368
4369 Ok(())
4370 }
4371
4372 /// Issue a control barrier.
4373 fn write_control_barrier(
4374 &mut self,
4375 flags: crate::Barrier,
4376 level: back::Level,
4377 ) -> BackendResult {
4378 self.write_memory_barrier(flags, level)?;
4379 writeln!(self.out, "{level}barrier();")?;
4380 Ok(())
4381 }
4382
4383 /// Issue a memory barrier.
4384 fn write_memory_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4385 if flags.contains(crate::Barrier::STORAGE) {
4386 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4387 }
4388 if flags.contains(crate::Barrier::WORK_GROUP) {
4389 writeln!(self.out, "{level}memoryBarrierShared();")?;
4390 }
4391 if flags.contains(crate::Barrier::SUB_GROUP) {
4392 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4393 }
4394 if flags.contains(crate::Barrier::TEXTURE) {
4395 writeln!(self.out, "{level}memoryBarrierImage();")?;
4396 }
4397 Ok(())
4398 }
4399
4400 /// Helper function that return the glsl storage access string of [`StorageAccess`](crate::StorageAccess)
4401 ///
4402 /// glsl allows adding both `readonly` and `writeonly` but this means that
4403 /// they can only be used to query information about the resource which isn't what
4404 /// we want here so when storage access is both `LOAD` and `STORE` add no modifiers
4405 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4406 if storage_access.contains(crate::StorageAccess::ATOMIC) {
4407 return Ok(());
4408 }
4409 if !storage_access.contains(crate::StorageAccess::STORE) {
4410 write!(self.out, "readonly ")?;
4411 }
4412 if !storage_access.contains(crate::StorageAccess::LOAD) {
4413 write!(self.out, "writeonly ")?;
4414 }
4415 Ok(())
4416 }
4417
4418 /// Helper method used to produce the reflection info that's returned to the user
4419 fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
4420 let info = self.info.get_entry_point(self.entry_point_idx as usize);
4421 let mut texture_mapping = crate::FastHashMap::default();
4422 let mut uniforms = crate::FastHashMap::default();
4423
4424 for sampling in info.sampling_set.iter() {
4425 let tex_name = self.reflection_names_globals[&sampling.image].clone();
4426
4427 match texture_mapping.entry(tex_name) {
4428 hash_map::Entry::Vacant(v) => {
4429 v.insert(TextureMapping {
4430 texture: sampling.image,
4431 sampler: Some(sampling.sampler),
4432 });
4433 }
4434 hash_map::Entry::Occupied(e) => {
4435 if e.get().sampler != Some(sampling.sampler) {
4436 log::error!("Conflicting samplers for {}", e.key());
4437 return Err(Error::ImageMultipleSamplers);
4438 }
4439 }
4440 }
4441 }
4442
4443 let mut immediates_info = None;
4444 for (handle, var) in self.module.global_variables.iter() {
4445 if info[handle].is_empty() {
4446 continue;
4447 }
4448 match self.module.types[var.ty].inner {
4449 TypeInner::Image { .. } => {
4450 let tex_name = self.reflection_names_globals[&handle].clone();
4451 match texture_mapping.entry(tex_name) {
4452 hash_map::Entry::Vacant(v) => {
4453 v.insert(TextureMapping {
4454 texture: handle,
4455 sampler: None,
4456 });
4457 }
4458 hash_map::Entry::Occupied(_) => {
4459 // already used with a sampler, do nothing
4460 }
4461 }
4462 }
4463 _ => match var.space {
4464 crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
4465 let name = self.reflection_names_globals[&handle].clone();
4466 uniforms.insert(handle, name);
4467 }
4468 crate::AddressSpace::Immediate => {
4469 let name = self.reflection_names_globals[&handle].clone();
4470 immediates_info = Some((name, var.ty));
4471 }
4472 _ => (),
4473 },
4474 }
4475 }
4476
4477 let mut immediates_segments = Vec::new();
4478 let mut immediates_items = vec![];
4479
4480 if let Some((name, ty)) = immediates_info {
4481 // We don't have a layouter available to us, so we need to create one.
4482 //
4483 // This is potentially a bit wasteful, but the set of types in the program
4484 // shouldn't be too large.
4485 let mut layouter = proc::Layouter::default();
4486 layouter.update(self.module.to_ctx()).unwrap();
4487
4488 // We start with the name of the binding itself.
4489 immediates_segments.push(name);
4490
4491 // We then recursively collect all the uniform fields of the immediate data.
4492 self.collect_immediates_items(
4493 ty,
4494 &mut immediates_segments,
4495 &layouter,
4496 &mut 0,
4497 &mut immediates_items,
4498 );
4499 }
4500
4501 Ok(ReflectionInfo {
4502 texture_mapping,
4503 uniforms,
4504 varying: mem::take(&mut self.varying),
4505 immediates_items,
4506 clip_distance_count: self.clip_distance_count,
4507 })
4508 }
4509
4510 fn collect_immediates_items(
4511 &mut self,
4512 ty: Handle<crate::Type>,
4513 segments: &mut Vec<String>,
4514 layouter: &proc::Layouter,
4515 offset: &mut u32,
4516 items: &mut Vec<ImmediateItem>,
4517 ) {
4518 // At this point in the recursion, `segments` contains the path
4519 // needed to access `ty` from the root.
4520
4521 let layout = &layouter[ty];
4522 *offset = layout.alignment.round_up(*offset);
4523 match self.module.types[ty].inner {
4524 // All these types map directly to GL uniforms.
4525 TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
4526 // Build the full name, by combining all current segments.
4527 let name: String = segments.iter().map(String::as_str).collect();
4528 items.push(ImmediateItem {
4529 access_path: name,
4530 offset: *offset,
4531 ty,
4532 });
4533 *offset += layout.size;
4534 }
4535 // Arrays are recursed into.
4536 TypeInner::Array { base, size, .. } => {
4537 let crate::ArraySize::Constant(count) = size else {
4538 unreachable!("Cannot have dynamic arrays in immediates");
4539 };
4540
4541 for i in 0..count.get() {
4542 // Add the array accessor and recurse.
4543 segments.push(format!("[{i}]"));
4544 self.collect_immediates_items(base, segments, layouter, offset, items);
4545 segments.pop();
4546 }
4547
4548 // Ensure the stride is kept by rounding up to the alignment.
4549 *offset = layout.alignment.round_up(*offset)
4550 }
4551 TypeInner::Struct { ref members, .. } => {
4552 for (index, member) in members.iter().enumerate() {
4553 // Add struct accessor and recurse.
4554 segments.push(format!(
4555 ".{}",
4556 self.names[&NameKey::StructMember(ty, index as u32)]
4557 ));
4558 self.collect_immediates_items(member.ty, segments, layouter, offset, items);
4559 segments.pop();
4560 }
4561
4562 // Ensure ending padding is kept by rounding up to the alignment.
4563 *offset = layout.alignment.round_up(*offset)
4564 }
4565 _ => unreachable!(),
4566 }
4567 }
4568}