// naga/back/glsl/writer.rs
1use super::*;
2
/// Writer responsible for all code generation.
pub struct Writer<'a, W> {
    // Inputs
    /// The module being written.
    pub(in crate::back::glsl) module: &'a crate::Module,
    /// The module analysis.
    pub(in crate::back::glsl) info: &'a valid::ModuleInfo,
    /// The output writer.
    out: W,
    /// User defined configuration to be used.
    pub(in crate::back::glsl) options: &'a Options,
    /// The bound checking policies to be used
    pub(in crate::back::glsl) policies: proc::BoundsCheckPolicies,

    // Internal State
    /// Features manager used to store all the needed features and write them.
    pub(in crate::back::glsl) features: FeaturesManager,
    /// Generates unique, keyword-free identifiers for module items.
    namer: proc::Namer,
    /// A map with all the names needed for writing the module
    /// (generated by a [`Namer`](crate::proc::Namer)).
    names: crate::FastHashMap<NameKey, String>,
    /// A map with the names of global variables needed for reflections.
    reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// The selected entry point.
    pub(in crate::back::glsl) entry_point: &'a crate::EntryPoint,
    /// The index of the selected entry point.
    pub(in crate::back::glsl) entry_point_idx: proc::EntryPointIndex,
    /// A generator for unique block numbers.
    block_id: IdGenerator,
    /// Set of expressions that have associated temporary variables.
    named_expressions: crate::NamedExpressions,
    /// Set of expressions that need to be baked to avoid unnecessary repetition in output
    need_bake_expressions: back::NeedBakeExpressions,
    /// Information about nesting of loops and switches.
    ///
    /// Used for forwarding continue statements in switches that have been
    /// transformed to `do {} while(false);` loops.
    continue_ctx: back::continue_forward::ContinueCtx,
    /// How many views to render to, if doing multiview rendering.
    pub(in crate::back::glsl) multiview: Option<core::num::NonZeroU32>,
    /// Mapping of varying variables to their location. Needed for reflections.
    varying: crate::FastHashMap<String, VaryingLocation>,
    /// Number of user-defined clip planes. Only non-zero for vertex shaders.
    clip_distance_count: u32,
}
48
49impl<'a, W: Write> Writer<'a, W> {
    /// Creates a new [`Writer`] instance.
    ///
    /// # Errors
    /// - If the version specified is invalid or not supported.
    /// - If the entry point couldn't be found in the module.
    /// - If the version specified doesn't support some used features.
    pub fn new(
        out: W,
        module: &'a crate::Module,
        info: &'a valid::ModuleInfo,
        options: &'a Options,
        pipeline_options: &'a PipelineOptions,
        policies: proc::BoundsCheckPolicies,
    ) -> Result<Self, Error> {
        // Check if the requested version is supported
        if !options.version.is_supported() {
            log::error!("Version {}", options.version);
            return Err(Error::VersionNotSupported);
        }

        // Try to find the entry point and corresponding index by matching both
        // the shader stage and the entry point name from the pipeline options.
        let ep_idx = module
            .entry_points
            .iter()
            .position(|ep| {
                pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
            })
            .ok_or(Error::EntryPointNotFound)?;

        // Generate a map with names required to write the module; the namer is
        // seeded with GLSL reserved keywords and the prefixes we generate ourselves
        // so user identifiers can never collide with them.
        let mut names = crate::FastHashMap::default();
        let mut namer = proc::Namer::default();
        namer.reset(
            module,
            &keywords::RESERVED_KEYWORD_SET,
            proc::KeywordSet::empty(),
            proc::CaseInsensitiveKeywordSet::empty(),
            &[
                "gl_",                  // all GL built-in variables
                "_group",               // all normal bindings
                "_immediates_binding_", // all immediate data bindings
            ],
            &mut names,
        );

        // Build the instance
        let mut this = Self {
            module,
            info,
            out,
            options,
            policies,

            namer,
            features: FeaturesManager::new(),
            names,
            reflection_names_globals: crate::FastHashMap::default(),
            entry_point: &module.entry_points[ep_idx],
            entry_point_idx: ep_idx as u16,
            multiview: pipeline_options.multiview,
            block_id: IdGenerator::default(),
            named_expressions: Default::default(),
            need_bake_expressions: Default::default(),
            continue_ctx: back::continue_forward::ContinueCtx::default(),
            varying: Default::default(),
            clip_distance_count: 0,
        };

        // Find all features required to print this module
        this.collect_required_features()?;

        Ok(this)
    }
123
    /// Writes the [`Module`](crate::Module) as glsl to the output
    ///
    /// # Notes
    /// If an error occurs while writing, the output might have been written partially
    ///
    /// # Panics
    /// Might panic if the module is invalid
    pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
        // We use `writeln!(self.out)` throughout the write to add newlines
        // to make the output more readable

        let es = self.options.version.is_es();

        // Write the version (it must be the first thing or it isn't a valid glsl output)
        writeln!(self.out, "#version {}", self.options.version)?;
        // Write all the needed extensions
        //
        // This used to be the last thing being written as it allowed to search for features while
        // writing the module saving some loops but some older versions (420 or less) required the
        // extensions to appear before being used, even though extensions are part of the
        // preprocessor not the processor ¯\_(ツ)_/¯
        self.features.write(self.options, &mut self.out)?;

        // glsl es requires a precision to be specified for floats and ints
        // TODO: Should this be user configurable?
        if es {
            writeln!(self.out)?;
            writeln!(self.out, "precision highp float;")?;
            writeln!(self.out, "precision highp int;")?;
            writeln!(self.out)?;
        }

        // Compute shaders must declare their workgroup size up front.
        if self.entry_point.stage == ShaderStage::Compute {
            let workgroup_size = self.entry_point.workgroup_size;
            writeln!(
                self.out,
                "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
                workgroup_size[0], workgroup_size[1], workgroup_size[2]
            )?;
            writeln!(self.out)?;
        }

        // Without draw parameters, the first-instance offset must be fed in
        // through a plain uniform that the runtime binds.
        if self.entry_point.stage == ShaderStage::Vertex
            && !self
                .options
                .writer_flags
                .contains(WriterFlags::DRAW_PARAMETERS)
            && self.features.contains(Features::INSTANCE_INDEX)
        {
            writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
            writeln!(self.out)?;
        }

        // Enable early depth tests if needed
        if let Some(early_depth_test) = self.entry_point.early_depth_test {
            // If early depth test is supported for this version of GLSL
            if self.options.version.supports_early_depth_test() {
                match early_depth_test {
                    crate::EarlyDepthTest::Force => {
                        writeln!(self.out, "layout(early_fragment_tests) in;")?;
                    }
                    crate::EarlyDepthTest::Allow { conservative, .. } => {
                        use crate::ConservativeDepth as Cd;
                        let depth = match conservative {
                            Cd::GreaterEqual => "greater",
                            Cd::LessEqual => "less",
                            Cd::Unchanged => "unchanged",
                        };
                        writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
                    }
                }
            } else {
                log::warn!(
                    "Early depth testing is not supported for this version of GLSL: {}",
                    self.options.version
                );
            }
        }

        // WebGL multiview rendering needs a `num_views` layout qualifier.
        if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
            if let Some(multiview) = self.multiview.as_ref() {
                writeln!(self.out, "layout(num_views = {multiview}) in;")?;
                writeln!(self.out)?;
            }
        }

        // Write struct types.
        //
        // These are always ordered because the IR is structured in a way that
        // you can't make a struct without adding all of its members first.
        for (handle, ty) in self.module.types.iter() {
            if let TypeInner::Struct { ref members, .. } = ty.inner {
                let struct_name = &self.names[&NameKey::Type(handle)];

                // Structures ending with runtime-sized arrays can only be
                // rendered as shader storage blocks in GLSL, not stand-alone
                // struct types.
                if !self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types)
                {
                    write!(self.out, "struct {struct_name} ")?;
                    self.write_struct_body(handle, members)?;
                    writeln!(self.out, ";")?;
                }
            }
        }

        // Write helper functions for special (predeclared) result types,
        // e.g. the structs returned by `modf` and `frexp`.
        for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
            match type_key {
                &crate::PredeclaredType::ModfResult { size, scalar }
                | &crate::PredeclaredType::FrexpResult { size, scalar } => {
                    let struct_name = &self.names[&NameKey::Type(*struct_ty)];
                    let arg_type_name_owner;
                    let arg_type_name = if let Some(size) = size {
                        arg_type_name_owner = format!(
                            "{}vec{}",
                            if scalar.width == 8 { "d" } else { "" },
                            size as u8
                        );
                        &arg_type_name_owner
                    } else if scalar.width == 8 {
                        "double"
                    } else {
                        "float"
                    };

                    let other_type_name_owner;
                    let (defined_func_name, called_func_name, other_type_name) =
                        if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
                            (MODF_FUNCTION, "modf", arg_type_name)
                        } else {
                            // frexp returns its exponent through a signed integer out-param.
                            let other_type_name = if let Some(size) = size {
                                other_type_name_owner = format!("ivec{}", size as u8);
                                &other_type_name_owner
                            } else {
                                "int"
                            };
                            (FREXP_FUNCTION, "frexp", other_type_name)
                        };

                    writeln!(self.out)?;
                    if !self.options.version.supports_frexp_function()
                        && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
                    {
                        // Emulate frexp via log2/exp2 on versions lacking the builtin.
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
    {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
    return {struct_name}(fract, other);
}}",
                        )?;
                    } else {
                        // Wrap the native builtin so it returns a struct instead of
                        // using an out-parameter.
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other;
    {arg_type_name} fract = {called_func_name}(arg, other);
    return {struct_name}(fract, other);
}}",
                        )?;
                    }
                }
                &crate::PredeclaredType::AtomicCompareExchangeWeakResult(_) => {
                    // Handled by the general struct writing loop earlier.
                }
            }
        }

        // Write all named constants
        let mut constants = self
            .module
            .constants
            .iter()
            .filter(|&(_, c)| c.name.is_some())
            .peekable();
        while let Some((handle, _)) = constants.next() {
            self.write_global_constant(handle)?;
            // Add extra newline for readability on last iteration
            if constants.peek().is_none() {
                writeln!(self.out)?;
            }
        }

        let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);

        // Write the globals
        //
        // Unless explicitly disabled with WriterFlags::INCLUDE_UNUSED_ITEMS,
        // we filter out all globals that aren't used by the selected entry point,
        // as they might interfere with each other (e.g. two globals with the
        // same location but different classes)
        let include_unused = self
            .options
            .writer_flags
            .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
        for (handle, global) in self.module.global_variables.iter() {
            let is_unused = ep_info[handle].is_empty();
            if !include_unused && is_unused {
                continue;
            }

            match self.module.types[global.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    mut dim,
                    arrayed,
                    class,
                } => {
                    // Gather the storage format if needed
                    let storage_format_access = match self.module.types[global.ty].inner {
                        TypeInner::Image {
                            class: crate::ImageClass::Storage { format, access },
                            ..
                        } => Some((format, access)),
                        _ => None,
                    };

                    // GLSL ES has no 1D images; promote them to 2D.
                    if dim == crate::ImageDimension::D1 && es {
                        dim = crate::ImageDimension::D2
                    }

                    // Gather the explicit binding location if the version supports it
                    let layout_binding = if self.options.version.supports_explicit_locations() {
                        let br = global.binding.as_ref().unwrap();
                        self.options.binding_map.get(br).cloned()
                    } else {
                        None
                    };

                    // Write all the layout qualifiers
                    if layout_binding.is_some() || storage_format_access.is_some() {
                        write!(self.out, "layout(")?;
                        if let Some(binding) = layout_binding {
                            write!(self.out, "binding = {binding}")?;
                        }
                        if let Some((format, _)) = storage_format_access {
                            let format_str = glsl_storage_format(format)?;
                            let separator = match layout_binding {
                                Some(_) => ",",
                                None => "",
                            };
                            write!(self.out, "{separator}{format_str}")?;
                        }
                        write!(self.out, ") ")?;
                    }

                    if let Some((_, access)) = storage_format_access {
                        self.write_storage_access(access)?;
                    }

                    // All images in glsl are `uniform`
                    // The trailing space is important
                    write!(self.out, "uniform ")?;

                    // write the type
                    //
                    // Note that `write_image_type` doesn't add any spaces at the
                    // beginning or end, which is why the surrounding spaces matter
                    self.write_image_type(dim, arrayed, class)?;

                    // Finally write the name and end the global with a `;`
                    // The leading space is important
                    let global_name = self.get_global_name(handle, global);
                    writeln!(self.out, " {global_name};")?;
                    writeln!(self.out)?;

                    self.reflection_names_globals.insert(handle, global_name);
                }
                // glsl has no concept of samplers so we just ignore it
                TypeInner::Sampler { .. } => continue,
                // All other globals are written by `write_global`
                _ => {
                    self.write_global(handle, global)?;
                    // Add a newline (only for readability)
                    writeln!(self.out)?;
                }
            }
        }

        // Declare the `in`/`out` varyings carrying the entry point's
        // arguments and return value.
        for arg in self.entry_point.function.arguments.iter() {
            self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
        }
        if let Some(ref result) = self.entry_point.function.result {
            self.write_varying(result.binding.as_ref(), result.ty, true)?;
        }
        writeln!(self.out)?;

        // Write all regular functions
        for (handle, function) in self.module.functions.iter() {
            // Check that the function doesn't use globals that aren't supported
            // by the current entry point
            if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
                continue;
            }

            let fun_info = &self.info[handle];

            // Skip functions that are not compatible with this entry point's stage.
            //
            // When validation is enabled, it rejects modules whose entry points try to call
            // incompatible functions, so if we got this far, then any functions incompatible
            // with our selected entry point must not be used.
            //
            // When validation is disabled, `fun_info.available_stages` is always just
            // `ShaderStages::all()`, so this will write all functions in the module, and
            // the downstream GLSL compiler will catch any problems.
            if !fun_info.available_stages.contains(ep_info.available_stages) {
                continue;
            }

            // Write the function
            self.write_function(back::FunctionType::Function(handle), function, fun_info)?;

            writeln!(self.out)?;
        }

        // Write the entry point itself (becomes `main` in GLSL).
        self.write_function(
            back::FunctionType::EntryPoint(self.entry_point_idx),
            &self.entry_point.function,
            ep_info,
        )?;

        // Add newline at the end of file
        writeln!(self.out)?;

        // Collect all reflection info and return it to the user
        self.collect_reflection_info()
    }
456
457 fn write_array_size(
458 &mut self,
459 base: Handle<crate::Type>,
460 size: crate::ArraySize,
461 ) -> BackendResult {
462 write!(self.out, "[")?;
463
464 // Write the array size
465 // Writes nothing if `IndexableLength::Dynamic`
466 match size.resolve(self.module.to_ctx())? {
467 proc::IndexableLength::Known(size) => {
468 write!(self.out, "{size}")?;
469 }
470 proc::IndexableLength::Dynamic => (),
471 }
472
473 write!(self.out, "]")?;
474
475 if let TypeInner::Array {
476 base: next_base,
477 size: next_size,
478 ..
479 } = self.module.types[base].inner
480 {
481 self.write_array_size(next_base, next_size)?;
482 }
483
484 Ok(())
485 }
486
    /// Helper method used to write value types
    ///
    /// # Notes
    /// Adds no trailing or leading whitespace
    fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
        match *inner {
            // Scalars are simple we just get the full name from `glsl_scalar`
            TypeInner::Scalar(scalar)
            | TypeInner::Atomic(scalar)
            | TypeInner::ValuePointer {
                size: None,
                scalar,
                space: _,
            } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
            // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
            TypeInner::Vector { size, scalar }
            | TypeInner::ValuePointer {
                size: Some(size),
                scalar,
                space: _,
            } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
            // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
            // doubles are allowed), `M` is the columns count and `N` is the rows count
            //
            // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
            // extra branch to write matrices this way
            TypeInner::Matrix {
                columns,
                rows,
                scalar,
            } => write!(
                self.out,
                "{}mat{}x{}",
                glsl_scalar(scalar)?.prefix,
                columns as u8,
                rows as u8
            )?,
            // GLSL arrays are written as `type name[size]`
            // Here we only write the size of the array i.e. `[size]`
            // Base `type` and `name` should be written outside
            TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
            // List all remaining variants explicitly instead of using `_` so
            // that adding a new variant triggers an exhaustiveness error here.
            TypeInner::Pointer { .. }
            | TypeInner::Struct { .. }
            | TypeInner::Image { .. }
            | TypeInner::Sampler { .. }
            | TypeInner::AccelerationStructure { .. }
            | TypeInner::RayQuery { .. }
            | TypeInner::BindingArray { .. }
            | TypeInner::CooperativeMatrix { .. } => {
                return Err(Error::Custom(format!("Unable to write type {inner:?}")))
            }
        }

        Ok(())
    }
544
545 /// Helper method used to write non image/sampler types
546 ///
547 /// # Notes
548 /// Adds no trailing or leading whitespace
549 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
550 match self.module.types[ty].inner {
551 // glsl has no pointer types so just write types as normal and loads are skipped
552 TypeInner::Pointer { base, .. } => self.write_type(base),
553 // glsl structs are written as just the struct name
554 TypeInner::Struct { .. } => {
555 // Get the struct name
556 let name = &self.names[&NameKey::Type(ty)];
557 write!(self.out, "{name}")?;
558 Ok(())
559 }
560 // glsl array has the size separated from the base type
561 TypeInner::Array { base, .. } => self.write_type(base),
562 ref other => self.write_value_type(other),
563 }
564 }
565
566 /// Helper method to write a image type
567 ///
568 /// # Notes
569 /// Adds no leading or trailing whitespace
570 fn write_image_type(
571 &mut self,
572 dim: crate::ImageDimension,
573 arrayed: bool,
574 class: crate::ImageClass,
575 ) -> BackendResult {
576 // glsl images consist of four parts the scalar prefix, the image "type", the dimensions
577 // and modifiers
578 //
579 // There exists two image types
580 // - sampler - for sampled images
581 // - image - for storage images
582 //
583 // There are three possible modifiers that can be used together and must be written in
584 // this order to be valid
585 // - MS - used if it's a multisampled image
586 // - Array - used if it's an image array
587 // - Shadow - used if it's a depth image
588 use crate::ImageClass as Ic;
589 use crate::Scalar as S;
590 let float = S {
591 kind: crate::ScalarKind::Float,
592 width: 4,
593 };
594 let (base, scalar, ms, comparison) = match class {
595 Ic::Sampled { kind, multi: true } => ("sampler", S { kind, width: 4 }, "MS", ""),
596 Ic::Sampled { kind, multi: false } => ("sampler", S { kind, width: 4 }, "", ""),
597 Ic::Depth { multi: true } => ("sampler", float, "MS", ""),
598 Ic::Depth { multi: false } => ("sampler", float, "", "Shadow"),
599 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
600 Ic::External => unimplemented!(),
601 };
602
603 let precision = if self.options.version.is_es() {
604 "highp "
605 } else {
606 ""
607 };
608
609 write!(
610 self.out,
611 "{}{}{}{}{}{}{}",
612 precision,
613 glsl_scalar(scalar)?.prefix,
614 base,
615 glsl_dimension(dim),
616 ms,
617 if arrayed { "Array" } else { "" },
618 comparison
619 )?;
620
621 Ok(())
622 }
623
    /// Helper method used by [Self::write_global] to write just the layout part of
    /// a non image/sampler global variable, if applicable.
    ///
    /// # Notes
    ///
    /// Adds trailing whitespace if any layout qualifier is written
    fn write_global_layout(&mut self, global: &crate::GlobalVariable) -> BackendResult {
        // Determine which (if any) explicit memory layout to use, and whether we support it.
        // Uniform blocks need std140, storage blocks need std430; other address
        // spaces have no explicit layout.
        let layout = match global.space {
            crate::AddressSpace::Uniform => {
                if !self.options.version.supports_std140_layout() {
                    return Err(Error::Custom(
                        "Uniform address space requires std140 layout support".to_string(),
                    ));
                }

                Some("std140")
            }
            crate::AddressSpace::Storage { .. } => {
                if !self.options.version.supports_std430_layout() {
                    return Err(Error::Custom(
                        "Storage address space requires std430 layout support".to_string(),
                    ));
                }

                Some("std430")
            }
            _ => None,
        };

        // If our version supports explicit layouts, we can also output the explicit binding
        // if we have it
        if self.options.version.supports_explicit_locations() {
            if let Some(ref br) = global.binding {
                match self.options.binding_map.get(br) {
                    Some(binding) => {
                        // Write layout + binding together and return early;
                        // the memory layout (if any) rides along in the same
                        // `layout(...)` qualifier.
                        write!(self.out, "layout(")?;

                        if let Some(layout) = layout {
                            write!(self.out, "{layout}, ")?;
                        }

                        write!(self.out, "binding = {binding}) ")?;

                        return Ok(());
                    }
                    None => {
                        // Missing map entry is not fatal; fall through and
                        // emit only the memory layout below.
                        log::debug!("unassigned binding for {:?}", global.name);
                    }
                }
            }
        }

        // Either no explicit bindings are supported or we didn't have any.
        // Write just the memory layout.
        if let Some(layout) = layout {
            write!(self.out, "layout({layout}) ")?;
        }

        Ok(())
    }
685
686 /// Helper method used to write non images/sampler globals
687 ///
688 /// # Notes
689 /// Adds a newline
690 ///
691 /// # Panics
692 /// If the global has type sampler
693 fn write_global(
694 &mut self,
695 handle: Handle<crate::GlobalVariable>,
696 global: &crate::GlobalVariable,
697 ) -> BackendResult {
698 self.write_global_layout(global)?;
699
700 if let crate::AddressSpace::Storage { access } = global.space {
701 self.write_storage_access(access)?;
702 }
703
704 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
705 write!(self.out, "{storage_qualifier} ")?;
706 }
707
708 match global.space {
709 crate::AddressSpace::Private => {
710 self.write_simple_global(handle, global)?;
711 }
712 crate::AddressSpace::WorkGroup => {
713 self.write_simple_global(handle, global)?;
714 }
715 crate::AddressSpace::Immediate => {
716 self.write_simple_global(handle, global)?;
717 }
718 crate::AddressSpace::Uniform => {
719 self.write_interface_block(handle, global)?;
720 }
721 crate::AddressSpace::Storage { .. } => {
722 self.write_interface_block(handle, global)?;
723 }
724 crate::AddressSpace::TaskPayload => {
725 self.write_interface_block(handle, global)?;
726 }
727 // A global variable in the `Function` address space is a
728 // contradiction in terms.
729 crate::AddressSpace::Function => unreachable!(),
730 // Textures and samplers are handled directly in `Writer::write`.
731 crate::AddressSpace::Handle => unreachable!(),
732 // ray tracing pipelines unsupported
733 crate::AddressSpace::RayPayload | crate::AddressSpace::IncomingRayPayload => {
734 unreachable!()
735 }
736 }
737
738 Ok(())
739 }
740
741 fn write_simple_global(
742 &mut self,
743 handle: Handle<crate::GlobalVariable>,
744 global: &crate::GlobalVariable,
745 ) -> BackendResult {
746 self.write_type(global.ty)?;
747 write!(self.out, " ")?;
748 self.write_global_name(handle, global)?;
749
750 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
751 self.write_array_size(base, size)?;
752 }
753
754 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
755 write!(self.out, " = ")?;
756 if let Some(init) = global.init {
757 self.write_const_expr(init, &self.module.global_expressions)?;
758 } else {
759 self.write_zero_init_value(global.ty)?;
760 }
761 }
762
763 writeln!(self.out, ";")?;
764
765 if let crate::AddressSpace::Immediate = global.space {
766 let global_name = self.get_global_name(handle, global);
767 self.reflection_names_globals.insert(handle, global_name);
768 }
769
770 Ok(())
771 }
772
    /// Write an interface block for a single Naga global.
    ///
    /// Write `block_name { members }`. Since `block_name` must be unique
    /// between blocks and structs, we add `_block_ID` where `ID` is a
    /// `IdGenerator` generated number. Write `members` in the same way we write
    /// a struct's members.
    fn write_interface_block(
        &mut self,
        handle: Handle<crate::GlobalVariable>,
        global: &crate::GlobalVariable,
    ) -> BackendResult {
        // Write the block name, it's just the struct name appended with `_block_ID`
        // (plus the shader stage, so the same global in different stages gets
        // distinct block names).
        let ty_name = &self.names[&NameKey::Type(global.ty)];
        let block_name = format!(
            "{}_block_{}{:?}",
            // avoid double underscores as they are reserved in GLSL
            ty_name.trim_end_matches('_'),
            self.block_id.generate(),
            self.entry_point.stage,
        );
        write!(self.out, "{block_name} ")?;
        self.reflection_names_globals.insert(handle, block_name);

        match self.module.types[global.ty].inner {
            TypeInner::Struct { ref members, .. }
                if self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types) =>
            {
                // Structs with dynamically sized arrays must have their
                // members lifted up as members of the interface block. GLSL
                // can't write such struct types anyway.
                self.write_struct_body(global.ty, members)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
            }
            _ => {
                // A global of any other type is written as the sole member
                // of the interface block. Since the interface block is
                // anonymous, this becomes visible in the global scope.
                write!(self.out, "{{ ")?;
                self.write_type(global.ty)?;
                write!(self.out, " ")?;
                self.write_global_name(handle, global)?;
                if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
                    self.write_array_size(base, size)?;
                }
                write!(self.out, "; }}")?;
            }
        }

        writeln!(self.out, ";")?;

        Ok(())
    }
828
    /// Helper method used to find which expressions of a given function require baking
    ///
    /// # Notes
    /// Clears `need_bake_expressions` set before adding to it
    fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
        use crate::Expression;
        self.need_bake_expressions.clear();
        for (fun_handle, expr) in func.expressions.iter() {
            let expr_info = &info[fun_handle];
            // Expressions referenced at least `bake_ref_count` times get a
            // named temporary so the work isn't duplicated in the output.
            let min_ref_count = func.expressions[fun_handle].bake_ref_count();
            if min_ref_count <= expr_info.ref_count {
                self.need_bake_expressions.insert(fun_handle);
            }

            let inner = expr_info.ty.inner_with(&self.module.types);

            // Certain math functions are lowered to multi-use emulation code,
            // so their arguments must be baked to avoid re-evaluation.
            if let Expression::Math {
                fun,
                arg,
                arg1,
                arg2,
                ..
            } = *expr
            {
                match fun {
                    crate::MathFunction::Dot => {
                        // if the expression is a Dot product with integer arguments,
                        // then the args need baking as well
                        if let TypeInner::Scalar(crate::Scalar {
                            kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint,
                            ..
                        }) = *inner
                        {
                            self.need_bake_expressions.insert(arg);
                            self.need_bake_expressions.insert(arg1.unwrap());
                        }
                    }
                    crate::MathFunction::Dot4U8Packed | crate::MathFunction::Dot4I8Packed => {
                        self.need_bake_expressions.insert(arg);
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::Pack4xI8
                    | crate::MathFunction::Pack4xU8
                    | crate::MathFunction::Pack4xI8Clamp
                    | crate::MathFunction::Pack4xU8Clamp
                    | crate::MathFunction::Unpack4xI8
                    | crate::MathFunction::Unpack4xU8
                    | crate::MathFunction::QuantizeToF16 => {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8snorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        // We have a fallback if the platform doesn't natively support these
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack4x8unorm | */
                    crate::MathFunction::Unpack4x8unorm
                        if !self.options.version.supports_pack_unpack_4x8() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16snorm | */
                    crate::MathFunction::Unpack2x16snorm
                        if !self.options.version.supports_pack_unpack_snorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    /* crate::MathFunction::Pack2x16unorm | */
                    crate::MathFunction::Unpack2x16unorm
                        if !self.options.version.supports_pack_unpack_unorm_2x16() =>
                    {
                        self.need_bake_expressions.insert(arg);
                    }
                    crate::MathFunction::ExtractBits => {
                        // Only argument 1 is re-used.
                        self.need_bake_expressions.insert(arg1.unwrap());
                    }
                    crate::MathFunction::InsertBits => {
                        // Only argument 2 is re-used.
                        self.need_bake_expressions.insert(arg2.unwrap());
                    }
                    crate::MathFunction::CountLeadingZeros => {
                        // The signed-int emulation reads the argument twice.
                        if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
                            self.need_bake_expressions.insert(arg);
                        }
                    }
                    _ => {}
                }
            }
        }

        // Compare-exchange atomics reference the comparison value more than
        // once in the emitted code, so it must be baked too.
        for statement in func.body.iter() {
            match *statement {
                crate::Statement::Atomic {
                    fun: crate::AtomicFunction::Exchange { compare: Some(cmp) },
                    ..
                } => {
                    self.need_bake_expressions.insert(cmp);
                }
                _ => {}
            }
        }
    }
934
935 /// Helper method used to get a name for a global
936 ///
937 /// Globals have different naming schemes depending on their binding:
938 /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
939 /// - Globals with resource binding are named `_group_X_binding_Y` where `X`
940 /// is the group and `Y` is the binding
941 fn get_global_name(
942 &self,
943 handle: Handle<crate::GlobalVariable>,
944 global: &crate::GlobalVariable,
945 ) -> String {
946 match (&global.binding, global.space) {
947 (&Some(ref br), _) => {
948 format!(
949 "_group_{}_binding_{}_{}",
950 br.group,
951 br.binding,
952 self.entry_point.stage.to_str()
953 )
954 }
955 (&None, crate::AddressSpace::Immediate) => {
956 format!("_immediates_binding_{}", self.entry_point.stage.to_str())
957 }
958 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
959 }
960 }
961
962 /// Helper method used to write a name for a global without additional heap allocation
963 fn write_global_name(
964 &mut self,
965 handle: Handle<crate::GlobalVariable>,
966 global: &crate::GlobalVariable,
967 ) -> BackendResult {
968 match (&global.binding, global.space) {
969 (&Some(ref br), _) => write!(
970 self.out,
971 "_group_{}_binding_{}_{}",
972 br.group,
973 br.binding,
974 self.entry_point.stage.to_str()
975 )?,
976 (&None, crate::AddressSpace::Immediate) => write!(
977 self.out,
978 "_immediates_binding_{}",
979 self.entry_point.stage.to_str()
980 )?,
981 (&None, _) => write!(
982 self.out,
983 "{}",
984 &self.names[&NameKey::GlobalVariable(handle)]
985 )?,
986 }
987
988 Ok(())
989 }
990
    /// Write a GLSL global that will carry a Naga entry point's argument or return value.
    ///
    /// A Naga entry point's arguments and return value are rendered in GLSL as
    /// variables at global scope with the `in` and `out` storage qualifiers.
    /// The code we generate for `main` loads from all the `in` globals into
    /// appropriately named locals. Before it returns, `main` assigns the
    /// components of its return value into all the `out` globals.
    ///
    /// This function writes a declaration for one such GLSL global,
    /// representing a value passed into or returned from [`self.entry_point`]
    /// that has a [`Location`] binding. The global's name is generated based on
    /// the location index and the shader stages being connected; see
    /// [`VaryingName`]. This means we don't need to know the names of
    /// arguments, just their types and bindings.
    ///
    /// Emit nothing for entry point arguments or return values with [`BuiltIn`]
    /// bindings; `main` will read from or assign to the appropriate GLSL
    /// special variable; these are pre-declared. As an exception, we do declare
    /// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
    /// needed.
    ///
    /// Use `output` together with [`self.entry_point.stage`] to determine which
    /// shader stages are being connected, and choose the `in` or `out` storage
    /// qualifier.
    ///
    /// [`self.entry_point`]: Writer::entry_point
    /// [`self.entry_point.stage`]: crate::EntryPoint::stage
    /// [`Location`]: crate::Binding::Location
    /// [`BuiltIn`]: crate::Binding::BuiltIn
    fn write_varying(
        &mut self,
        binding: Option<&crate::Binding>,
        ty: Handle<crate::Type>,
        output: bool,
    ) -> Result<(), Error> {
        // For a struct, emit a separate global for each member with a binding.
        if let TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
            for member in members {
                self.write_varying(member.binding.as_ref(), member.ty, output)?;
            }
            return Ok(());
        }

        // A non-struct value without a binding produces no declaration at all.
        let binding = match binding {
            None => return Ok(()),
            Some(binding) => binding,
        };

        let (location, interpolation, sampling, blend_src) = match *binding {
            crate::Binding::Location {
                location,
                interpolation,
                sampling,
                blend_src,
                // `per_primitive` is not used when emitting GLSL varyings.
                per_primitive: _,
            } => (location, interpolation, sampling, blend_src),
            crate::Binding::BuiltIn(built_in) => {
                // Built-ins map to pre-declared GLSL variables; the only
                // declarations we emit are the exceptions handled below.
                match built_in {
                    crate::BuiltIn::Position { invariant: true } => {
                        match (self.options.version, self.entry_point.stage) {
                            (
                                Version::Embedded {
                                    version: 300,
                                    is_webgl: true,
                                },
                                ShaderStage::Fragment,
                            ) => {
                                // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
                                // OpenGL ES in general (waiting on confirmation).
                                //
                                // See https://github.com/KhronosGroup/WebGL/issues/3518
                            }
                            _ => {
                                writeln!(
                                    self.out,
                                    "invariant {};",
                                    glsl_built_in(
                                        built_in,
                                        VaryingOptions::from_writer_options(self.options, output)
                                    )
                                )?;
                            }
                        }
                    }
                    crate::BuiltIn::ClipDistance => {
                        // Re-declare `gl_ClipDistance` with number of clip planes.
                        // The clip distance value is an array with a size known
                        // at this point; anything else is a validation bug.
                        let TypeInner::Array { size, .. } = self.module.types[ty].inner else {
                            unreachable!();
                        };
                        let proc::IndexableLength::Known(size) =
                            size.resolve(self.module.to_ctx())?
                        else {
                            unreachable!();
                        };
                        // Remember the plane count for later use by the writer.
                        self.clip_distance_count = size;
                        writeln!(self.out, "out float gl_ClipDistance[{size}];")?;
                    }
                    _ => {}
                }
                return Ok(());
            }
        };

        // Write the interpolation modifier if needed
        //
        // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
        // shaders' input globals or vertex shaders' output globals.
        let emit_interpolation_and_auxiliary = match self.entry_point.stage {
            ShaderStage::Vertex => output,
            ShaderStage::Fragment => !output,
            ShaderStage::Compute => false,
            // These stages are never the selected entry point for the GLSL
            // backend, so reaching them here is a bug.
            ShaderStage::Task
            | ShaderStage::Mesh
            | ShaderStage::RayGeneration
            | ShaderStage::AnyHit
            | ShaderStage::ClosestHit
            | ShaderStage::Miss => unreachable!(),
        };

        // Write the I/O locations, if allowed
        let io_location = if self.options.version.supports_explicit_locations()
            || !emit_interpolation_and_auxiliary
        {
            if self.options.version.supports_io_locations() {
                if let Some(blend_src) = blend_src {
                    write!(
                        self.out,
                        "layout(location = {location}, index = {blend_src}) "
                    )?;
                } else {
                    write!(self.out, "layout(location = {location}) ")?;
                }
                None
            } else {
                // No explicit `layout(location = ...)` support: remember the
                // location so it can be reported through reflection instead.
                Some(VaryingLocation {
                    location,
                    index: blend_src.unwrap_or(0),
                })
            }
        } else {
            None
        };

        // Write the interpolation qualifier.
        if let Some(interp) = interpolation {
            if emit_interpolation_and_auxiliary {
                write!(self.out, "{} ", glsl_interpolation(interp))?;
            }
        }

        // Write the sampling auxiliary qualifier.
        //
        // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
        // immediately before the `in` / `out` qualifier, so we'll just follow that rule
        // here, regardless of the version.
        if let Some(sampling) = sampling {
            if emit_interpolation_and_auxiliary {
                if let Some(qualifier) = glsl_sampling(sampling)? {
                    write!(self.out, "{qualifier} ")?;
                }
            }
        }

        // Write the input/output qualifier.
        write!(self.out, "{} ", if output { "out" } else { "in" })?;

        // Write the type
        // `write_type` adds no leading or trailing spaces
        self.write_type(ty)?;

        // Finally write the global name and end the global with a `;` and a newline
        // Leading space is important
        let vname = VaryingName {
            binding: &crate::Binding::Location {
                location,
                interpolation: None,
                sampling: None,
                blend_src,
                per_primitive: false,
            },
            stage: self.entry_point.stage,
            options: VaryingOptions::from_writer_options(self.options, output),
        };
        writeln!(self.out, " {vname};")?;

        // Record the varying's location for reflection when no explicit
        // `layout` was written above.
        if let Some(location) = io_location {
            self.varying.insert(vname.to_string(), location);
        }

        Ok(())
    }
1182
    /// Helper method used to write functions (both entry points and regular functions)
    ///
    /// Entry points are always emitted as `void main()`: their Naga arguments
    /// are reconstructed inside the body from the corresponding `in` globals
    /// (see [`VaryingName`]) instead of being written as parameters.
    ///
    /// # Notes
    /// Adds a newline
    fn write_function(
        &mut self,
        ty: back::FunctionType,
        func: &crate::Function,
        info: &valid::FunctionInfo,
    ) -> BackendResult {
        // Create a function context for the function being written
        let ctx = back::FunctionCtx {
            ty,
            info,
            expressions: &func.expressions,
            named_expressions: &func.named_expressions,
        };

        // Reset per-function state: the named-expression cache and the set of
        // expressions that must be baked into temporaries.
        self.named_expressions.clear();
        self.update_expressions_to_bake(func, info);

        // Write the function header
        //
        // glsl headers are the same as in c:
        // `ret_type name(args)`
        // `ret_type` is the return type
        // `name` is the function name
        // `args` is a comma separated list of `type name`
        // | - `type` is the argument type
        // | - `name` is the argument name

        // Start by writing the return type if any otherwise write void
        // This is the only place where `void` is a valid type
        // (though it's more a keyword than a type)
        if let back::FunctionType::EntryPoint(_) = ctx.ty {
            write!(self.out, "void")?;
        } else if let Some(ref result) = func.result {
            self.write_type(result.ty)?;
            // Array return types carry their `[size]` suffix after the type.
            if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
                self.write_array_size(base, size)?
            }
        } else {
            write!(self.out, "void")?;
        }

        // Write the function name and open parentheses for the argument list
        let function_name = match ctx.ty {
            back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
            back::FunctionType::EntryPoint(_) => "main",
        };
        write!(self.out, " {function_name}(")?;

        // Write the comma separated argument list
        //
        // We need access to `Self` here so we use the reference passed to the closure as an
        // argument instead of capturing as that would cause a borrow checker error
        let arguments = match ctx.ty {
            // Entry points take no GLSL parameters; their arguments are
            // rebuilt from globals in the body below.
            back::FunctionType::EntryPoint(_) => &[][..],
            back::FunctionType::Function(_) => &func.arguments,
        };
        // Sampler-typed arguments are skipped here; the call-site writer
        // filters them out of argument lists in the same way.
        let arguments: Vec<_> = arguments
            .iter()
            .enumerate()
            .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
                TypeInner::Sampler { .. } => false,
                _ => true,
            })
            .collect();
        self.write_slice(&arguments, |this, _, &(i, arg)| {
            // Write the argument type
            match this.module.types[arg.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    dim,
                    arrayed,
                    class,
                } => {
                    // Write the storage format if needed
                    if let TypeInner::Image {
                        class: crate::ImageClass::Storage { format, .. },
                        ..
                    } = this.module.types[arg.ty].inner
                    {
                        write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
                    }

                    // write the type
                    //
                    // This is why we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    this.write_image_type(dim, arrayed, class)?;
                }
                TypeInner::Pointer { base, .. } => {
                    // write parameter qualifiers
                    // GLSL has no pointers; a pointer argument becomes an
                    // `inout` parameter of the pointee type.
                    write!(this.out, "inout ")?;
                    this.write_type(base)?;
                }
                // All other types are written by `write_type`
                _ => {
                    this.write_type(arg.ty)?;
                }
            }

            // Write the argument name
            // The leading space is important
            write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;

            // Write array size
            match this.module.types[arg.ty].inner {
                TypeInner::Array { base, size, .. } => {
                    this.write_array_size(base, size)?;
                }
                TypeInner::Pointer { base, .. } => {
                    // Pointers to arrays also need the `[size]` suffix, since
                    // they were written above as `inout` pointee parameters.
                    if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
                        this.write_array_size(base, size)?;
                    }
                }
                _ => {}
            }

            Ok(())
        })?;

        // Close the parentheses and open braces to start the function body
        writeln!(self.out, ") {{")?;

        // Compute-like entry points may need their workgroup variables zeroed
        // before any other code runs.
        if self.options.zero_initialize_workgroup_memory
            && ctx.ty.is_compute_like_entry_point(self.module)
        {
            self.write_workgroup_variables_initialization(&ctx)?;
        }

        // Compose the function arguments from globals, in case of an entry point.
        if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
            let stage = self.module.entry_points[ep_index as usize].stage;
            for (index, arg) in func.arguments.iter().enumerate() {
                write!(self.out, "{}", back::INDENT)?;
                self.write_type(arg.ty)?;
                let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
                write!(self.out, " {name}")?;
                write!(self.out, " = ")?;
                match self.module.types[arg.ty].inner {
                    // Struct arguments are rebuilt with a constructor call
                    // whose operands are the per-member `in` globals.
                    TypeInner::Struct { ref members, .. } => {
                        self.write_type(arg.ty)?;
                        write!(self.out, "(")?;
                        for (index, member) in members.iter().enumerate() {
                            let varying_name = VaryingName {
                                // `unwrap`: flattened struct members are
                                // expected to carry their own binding here.
                                binding: member.binding.as_ref().unwrap(),
                                stage,
                                options: VaryingOptions::from_writer_options(self.options, false),
                            };
                            if index != 0 {
                                write!(self.out, ", ")?;
                            }
                            write!(self.out, "{varying_name}")?;
                        }
                        writeln!(self.out, ");")?;
                    }
                    _ => {
                        let varying_name = VaryingName {
                            // `unwrap`: non-struct entry point arguments are
                            // expected to carry a binding at this point.
                            binding: arg.binding.as_ref().unwrap(),
                            stage,
                            options: VaryingOptions::from_writer_options(self.options, false),
                        };
                        writeln!(self.out, "{varying_name};")?;
                    }
                }
            }
        }

        // Write all function locals
        // Locals are `type name (= init)?;` where the init part (including the =) are optional
        //
        // Always adds a newline
        for (handle, local) in func.local_variables.iter() {
            // Write indentation (only for readability) and the type
            // `write_type` adds no trailing space
            write!(self.out, "{}", back::INDENT)?;
            self.write_type(local.ty)?;

            // Write the local name
            // The leading space is important
            write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
            // Write size for array type
            if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
                self.write_array_size(base, size)?;
            }
            // Write the local initializer if needed
            if let Some(init) = local.init {
                // Only write the `=` when there is an initializer
                // The leading and trailing spaces aren't needed but help with readability
                write!(self.out, " = ")?;

                // Write the constant
                // `write_constant` adds no trailing or leading space/newline
                self.write_expr(init, &ctx)?;
            } else if is_value_init_supported(self.module, local.ty) {
                // No explicit initializer: zero-initialize when the type
                // supports value initialization.
                write!(self.out, " = ")?;
                self.write_zero_init_value(local.ty)?;
            }

            // Finish the local with `;` and add a newline (only for readability)
            writeln!(self.out, ";")?
        }

        // Write the function body (statement list)
        for sta in func.body.iter() {
            // Write a statement, the indentation should always be 1 when writing the function body
            // `write_stmt` adds a newline
            self.write_stmt(sta, &ctx, back::Level(1))?;
        }

        // Close braces and add a newline
        writeln!(self.out, "}}")?;

        Ok(())
    }
1401
1402 fn write_workgroup_variables_initialization(
1403 &mut self,
1404 ctx: &back::FunctionCtx,
1405 ) -> BackendResult {
1406 let mut vars = self
1407 .module
1408 .global_variables
1409 .iter()
1410 .filter(|&(handle, var)| {
1411 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1412 })
1413 .peekable();
1414
1415 if vars.peek().is_some() {
1416 let level = back::Level(1);
1417
1418 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1419
1420 for (handle, var) in vars {
1421 let name = &self.names[&NameKey::GlobalVariable(handle)];
1422 write!(self.out, "{}{} = ", level.next(), name)?;
1423 self.write_zero_init_value(var.ty)?;
1424 writeln!(self.out, ";")?;
1425 }
1426
1427 writeln!(self.out, "{level}}}")?;
1428 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1429 }
1430
1431 Ok(())
1432 }
1433
1434 /// Write a list of comma separated `T` values using a writer function `F`.
1435 ///
1436 /// The writer function `F` receives a mutable reference to `self` that if needed won't cause
1437 /// borrow checker issues (using for example a closure with `self` will cause issues), the
1438 /// second argument is the 0 based index of the element on the list, and the last element is
1439 /// a reference to the element `T` being written
1440 ///
1441 /// # Notes
1442 /// - Adds no newlines or leading/trailing whitespace
1443 /// - The last element won't have a trailing `,`
1444 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
1445 &mut self,
1446 data: &[T],
1447 mut f: F,
1448 ) -> BackendResult {
1449 // Loop through `data` invoking `f` for each element
1450 for (index, item) in data.iter().enumerate() {
1451 if index != 0 {
1452 write!(self.out, ", ")?;
1453 }
1454 f(self, index as u32, item)?;
1455 }
1456
1457 Ok(())
1458 }
1459
1460 /// Helper method used to write global constants
1461 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
1462 write!(self.out, "const ")?;
1463 let constant = &self.module.constants[handle];
1464 self.write_type(constant.ty)?;
1465 let name = &self.names[&NameKey::Constant(handle)];
1466 write!(self.out, " {name}")?;
1467 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
1468 self.write_array_size(base, size)?;
1469 }
1470 write!(self.out, " = ")?;
1471 self.write_const_expr(constant.init, &self.module.global_expressions)?;
1472 writeln!(self.out, ";")?;
1473 Ok(())
1474 }
1475
1476 /// Helper method used to output a dot product as an arithmetic expression
1477 ///
1478 fn write_dot_product(
1479 &mut self,
1480 arg: Handle<crate::Expression>,
1481 arg1: Handle<crate::Expression>,
1482 size: usize,
1483 ctx: &back::FunctionCtx,
1484 ) -> BackendResult {
1485 // Write parentheses around the dot product expression to prevent operators
1486 // with different precedences from applying earlier.
1487 write!(self.out, "(")?;
1488
1489 // Cycle through all the components of the vector
1490 for index in 0..size {
1491 let component = back::COMPONENTS[index];
1492 // Write the addition to the previous product
1493 // This will print an extra '+' at the beginning but that is fine in glsl
1494 write!(self.out, " + ")?;
1495 // Write the first vector expression, this expression is marked to be
1496 // cached so unless it can't be cached (for example, it's a Constant)
1497 // it shouldn't produce large expressions.
1498 self.write_expr(arg, ctx)?;
1499 // Access the current component on the first vector
1500 write!(self.out, ".{component} * ")?;
1501 // Write the second vector expression, this expression is marked to be
1502 // cached so unless it can't be cached (for example, it's a Constant)
1503 // it shouldn't produce large expressions.
1504 self.write_expr(arg1, ctx)?;
1505 // Access the current component on the second vector
1506 write!(self.out, ".{component}")?;
1507 }
1508
1509 write!(self.out, ")")?;
1510 Ok(())
1511 }
1512
1513 /// Helper method used to write structs
1514 ///
1515 /// # Notes
1516 /// Ends in a newline
1517 fn write_struct_body(
1518 &mut self,
1519 handle: Handle<crate::Type>,
1520 members: &[crate::StructMember],
1521 ) -> BackendResult {
1522 // glsl structs are written as in C
1523 // `struct name() { members };`
1524 // | `struct` is a keyword
1525 // | `name` is the struct name
1526 // | `members` is a semicolon separated list of `type name`
1527 // | `type` is the member type
1528 // | `name` is the member name
1529 writeln!(self.out, "{{")?;
1530
1531 for (idx, member) in members.iter().enumerate() {
1532 // The indentation is only for readability
1533 write!(self.out, "{}", back::INDENT)?;
1534
1535 match self.module.types[member.ty].inner {
1536 TypeInner::Array {
1537 base,
1538 size,
1539 stride: _,
1540 } => {
1541 self.write_type(base)?;
1542 write!(
1543 self.out,
1544 " {}",
1545 &self.names[&NameKey::StructMember(handle, idx as u32)]
1546 )?;
1547 // Write [size]
1548 self.write_array_size(base, size)?;
1549 // Newline is important
1550 writeln!(self.out, ";")?;
1551 }
1552 _ => {
1553 // Write the member type
1554 // Adds no trailing space
1555 self.write_type(member.ty)?;
1556
1557 // Write the member name and put a semicolon
1558 // The leading space is important
1559 // All members must have a semicolon even the last one
1560 writeln!(
1561 self.out,
1562 " {};",
1563 &self.names[&NameKey::StructMember(handle, idx as u32)]
1564 )?;
1565 }
1566 }
1567 }
1568
1569 write!(self.out, "}}")?;
1570 Ok(())
1571 }
1572
1573 /// Helper method used to write statements
1574 ///
1575 /// # Notes
1576 /// Always adds a newline
1577 fn write_stmt(
1578 &mut self,
1579 sta: &crate::Statement,
1580 ctx: &back::FunctionCtx,
1581 level: back::Level,
1582 ) -> BackendResult {
1583 use crate::Statement;
1584
1585 match *sta {
1586 // This is where we can generate intermediate constants for some expression types.
1587 Statement::Emit(ref range) => {
1588 for handle in range.clone() {
1589 let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
1590 let expr_name = if ptr_class.is_some() {
1591 // GLSL can't save a pointer-valued expression in a variable,
1592 // but we shouldn't ever need to: they should never be named expressions,
1593 // and none of the expression types flagged by bake_ref_count can be pointer-valued.
1594 None
1595 } else if let Some(name) = ctx.named_expressions.get(&handle) {
1596 // Front end provides names for all variables at the start of writing.
1597 // But we write them to step by step. We need to recache them
1598 // Otherwise, we could accidentally write variable name instead of full expression.
1599 // Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
1600 Some(self.namer.call(name))
1601 } else if self.need_bake_expressions.contains(&handle) {
1602 Some(Baked(handle).to_string())
1603 } else {
1604 None
1605 };
1606
1607 // If we are going to write an `ImageLoad` next and the target image
1608 // is sampled and we are using the `Restrict` policy for bounds
1609 // checking images we need to write a local holding the clamped lod.
1610 if let crate::Expression::ImageLoad {
1611 image,
1612 level: Some(level_expr),
1613 ..
1614 } = ctx.expressions[handle]
1615 {
1616 if let TypeInner::Image {
1617 class: crate::ImageClass::Sampled { .. },
1618 ..
1619 } = *ctx.resolve_type(image, &self.module.types)
1620 {
1621 if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
1622 write!(self.out, "{level}")?;
1623 self.write_clamped_lod(ctx, handle, image, level_expr)?
1624 }
1625 }
1626 }
1627
1628 if let Some(name) = expr_name {
1629 write!(self.out, "{level}")?;
1630 self.write_named_expr(handle, name, handle, ctx)?;
1631 }
1632 }
1633 }
1634 // Blocks are simple we just need to write the block statements between braces
1635 // We could also just print the statements but this is more readable and maps more
1636 // closely to the IR
1637 Statement::Block(ref block) => {
1638 write!(self.out, "{level}")?;
1639 writeln!(self.out, "{{")?;
1640 for sta in block.iter() {
1641 // Increase the indentation to help with readability
1642 self.write_stmt(sta, ctx, level.next())?
1643 }
1644 writeln!(self.out, "{level}}}")?
1645 }
1646 // Ifs are written as in C:
1647 // ```
1648 // if(condition) {
1649 // accept
1650 // } else {
1651 // reject
1652 // }
1653 // ```
1654 Statement::If {
1655 condition,
1656 ref accept,
1657 ref reject,
1658 } => {
1659 write!(self.out, "{level}")?;
1660 write!(self.out, "if (")?;
1661 self.write_expr(condition, ctx)?;
1662 writeln!(self.out, ") {{")?;
1663
1664 for sta in accept {
1665 // Increase indentation to help with readability
1666 self.write_stmt(sta, ctx, level.next())?;
1667 }
1668
1669 // If there are no statements in the reject block we skip writing it
1670 // This is only for readability
1671 if !reject.is_empty() {
1672 writeln!(self.out, "{level}}} else {{")?;
1673
1674 for sta in reject {
1675 // Increase indentation to help with readability
1676 self.write_stmt(sta, ctx, level.next())?;
1677 }
1678 }
1679
1680 writeln!(self.out, "{level}}}")?
1681 }
1682 // Switch are written as in C:
1683 // ```
1684 // switch (selector) {
1685 // // Fallthrough
1686 // case label:
1687 // block
1688 // // Non fallthrough
1689 // case label:
1690 // block
1691 // break;
1692 // default:
1693 // block
1694 // }
1695 // ```
1696 // Where the `default` case happens isn't important but we put it last
1697 // so that we don't need to print a `break` for it
1698 Statement::Switch {
1699 selector,
1700 ref cases,
1701 } => {
1702 let l2 = level.next();
1703 // Some GLSL consumers may not handle switches with a single
1704 // body correctly: See wgpu#4514. Write such switch statements
1705 // as a `do {} while(false);` loop instead.
1706 //
1707 // Since doing so may inadvertently capture `continue`
1708 // statements in the switch body, we must apply continue
1709 // forwarding. See the `naga::back::continue_forward` module
1710 // docs for details.
1711 let one_body = cases
1712 .iter()
1713 .rev()
1714 .skip(1)
1715 .all(|case| case.fall_through && case.body.is_empty());
1716 if one_body {
1717 // Unlike HLSL, in GLSL `continue_ctx` only needs to know
1718 // about [`Switch`] statements that are being rendered as
1719 // `do-while` loops.
1720 if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
1721 writeln!(self.out, "{level}bool {variable} = false;",)?;
1722 };
1723 writeln!(self.out, "{level}do {{")?;
1724 // Note: Expressions have no side-effects so we don't need to emit selector expression.
1725
1726 // Body
1727 if let Some(case) = cases.last() {
1728 for sta in case.body.iter() {
1729 self.write_stmt(sta, ctx, l2)?;
1730 }
1731 }
1732 // End do-while
1733 writeln!(self.out, "{level}}} while(false);")?;
1734
1735 // Handle any forwarded continue statements.
1736 use back::continue_forward::ExitControlFlow;
1737 let op = match self.continue_ctx.exit_switch() {
1738 ExitControlFlow::None => None,
1739 ExitControlFlow::Continue { variable } => Some(("continue", variable)),
1740 ExitControlFlow::Break { variable } => Some(("break", variable)),
1741 };
1742 if let Some((control_flow, variable)) = op {
1743 writeln!(self.out, "{level}if ({variable}) {{")?;
1744 writeln!(self.out, "{l2}{control_flow};")?;
1745 writeln!(self.out, "{level}}}")?;
1746 }
1747 } else {
1748 // Start the switch
1749 write!(self.out, "{level}")?;
1750 write!(self.out, "switch(")?;
1751 self.write_expr(selector, ctx)?;
1752 writeln!(self.out, ") {{")?;
1753
1754 // Write all cases
1755 for case in cases {
1756 match case.value {
1757 crate::SwitchValue::I32(value) => {
1758 write!(self.out, "{l2}case {value}:")?
1759 }
1760 crate::SwitchValue::U32(value) => {
1761 write!(self.out, "{l2}case {value}u:")?
1762 }
1763 crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
1764 }
1765
1766 let write_block_braces = !(case.fall_through && case.body.is_empty());
1767 if write_block_braces {
1768 writeln!(self.out, " {{")?;
1769 } else {
1770 writeln!(self.out)?;
1771 }
1772
1773 for sta in case.body.iter() {
1774 self.write_stmt(sta, ctx, l2.next())?;
1775 }
1776
1777 if !case.fall_through && case.body.last().is_none_or(|s| !s.is_terminator())
1778 {
1779 writeln!(self.out, "{}break;", l2.next())?;
1780 }
1781
1782 if write_block_braces {
1783 writeln!(self.out, "{l2}}}")?;
1784 }
1785 }
1786
1787 writeln!(self.out, "{level}}}")?
1788 }
1789 }
1790 // Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
1791 // while true loop and appending the continuing block to the body resulting on:
1792 // ```
1793 // bool loop_init = true;
1794 // while(true) {
1795 // if (!loop_init) { <continuing> }
1796 // loop_init = false;
1797 // <body>
1798 // }
1799 // ```
1800 Statement::Loop {
1801 ref body,
1802 ref continuing,
1803 break_if,
1804 } => {
1805 self.continue_ctx.enter_loop();
1806 if !continuing.is_empty() || break_if.is_some() {
1807 let gate_name = self.namer.call("loop_init");
1808 writeln!(self.out, "{level}bool {gate_name} = true;")?;
1809 writeln!(self.out, "{level}while(true) {{")?;
1810 let l2 = level.next();
1811 let l3 = l2.next();
1812 writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
1813 for sta in continuing {
1814 self.write_stmt(sta, ctx, l3)?;
1815 }
1816 if let Some(condition) = break_if {
1817 write!(self.out, "{l3}if (")?;
1818 self.write_expr(condition, ctx)?;
1819 writeln!(self.out, ") {{")?;
1820 writeln!(self.out, "{}break;", l3.next())?;
1821 writeln!(self.out, "{l3}}}")?;
1822 }
1823 writeln!(self.out, "{l2}}}")?;
1824 writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
1825 } else {
1826 writeln!(self.out, "{level}while(true) {{")?;
1827 }
1828 for sta in body {
1829 self.write_stmt(sta, ctx, level.next())?;
1830 }
1831 writeln!(self.out, "{level}}}")?;
1832 self.continue_ctx.exit_loop();
1833 }
1834 // Break, continue and return as written as in C
1835 // `break;`
1836 Statement::Break => {
1837 write!(self.out, "{level}")?;
1838 writeln!(self.out, "break;")?
1839 }
1840 // `continue;`
1841 Statement::Continue => {
1842 // Sometimes we must render a `Continue` statement as a `break`.
1843 // See the docs for the `back::continue_forward` module.
1844 if let Some(variable) = self.continue_ctx.continue_encountered() {
1845 writeln!(self.out, "{level}{variable} = true;",)?;
1846 writeln!(self.out, "{level}break;")?
1847 } else {
1848 writeln!(self.out, "{level}continue;")?
1849 }
1850 }
1851 // `return expr;`, `expr` is optional
1852 Statement::Return { value } => {
1853 write!(self.out, "{level}")?;
1854 match ctx.ty {
1855 back::FunctionType::Function(_) => {
1856 write!(self.out, "return")?;
1857 // Write the expression to be returned if needed
1858 if let Some(expr) = value {
1859 write!(self.out, " ")?;
1860 self.write_expr(expr, ctx)?;
1861 }
1862 writeln!(self.out, ";")?;
1863 }
1864 back::FunctionType::EntryPoint(ep_index) => {
1865 let mut has_point_size = false;
1866 let ep = &self.module.entry_points[ep_index as usize];
1867 if let Some(ref result) = ep.function.result {
1868 let value = value.unwrap();
1869 match self.module.types[result.ty].inner {
1870 TypeInner::Struct { ref members, .. } => {
1871 let temp_struct_name = match ctx.expressions[value] {
1872 crate::Expression::Compose { .. } => {
1873 let return_struct = "_tmp_return";
1874 write!(
1875 self.out,
1876 "{} {} = ",
1877 &self.names[&NameKey::Type(result.ty)],
1878 return_struct
1879 )?;
1880 self.write_expr(value, ctx)?;
1881 writeln!(self.out, ";")?;
1882 write!(self.out, "{level}")?;
1883 Some(return_struct)
1884 }
1885 _ => None,
1886 };
1887
1888 for (index, member) in members.iter().enumerate() {
1889 if let Some(crate::Binding::BuiltIn(
1890 crate::BuiltIn::PointSize,
1891 )) = member.binding
1892 {
1893 has_point_size = true;
1894 }
1895
1896 let varying_name = VaryingName {
1897 binding: member.binding.as_ref().unwrap(),
1898 stage: ep.stage,
1899 options: VaryingOptions::from_writer_options(
1900 self.options,
1901 true,
1902 ),
1903 };
1904 write!(self.out, "{varying_name} = ")?;
1905
1906 if let Some(struct_name) = temp_struct_name {
1907 write!(self.out, "{struct_name}")?;
1908 } else {
1909 self.write_expr(value, ctx)?;
1910 }
1911
1912 // Write field name
1913 writeln!(
1914 self.out,
1915 ".{};",
1916 &self.names
1917 [&NameKey::StructMember(result.ty, index as u32)]
1918 )?;
1919 write!(self.out, "{level}")?;
1920 }
1921 }
1922 _ => {
1923 let name = VaryingName {
1924 binding: result.binding.as_ref().unwrap(),
1925 stage: ep.stage,
1926 options: VaryingOptions::from_writer_options(
1927 self.options,
1928 true,
1929 ),
1930 };
1931 write!(self.out, "{name} = ")?;
1932 self.write_expr(value, ctx)?;
1933 writeln!(self.out, ";")?;
1934 write!(self.out, "{level}")?;
1935 }
1936 }
1937 }
1938
1939 let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
1940 == ShaderStage::Vertex;
1941 if is_vertex_stage
1942 && self
1943 .options
1944 .writer_flags
1945 .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
1946 {
1947 writeln!(
1948 self.out,
1949 "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
1950 )?;
1951 write!(self.out, "{level}")?;
1952 }
1953
1954 if is_vertex_stage
1955 && self
1956 .options
1957 .writer_flags
1958 .contains(WriterFlags::FORCE_POINT_SIZE)
1959 && !has_point_size
1960 {
1961 writeln!(self.out, "gl_PointSize = 1.0;")?;
1962 write!(self.out, "{level}")?;
1963 }
1964 writeln!(self.out, "return;")?;
1965 }
1966 }
1967 }
1968 // This is one of the places were glsl adds to the syntax of C in this case the discard
1969 // keyword which ceases all further processing in a fragment shader, it's called OpKill
1970 // in spir-v that's why it's called `Statement::Kill`
1971 Statement::Kill => writeln!(self.out, "{level}discard;")?,
1972 Statement::ControlBarrier(flags) => {
1973 self.write_control_barrier(flags, level)?;
1974 }
1975 Statement::MemoryBarrier(flags) => {
1976 self.write_memory_barrier(flags, level)?;
1977 }
1978 // Stores in glsl are just variable assignments written as `pointer = value;`
1979 Statement::Store { pointer, value } => {
1980 write!(self.out, "{level}")?;
1981 self.write_expr(pointer, ctx)?;
1982 write!(self.out, " = ")?;
1983 self.write_expr(value, ctx)?;
1984 writeln!(self.out, ";")?
1985 }
1986 Statement::WorkGroupUniformLoad { pointer, result } => {
1987 // GLSL doesn't have pointers, which means that this backend needs to ensure that
1988 // the actual "loading" is happening between the two barriers.
1989 // This is done in `Emit` by never emitting a variable name for pointer variables
1990 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1991
1992 let result_name = Baked(result).to_string();
1993 write!(self.out, "{level}")?;
1994 // Expressions cannot have side effects, so just writing the expression here is fine.
1995 self.write_named_expr(pointer, result_name, result, ctx)?;
1996
1997 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1998 }
1999 // Stores a value into an image.
2000 Statement::ImageStore {
2001 image,
2002 coordinate,
2003 array_index,
2004 value,
2005 } => {
2006 write!(self.out, "{level}")?;
2007 self.write_image_store(ctx, image, coordinate, array_index, value)?
2008 }
2009 // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
2010 Statement::Call {
2011 function,
2012 ref arguments,
2013 result,
2014 } => {
2015 write!(self.out, "{level}")?;
2016 if let Some(expr) = result {
2017 let name = Baked(expr).to_string();
2018 let result = self.module.functions[function].result.as_ref().unwrap();
2019 self.write_type(result.ty)?;
2020 write!(self.out, " {name}")?;
2021 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
2022 {
2023 self.write_array_size(base, size)?
2024 }
2025 write!(self.out, " = ")?;
2026 self.named_expressions.insert(expr, name);
2027 }
2028 write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
2029 let arguments: Vec<_> = arguments
2030 .iter()
2031 .enumerate()
2032 .filter_map(|(i, arg)| {
2033 let arg_ty = self.module.functions[function].arguments[i].ty;
2034 match self.module.types[arg_ty].inner {
2035 TypeInner::Sampler { .. } => None,
2036 _ => Some(*arg),
2037 }
2038 })
2039 .collect();
2040 self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
2041 writeln!(self.out, ");")?
2042 }
2043 Statement::Atomic {
2044 pointer,
2045 ref fun,
2046 value,
2047 result,
2048 } => {
2049 write!(self.out, "{level}")?;
2050
2051 match *fun {
2052 crate::AtomicFunction::Exchange {
2053 compare: Some(compare_expr),
2054 } => {
2055 let result_handle = result.expect("CompareExchange must have a result");
2056 let res_name = Baked(result_handle).to_string();
2057 self.write_type(ctx.info[result_handle].ty.handle().unwrap())?;
2058 write!(self.out, " {res_name};")?;
2059 write!(self.out, " {res_name}.old_value = atomicCompSwap(")?;
2060 self.write_expr(pointer, ctx)?;
2061 write!(self.out, ", ")?;
2062 self.write_expr(compare_expr, ctx)?;
2063 write!(self.out, ", ")?;
2064 self.write_expr(value, ctx)?;
2065 writeln!(self.out, ");")?;
2066
2067 write!(
2068 self.out,
2069 "{level}{res_name}.exchanged = ({res_name}.old_value == "
2070 )?;
2071 self.write_expr(compare_expr, ctx)?;
2072 writeln!(self.out, ");")?;
2073 self.named_expressions.insert(result_handle, res_name);
2074 }
2075 _ => {
2076 if let Some(result) = result {
2077 let res_name = Baked(result).to_string();
2078 self.write_type(ctx.info[result].ty.handle().unwrap())?;
2079 write!(self.out, " {res_name} = ")?;
2080 self.named_expressions.insert(result, res_name);
2081 }
2082 let fun_str = fun.to_glsl();
2083 write!(self.out, "atomic{fun_str}(")?;
2084 self.write_expr(pointer, ctx)?;
2085 write!(self.out, ", ")?;
2086 if let crate::AtomicFunction::Subtract = *fun {
2087 // Emulate `atomicSub` with `atomicAdd` by negating the value.
2088 write!(self.out, "-")?;
2089 }
2090 self.write_expr(value, ctx)?;
2091 writeln!(self.out, ");")?;
2092 }
2093 }
2094 }
2095 // Stores a value into an image.
2096 Statement::ImageAtomic {
2097 image,
2098 coordinate,
2099 array_index,
2100 fun,
2101 value,
2102 } => {
2103 write!(self.out, "{level}")?;
2104 self.write_image_atomic(ctx, image, coordinate, array_index, fun, value)?
2105 }
2106 Statement::RayQuery { .. } => unreachable!(),
2107 Statement::SubgroupBallot { result, predicate } => {
2108 write!(self.out, "{level}")?;
2109 let res_name = Baked(result).to_string();
2110 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2111 self.write_value_type(res_ty)?;
2112 write!(self.out, " {res_name} = ")?;
2113 self.named_expressions.insert(result, res_name);
2114
2115 write!(self.out, "subgroupBallot(")?;
2116 match predicate {
2117 Some(predicate) => self.write_expr(predicate, ctx)?,
2118 None => write!(self.out, "true")?,
2119 }
2120 writeln!(self.out, ");")?;
2121 }
2122 Statement::SubgroupCollectiveOperation {
2123 op,
2124 collective_op,
2125 argument,
2126 result,
2127 } => {
2128 write!(self.out, "{level}")?;
2129 let res_name = Baked(result).to_string();
2130 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2131 self.write_value_type(res_ty)?;
2132 write!(self.out, " {res_name} = ")?;
2133 self.named_expressions.insert(result, res_name);
2134
2135 match (collective_op, op) {
2136 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
2137 write!(self.out, "subgroupAll(")?
2138 }
2139 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
2140 write!(self.out, "subgroupAny(")?
2141 }
2142 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
2143 write!(self.out, "subgroupAdd(")?
2144 }
2145 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
2146 write!(self.out, "subgroupMul(")?
2147 }
2148 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
2149 write!(self.out, "subgroupMax(")?
2150 }
2151 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
2152 write!(self.out, "subgroupMin(")?
2153 }
2154 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
2155 write!(self.out, "subgroupAnd(")?
2156 }
2157 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
2158 write!(self.out, "subgroupOr(")?
2159 }
2160 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
2161 write!(self.out, "subgroupXor(")?
2162 }
2163 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
2164 write!(self.out, "subgroupExclusiveAdd(")?
2165 }
2166 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
2167 write!(self.out, "subgroupExclusiveMul(")?
2168 }
2169 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
2170 write!(self.out, "subgroupInclusiveAdd(")?
2171 }
2172 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
2173 write!(self.out, "subgroupInclusiveMul(")?
2174 }
2175 _ => unimplemented!(),
2176 }
2177 self.write_expr(argument, ctx)?;
2178 writeln!(self.out, ");")?;
2179 }
2180 Statement::SubgroupGather {
2181 mode,
2182 argument,
2183 result,
2184 } => {
2185 write!(self.out, "{level}")?;
2186 let res_name = Baked(result).to_string();
2187 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2188 self.write_value_type(res_ty)?;
2189 write!(self.out, " {res_name} = ")?;
2190 self.named_expressions.insert(result, res_name);
2191
2192 match mode {
2193 crate::GatherMode::BroadcastFirst => {
2194 write!(self.out, "subgroupBroadcastFirst(")?;
2195 }
2196 crate::GatherMode::Broadcast(_) => {
2197 write!(self.out, "subgroupBroadcast(")?;
2198 }
2199 crate::GatherMode::Shuffle(_) => {
2200 write!(self.out, "subgroupShuffle(")?;
2201 }
2202 crate::GatherMode::ShuffleDown(_) => {
2203 write!(self.out, "subgroupShuffleDown(")?;
2204 }
2205 crate::GatherMode::ShuffleUp(_) => {
2206 write!(self.out, "subgroupShuffleUp(")?;
2207 }
2208 crate::GatherMode::ShuffleXor(_) => {
2209 write!(self.out, "subgroupShuffleXor(")?;
2210 }
2211 crate::GatherMode::QuadBroadcast(_) => {
2212 write!(self.out, "subgroupQuadBroadcast(")?;
2213 }
2214 crate::GatherMode::QuadSwap(direction) => match direction {
2215 crate::Direction::X => {
2216 write!(self.out, "subgroupQuadSwapHorizontal(")?;
2217 }
2218 crate::Direction::Y => {
2219 write!(self.out, "subgroupQuadSwapVertical(")?;
2220 }
2221 crate::Direction::Diagonal => {
2222 write!(self.out, "subgroupQuadSwapDiagonal(")?;
2223 }
2224 },
2225 }
2226 self.write_expr(argument, ctx)?;
2227 match mode {
2228 crate::GatherMode::BroadcastFirst => {}
2229 crate::GatherMode::Broadcast(index)
2230 | crate::GatherMode::Shuffle(index)
2231 | crate::GatherMode::ShuffleDown(index)
2232 | crate::GatherMode::ShuffleUp(index)
2233 | crate::GatherMode::ShuffleXor(index)
2234 | crate::GatherMode::QuadBroadcast(index) => {
2235 write!(self.out, ", ")?;
2236 self.write_expr(index, ctx)?;
2237 }
2238 crate::GatherMode::QuadSwap(_) => {}
2239 }
2240 writeln!(self.out, ");")?;
2241 }
2242 Statement::CooperativeStore { .. } => unimplemented!(),
2243 Statement::RayPipelineFunction(_) => unimplemented!(),
2244 }
2245
2246 Ok(())
2247 }
2248
    /// Write a const expression.
    ///
    /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
    /// constant expression arena, as a GLSL expression.
    ///
    /// This is a thin wrapper around [`write_possibly_const_expr`]: it passes
    /// itself back as the subexpression callback, so every subexpression is
    /// also restricted to the const-expression variants.
    ///
    /// # Notes
    /// Adds no newlines or leading/trailing whitespace
    ///
    /// [`Expression`]: crate::Expression
    /// [`Module`]: crate::Module
    /// [`write_possibly_const_expr`]: Self::write_possibly_const_expr
    fn write_const_expr(
        &mut self,
        expr: Handle<crate::Expression>,
        arena: &crate::Arena<crate::Expression>,
    ) -> BackendResult {
        self.write_possibly_const_expr(
            expr,
            arena,
            // Type information for const expressions comes from the
            // module-level analysis, not a per-function context.
            |expr| &self.info[expr],
            // Recurse through ourselves so subexpressions stay restricted
            // to const-expression variants.
            |writer, expr| writer.write_const_expr(expr, arena),
        )
    }
2271
2272 /// Write [`Expression`] variants that can occur in both runtime and const expressions.
2273 ///
2274 /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
2275 /// as as GLSL expression. This must be one of the [`Expression`] variants
2276 /// that is allowed to occur in constant expressions.
2277 ///
2278 /// Use `write_expression` to write subexpressions.
2279 ///
2280 /// This is the common code for `write_expr`, which handles arbitrary
2281 /// runtime expressions, and `write_const_expr`, which only handles
2282 /// const-expressions. Each of those callers passes itself (essentially) as
2283 /// the `write_expression` callback, so that subexpressions are restricted
2284 /// to the appropriate variants.
2285 ///
2286 /// # Notes
2287 /// Adds no newlines or leading/trailing whitespace
2288 ///
2289 /// [`Expression`]: crate::Expression
2290 fn write_possibly_const_expr<'w, I, E>(
2291 &'w mut self,
2292 expr: Handle<crate::Expression>,
2293 expressions: &crate::Arena<crate::Expression>,
2294 info: I,
2295 write_expression: E,
2296 ) -> BackendResult
2297 where
2298 I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
2299 E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
2300 {
2301 use crate::Expression;
2302
2303 match expressions[expr] {
2304 Expression::Literal(literal) => {
2305 match literal {
2306 // Floats are written using `Debug` instead of `Display` because it always appends the
2307 // decimal part even it's zero which is needed for a valid glsl float constant
2308 crate::Literal::F64(value) => write!(self.out, "{value:?}LF")?,
2309 crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
2310 crate::Literal::F16(_) => {
2311 return Err(Error::Custom("GLSL has no 16-bit float type".into()));
2312 }
2313 // Unsigned integers need a `u` at the end
2314 //
2315 // While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
2316 // always write it as the extra branch wouldn't have any benefit in readability
2317 crate::Literal::U32(value) => write!(self.out, "{value}u")?,
2318 crate::Literal::I32(value) => write!(self.out, "{value}")?,
2319 crate::Literal::Bool(value) => write!(self.out, "{value}")?,
2320 crate::Literal::I64(_) => {
2321 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2322 }
2323 crate::Literal::U64(_) => {
2324 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2325 }
2326 crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
2327 return Err(Error::Custom(
2328 "Abstract types should not appear in IR presented to backends".into(),
2329 ));
2330 }
2331 }
2332 }
2333 Expression::Constant(handle) => {
2334 let constant = &self.module.constants[handle];
2335 if constant.name.is_some() {
2336 write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
2337 } else {
2338 self.write_const_expr(constant.init, &self.module.global_expressions)?;
2339 }
2340 }
2341 Expression::ZeroValue(ty) => {
2342 self.write_zero_init_value(ty)?;
2343 }
2344 Expression::Compose { ty, ref components } => {
2345 self.write_type(ty)?;
2346
2347 if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
2348 self.write_array_size(base, size)?;
2349 }
2350
2351 write!(self.out, "(")?;
2352 for (index, component) in components.iter().enumerate() {
2353 if index != 0 {
2354 write!(self.out, ", ")?;
2355 }
2356 write_expression(self, *component)?;
2357 }
2358 write!(self.out, ")")?
2359 }
2360 // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
2361 Expression::Splat { size: _, value } => {
2362 let resolved = info(expr).inner_with(&self.module.types);
2363 self.write_value_type(resolved)?;
2364 write!(self.out, "(")?;
2365 write_expression(self, value)?;
2366 write!(self.out, ")")?
2367 }
2368 _ => {
2369 return Err(Error::Override);
2370 }
2371 }
2372
2373 Ok(())
2374 }
2375
2376 /// Helper method to write expressions
2377 ///
2378 /// # Notes
2379 /// Doesn't add any newlines or leading/trailing spaces
2380 fn write_expr(
2381 &mut self,
2382 expr: Handle<crate::Expression>,
2383 ctx: &back::FunctionCtx,
2384 ) -> BackendResult {
2385 use crate::Expression;
2386
2387 if let Some(name) = self.named_expressions.get(&expr) {
2388 write!(self.out, "{name}")?;
2389 return Ok(());
2390 }
2391
2392 match ctx.expressions[expr] {
2393 Expression::Literal(_)
2394 | Expression::Constant(_)
2395 | Expression::ZeroValue(_)
2396 | Expression::Compose { .. }
2397 | Expression::Splat { .. } => {
2398 self.write_possibly_const_expr(
2399 expr,
2400 ctx.expressions,
2401 |expr| &ctx.info[expr].ty,
2402 |writer, expr| writer.write_expr(expr, ctx),
2403 )?;
2404 }
2405 Expression::Override(_) => return Err(Error::Override),
2406 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2407 Expression::Access { base, index } => {
2408 self.write_expr(base, ctx)?;
2409 write!(self.out, "[")?;
2410 self.write_expr(index, ctx)?;
2411 write!(self.out, "]")?
2412 }
2413 // `AccessIndex` is the same as `Access` except that the index is a constant and it can
2414 // be applied to structs, in this case we need to find the name of the field at that
2415 // index and write `base.field_name`
2416 Expression::AccessIndex { base, index } => {
2417 self.write_expr(base, ctx)?;
2418
2419 let base_ty_res = &ctx.info[base].ty;
2420 let mut resolved = base_ty_res.inner_with(&self.module.types);
2421 let base_ty_handle = match *resolved {
2422 TypeInner::Pointer { base, space: _ } => {
2423 resolved = &self.module.types[base].inner;
2424 Some(base)
2425 }
2426 _ => base_ty_res.handle(),
2427 };
2428
2429 match *resolved {
2430 TypeInner::Vector { .. } => {
2431 // Write vector access as a swizzle
2432 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2433 }
2434 TypeInner::Matrix { .. }
2435 | TypeInner::Array { .. }
2436 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
2437 TypeInner::Struct { .. } => {
2438 // This will never panic in case the type is a `Struct`, this is not true
2439 // for other types so we can only check while inside this match arm
2440 let ty = base_ty_handle.unwrap();
2441
2442 write!(
2443 self.out,
2444 ".{}",
2445 &self.names[&NameKey::StructMember(ty, index)]
2446 )?
2447 }
2448 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
2449 }
2450 }
2451 // `Swizzle` adds a few letters behind the dot.
2452 Expression::Swizzle {
2453 size,
2454 vector,
2455 pattern,
2456 } => {
2457 self.write_expr(vector, ctx)?;
2458 write!(self.out, ".")?;
2459 for &sc in pattern[..size as usize].iter() {
2460 self.out.write_char(back::COMPONENTS[sc as usize])?;
2461 }
2462 }
2463 // Function arguments are written as the argument name
2464 Expression::FunctionArgument(pos) => {
2465 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
2466 }
2467 // Global variables need some special work for their name but
2468 // `get_global_name` does the work for us
2469 Expression::GlobalVariable(handle) => {
2470 let global = &self.module.global_variables[handle];
2471 self.write_global_name(handle, global)?
2472 }
            // A local is written as its name
2474 Expression::LocalVariable(handle) => {
2475 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
2476 }
2477 // glsl has no pointers so there's no load operation, just write the pointer expression
2478 Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
2479 // `ImageSample` is a bit complicated compared to the rest of the IR.
2480 //
            // First there are three variations depending on whether the sample level is
            // explicitly set, automatic, or a bias:
2483 // `texture(image, coordinate)` - Automatic sample level
2484 // `texture(image, coordinate, bias)` - Bias sample level
2485 // `textureLod(image, coordinate, level)` - Zero or Exact sample level
2486 //
2487 // Furthermore if `depth_ref` is some we need to append it to the coordinate vector
2488 Expression::ImageSample {
2489 image,
2490 sampler: _, //TODO?
2491 gather,
2492 coordinate,
2493 array_index,
2494 offset,
2495 level,
2496 depth_ref,
2497 clamp_to_edge: _,
2498 } => {
2499 let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
2500 TypeInner::Image {
2501 dim,
2502 class,
2503 arrayed,
2504 ..
2505 } => (dim, class, arrayed),
2506 _ => unreachable!(),
2507 };
2508 let mut err = None;
2509 if dim == crate::ImageDimension::Cube {
2510 if offset.is_some() {
2511 err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
2512 }
2513 if arrayed
2514 && matches!(class, crate::ImageClass::Depth { .. })
2515 && matches!(level, crate::SampleLevel::Gradient { .. })
2516 {
2517 err = Some("samplerCubeArrayShadow don't support textureGrad");
2518 }
2519 }
2520 if gather.is_some() && level != crate::SampleLevel::Zero {
2521 err = Some("textureGather doesn't support LOD parameters");
2522 }
2523 if let Some(err) = err {
2524 return Err(Error::Custom(String::from(err)));
2525 }
2526
2527 // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
2528 // unless `GL_EXT_texture_shadow_lod` is present.
2529 // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
2530 let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
2531 || (dim == crate::ImageDimension::D2 && arrayed))
2532 && level == crate::SampleLevel::Zero
2533 && matches!(class, crate::ImageClass::Depth { .. })
2534 && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
2535
2536 // Write the function to be used depending on the sample level
2537 let fun_name = match level {
2538 crate::SampleLevel::Zero if gather.is_some() => "textureGather",
2539 crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
2540 crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
2541 crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
2542 crate::SampleLevel::Gradient { .. } => "textureGrad",
2543 };
2544 let offset_name = match offset {
2545 Some(_) => "Offset",
2546 None => "",
2547 };
2548
2549 write!(self.out, "{fun_name}{offset_name}(")?;
2550
2551 // Write the image that will be used
2552 self.write_expr(image, ctx)?;
2553 // The space here isn't required but it helps with readability
2554 write!(self.out, ", ")?;
2555
2556 // TODO: handle clamp_to_edge
2557 // https://github.com/gfx-rs/wgpu/issues/7791
2558
2559 // We need to get the coordinates vector size to later build a vector that's `size + 1`
2560 // if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
2561 let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
2562 TypeInner::Vector { size, .. } => size as u8,
2563 TypeInner::Scalar { .. } => 1,
2564 _ => unreachable!(),
2565 };
2566
2567 if array_index.is_some() {
2568 coord_dim += 1;
2569 }
2570 let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
2571 if merge_depth_ref {
2572 coord_dim += 1;
2573 }
2574
2575 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
2576 let is_vec = tex_1d_hack || coord_dim != 1;
2577 // Compose a new texture coordinates vector
2578 if is_vec {
2579 write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
2580 }
2581 self.write_expr(coordinate, ctx)?;
2582 if tex_1d_hack {
2583 write!(self.out, ", 0.0")?;
2584 }
2585 if let Some(expr) = array_index {
2586 write!(self.out, ", ")?;
2587 self.write_expr(expr, ctx)?;
2588 }
2589 if merge_depth_ref {
2590 write!(self.out, ", ")?;
2591 self.write_expr(depth_ref.unwrap(), ctx)?;
2592 }
2593 if is_vec {
2594 write!(self.out, ")")?;
2595 }
2596
2597 if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
2598 write!(self.out, ", ")?;
2599 self.write_expr(expr, ctx)?;
2600 }
2601
2602 match level {
2603 // Auto needs no more arguments
2604 crate::SampleLevel::Auto => (),
2605 // Zero needs level set to 0
2606 crate::SampleLevel::Zero => {
2607 if workaround_lod_with_grad {
2608 let vec_dim = match dim {
2609 crate::ImageDimension::Cube => 3,
2610 _ => 2,
2611 };
2612 write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
2613 } else if gather.is_none() {
2614 write!(self.out, ", 0.0")?;
2615 }
2616 }
2617 // Exact and bias require another argument
2618 crate::SampleLevel::Exact(expr) => {
2619 write!(self.out, ", ")?;
2620 self.write_expr(expr, ctx)?;
2621 }
2622 crate::SampleLevel::Bias(_) => {
2623 // This needs to be done after the offset writing
2624 }
2625 crate::SampleLevel::Gradient { x, y } => {
2626 // If we are using sampler2D to replace sampler1D, we also
2627 // need to make sure to use vec2 gradients
2628 if tex_1d_hack {
2629 write!(self.out, ", vec2(")?;
2630 self.write_expr(x, ctx)?;
2631 write!(self.out, ", 0.0)")?;
2632 write!(self.out, ", vec2(")?;
2633 self.write_expr(y, ctx)?;
2634 write!(self.out, ", 0.0)")?;
2635 } else {
2636 write!(self.out, ", ")?;
2637 self.write_expr(x, ctx)?;
2638 write!(self.out, ", ")?;
2639 self.write_expr(y, ctx)?;
2640 }
2641 }
2642 }
2643
2644 if let Some(constant) = offset {
2645 write!(self.out, ", ")?;
2646 if tex_1d_hack {
2647 write!(self.out, "ivec2(")?;
2648 }
2649 self.write_const_expr(constant, ctx.expressions)?;
2650 if tex_1d_hack {
2651 write!(self.out, ", 0)")?;
2652 }
2653 }
2654
2655 // Bias is always the last argument
2656 if let crate::SampleLevel::Bias(expr) = level {
2657 write!(self.out, ", ")?;
2658 self.write_expr(expr, ctx)?;
2659 }
2660
2661 if let (Some(component), None) = (gather, depth_ref) {
2662 write!(self.out, ", {}", component as usize)?;
2663 }
2664
2665 // End the function
2666 write!(self.out, ")")?
2667 }
2668 Expression::ImageLoad {
2669 image,
2670 coordinate,
2671 array_index,
2672 sample,
2673 level,
2674 } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
2675 // Query translates into one of the:
2676 // - textureSize/imageSize
2677 // - textureQueryLevels
2678 // - textureSamples/imageSamples
2679 Expression::ImageQuery { image, query } => {
2680 use crate::ImageClass;
2681
2682 // This will only panic if the module is invalid
2683 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
2684 TypeInner::Image {
2685 dim,
2686 arrayed: _,
2687 class,
2688 } => (dim, class),
2689 _ => unreachable!(),
2690 };
2691 let components = match dim {
2692 crate::ImageDimension::D1 => 1,
2693 crate::ImageDimension::D2 => 2,
2694 crate::ImageDimension::D3 => 3,
2695 crate::ImageDimension::Cube => 2,
2696 };
2697
2698 if let crate::ImageQuery::Size { .. } = query {
2699 match components {
2700 1 => write!(self.out, "uint(")?,
2701 _ => write!(self.out, "uvec{components}(")?,
2702 }
2703 } else {
2704 write!(self.out, "uint(")?;
2705 }
2706
2707 match query {
2708 crate::ImageQuery::Size { level } => {
2709 match class {
2710 ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
2711 write!(self.out, "textureSize(")?;
2712 self.write_expr(image, ctx)?;
2713 if let Some(expr) = level {
2714 let cast_to_int = matches!(
2715 *ctx.resolve_type(expr, &self.module.types),
2716 TypeInner::Scalar(crate::Scalar {
2717 kind: crate::ScalarKind::Uint,
2718 ..
2719 })
2720 );
2721
2722 write!(self.out, ", ")?;
2723
2724 if cast_to_int {
2725 write!(self.out, "int(")?;
2726 }
2727
2728 self.write_expr(expr, ctx)?;
2729
2730 if cast_to_int {
2731 write!(self.out, ")")?;
2732 }
2733 } else if !multi {
2734 // All textureSize calls requires an lod argument
2735 // except for multisampled samplers
2736 write!(self.out, ", 0")?;
2737 }
2738 }
2739 ImageClass::Storage { .. } => {
2740 write!(self.out, "imageSize(")?;
2741 self.write_expr(image, ctx)?;
2742 }
2743 ImageClass::External => unimplemented!(),
2744 }
2745 write!(self.out, ")")?;
2746 if components != 1 || self.options.version.is_es() {
2747 write!(self.out, ".{}", &"xyz"[..components])?;
2748 }
2749 }
2750 crate::ImageQuery::NumLevels => {
2751 write!(self.out, "textureQueryLevels(",)?;
2752 self.write_expr(image, ctx)?;
2753 write!(self.out, ")",)?;
2754 }
2755 crate::ImageQuery::NumLayers => {
2756 let fun_name = match class {
2757 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
2758 ImageClass::Storage { .. } => "imageSize",
2759 ImageClass::External => unimplemented!(),
2760 };
2761 write!(self.out, "{fun_name}(")?;
2762 self.write_expr(image, ctx)?;
2763 // All textureSize calls requires an lod argument
2764 // except for multisampled samplers
2765 if !class.is_multisampled() {
2766 write!(self.out, ", 0")?;
2767 }
2768 write!(self.out, ")")?;
2769 if components != 1 || self.options.version.is_es() {
2770 write!(self.out, ".{}", back::COMPONENTS[components])?;
2771 }
2772 }
2773 crate::ImageQuery::NumSamples => {
2774 let fun_name = match class {
2775 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
2776 "textureSamples"
2777 }
2778 ImageClass::Storage { .. } => "imageSamples",
2779 ImageClass::External => unimplemented!(),
2780 };
2781 write!(self.out, "{fun_name}(")?;
2782 self.write_expr(image, ctx)?;
2783 write!(self.out, ")",)?;
2784 }
2785 }
2786
2787 write!(self.out, ")")?;
2788 }
2789 Expression::Unary { op, expr } => {
2790 let operator_or_fn = match op {
2791 crate::UnaryOperator::Negate => "-",
2792 crate::UnaryOperator::LogicalNot => {
2793 match *ctx.resolve_type(expr, &self.module.types) {
2794 TypeInner::Vector { .. } => "not",
2795 _ => "!",
2796 }
2797 }
2798 crate::UnaryOperator::BitwiseNot => "~",
2799 };
2800 write!(self.out, "{operator_or_fn}(")?;
2801
2802 self.write_expr(expr, ctx)?;
2803
2804 write!(self.out, ")")?
2805 }
2806 // `Binary` we just write `left op right`, except when dealing with
2807 // comparison operations on vectors as they are implemented with
2808 // builtin functions.
2809 // Once again we wrap everything in parentheses to avoid precedence issues
2810 Expression::Binary {
2811 mut op,
2812 left,
2813 right,
2814 } => {
2815 // Holds `Some(function_name)` if the binary operation is
2816 // implemented as a function call
2817 use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
2818
2819 let left_inner = ctx.resolve_type(left, &self.module.types);
2820 let right_inner = ctx.resolve_type(right, &self.module.types);
2821
2822 let function = match (left_inner, right_inner) {
2823 (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
2824 Bo::Less
2825 | Bo::LessEqual
2826 | Bo::Greater
2827 | Bo::GreaterEqual
2828 | Bo::Equal
2829 | Bo::NotEqual => BinaryOperation::VectorCompare,
2830 Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
2831 Bo::And if scalar.kind == Sk::Bool => {
2832 op = crate::BinaryOperator::LogicalAnd;
2833 BinaryOperation::VectorComponentWise
2834 }
2835 Bo::InclusiveOr if scalar.kind == Sk::Bool => {
2836 op = crate::BinaryOperator::LogicalOr;
2837 BinaryOperation::VectorComponentWise
2838 }
2839 _ => BinaryOperation::Other,
2840 },
2841 _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
2842 (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
2843 Bo::Modulo => BinaryOperation::Modulo,
2844 _ => BinaryOperation::Other,
2845 },
2846 (Some(Sk::Bool), Some(Sk::Bool)) => match op {
2847 Bo::InclusiveOr => {
2848 op = crate::BinaryOperator::LogicalOr;
2849 BinaryOperation::Other
2850 }
2851 Bo::And => {
2852 op = crate::BinaryOperator::LogicalAnd;
2853 BinaryOperation::Other
2854 }
2855 _ => BinaryOperation::Other,
2856 },
2857 _ => BinaryOperation::Other,
2858 },
2859 };
2860
2861 match function {
2862 BinaryOperation::VectorCompare => {
2863 let op_str = match op {
2864 Bo::Less => "lessThan(",
2865 Bo::LessEqual => "lessThanEqual(",
2866 Bo::Greater => "greaterThan(",
2867 Bo::GreaterEqual => "greaterThanEqual(",
2868 Bo::Equal => "equal(",
2869 Bo::NotEqual => "notEqual(",
2870 _ => unreachable!(),
2871 };
2872 write!(self.out, "{op_str}")?;
2873 self.write_expr(left, ctx)?;
2874 write!(self.out, ", ")?;
2875 self.write_expr(right, ctx)?;
2876 write!(self.out, ")")?;
2877 }
2878 BinaryOperation::VectorComponentWise => {
2879 self.write_value_type(left_inner)?;
2880 write!(self.out, "(")?;
2881
2882 let size = match *left_inner {
2883 Ti::Vector { size, .. } => size,
2884 _ => unreachable!(),
2885 };
2886
2887 for i in 0..size as usize {
2888 if i != 0 {
2889 write!(self.out, ", ")?;
2890 }
2891
2892 self.write_expr(left, ctx)?;
2893 write!(self.out, ".{}", back::COMPONENTS[i])?;
2894
2895 write!(self.out, " {} ", back::binary_operation_str(op))?;
2896
2897 self.write_expr(right, ctx)?;
2898 write!(self.out, ".{}", back::COMPONENTS[i])?;
2899 }
2900
2901 write!(self.out, ")")?;
2902 }
2903 // TODO: handle undefined behavior of BinaryOperator::Modulo
2904 //
2905 // sint:
2906 // if right == 0 return 0
2907 // if left == min(type_of(left)) && right == -1 return 0
2908 // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
2909 //
2910 // uint:
2911 // if right == 0 return 0
2912 //
2913 // float:
2914 // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
2915 BinaryOperation::Modulo => {
2916 write!(self.out, "(")?;
2917
2918 // write `e1 - e2 * trunc(e1 / e2)`
2919 self.write_expr(left, ctx)?;
2920 write!(self.out, " - ")?;
2921 self.write_expr(right, ctx)?;
2922 write!(self.out, " * ")?;
2923 write!(self.out, "trunc(")?;
2924 self.write_expr(left, ctx)?;
2925 write!(self.out, " / ")?;
2926 self.write_expr(right, ctx)?;
2927 write!(self.out, ")")?;
2928
2929 write!(self.out, ")")?;
2930 }
2931 BinaryOperation::Other => {
2932 write!(self.out, "(")?;
2933
2934 self.write_expr(left, ctx)?;
2935 write!(self.out, " {} ", back::binary_operation_str(op))?;
2936 self.write_expr(right, ctx)?;
2937
2938 write!(self.out, ")")?;
2939 }
2940 }
2941 }
2942 // `Select` is written as `condition ? accept : reject`
2943 // We wrap everything in parentheses to avoid precedence issues
2944 Expression::Select {
2945 condition,
2946 accept,
2947 reject,
2948 } => {
2949 let cond_ty = ctx.resolve_type(condition, &self.module.types);
2950 let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
2951 true
2952 } else {
2953 false
2954 };
2955
2956 // TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
2957 if vec_select {
2958 // Glsl defines that for mix when the condition is a boolean the first element
2959 // is picked if condition is false and the second if condition is true
2960 write!(self.out, "mix(")?;
2961 self.write_expr(reject, ctx)?;
2962 write!(self.out, ", ")?;
2963 self.write_expr(accept, ctx)?;
2964 write!(self.out, ", ")?;
2965 self.write_expr(condition, ctx)?;
2966 } else {
2967 write!(self.out, "(")?;
2968 self.write_expr(condition, ctx)?;
2969 write!(self.out, " ? ")?;
2970 self.write_expr(accept, ctx)?;
2971 write!(self.out, " : ")?;
2972 self.write_expr(reject, ctx)?;
2973 }
2974
2975 write!(self.out, ")")?
2976 }
2977 // `Derivative` is a function call to a glsl provided function
2978 Expression::Derivative { axis, ctrl, expr } => {
2979 use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
2980 let fun_name = if self.options.version.supports_derivative_control() {
2981 match (axis, ctrl) {
2982 (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
2983 (Axis::X, Ctrl::Fine) => "dFdxFine",
2984 (Axis::X, Ctrl::None) => "dFdx",
2985 (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
2986 (Axis::Y, Ctrl::Fine) => "dFdyFine",
2987 (Axis::Y, Ctrl::None) => "dFdy",
2988 (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
2989 (Axis::Width, Ctrl::Fine) => "fwidthFine",
2990 (Axis::Width, Ctrl::None) => "fwidth",
2991 }
2992 } else {
2993 match axis {
2994 Axis::X => "dFdx",
2995 Axis::Y => "dFdy",
2996 Axis::Width => "fwidth",
2997 }
2998 };
2999 write!(self.out, "{fun_name}(")?;
3000 self.write_expr(expr, ctx)?;
3001 write!(self.out, ")")?
3002 }
3003 // `Relational` is a normal function call to some glsl provided functions
3004 Expression::Relational { fun, argument } => {
3005 use crate::RelationalFunction as Rf;
3006
3007 let fun_name = match fun {
3008 Rf::IsInf => "isinf",
3009 Rf::IsNan => "isnan",
3010 Rf::All => "all",
3011 Rf::Any => "any",
3012 };
3013 write!(self.out, "{fun_name}(")?;
3014
3015 self.write_expr(argument, ctx)?;
3016
3017 write!(self.out, ")")?
3018 }
3019 Expression::Math {
3020 fun,
3021 arg,
3022 arg1,
3023 arg2,
3024 arg3,
3025 } => {
3026 use crate::MathFunction as Mf;
3027
3028 let fun_name = match fun {
3029 // comparison
3030 Mf::Abs => "abs",
3031 Mf::Min => "min",
3032 Mf::Max => "max",
3033 Mf::Clamp => {
3034 let scalar_kind = ctx
3035 .resolve_type(arg, &self.module.types)
3036 .scalar_kind()
3037 .unwrap();
3038 match scalar_kind {
3039 crate::ScalarKind::Float => "clamp",
3040 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3041 // instruction to determine the value. This is fine according to the WGSL spec for float
3042 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3043 _ => {
3044 write!(self.out, "min(max(")?;
3045 self.write_expr(arg, ctx)?;
3046 write!(self.out, ", ")?;
3047 self.write_expr(arg1.unwrap(), ctx)?;
3048 write!(self.out, "), ")?;
3049 self.write_expr(arg2.unwrap(), ctx)?;
3050 write!(self.out, ")")?;
3051
3052 return Ok(());
3053 }
3054 }
3055 }
3056 Mf::Saturate => {
3057 write!(self.out, "clamp(")?;
3058
3059 self.write_expr(arg, ctx)?;
3060
3061 match *ctx.resolve_type(arg, &self.module.types) {
3062 TypeInner::Vector { size, .. } => write!(
3063 self.out,
3064 ", vec{}(0.0), vec{0}(1.0)",
3065 common::vector_size_str(size)
3066 )?,
3067 _ => write!(self.out, ", 0.0, 1.0")?,
3068 }
3069
3070 write!(self.out, ")")?;
3071
3072 return Ok(());
3073 }
3074 // trigonometry
3075 Mf::Cos => "cos",
3076 Mf::Cosh => "cosh",
3077 Mf::Sin => "sin",
3078 Mf::Sinh => "sinh",
3079 Mf::Tan => "tan",
3080 Mf::Tanh => "tanh",
3081 Mf::Acos => "acos",
3082 Mf::Asin => "asin",
3083 Mf::Atan => "atan",
3084 Mf::Asinh => "asinh",
3085 Mf::Acosh => "acosh",
3086 Mf::Atanh => "atanh",
3087 Mf::Radians => "radians",
3088 Mf::Degrees => "degrees",
3089 // glsl doesn't have atan2 function
3090 // use two-argument variation of the atan function
3091 Mf::Atan2 => "atan",
3092 // decomposition
3093 Mf::Ceil => "ceil",
3094 Mf::Floor => "floor",
3095 Mf::Round => "roundEven",
3096 Mf::Fract => "fract",
3097 Mf::Trunc => "trunc",
3098 Mf::Modf => MODF_FUNCTION,
3099 Mf::Frexp => FREXP_FUNCTION,
3100 Mf::Ldexp => "ldexp",
3101 // exponent
3102 Mf::Exp => "exp",
3103 Mf::Exp2 => "exp2",
3104 Mf::Log => "log",
3105 Mf::Log2 => "log2",
3106 Mf::Pow => "pow",
3107 // geometry
3108 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3109 TypeInner::Vector {
3110 scalar:
3111 crate::Scalar {
3112 kind: crate::ScalarKind::Float,
3113 ..
3114 },
3115 ..
3116 } => "dot",
3117 TypeInner::Vector { size, .. } => {
3118 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3119 }
3120 _ => unreachable!(
3121 "Correct TypeInner for dot product should be already validated"
3122 ),
3123 },
3124 fun @ (Mf::Dot4I8Packed | Mf::Dot4U8Packed) => {
3125 let conversion = match fun {
3126 Mf::Dot4I8Packed => "int",
3127 Mf::Dot4U8Packed => "",
3128 _ => unreachable!(),
3129 };
3130
3131 let arg1 = arg1.unwrap();
3132
3133 // Write parentheses around the dot product expression to prevent operators
3134 // with different precedences from applying earlier.
3135 write!(self.out, "(")?;
3136 for i in 0..4 {
3137 // Since `bitfieldExtract` only sign extends if the value is signed, we
3138 // need to convert the inputs to `int` in case of `Dot4I8Packed`. For
3139 // `Dot4U8Packed`, the code below only introduces parenthesis around
3140 // each factor, which aren't strictly needed because both operands are
3141 // baked, but which don't hurt either.
3142 write!(self.out, "bitfieldExtract({conversion}(")?;
3143 self.write_expr(arg, ctx)?;
3144 write!(self.out, "), {}, 8)", i * 8)?;
3145
3146 write!(self.out, " * bitfieldExtract({conversion}(")?;
3147 self.write_expr(arg1, ctx)?;
3148 write!(self.out, "), {}, 8)", i * 8)?;
3149
3150 if i != 3 {
3151 write!(self.out, " + ")?;
3152 }
3153 }
3154 write!(self.out, ")")?;
3155
3156 return Ok(());
3157 }
3158 Mf::Outer => "outerProduct",
3159 Mf::Cross => "cross",
3160 Mf::Distance => "distance",
3161 Mf::Length => "length",
3162 Mf::Normalize => "normalize",
3163 Mf::FaceForward => "faceforward",
3164 Mf::Reflect => "reflect",
3165 Mf::Refract => "refract",
3166 // computational
3167 Mf::Sign => "sign",
3168 Mf::Fma => {
3169 if self.options.version.supports_fma_function() {
3170 // Use the fma function when available
3171 "fma"
3172 } else {
3173 // No fma support. Transform the function call into an arithmetic expression
3174 write!(self.out, "(")?;
3175
3176 self.write_expr(arg, ctx)?;
3177 write!(self.out, " * ")?;
3178
3179 let arg1 =
3180 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3181 self.write_expr(arg1, ctx)?;
3182 write!(self.out, " + ")?;
3183
3184 let arg2 =
3185 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3186 self.write_expr(arg2, ctx)?;
3187 write!(self.out, ")")?;
3188
3189 return Ok(());
3190 }
3191 }
3192 Mf::Mix => "mix",
3193 Mf::Step => "step",
3194 Mf::SmoothStep => "smoothstep",
3195 Mf::Sqrt => "sqrt",
3196 Mf::InverseSqrt => "inversesqrt",
3197 Mf::Inverse => "inverse",
3198 Mf::Transpose => "transpose",
3199 Mf::Determinant => "determinant",
3200 Mf::QuantizeToF16 => match *ctx.resolve_type(arg, &self.module.types) {
3201 TypeInner::Scalar { .. } => {
3202 write!(self.out, "unpackHalf2x16(packHalf2x16(vec2(")?;
3203 self.write_expr(arg, ctx)?;
3204 write!(self.out, "))).x")?;
3205 return Ok(());
3206 }
3207 TypeInner::Vector {
3208 size: crate::VectorSize::Bi,
3209 ..
3210 } => {
3211 write!(self.out, "unpackHalf2x16(packHalf2x16(")?;
3212 self.write_expr(arg, ctx)?;
3213 write!(self.out, "))")?;
3214 return Ok(());
3215 }
3216 TypeInner::Vector {
3217 size: crate::VectorSize::Tri,
3218 ..
3219 } => {
3220 write!(self.out, "vec3(unpackHalf2x16(packHalf2x16(")?;
3221 self.write_expr(arg, ctx)?;
3222 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3223 self.write_expr(arg, ctx)?;
3224 write!(self.out, ".zz)).x)")?;
3225 return Ok(());
3226 }
3227 TypeInner::Vector {
3228 size: crate::VectorSize::Quad,
3229 ..
3230 } => {
3231 write!(self.out, "vec4(unpackHalf2x16(packHalf2x16(")?;
3232 self.write_expr(arg, ctx)?;
3233 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3234 self.write_expr(arg, ctx)?;
3235 write!(self.out, ".zw)))")?;
3236 return Ok(());
3237 }
3238 _ => unreachable!(
3239 "Correct TypeInner for QuantizeToF16 should be already validated"
3240 ),
3241 },
3242 // bits
3243 Mf::CountTrailingZeros => {
3244 match *ctx.resolve_type(arg, &self.module.types) {
3245 TypeInner::Vector { size, scalar, .. } => {
3246 let s = common::vector_size_str(size);
3247 if let crate::ScalarKind::Uint = scalar.kind {
3248 write!(self.out, "min(uvec{s}(findLSB(")?;
3249 self.write_expr(arg, ctx)?;
3250 write!(self.out, ")), uvec{s}(32u))")?;
3251 } else {
3252 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3253 self.write_expr(arg, ctx)?;
3254 write!(self.out, ")), uvec{s}(32u)))")?;
3255 }
3256 }
3257 TypeInner::Scalar(scalar) => {
3258 if let crate::ScalarKind::Uint = scalar.kind {
3259 write!(self.out, "min(uint(findLSB(")?;
3260 self.write_expr(arg, ctx)?;
3261 write!(self.out, ")), 32u)")?;
3262 } else {
3263 write!(self.out, "int(min(uint(findLSB(")?;
3264 self.write_expr(arg, ctx)?;
3265 write!(self.out, ")), 32u))")?;
3266 }
3267 }
3268 _ => unreachable!(),
3269 };
3270 return Ok(());
3271 }
3272 Mf::CountLeadingZeros => {
3273 if self.options.version.supports_integer_functions() {
3274 match *ctx.resolve_type(arg, &self.module.types) {
3275 TypeInner::Vector { size, scalar } => {
3276 let s = common::vector_size_str(size);
3277
3278 if let crate::ScalarKind::Uint = scalar.kind {
3279 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3280 self.write_expr(arg, ctx)?;
3281 write!(self.out, "))")?;
3282 } else {
3283 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3284 self.write_expr(arg, ctx)?;
3285 write!(self.out, "), ivec{s}(0), lessThan(")?;
3286 self.write_expr(arg, ctx)?;
3287 write!(self.out, ", ivec{s}(0)))")?;
3288 }
3289 }
3290 TypeInner::Scalar(scalar) => {
3291 if let crate::ScalarKind::Uint = scalar.kind {
3292 write!(self.out, "uint(31 - findMSB(")?;
3293 } else {
3294 write!(self.out, "(")?;
3295 self.write_expr(arg, ctx)?;
3296 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3297 }
3298
3299 self.write_expr(arg, ctx)?;
3300 write!(self.out, "))")?;
3301 }
3302 _ => unreachable!(),
3303 };
3304 } else {
3305 match *ctx.resolve_type(arg, &self.module.types) {
3306 TypeInner::Vector { size, scalar } => {
3307 let s = common::vector_size_str(size);
3308
3309 if let crate::ScalarKind::Uint = scalar.kind {
3310 write!(self.out, "uvec{s}(")?;
3311 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3312 self.write_expr(arg, ctx)?;
3313 write!(self.out, ") + 0.5)))")?;
3314 } else {
3315 write!(self.out, "ivec{s}(")?;
3316 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3317 self.write_expr(arg, ctx)?;
3318 write!(self.out, ") + 0.5)), ")?;
3319 write!(self.out, "vec{s}(0.0), lessThan(")?;
3320 self.write_expr(arg, ctx)?;
3321 write!(self.out, ", ivec{s}(0u))))")?;
3322 }
3323 }
3324 TypeInner::Scalar(scalar) => {
3325 if let crate::ScalarKind::Uint = scalar.kind {
3326 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3327 self.write_expr(arg, ctx)?;
3328 write!(self.out, ") + 0.5)))")?;
3329 } else {
3330 write!(self.out, "(")?;
3331 self.write_expr(arg, ctx)?;
3332 write!(self.out, " < 0 ? 0 : int(")?;
3333 write!(self.out, "31.0 - floor(log2(float(")?;
3334 self.write_expr(arg, ctx)?;
3335 write!(self.out, ") + 0.5))))")?;
3336 }
3337 }
3338 _ => unreachable!(),
3339 };
3340 }
3341
3342 return Ok(());
3343 }
3344 Mf::CountOneBits => "bitCount",
3345 Mf::ReverseBits => "bitfieldReverse",
3346 Mf::ExtractBits => {
3347 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3348 // to first sanitize the offset and count first. If we don't do this, AMD and Intel chips
3349 // will return out-of-spec values if the extracted range is not within the bit width.
3350 //
3351 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3352 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3353 //
3354 // w = sizeof(x) * 8
3355 // o = min(offset, w)
3356 // c = min(count, w - o)
3357 //
3358 // bitfieldExtract(x, o, c)
3359 //
3360 // extract_bits(e, min(offset, w), min(count, w - min(offset, w))))
3361 let scalar_bits = ctx
3362 .resolve_type(arg, &self.module.types)
3363 .scalar_width()
3364 .unwrap()
3365 * 8;
3366
3367 write!(self.out, "bitfieldExtract(")?;
3368 self.write_expr(arg, ctx)?;
3369 write!(self.out, ", int(min(")?;
3370 self.write_expr(arg1.unwrap(), ctx)?;
3371 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3372 self.write_expr(arg2.unwrap(), ctx)?;
3373 write!(self.out, ", {scalar_bits}u - min(")?;
3374 self.write_expr(arg1.unwrap(), ctx)?;
3375 write!(self.out, ", {scalar_bits}u))))")?;
3376
3377 return Ok(());
3378 }
3379 Mf::InsertBits => {
3380 // InsertBits has the same considerations as ExtractBits above
3381 let scalar_bits = ctx
3382 .resolve_type(arg, &self.module.types)
3383 .scalar_width()
3384 .unwrap()
3385 * 8;
3386
3387 write!(self.out, "bitfieldInsert(")?;
3388 self.write_expr(arg, ctx)?;
3389 write!(self.out, ", ")?;
3390 self.write_expr(arg1.unwrap(), ctx)?;
3391 write!(self.out, ", int(min(")?;
3392 self.write_expr(arg2.unwrap(), ctx)?;
3393 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3394 self.write_expr(arg3.unwrap(), ctx)?;
3395 write!(self.out, ", {scalar_bits}u - min(")?;
3396 self.write_expr(arg2.unwrap(), ctx)?;
3397 write!(self.out, ", {scalar_bits}u))))")?;
3398
3399 return Ok(());
3400 }
3401 Mf::FirstTrailingBit => "findLSB",
3402 Mf::FirstLeadingBit => "findMSB",
3403 // data packing
3404 Mf::Pack4x8snorm => {
3405 if self.options.version.supports_pack_unpack_4x8() {
3406 "packSnorm4x8"
3407 } else {
3408 // polyfill should go here. Needs a corresponding entry in `need_bake_expression`
3409 return Err(Error::UnsupportedExternal("packSnorm4x8".into()));
3410 }
3411 }
3412 Mf::Pack4x8unorm => {
3413 if self.options.version.supports_pack_unpack_4x8() {
3414 "packUnorm4x8"
3415 } else {
3416 return Err(Error::UnsupportedExternal("packUnorm4x8".to_owned()));
3417 }
3418 }
3419 Mf::Pack2x16snorm => {
3420 if self.options.version.supports_pack_unpack_snorm_2x16() {
3421 "packSnorm2x16"
3422 } else {
3423 return Err(Error::UnsupportedExternal("packSnorm2x16".to_owned()));
3424 }
3425 }
3426 Mf::Pack2x16unorm => {
3427 if self.options.version.supports_pack_unpack_unorm_2x16() {
3428 "packUnorm2x16"
3429 } else {
3430 return Err(Error::UnsupportedExternal("packUnorm2x16".to_owned()));
3431 }
3432 }
3433 Mf::Pack2x16float => {
3434 if self.options.version.supports_pack_unpack_half_2x16() {
3435 "packHalf2x16"
3436 } else {
3437 return Err(Error::UnsupportedExternal("packHalf2x16".to_owned()));
3438 }
3439 }
3440
3441 fun @ (Mf::Pack4xI8 | Mf::Pack4xU8 | Mf::Pack4xI8Clamp | Mf::Pack4xU8Clamp) => {
3442 let was_signed = matches!(fun, Mf::Pack4xI8 | Mf::Pack4xI8Clamp);
3443 let clamp_bounds = match fun {
3444 Mf::Pack4xI8Clamp => Some(("-128", "127")),
3445 Mf::Pack4xU8Clamp => Some(("0", "255")),
3446 _ => None,
3447 };
3448 let const_suffix = if was_signed { "" } else { "u" };
3449 if was_signed {
3450 write!(self.out, "uint(")?;
3451 }
3452 let write_arg = |this: &mut Self| -> BackendResult {
3453 if let Some((min, max)) = clamp_bounds {
3454 write!(this.out, "clamp(")?;
3455 this.write_expr(arg, ctx)?;
3456 write!(this.out, ", {min}{const_suffix}, {max}{const_suffix})")?;
3457 } else {
3458 this.write_expr(arg, ctx)?;
3459 }
3460 Ok(())
3461 };
3462 write!(self.out, "(")?;
3463 write_arg(self)?;
3464 write!(self.out, "[0] & 0xFF{const_suffix}) | ((")?;
3465 write_arg(self)?;
3466 write!(self.out, "[1] & 0xFF{const_suffix}) << 8) | ((")?;
3467 write_arg(self)?;
3468 write!(self.out, "[2] & 0xFF{const_suffix}) << 16) | ((")?;
3469 write_arg(self)?;
3470 write!(self.out, "[3] & 0xFF{const_suffix}) << 24)")?;
3471 if was_signed {
3472 write!(self.out, ")")?;
3473 }
3474
3475 return Ok(());
3476 }
3477 // data unpacking
3478 Mf::Unpack2x16float => {
3479 if self.options.version.supports_pack_unpack_half_2x16() {
3480 "unpackHalf2x16"
3481 } else {
3482 return Err(Error::UnsupportedExternal("unpackHalf2x16".into()));
3483 }
3484 }
3485 Mf::Unpack2x16snorm => {
3486 if self.options.version.supports_pack_unpack_snorm_2x16() {
3487 "unpackSnorm2x16"
3488 } else {
3489 let scale = 32767;
3490
3491 write!(self.out, "(vec2(ivec2(")?;
3492 self.write_expr(arg, ctx)?;
3493 write!(self.out, " << 16, ")?;
3494 self.write_expr(arg, ctx)?;
3495 write!(self.out, ") >> 16) / {scale}.0)")?;
3496 return Ok(());
3497 }
3498 }
3499 Mf::Unpack2x16unorm => {
3500 if self.options.version.supports_pack_unpack_unorm_2x16() {
3501 "unpackUnorm2x16"
3502 } else {
3503 let scale = 65535;
3504
3505 write!(self.out, "(vec2(")?;
3506 self.write_expr(arg, ctx)?;
3507 write!(self.out, " & 0xFFFFu, ")?;
3508 self.write_expr(arg, ctx)?;
3509 write!(self.out, " >> 16) / {scale}.0)")?;
3510 return Ok(());
3511 }
3512 }
3513 Mf::Unpack4x8snorm => {
3514 if self.options.version.supports_pack_unpack_4x8() {
3515 "unpackSnorm4x8"
3516 } else {
3517 let scale = 127;
3518
3519 write!(self.out, "(vec4(ivec4(")?;
3520 self.write_expr(arg, ctx)?;
3521 write!(self.out, " << 24, ")?;
3522 self.write_expr(arg, ctx)?;
3523 write!(self.out, " << 16, ")?;
3524 self.write_expr(arg, ctx)?;
3525 write!(self.out, " << 8, ")?;
3526 self.write_expr(arg, ctx)?;
3527 write!(self.out, ") >> 24) / {scale}.0)")?;
3528 return Ok(());
3529 }
3530 }
3531 Mf::Unpack4x8unorm => {
3532 if self.options.version.supports_pack_unpack_4x8() {
3533 "unpackUnorm4x8"
3534 } else {
3535 let scale = 255;
3536
3537 write!(self.out, "(vec4(")?;
3538 self.write_expr(arg, ctx)?;
3539 write!(self.out, " & 0xFFu, ")?;
3540 self.write_expr(arg, ctx)?;
3541 write!(self.out, " >> 8 & 0xFFu, ")?;
3542 self.write_expr(arg, ctx)?;
3543 write!(self.out, " >> 16 & 0xFFu, ")?;
3544 self.write_expr(arg, ctx)?;
3545 write!(self.out, " >> 24) / {scale}.0)")?;
3546 return Ok(());
3547 }
3548 }
3549 fun @ (Mf::Unpack4xI8 | Mf::Unpack4xU8) => {
3550 let sign_prefix = match fun {
3551 Mf::Unpack4xI8 => 'i',
3552 Mf::Unpack4xU8 => 'u',
3553 _ => unreachable!(),
3554 };
3555 write!(self.out, "{sign_prefix}vec4(")?;
3556 for i in 0..4 {
3557 write!(self.out, "bitfieldExtract(")?;
3558 // Since bitfieldExtract only sign extends if the value is signed, this
3559 // cast is needed
3560 match fun {
3561 Mf::Unpack4xI8 => {
3562 write!(self.out, "int(")?;
3563 self.write_expr(arg, ctx)?;
3564 write!(self.out, ")")?;
3565 }
3566 Mf::Unpack4xU8 => self.write_expr(arg, ctx)?,
3567 _ => unreachable!(),
3568 };
3569 write!(self.out, ", {}, 8)", i * 8)?;
3570 if i != 3 {
3571 write!(self.out, ", ")?;
3572 }
3573 }
3574 write!(self.out, ")")?;
3575
3576 return Ok(());
3577 }
3578 };
3579
3580 let extract_bits = fun == Mf::ExtractBits;
3581 let insert_bits = fun == Mf::InsertBits;
3582
3583 // Some GLSL functions always return signed integers (like findMSB),
3584 // so they need to be cast to uint if the argument is also an uint.
3585 let ret_might_need_int_to_uint = matches!(
3586 fun,
3587 Mf::FirstTrailingBit | Mf::FirstLeadingBit | Mf::CountOneBits | Mf::Abs
3588 );
3589
3590 // Some GLSL functions only accept signed integers (like abs),
3591 // so they need their argument cast from uint to int.
3592 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
3593
3594 // Check if the argument is an unsigned integer and return the vector size
3595 // in case it's a vector
3596 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
3597 TypeInner::Scalar(crate::Scalar {
3598 kind: crate::ScalarKind::Uint,
3599 ..
3600 }) => Some(None),
3601 TypeInner::Vector {
3602 scalar:
3603 crate::Scalar {
3604 kind: crate::ScalarKind::Uint,
3605 ..
3606 },
3607 size,
3608 } => Some(Some(size)),
3609 _ => None,
3610 };
3611
3612 // Cast to uint if the function needs it
3613 if ret_might_need_int_to_uint {
3614 if let Some(maybe_size) = maybe_uint_size {
3615 match maybe_size {
3616 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
3617 None => write!(self.out, "uint(")?,
3618 }
3619 }
3620 }
3621
3622 write!(self.out, "{fun_name}(")?;
3623
3624 // Cast to int if the function needs it
3625 if arg_might_need_uint_to_int {
3626 if let Some(maybe_size) = maybe_uint_size {
3627 match maybe_size {
3628 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
3629 None => write!(self.out, "int(")?,
3630 }
3631 }
3632 }
3633
3634 self.write_expr(arg, ctx)?;
3635
3636 // Close the cast from uint to int
3637 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
3638 write!(self.out, ")")?
3639 }
3640
3641 if let Some(arg) = arg1 {
3642 write!(self.out, ", ")?;
3643 if extract_bits {
3644 write!(self.out, "int(")?;
3645 self.write_expr(arg, ctx)?;
3646 write!(self.out, ")")?;
3647 } else {
3648 self.write_expr(arg, ctx)?;
3649 }
3650 }
3651 if let Some(arg) = arg2 {
3652 write!(self.out, ", ")?;
3653 if extract_bits || insert_bits {
3654 write!(self.out, "int(")?;
3655 self.write_expr(arg, ctx)?;
3656 write!(self.out, ")")?;
3657 } else {
3658 self.write_expr(arg, ctx)?;
3659 }
3660 }
3661 if let Some(arg) = arg3 {
3662 write!(self.out, ", ")?;
3663 if insert_bits {
3664 write!(self.out, "int(")?;
3665 self.write_expr(arg, ctx)?;
3666 write!(self.out, ")")?;
3667 } else {
3668 self.write_expr(arg, ctx)?;
3669 }
3670 }
3671 write!(self.out, ")")?;
3672
3673 // Close the cast from int to uint
3674 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
3675 write!(self.out, ")")?
3676 }
3677 }
3678 // `As` is always a call.
3679 // If `convert` is true the function name is the type
3680 // Else the function name is one of the glsl provided bitcast functions
3681 Expression::As {
3682 expr,
3683 kind: target_kind,
3684 convert,
3685 } => {
3686 let inner = ctx.resolve_type(expr, &self.module.types);
3687 match convert {
3688 Some(width) => {
3689 // this is similar to `write_type`, but with the target kind
3690 let scalar = glsl_scalar(crate::Scalar {
3691 kind: target_kind,
3692 width,
3693 })?;
3694 match *inner {
3695 TypeInner::Matrix { columns, rows, .. } => write!(
3696 self.out,
3697 "{}mat{}x{}",
3698 scalar.prefix, columns as u8, rows as u8
3699 )?,
3700 TypeInner::Vector { size, .. } => {
3701 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
3702 }
3703 _ => write!(self.out, "{}", scalar.full)?,
3704 }
3705
3706 write!(self.out, "(")?;
3707 self.write_expr(expr, ctx)?;
3708 write!(self.out, ")")?
3709 }
3710 None => {
3711 use crate::ScalarKind as Sk;
3712
3713 let target_vector_type = match *inner {
3714 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
3715 size,
3716 scalar: crate::Scalar {
3717 kind: target_kind,
3718 width: scalar.width,
3719 },
3720 }),
3721 _ => None,
3722 };
3723
3724 let source_kind = inner.scalar_kind().unwrap();
3725
3726 match (source_kind, target_kind, target_vector_type) {
3727 // No conversion needed
3728 (Sk::Sint, Sk::Sint, _)
3729 | (Sk::Uint, Sk::Uint, _)
3730 | (Sk::Float, Sk::Float, _)
3731 | (Sk::Bool, Sk::Bool, _) => {
3732 self.write_expr(expr, ctx)?;
3733 return Ok(());
3734 }
3735
3736 // Cast to/from floats
3737 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
3738 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
3739 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
3740 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
3741
3742 // Cast between vector types
3743 (_, _, Some(vector)) => {
3744 self.write_value_type(&vector)?;
3745 }
3746
3747 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
3748 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
3749 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
3750 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
3751 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
3752 write!(self.out, "bool")?
3753 }
3754
3755 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
3756 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
3757 };
3758
3759 write!(self.out, "(")?;
3760 self.write_expr(expr, ctx)?;
3761 write!(self.out, ")")?;
3762 }
3763 }
3764 }
3765 // These expressions never show up in `Emit`.
3766 Expression::CallResult(_)
3767 | Expression::AtomicResult { .. }
3768 | Expression::RayQueryProceedResult
3769 | Expression::WorkGroupUniformLoadResult { .. }
3770 | Expression::SubgroupOperationResult { .. }
3771 | Expression::SubgroupBallotResult => unreachable!(),
3772 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
3773 Expression::ArrayLength(expr) => {
3774 write!(self.out, "uint(")?;
3775 self.write_expr(expr, ctx)?;
3776 write!(self.out, ".length())")?
3777 }
3778 // not supported yet
3779 Expression::RayQueryGetIntersection { .. }
3780 | Expression::RayQueryVertexPositions { .. }
3781 | Expression::CooperativeLoad { .. }
3782 | Expression::CooperativeMultiplyAdd { .. } => unreachable!(),
3783 }
3784
3785 Ok(())
3786 }
3787
3788 /// Helper function to write the local holding the clamped lod
3789 fn write_clamped_lod(
3790 &mut self,
3791 ctx: &back::FunctionCtx,
3792 expr: Handle<crate::Expression>,
3793 image: Handle<crate::Expression>,
3794 level_expr: Handle<crate::Expression>,
3795 ) -> Result<(), Error> {
3796 // Define our local and start a call to `clamp`
3797 write!(
3798 self.out,
3799 "int {}{} = clamp(",
3800 Baked(expr),
3801 CLAMPED_LOD_SUFFIX
3802 )?;
3803 // Write the lod that will be clamped
3804 self.write_expr(level_expr, ctx)?;
3805 // Set the min value to 0 and start a call to `textureQueryLevels` to get
3806 // the maximum value
3807 write!(self.out, ", 0, textureQueryLevels(")?;
3808 // Write the target image as an argument to `textureQueryLevels`
3809 self.write_expr(image, ctx)?;
3810 // Close the call to `textureQueryLevels` subtract 1 from it since
3811 // the lod argument is 0 based, close the `clamp` call and end the
3812 // local declaration statement.
3813 writeln!(self.out, ") - 1);")?;
3814
3815 Ok(())
3816 }
3817
3818 // Helper method used to retrieve how many elements a coordinate vector
3819 // for the images operations need.
3820 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
3821 // openGL es doesn't have 1D images so we need workaround it
3822 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
3823 // Get how many components the coordinate vector needs for the dimensions only
3824 let tex_coord_size = match dim {
3825 crate::ImageDimension::D1 => 1,
3826 crate::ImageDimension::D2 => 2,
3827 crate::ImageDimension::D3 => 3,
3828 crate::ImageDimension::Cube => 2,
3829 };
3830 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
3831 // and another 1 if we need to workaround 1D images by making them 2D
3832 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
3833 }
3834
3835 /// Helper method to write the coordinate vector for image operations
3836 fn write_texture_coord(
3837 &mut self,
3838 ctx: &back::FunctionCtx,
3839 vector_size: u8,
3840 coordinate: Handle<crate::Expression>,
3841 array_index: Option<Handle<crate::Expression>>,
3842 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
3843 tex_1d_hack: bool,
3844 ) -> Result<(), Error> {
3845 match array_index {
3846 // If the image needs an array indice we need to add it to the end of our
3847 // coordinate vector, to do so we will use the `ivec(ivec, scalar)`
3848 // constructor notation (NOTE: the inner `ivec` can also be a scalar, this
3849 // is important for 1D arrayed images).
3850 Some(layer_expr) => {
3851 write!(self.out, "ivec{vector_size}(")?;
3852 self.write_expr(coordinate, ctx)?;
3853 write!(self.out, ", ")?;
3854 // If we are replacing sampler1D with sampler2D we also need
3855 // to add another zero to the coordinates vector for the y component
3856 if tex_1d_hack {
3857 write!(self.out, "0, ")?;
3858 }
3859 self.write_expr(layer_expr, ctx)?;
3860 write!(self.out, ")")?;
3861 }
3862 // Otherwise write just the expression (and the 1D hack if needed)
3863 None => {
3864 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
3865 TypeInner::Scalar(crate::Scalar {
3866 kind: crate::ScalarKind::Uint,
3867 ..
3868 }) => Some(None),
3869 TypeInner::Vector {
3870 size,
3871 scalar:
3872 crate::Scalar {
3873 kind: crate::ScalarKind::Uint,
3874 ..
3875 },
3876 } => Some(Some(size as u32)),
3877 _ => None,
3878 };
3879 if tex_1d_hack {
3880 write!(self.out, "ivec2(")?;
3881 } else if uvec_size.is_some() {
3882 match uvec_size {
3883 Some(None) => write!(self.out, "int(")?,
3884 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
3885 _ => {}
3886 }
3887 }
3888 self.write_expr(coordinate, ctx)?;
3889 if tex_1d_hack {
3890 write!(self.out, ", 0)")?;
3891 } else if uvec_size.is_some() {
3892 write!(self.out, ")")?;
3893 }
3894 }
3895 }
3896
3897 Ok(())
3898 }
3899
3900 /// Helper method to write the `ImageStore` statement
3901 fn write_image_store(
3902 &mut self,
3903 ctx: &back::FunctionCtx,
3904 image: Handle<crate::Expression>,
3905 coordinate: Handle<crate::Expression>,
3906 array_index: Option<Handle<crate::Expression>>,
3907 value: Handle<crate::Expression>,
3908 ) -> Result<(), Error> {
3909 use crate::ImageDimension as IDim;
3910
3911 // NOTE: openGL requires that `imageStore`s have no effects when the texel is invalid
3912 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
3913
3914 // This will only panic if the module is invalid
3915 let dim = match *ctx.resolve_type(image, &self.module.types) {
3916 TypeInner::Image { dim, .. } => dim,
3917 _ => unreachable!(),
3918 };
3919
3920 // Begin our call to `imageStore`
3921 write!(self.out, "imageStore(")?;
3922 self.write_expr(image, ctx)?;
3923 // Separate the image argument from the coordinates
3924 write!(self.out, ", ")?;
3925
3926 // openGL es doesn't have 1D images so we need workaround it
3927 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
3928 // Write the coordinate vector
3929 self.write_texture_coord(
3930 ctx,
3931 // Get the size of the coordinate vector
3932 self.get_coordinate_vector_size(dim, array_index.is_some()),
3933 coordinate,
3934 array_index,
3935 tex_1d_hack,
3936 )?;
3937
3938 // Separate the coordinate from the value to write and write the expression
3939 // of the value to write.
3940 write!(self.out, ", ")?;
3941 self.write_expr(value, ctx)?;
3942 // End the call to `imageStore` and the statement.
3943 writeln!(self.out, ");")?;
3944
3945 Ok(())
3946 }
3947
3948 /// Helper method to write the `ImageAtomic` statement
3949 fn write_image_atomic(
3950 &mut self,
3951 ctx: &back::FunctionCtx,
3952 image: Handle<crate::Expression>,
3953 coordinate: Handle<crate::Expression>,
3954 array_index: Option<Handle<crate::Expression>>,
3955 fun: crate::AtomicFunction,
3956 value: Handle<crate::Expression>,
3957 ) -> Result<(), Error> {
3958 use crate::ImageDimension as IDim;
3959
3960 // NOTE: openGL requires that `imageAtomic`s have no effects when the texel is invalid
3961 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
3962
3963 // This will only panic if the module is invalid
3964 let dim = match *ctx.resolve_type(image, &self.module.types) {
3965 TypeInner::Image { dim, .. } => dim,
3966 _ => unreachable!(),
3967 };
3968
3969 // Begin our call to `imageAtomic`
3970 let fun_str = fun.to_glsl();
3971 write!(self.out, "imageAtomic{fun_str}(")?;
3972 self.write_expr(image, ctx)?;
3973 // Separate the image argument from the coordinates
3974 write!(self.out, ", ")?;
3975
3976 // openGL es doesn't have 1D images so we need workaround it
3977 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
3978 // Write the coordinate vector
3979 self.write_texture_coord(
3980 ctx,
3981 // Get the size of the coordinate vector
3982 self.get_coordinate_vector_size(dim, false),
3983 coordinate,
3984 array_index,
3985 tex_1d_hack,
3986 )?;
3987
3988 // Separate the coordinate from the value to write and write the expression
3989 // of the value to write.
3990 write!(self.out, ", ")?;
3991 self.write_expr(value, ctx)?;
3992 // End the call to `imageAtomic` and the statement.
3993 writeln!(self.out, ");")?;
3994
3995 Ok(())
3996 }
3997
    /// Helper method for writing an `ImageLoad` expression.
    ///
    /// Emits either a `texelFetch` call (sampled images) or an `imageLoad`
    /// call (storage images), wrapped in whatever bounds-checking code the
    /// effective policy requires:
    ///
    /// * `ReadZeroSkipWrite` — a parenthesized ternary that validates the
    ///   lod, sample index and coordinates, yielding a zero vector when any
    ///   check fails.
    /// * `Restrict` — clamps the coordinates (and sample index) into range,
    ///   using a previously baked clamped-lod temporary for mipmapped images.
    /// * `Unchecked` — the raw call.
    ///
    /// `handle` identifies the `ImageLoad` expression itself and is only used
    /// to name the baked clamped-lod temporary under `Restrict`. Depth images
    /// are rejected with an error since no GLSL fetch function exists for them.
    #[allow(clippy::too_many_arguments)]
    fn write_image_load(
        &mut self,
        handle: Handle<crate::Expression>,
        ctx: &back::FunctionCtx,
        image: Handle<crate::Expression>,
        coordinate: Handle<crate::Expression>,
        array_index: Option<Handle<crate::Expression>>,
        sample: Option<Handle<crate::Expression>>,
        level: Option<Handle<crate::Expression>>,
    ) -> Result<(), Error> {
        use crate::ImageDimension as IDim;

        // `ImageLoad` is a bit complicated.
        // There are two functions one for sampled
        // images another for storage images, the former uses `texelFetch` and the
        // latter uses `imageLoad`.
        //
        // Furthermore we have `level` which is always `Some` for sampled images
        // and `None` for storage images, so we end up with two functions:
        // - `texelFetch(image, coordinate, level)` for sampled images
        // - `imageLoad(image, coordinate)` for storage images
        //
        // Finally we also have to consider bounds checking, for storage images
        // this is easy since openGL requires that invalid texels always return
        // 0, for sampled images we need to either verify that all arguments are
        // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).

        // This will only panic if the module is invalid
        let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
            TypeInner::Image {
                dim,
                arrayed: _,
                class,
            } => (dim, class),
            _ => unreachable!(),
        };

        // Get the name of the function to be used for the load operation
        // and the policy to be used with it.
        let (fun_name, policy) = match class {
            // Sampled images inherit the policy from the user passed policies
            crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
            crate::ImageClass::Storage { .. } => {
                // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
                // "Invalid image loads will return a vector where the value of R, G, and B components
                // is 0 and the value of the A component is undefined."
                //
                // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
                // "Invalid image loads will return zero."
                //
                // So, we only inject bounds checks for ES
                let policy = if self.options.version.is_es() {
                    self.policies.image_load
                } else {
                    proc::BoundsCheckPolicy::Unchecked
                };
                ("imageLoad", policy)
            }
            // TODO: Is there even a function for this?
            crate::ImageClass::Depth { multi: _ } => {
                return Err(Error::Custom(
                    "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
                ))
            }
            crate::ImageClass::External => unimplemented!(),
        };

        // openGL es doesn't have 1D images so we need workaround it
        let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
        // Get the size of the coordinate vector
        let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());

        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // To write the bounds checks for `ReadZeroSkipWrite` we will use a
            // ternary operator since we are in the middle of an expression and
            // need to return a value.
            //
            // NOTE: glsl does short circuit when evaluating logical
            // expressions so we can be sure that after we test a
            // condition it will be true for the next ones

            // Write parentheses around the ternary operator to prevent problems with
            // expressions emitted before or after it having more precedence
            write!(self.out, "(",)?;

            // The lod check needs to precede the size check since we need
            // to use the lod to get the size of the image at that level.
            if let Some(level_expr) = level {
                self.write_expr(level_expr, ctx)?;
                write!(self.out, " < textureQueryLevels(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // Check that the sample arguments doesn't exceed the number of samples
            if let Some(sample_expr) = sample {
                self.write_expr(sample_expr, ctx)?;
                write!(self.out, " < textureSamples(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // We now need to write the size checks for the coordinates and array index
            // first we write the comparison function in case the image is 1D non arrayed
            // (and no 1D to 2D hack was needed) we are comparing scalars so the less than
            // operator will suffice, but otherwise we'll be comparing two vectors so we'll
            // need to use the `lessThan` function but it returns a vector of booleans (one
            // for each comparison) so we need to fold it all in one scalar boolean, since
            // we want all comparisons to pass we use the `all` function which will only
            // return `true` if all the elements of the boolean vector are also `true`.
            //
            // So we'll end with one of the following forms
            // - `coord < textureSize(image, lod)` for 1D images
            // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
            // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
            //   for arrayed images
            // - `all(lessThan(coord, textureSize(image)))` for multi sampled images

            if vector_size != 1 {
                write!(self.out, "all(lessThan(")?;
            }

            // Write the coordinate vector
            self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

            if vector_size != 1 {
                // If we used the `lessThan` function we need to separate the
                // coordinates from the image size.
                write!(self.out, ", ")?;
            } else {
                // If we didn't use it (ie. 1D images) we perform the comparison
                // using the less than operator.
                write!(self.out, " < ")?;
            }

            // Call `textureSize` to get our image size
            write!(self.out, "textureSize(")?;
            self.write_expr(image, ctx)?;
            // `textureSize` uses the lod as a second argument for mipmapped images
            if let Some(level_expr) = level {
                // Separate the image from the lod
                write!(self.out, ", ")?;
                self.write_expr(level_expr, ctx)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            if vector_size != 1 {
                // Close the `all` and `lessThan` calls
                write!(self.out, "))")?;
            }

            // Finally end the condition part of the ternary operator
            write!(self.out, " ? ")?;
        }

        // Begin the call to the function used to load the texel
        write!(self.out, "{fun_name}(")?;
        self.write_expr(image, ctx)?;
        write!(self.out, ", ")?;

        // If we are using `Restrict` bounds checking we need to pass valid texel
        // coordinates, to do so we use the `clamp` function to get a value between
        // 0 and the image size - 1 (indexing begins at 0)
        if let proc::BoundsCheckPolicy::Restrict = policy {
            write!(self.out, "clamp(")?;
        }

        // Write the coordinate vector
        self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

        // If we are using `Restrict` bounds checking we need to write the rest of the
        // clamp we initiated before writing the coordinates.
        if let proc::BoundsCheckPolicy::Restrict = policy {
            // Write the min value 0
            if vector_size == 1 {
                write!(self.out, ", 0")?;
            } else {
                write!(self.out, ", ivec{vector_size}(0)")?;
            }
            // Start the `textureSize` call to use as the max value.
            write!(self.out, ", textureSize(")?;
            self.write_expr(image, ctx)?;
            // If the image is mipmapped we need to add the lod argument to the
            // `textureSize` call, but this needs to be the clamped lod, this should
            // have been generated earlier and put in a local.
            if class.is_mipmapped() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            // Subtract 1 from the `textureSize` call since the coordinates are zero based.
            if vector_size == 1 {
                write!(self.out, " - 1")?;
            } else {
                write!(self.out, " - ivec{vector_size}(1)")?;
            }

            // Close the `clamp` call
            write!(self.out, ")")?;

            // Add the clamped lod (if present) as the second argument to the
            // image load function.
            if level.is_some() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }

            // If a sample argument is needed we need to clamp it between 0 and
            // the number of samples the image has.
            if let Some(sample_expr) = sample {
                write!(self.out, ", clamp(")?;
                self.write_expr(sample_expr, ctx)?;
                // Set the min value to 0 and start the call to `textureSamples`
                write!(self.out, ", 0, textureSamples(")?;
                self.write_expr(image, ctx)?;
                // Close the `textureSamples` call, subtract 1 from it since the sample
                // argument is zero based, and close the `clamp` call
                writeln!(self.out, ") - 1)")?;
            }
        } else if let Some(sample_or_level) = sample.or(level) {
            // GLSL only support SInt on this field while WGSL support also UInt
            let cast_to_int = matches!(
                *ctx.resolve_type(sample_or_level, &self.module.types),
                TypeInner::Scalar(crate::Scalar {
                    kind: crate::ScalarKind::Uint,
                    ..
                })
            );

            // If no bounds checking is need just add the sample or level argument
            // after the coordinates
            write!(self.out, ", ")?;

            if cast_to_int {
                write!(self.out, "int(")?;
            }

            self.write_expr(sample_or_level, ctx)?;

            if cast_to_int {
                write!(self.out, ")")?;
            }
        }

        // Close the image load function.
        write!(self.out, ")")?;

        // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
        // (which is taken if the condition is `true`) with a colon (`:`) and write the
        // second branch which is just a 0 value.
        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // Get the kind of the output value.
            let kind = match class {
                // Only sampled images can reach here since storage images
                // don't need bounds checks and depth images aren't implemented
                crate::ImageClass::Sampled { kind, .. } => kind,
                _ => unreachable!(),
            };

            // End the first branch
            write!(self.out, " : ")?;
            // Write the 0 value
            write!(
                self.out,
                "{}vec4(",
                glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
            )?;
            self.write_zero_init_scalar(kind)?;
            // Close the zero value constructor
            write!(self.out, ")")?;
            // Close the parentheses surrounding our ternary
            write!(self.out, ")")?;
        }

        Ok(())
    }
4279
4280 fn write_named_expr(
4281 &mut self,
4282 handle: Handle<crate::Expression>,
4283 name: String,
4284 // The expression which is being named.
4285 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4286 named: Handle<crate::Expression>,
4287 ctx: &back::FunctionCtx,
4288 ) -> BackendResult {
4289 match ctx.info[named].ty {
4290 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4291 TypeInner::Struct { .. } => {
4292 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4293 write!(self.out, "{ty_name}")?;
4294 }
4295 _ => {
4296 self.write_type(ty_handle)?;
4297 }
4298 },
4299 proc::TypeResolution::Value(ref inner) => {
4300 self.write_value_type(inner)?;
4301 }
4302 }
4303
4304 let resolved = ctx.resolve_type(named, &self.module.types);
4305
4306 write!(self.out, " {name}")?;
4307 if let TypeInner::Array { base, size, .. } = *resolved {
4308 self.write_array_size(base, size)?;
4309 }
4310 write!(self.out, " = ")?;
4311 self.write_expr(handle, ctx)?;
4312 writeln!(self.out, ";")?;
4313 self.named_expressions.insert(named, name);
4314
4315 Ok(())
4316 }
4317
4318 /// Helper function that write string with default zero initialization for supported types
4319 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4320 let inner = &self.module.types[ty].inner;
4321 match *inner {
4322 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4323 self.write_zero_init_scalar(scalar.kind)?;
4324 }
4325 TypeInner::Vector { scalar, .. } => {
4326 self.write_value_type(inner)?;
4327 write!(self.out, "(")?;
4328 self.write_zero_init_scalar(scalar.kind)?;
4329 write!(self.out, ")")?;
4330 }
4331 TypeInner::Matrix { .. } => {
4332 self.write_value_type(inner)?;
4333 write!(self.out, "(")?;
4334 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4335 write!(self.out, ")")?;
4336 }
4337 TypeInner::Array { base, size, .. } => {
4338 let count = match size.resolve(self.module.to_ctx())? {
4339 proc::IndexableLength::Known(count) => count,
4340 proc::IndexableLength::Dynamic => return Ok(()),
4341 };
4342 self.write_type(base)?;
4343 self.write_array_size(base, size)?;
4344 write!(self.out, "(")?;
4345 for _ in 1..count {
4346 self.write_zero_init_value(base)?;
4347 write!(self.out, ", ")?;
4348 }
4349 // write last parameter without comma and space
4350 self.write_zero_init_value(base)?;
4351 write!(self.out, ")")?;
4352 }
4353 TypeInner::Struct { ref members, .. } => {
4354 let name = &self.names[&NameKey::Type(ty)];
4355 write!(self.out, "{name}(")?;
4356 for (index, member) in members.iter().enumerate() {
4357 if index != 0 {
4358 write!(self.out, ", ")?;
4359 }
4360 self.write_zero_init_value(member.ty)?;
4361 }
4362 write!(self.out, ")")?;
4363 }
4364 _ => unreachable!(),
4365 }
4366
4367 Ok(())
4368 }
4369
4370 /// Helper function that write string with zero initialization for scalar
4371 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4372 match kind {
4373 crate::ScalarKind::Bool => write!(self.out, "false")?,
4374 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4375 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4376 crate::ScalarKind::Sint => write!(self.out, "0")?,
4377 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4378 return Err(Error::Custom(
4379 "Abstract types should not appear in IR presented to backends".to_string(),
4380 ))
4381 }
4382 }
4383
4384 Ok(())
4385 }
4386
    /// Issue a control barrier.
    ///
    /// A control barrier also carries the memory barriers requested in
    /// `flags`, so those statements are emitted first, followed by the GLSL
    /// `barrier()` call itself.
    fn write_control_barrier(
        &mut self,
        flags: crate::Barrier,
        level: back::Level,
    ) -> BackendResult {
        // Emit the memory-scope barriers implied by `flags` before the
        // execution barrier.
        self.write_memory_barrier(flags, level)?;
        writeln!(self.out, "{level}barrier();")?;
        Ok(())
    }
4397
4398 /// Issue a memory barrier.
4399 fn write_memory_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4400 if flags.contains(crate::Barrier::STORAGE) {
4401 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4402 }
4403 if flags.contains(crate::Barrier::WORK_GROUP) {
4404 writeln!(self.out, "{level}memoryBarrierShared();")?;
4405 }
4406 if flags.contains(crate::Barrier::SUB_GROUP) {
4407 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4408 }
4409 if flags.contains(crate::Barrier::TEXTURE) {
4410 writeln!(self.out, "{level}memoryBarrierImage();")?;
4411 }
4412 Ok(())
4413 }
4414
4415 /// Helper function that return the glsl storage access string of [`StorageAccess`](crate::StorageAccess)
4416 ///
4417 /// glsl allows adding both `readonly` and `writeonly` but this means that
4418 /// they can only be used to query information about the resource which isn't what
4419 /// we want here so when storage access is both `LOAD` and `STORE` add no modifiers
4420 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4421 if storage_access.contains(crate::StorageAccess::ATOMIC) {
4422 return Ok(());
4423 }
4424 if !storage_access.contains(crate::StorageAccess::STORE) {
4425 write!(self.out, "readonly ")?;
4426 }
4427 if !storage_access.contains(crate::StorageAccess::LOAD) {
4428 write!(self.out, "writeonly ")?;
4429 }
4430 Ok(())
4431 }
4432
    /// Helper method used to produce the reflection info that's returned to the user
    ///
    /// For the selected entry point this gathers:
    /// * the texture/sampler pairs it uses (erroring if one texture is used
    ///   with two different samplers),
    /// * the names of its uniform/storage globals,
    /// * the flattened list of immediate-data items,
    /// * the contents of `self.varying` (taken, leaving it empty), and
    /// * the clip-distance count.
    fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
        let info = self.info.get_entry_point(self.entry_point_idx as usize);
        let mut texture_mapping = crate::FastHashMap::default();
        let mut uniforms = crate::FastHashMap::default();

        // Record every texture/sampler pair in the entry point's sampling
        // set; a texture paired with two distinct samplers is an error.
        for sampling in info.sampling_set.iter() {
            let tex_name = self.reflection_names_globals[&sampling.image].clone();

            match texture_mapping.entry(tex_name) {
                hash_map::Entry::Vacant(v) => {
                    v.insert(TextureMapping {
                        texture: sampling.image,
                        sampler: Some(sampling.sampler),
                    });
                }
                hash_map::Entry::Occupied(e) => {
                    if e.get().sampler != Some(sampling.sampler) {
                        log::error!("Conflicting samplers for {}", e.key());
                        return Err(Error::ImageMultipleSamplers);
                    }
                }
            }
        }

        // Walk all globals the entry point actually uses, classifying them
        // as textures (possibly sampler-less), uniforms/storage buffers, or
        // the immediate-data binding (at most one is recorded).
        let mut immediates_info = None;
        for (handle, var) in self.module.global_variables.iter() {
            if info[handle].is_empty() {
                continue;
            }
            match self.module.types[var.ty].inner {
                TypeInner::Image { .. } => {
                    let tex_name = self.reflection_names_globals[&handle].clone();
                    match texture_mapping.entry(tex_name) {
                        hash_map::Entry::Vacant(v) => {
                            v.insert(TextureMapping {
                                texture: handle,
                                sampler: None,
                            });
                        }
                        hash_map::Entry::Occupied(_) => {
                            // already used with a sampler, do nothing
                        }
                    }
                }
                _ => match var.space {
                    crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
                        let name = self.reflection_names_globals[&handle].clone();
                        uniforms.insert(handle, name);
                    }
                    crate::AddressSpace::Immediate => {
                        let name = self.reflection_names_globals[&handle].clone();
                        immediates_info = Some((name, var.ty));
                    }
                    _ => (),
                },
            }
        }

        let mut immediates_segments = Vec::new();
        let mut immediates_items = vec![];

        if let Some((name, ty)) = immediates_info {
            // We don't have a layouter available to us, so we need to create one.
            //
            // This is potentially a bit wasteful, but the set of types in the program
            // shouldn't be too large.
            //
            // NOTE(review): the `unwrap` below assumes layout computation
            // cannot fail here — presumably guaranteed by prior validation;
            // confirm.
            let mut layouter = proc::Layouter::default();
            layouter.update(self.module.to_ctx()).unwrap();

            // We start with the name of the binding itself.
            immediates_segments.push(name);

            // We then recursively collect all the uniform fields of the immediate data.
            self.collect_immediates_items(
                ty,
                &mut immediates_segments,
                &layouter,
                &mut 0,
                &mut immediates_items,
            );
        }

        Ok(ReflectionInfo {
            texture_mapping,
            uniforms,
            varying: mem::take(&mut self.varying),
            immediates_items,
            clip_distance_count: self.clip_distance_count,
        })
    }
4524
    /// Recursively flattens the immediate-data type `ty` into `items`, one
    /// entry per leaf uniform (scalar, vector or matrix).
    ///
    /// `segments` holds the access-path pieces (binding name, `[i]`,
    /// `.field`) leading from the root binding down to `ty`; at a leaf they
    /// are concatenated into the item's full access path. `offset` tracks
    /// the byte offset according to `layouter`: it is rounded up to `ty`'s
    /// alignment on entry, advanced past each leaf, and rounded up again
    /// after arrays and structs so stride/padding are preserved.
    fn collect_immediates_items(
        &mut self,
        ty: Handle<crate::Type>,
        segments: &mut Vec<String>,
        layouter: &proc::Layouter,
        offset: &mut u32,
        items: &mut Vec<ImmediateItem>,
    ) {
        // At this point in the recursion, `segments` contains the path
        // needed to access `ty` from the root.

        let layout = &layouter[ty];
        *offset = layout.alignment.round_up(*offset);
        match self.module.types[ty].inner {
            // All these types map directly to GL uniforms.
            TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
                // Build the full name, by combining all current segments.
                let name: String = segments.iter().map(String::as_str).collect();
                items.push(ImmediateItem {
                    access_path: name,
                    offset: *offset,
                    ty,
                });
                *offset += layout.size;
            }
            // Arrays are recursed into.
            TypeInner::Array { base, size, .. } => {
                let crate::ArraySize::Constant(count) = size else {
                    unreachable!("Cannot have dynamic arrays in immediates");
                };

                for i in 0..count.get() {
                    // Add the array accessor and recurse.
                    segments.push(format!("[{i}]"));
                    self.collect_immediates_items(base, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure the stride is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            TypeInner::Struct { ref members, .. } => {
                for (index, member) in members.iter().enumerate() {
                    // Add struct accessor and recurse.
                    segments.push(format!(
                        ".{}",
                        self.names[&NameKey::StructMember(ty, index as u32)]
                    ));
                    self.collect_immediates_items(member.ty, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure ending padding is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            // Other types (pointers, images, …) can't appear in immediate
            // data, so reaching here means the module is invalid.
            _ => unreachable!(),
        }
    }
4583}