naga/back/glsl/writer.rs
1use super::*;
2
/// Writer responsible for all code generation.
pub struct Writer<'a, W> {
    // Inputs
    /// The module being written.
    pub(in crate::back::glsl) module: &'a crate::Module,
    /// The module analysis.
    pub(in crate::back::glsl) info: &'a valid::ModuleInfo,
    /// The output writer.
    out: W,
    /// User defined configuration to be used.
    pub(in crate::back::glsl) options: &'a Options,
    /// The bound checking policies to be used
    pub(in crate::back::glsl) policies: proc::BoundsCheckPolicies,

    // Internal State
    /// Features manager used to store all the needed features and write them.
    pub(in crate::back::glsl) features: FeaturesManager,
    /// Generator of unique, reserved-word-free identifiers for module items.
    namer: proc::Namer,
    /// A map with all the names needed for writing the module
    /// (generated by a [`Namer`](crate::proc::Namer)).
    names: crate::FastHashMap<NameKey, String>,
    /// A map with the names of global variables needed for reflection.
    reflection_names_globals: crate::FastHashMap<Handle<crate::GlobalVariable>, String>,
    /// The selected entry point.
    pub(in crate::back::glsl) entry_point: &'a crate::EntryPoint,
    /// The index of the selected entry point.
    pub(in crate::back::glsl) entry_point_idx: proc::EntryPointIndex,
    /// A generator for unique block numbers (used to name interface blocks).
    block_id: IdGenerator,
    /// Set of expressions that have associated temporary variables.
    named_expressions: crate::NamedExpressions,
    /// Set of expressions that need to be baked to avoid unnecessary repetition in output
    need_bake_expressions: back::NeedBakeExpressions,
    /// Information about nesting of loops and switches.
    ///
    /// Used for forwarding continue statements in switches that have been
    /// transformed to `do {} while(false);` loops.
    continue_ctx: back::continue_forward::ContinueCtx,
    /// How many views to render to, if doing multiview rendering.
    pub(in crate::back::glsl) multiview: Option<core::num::NonZeroU32>,
    /// Mapping of varying variables to their location. Needed for reflection.
    varying: crate::FastHashMap<String, VaryingLocation>,
    /// Number of user-defined clip planes. Only non-zero for vertex shaders.
    clip_distance_count: u32,
}
48
49impl<'a, W: Write> Writer<'a, W> {
    /// Creates a new [`Writer`] instance.
    ///
    /// # Errors
    /// - If the version specified is invalid or not supported.
    /// - If the entry point couldn't be found in the module.
    /// - If the version specified doesn't support some used features.
    pub fn new(
        out: W,
        module: &'a crate::Module,
        info: &'a valid::ModuleInfo,
        options: &'a Options,
        pipeline_options: &'a PipelineOptions,
        policies: proc::BoundsCheckPolicies,
    ) -> Result<Self, Error> {
        // Check if the requested version is supported
        if !options.version.is_supported() {
            log::error!("Version {}", options.version);
            return Err(Error::VersionNotSupported);
        }

        // Try to find the entry point and corresponding index.
        // Both the stage and the name must match the pipeline options.
        let ep_idx = module
            .entry_points
            .iter()
            .position(|ep| {
                pipeline_options.shader_stage == ep.stage && pipeline_options.entry_point == ep.name
            })
            .ok_or(Error::EntryPointNotFound)?;

        // Generate a map with names required to write the module,
        // renaming anything that collides with a GLSL keyword or with
        // one of the reserved prefixes used for generated identifiers.
        let mut names = crate::FastHashMap::default();
        let mut namer = proc::Namer::default();
        namer.reset(
            module,
            &keywords::RESERVED_KEYWORD_SET,
            proc::CaseInsensitiveKeywordSet::empty(),
            &[
                "gl_",                  // all GL built-in variables
                "_group",               // all normal bindings
                "_immediates_binding_", // all immediate data bindings
            ],
            &mut names,
        );

        // Build the instance
        let mut this = Self {
            module,
            info,
            out,
            options,
            policies,

            namer,
            features: FeaturesManager::new(),
            names,
            reflection_names_globals: crate::FastHashMap::default(),
            entry_point: &module.entry_points[ep_idx],
            entry_point_idx: ep_idx as u16,
            multiview: pipeline_options.multiview,
            block_id: IdGenerator::default(),
            named_expressions: Default::default(),
            need_bake_expressions: Default::default(),
            continue_ctx: back::continue_forward::ContinueCtx::default(),
            varying: Default::default(),
            clip_distance_count: 0,
        };

        // Find all features required to print this module; fails if the
        // requested version can't express one of them.
        this.collect_required_features()?;

        Ok(this)
    }
122
    /// Writes the [`Module`](crate::Module) as glsl to the output
    ///
    /// # Notes
    /// If an error occurs while writing, the output might have been written partially
    ///
    /// # Panics
    /// Might panic if the module is invalid
    pub fn write(&mut self) -> Result<ReflectionInfo, Error> {
        // We use `writeln!(self.out)` throughout the write to add newlines
        // to make the output more readable

        let es = self.options.version.is_es();

        // Write the version (It must be the first thing or it isn't a valid glsl output)
        writeln!(self.out, "#version {}", self.options.version)?;
        // Write all the needed extensions
        //
        // This used to be the last thing being written as it allowed to search for features while
        // writing the module saving some loops but some older versions (420 or less) required the
        // extensions to appear before being used, even though extensions are part of the
        // preprocessor not the processor ¯\_(ツ)_/¯
        self.features.write(self.options, &mut self.out)?;

        // glsl es requires a precision to be specified for floats and ints
        // TODO: Should this be user configurable?
        if es {
            writeln!(self.out)?;
            writeln!(self.out, "precision highp float;")?;
            writeln!(self.out, "precision highp int;")?;
            writeln!(self.out)?;
        }

        // Compute shaders declare their workgroup size up front.
        if self.entry_point.stage == ShaderStage::Compute {
            let workgroup_size = self.entry_point.workgroup_size;
            writeln!(
                self.out,
                "layout(local_size_x = {}, local_size_y = {}, local_size_z = {}) in;",
                workgroup_size[0], workgroup_size[1], workgroup_size[2]
            )?;
            writeln!(self.out)?;
        }

        // Without draw-parameters support, the instance index is passed in
        // through a uniform that the caller is expected to set.
        if self.entry_point.stage == ShaderStage::Vertex
            && !self
                .options
                .writer_flags
                .contains(WriterFlags::DRAW_PARAMETERS)
            && self.features.contains(Features::INSTANCE_INDEX)
        {
            writeln!(self.out, "uniform uint {FIRST_INSTANCE_BINDING};")?;
            writeln!(self.out)?;
        }

        // Enable early depth tests if needed
        if let Some(early_depth_test) = self.entry_point.early_depth_test {
            // If early depth test is supported for this version of GLSL
            if self.options.version.supports_early_depth_test() {
                match early_depth_test {
                    crate::EarlyDepthTest::Force => {
                        writeln!(self.out, "layout(early_fragment_tests) in;")?;
                    }
                    crate::EarlyDepthTest::Allow { conservative, .. } => {
                        use crate::ConservativeDepth as Cd;
                        let depth = match conservative {
                            Cd::GreaterEqual => "greater",
                            Cd::LessEqual => "less",
                            Cd::Unchanged => "unchanged",
                        };
                        writeln!(self.out, "layout (depth_{depth}) out float gl_FragDepth;")?;
                    }
                }
            } else {
                log::warn!(
                    "Early depth testing is not supported for this version of GLSL: {}",
                    self.options.version
                );
            }
        }

        // WebGL multiview requires an explicit view-count declaration.
        if self.entry_point.stage == ShaderStage::Vertex && self.options.version.is_webgl() {
            if let Some(multiview) = self.multiview.as_ref() {
                writeln!(self.out, "layout(num_views = {multiview}) in;")?;
                writeln!(self.out)?;
            }
        }

        // Write struct types.
        //
        // These are always ordered because the IR is structured in a way that
        // you can't make a struct without adding all of its members first.
        for (handle, ty) in self.module.types.iter() {
            if let TypeInner::Struct { ref members, .. } = ty.inner {
                let struct_name = &self.names[&NameKey::Type(handle)];

                // Structures ending with runtime-sized arrays can only be
                // rendered as shader storage blocks in GLSL, not stand-alone
                // struct types.
                if !self.module.types[members.last().unwrap().ty]
                    .inner
                    .is_dynamically_sized(&self.module.types)
                {
                    write!(self.out, "struct {struct_name} ")?;
                    self.write_struct_body(handle, members)?;
                    writeln!(self.out, ";")?;
                }
            }
        }

        // Write functions for special types.
        for (type_key, struct_ty) in self.module.special_types.predeclared_types.iter() {
            match type_key {
                &crate::PredeclaredType::ModfResult { size, scalar }
                | &crate::PredeclaredType::FrexpResult { size, scalar } => {
                    let struct_name = &self.names[&NameKey::Type(*struct_ty)];
                    let arg_type_name_owner;
                    // GLSL spelling of the argument type: scalar or vector,
                    // `d`-prefixed for doubles (width 8).
                    let arg_type_name = if let Some(size) = size {
                        arg_type_name_owner = format!(
                            "{}vec{}",
                            if scalar.width == 8 { "d" } else { "" },
                            size as u8
                        );
                        &arg_type_name_owner
                    } else if scalar.width == 8 {
                        "double"
                    } else {
                        "float"
                    };

                    let other_type_name_owner;
                    let (defined_func_name, called_func_name, other_type_name) =
                        if matches!(type_key, &crate::PredeclaredType::ModfResult { .. }) {
                            (MODF_FUNCTION, "modf", arg_type_name)
                        } else {
                            // frexp's second output is an integer exponent.
                            let other_type_name = if let Some(size) = size {
                                other_type_name_owner = format!("ivec{}", size as u8);
                                &other_type_name_owner
                            } else {
                                "int"
                            };
                            (FREXP_FUNCTION, "frexp", other_type_name)
                        };

                    writeln!(self.out)?;
                    if !self.options.version.supports_frexp_function()
                        && matches!(type_key, &crate::PredeclaredType::FrexpResult { .. })
                    {
                        // Emulate frexp with log2/exp2 on versions lacking it.
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other = arg == {arg_type_name}(0) ? {other_type_name}(0) : {other_type_name}({arg_type_name}(1) + log2(arg));
    {arg_type_name} fract = arg * exp2({arg_type_name}(-other));
    return {struct_name}(fract, other);
}}",
                        )?;
                    } else {
                        writeln!(
                            self.out,
                            "{struct_name} {defined_func_name}({arg_type_name} arg) {{
    {other_type_name} other;
    {arg_type_name} fract = {called_func_name}(arg, other);
    return {struct_name}(fract, other);
}}",
                        )?;
                    }
                }
                &crate::PredeclaredType::AtomicCompareExchangeWeakResult(_) => {
                    // Handled by the general struct writing loop earlier.
                }
            }
        }

        // Write all named constants
        let mut constants = self
            .module
            .constants
            .iter()
            .filter(|&(_, c)| c.name.is_some())
            .peekable();
        while let Some((handle, _)) = constants.next() {
            self.write_global_constant(handle)?;
            // Add extra newline for readability on last iteration
            if constants.peek().is_none() {
                writeln!(self.out)?;
            }
        }

        let ep_info = self.info.get_entry_point(self.entry_point_idx as usize);

        // Write the globals
        //
        // Unless explicitly disabled with WriterFlags::INCLUDE_UNUSED_ITEMS,
        // we filter all globals that aren't used by the selected entry point as they might
        // interfere with each other (i.e. two globals with the same location but
        // different classes)
        let include_unused = self
            .options
            .writer_flags
            .contains(WriterFlags::INCLUDE_UNUSED_ITEMS);
        for (handle, global) in self.module.global_variables.iter() {
            let is_unused = ep_info[handle].is_empty();
            if !include_unused && is_unused {
                continue;
            }

            match self.module.types[global.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    mut dim,
                    arrayed,
                    class,
                } => {
                    // Gather the storage format if needed
                    let storage_format_access = match self.module.types[global.ty].inner {
                        TypeInner::Image {
                            class: crate::ImageClass::Storage { format, access },
                            ..
                        } => Some((format, access)),
                        _ => None,
                    };

                    // GLSL ES has no 1D textures; promote them to 2D.
                    if dim == crate::ImageDimension::D1 && es {
                        dim = crate::ImageDimension::D2
                    }

                    // Gather the location if needed
                    // NOTE(review): assumes image globals always carry a
                    // resource binding — confirm upstream validation.
                    let layout_binding = if self.options.version.supports_explicit_locations() {
                        let br = global.binding.as_ref().unwrap();
                        self.options.binding_map.get(br).cloned()
                    } else {
                        None
                    };

                    // Write all the layout qualifiers
                    if layout_binding.is_some() || storage_format_access.is_some() {
                        write!(self.out, "layout(")?;
                        if let Some(binding) = layout_binding {
                            write!(self.out, "binding = {binding}")?;
                        }
                        if let Some((format, _)) = storage_format_access {
                            let format_str = glsl_storage_format(format)?;
                            let separator = match layout_binding {
                                Some(_) => ",",
                                None => "",
                            };
                            write!(self.out, "{separator}{format_str}")?;
                        }
                        write!(self.out, ") ")?;
                    }

                    if let Some((_, access)) = storage_format_access {
                        self.write_storage_access(access)?;
                    }

                    // All images in glsl are `uniform`
                    // The trailing space is important
                    write!(self.out, "uniform ")?;

                    // write the type
                    //
                    // This is why we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    self.write_image_type(dim, arrayed, class)?;

                    // Finally write the name and end the global with a `;`
                    // The leading space is important
                    let global_name = self.get_global_name(handle, global);
                    writeln!(self.out, " {global_name};")?;
                    writeln!(self.out)?;

                    self.reflection_names_globals.insert(handle, global_name);
                }
                // glsl has no concept of samplers so we just ignore it
                TypeInner::Sampler { .. } => continue,
                // All other globals are written by `write_global`
                _ => {
                    self.write_global(handle, global)?;
                    // Add a newline (only for readability)
                    writeln!(self.out)?;
                }
            }
        }

        // Write the entry point's inputs and outputs as `in`/`out` globals.
        for arg in self.entry_point.function.arguments.iter() {
            self.write_varying(arg.binding.as_ref(), arg.ty, false)?;
        }
        if let Some(ref result) = self.entry_point.function.result {
            self.write_varying(result.binding.as_ref(), result.ty, true)?;
        }
        writeln!(self.out)?;

        // Write all regular functions
        for (handle, function) in self.module.functions.iter() {
            // Check that the function doesn't use globals that aren't supported
            // by the current entry point
            if !include_unused && !ep_info.dominates_global_use(&self.info[handle]) {
                continue;
            }

            let fun_info = &self.info[handle];

            // Skip functions that are not compatible with this entry point's stage.
            //
            // When validation is enabled, it rejects modules whose entry points try to call
            // incompatible functions, so if we got this far, then any functions incompatible
            // with our selected entry point must not be used.
            //
            // When validation is disabled, `fun_info.available_stages` is always just
            // `ShaderStages::all()`, so this will write all functions in the module, and
            // the downstream GLSL compiler will catch any problems.
            if !fun_info.available_stages.contains(ep_info.available_stages) {
                continue;
            }

            // Write the function
            self.write_function(back::FunctionType::Function(handle), function, fun_info)?;

            writeln!(self.out)?;
        }

        // Write the entry point itself (becomes `main`).
        self.write_function(
            back::FunctionType::EntryPoint(self.entry_point_idx),
            &self.entry_point.function,
            ep_info,
        )?;

        // Add newline at the end of file
        writeln!(self.out)?;

        // Collect all reflection info and return it to the user
        self.collect_reflection_info()
    }
455
456 fn write_array_size(
457 &mut self,
458 base: Handle<crate::Type>,
459 size: crate::ArraySize,
460 ) -> BackendResult {
461 write!(self.out, "[")?;
462
463 // Write the array size
464 // Writes nothing if `IndexableLength::Dynamic`
465 match size.resolve(self.module.to_ctx())? {
466 proc::IndexableLength::Known(size) => {
467 write!(self.out, "{size}")?;
468 }
469 proc::IndexableLength::Dynamic => (),
470 }
471
472 write!(self.out, "]")?;
473
474 if let TypeInner::Array {
475 base: next_base,
476 size: next_size,
477 ..
478 } = self.module.types[base].inner
479 {
480 self.write_array_size(next_base, next_size)?;
481 }
482
483 Ok(())
484 }
485
486 /// Helper method used to write value types
487 ///
488 /// # Notes
489 /// Adds no trailing or leading whitespace
490 fn write_value_type(&mut self, inner: &TypeInner) -> BackendResult {
491 match *inner {
492 // Scalars are simple we just get the full name from `glsl_scalar`
493 TypeInner::Scalar(scalar)
494 | TypeInner::Atomic(scalar)
495 | TypeInner::ValuePointer {
496 size: None,
497 scalar,
498 space: _,
499 } => write!(self.out, "{}", glsl_scalar(scalar)?.full)?,
500 // Vectors are just `gvecN` where `g` is the scalar prefix and `N` is the vector size
501 TypeInner::Vector { size, scalar }
502 | TypeInner::ValuePointer {
503 size: Some(size),
504 scalar,
505 space: _,
506 } => write!(self.out, "{}vec{}", glsl_scalar(scalar)?.prefix, size as u8)?,
507 // Matrices are written with `gmatMxN` where `g` is the scalar prefix (only floats and
508 // doubles are allowed), `M` is the columns count and `N` is the rows count
509 //
510 // glsl supports a matrix shorthand `gmatN` where `N` = `M` but it doesn't justify the
511 // extra branch to write matrices this way
512 TypeInner::Matrix {
513 columns,
514 rows,
515 scalar,
516 } => write!(
517 self.out,
518 "{}mat{}x{}",
519 glsl_scalar(scalar)?.prefix,
520 columns as u8,
521 rows as u8
522 )?,
523 // GLSL arrays are written as `type name[size]`
524 // Here we only write the size of the array i.e. `[size]`
525 // Base `type` and `name` should be written outside
526 TypeInner::Array { base, size, .. } => self.write_array_size(base, size)?,
527 // Write all variants instead of `_` so that if new variants are added a
528 // no exhaustiveness error is thrown
529 TypeInner::Pointer { .. }
530 | TypeInner::Struct { .. }
531 | TypeInner::Image { .. }
532 | TypeInner::Sampler { .. }
533 | TypeInner::AccelerationStructure { .. }
534 | TypeInner::RayQuery { .. }
535 | TypeInner::BindingArray { .. }
536 | TypeInner::CooperativeMatrix { .. } => {
537 return Err(Error::Custom(format!("Unable to write type {inner:?}")))
538 }
539 }
540
541 Ok(())
542 }
543
544 /// Helper method used to write non image/sampler types
545 ///
546 /// # Notes
547 /// Adds no trailing or leading whitespace
548 fn write_type(&mut self, ty: Handle<crate::Type>) -> BackendResult {
549 match self.module.types[ty].inner {
550 // glsl has no pointer types so just write types as normal and loads are skipped
551 TypeInner::Pointer { base, .. } => self.write_type(base),
552 // glsl structs are written as just the struct name
553 TypeInner::Struct { .. } => {
554 // Get the struct name
555 let name = &self.names[&NameKey::Type(ty)];
556 write!(self.out, "{name}")?;
557 Ok(())
558 }
559 // glsl array has the size separated from the base type
560 TypeInner::Array { base, .. } => self.write_type(base),
561 ref other => self.write_value_type(other),
562 }
563 }
564
565 /// Helper method to write a image type
566 ///
567 /// # Notes
568 /// Adds no leading or trailing whitespace
569 fn write_image_type(
570 &mut self,
571 dim: crate::ImageDimension,
572 arrayed: bool,
573 class: crate::ImageClass,
574 ) -> BackendResult {
575 // glsl images consist of four parts the scalar prefix, the image "type", the dimensions
576 // and modifiers
577 //
578 // There exists two image types
579 // - sampler - for sampled images
580 // - image - for storage images
581 //
582 // There are three possible modifiers that can be used together and must be written in
583 // this order to be valid
584 // - MS - used if it's a multisampled image
585 // - Array - used if it's an image array
586 // - Shadow - used if it's a depth image
587 use crate::ImageClass as Ic;
588 use crate::Scalar as S;
589 let float = S {
590 kind: crate::ScalarKind::Float,
591 width: 4,
592 };
593 let (base, scalar, ms, comparison) = match class {
594 Ic::Sampled { kind, multi: true } => ("sampler", S { kind, width: 4 }, "MS", ""),
595 Ic::Sampled { kind, multi: false } => ("sampler", S { kind, width: 4 }, "", ""),
596 Ic::Depth { multi: true } => ("sampler", float, "MS", ""),
597 Ic::Depth { multi: false } => ("sampler", float, "", "Shadow"),
598 Ic::Storage { format, .. } => ("image", format.into(), "", ""),
599 Ic::External => unimplemented!(),
600 };
601
602 let precision = if self.options.version.is_es() {
603 "highp "
604 } else {
605 ""
606 };
607
608 write!(
609 self.out,
610 "{}{}{}{}{}{}{}",
611 precision,
612 glsl_scalar(scalar)?.prefix,
613 base,
614 glsl_dimension(dim),
615 ms,
616 if arrayed { "Array" } else { "" },
617 comparison
618 )?;
619
620 Ok(())
621 }
622
623 /// Helper method used by [Self::write_global] to write just the layout part of
624 /// a non image/sampler global variable, if applicable.
625 ///
626 /// # Notes
627 ///
628 /// Adds trailing whitespace if any layout qualifier is written
629 fn write_global_layout(&mut self, global: &crate::GlobalVariable) -> BackendResult {
630 // Determine which (if any) explicit memory layout to use, and whether we support it
631 let layout = match global.space {
632 crate::AddressSpace::Uniform => {
633 if !self.options.version.supports_std140_layout() {
634 return Err(Error::Custom(
635 "Uniform address space requires std140 layout support".to_string(),
636 ));
637 }
638
639 Some("std140")
640 }
641 crate::AddressSpace::Storage { .. } => {
642 if !self.options.version.supports_std430_layout() {
643 return Err(Error::Custom(
644 "Storage address space requires std430 layout support".to_string(),
645 ));
646 }
647
648 Some("std430")
649 }
650 _ => None,
651 };
652
653 // If our version supports explicit layouts, we can also output the explicit binding
654 // if we have it
655 if self.options.version.supports_explicit_locations() {
656 if let Some(ref br) = global.binding {
657 match self.options.binding_map.get(br) {
658 Some(binding) => {
659 write!(self.out, "layout(")?;
660
661 if let Some(layout) = layout {
662 write!(self.out, "{layout}, ")?;
663 }
664
665 write!(self.out, "binding = {binding}) ")?;
666
667 return Ok(());
668 }
669 None => {
670 log::debug!("unassigned binding for {:?}", global.name);
671 }
672 }
673 }
674 }
675
676 // Either no explicit bindings are supported or we didn't have any.
677 // Write just the memory layout.
678 if let Some(layout) = layout {
679 write!(self.out, "layout({layout}) ")?;
680 }
681
682 Ok(())
683 }
684
685 /// Helper method used to write non images/sampler globals
686 ///
687 /// # Notes
688 /// Adds a newline
689 ///
690 /// # Panics
691 /// If the global has type sampler
692 fn write_global(
693 &mut self,
694 handle: Handle<crate::GlobalVariable>,
695 global: &crate::GlobalVariable,
696 ) -> BackendResult {
697 self.write_global_layout(global)?;
698
699 if let crate::AddressSpace::Storage { access } = global.space {
700 self.write_storage_access(access)?;
701 }
702
703 if let Some(storage_qualifier) = glsl_storage_qualifier(global.space) {
704 write!(self.out, "{storage_qualifier} ")?;
705 }
706
707 match global.space {
708 crate::AddressSpace::Private => {
709 self.write_simple_global(handle, global)?;
710 }
711 crate::AddressSpace::WorkGroup => {
712 self.write_simple_global(handle, global)?;
713 }
714 crate::AddressSpace::Immediate => {
715 self.write_simple_global(handle, global)?;
716 }
717 crate::AddressSpace::Uniform => {
718 self.write_interface_block(handle, global)?;
719 }
720 crate::AddressSpace::Storage { .. } => {
721 self.write_interface_block(handle, global)?;
722 }
723 crate::AddressSpace::TaskPayload => {
724 self.write_interface_block(handle, global)?;
725 }
726 // A global variable in the `Function` address space is a
727 // contradiction in terms.
728 crate::AddressSpace::Function => unreachable!(),
729 // Textures and samplers are handled directly in `Writer::write`.
730 crate::AddressSpace::Handle => unreachable!(),
731 }
732
733 Ok(())
734 }
735
736 fn write_simple_global(
737 &mut self,
738 handle: Handle<crate::GlobalVariable>,
739 global: &crate::GlobalVariable,
740 ) -> BackendResult {
741 self.write_type(global.ty)?;
742 write!(self.out, " ")?;
743 self.write_global_name(handle, global)?;
744
745 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
746 self.write_array_size(base, size)?;
747 }
748
749 if global.space.initializable() && is_value_init_supported(self.module, global.ty) {
750 write!(self.out, " = ")?;
751 if let Some(init) = global.init {
752 self.write_const_expr(init, &self.module.global_expressions)?;
753 } else {
754 self.write_zero_init_value(global.ty)?;
755 }
756 }
757
758 writeln!(self.out, ";")?;
759
760 if let crate::AddressSpace::Immediate = global.space {
761 let global_name = self.get_global_name(handle, global);
762 self.reflection_names_globals.insert(handle, global_name);
763 }
764
765 Ok(())
766 }
767
768 /// Write an interface block for a single Naga global.
769 ///
770 /// Write `block_name { members }`. Since `block_name` must be unique
771 /// between blocks and structs, we add `_block_ID` where `ID` is a
772 /// `IdGenerator` generated number. Write `members` in the same way we write
773 /// a struct's members.
774 fn write_interface_block(
775 &mut self,
776 handle: Handle<crate::GlobalVariable>,
777 global: &crate::GlobalVariable,
778 ) -> BackendResult {
779 // Write the block name, it's just the struct name appended with `_block_ID`
780 let ty_name = &self.names[&NameKey::Type(global.ty)];
781 let block_name = format!(
782 "{}_block_{}{:?}",
783 // avoid double underscores as they are reserved in GLSL
784 ty_name.trim_end_matches('_'),
785 self.block_id.generate(),
786 self.entry_point.stage,
787 );
788 write!(self.out, "{block_name} ")?;
789 self.reflection_names_globals.insert(handle, block_name);
790
791 match self.module.types[global.ty].inner {
792 TypeInner::Struct { ref members, .. }
793 if self.module.types[members.last().unwrap().ty]
794 .inner
795 .is_dynamically_sized(&self.module.types) =>
796 {
797 // Structs with dynamically sized arrays must have their
798 // members lifted up as members of the interface block. GLSL
799 // can't write such struct types anyway.
800 self.write_struct_body(global.ty, members)?;
801 write!(self.out, " ")?;
802 self.write_global_name(handle, global)?;
803 }
804 _ => {
805 // A global of any other type is written as the sole member
806 // of the interface block. Since the interface block is
807 // anonymous, this becomes visible in the global scope.
808 write!(self.out, "{{ ")?;
809 self.write_type(global.ty)?;
810 write!(self.out, " ")?;
811 self.write_global_name(handle, global)?;
812 if let TypeInner::Array { base, size, .. } = self.module.types[global.ty].inner {
813 self.write_array_size(base, size)?;
814 }
815 write!(self.out, "; }}")?;
816 }
817 }
818
819 writeln!(self.out, ";")?;
820
821 Ok(())
822 }
823
824 /// Helper method used to find which expressions of a given function require baking
825 ///
826 /// # Notes
827 /// Clears `need_bake_expressions` set before adding to it
828 fn update_expressions_to_bake(&mut self, func: &crate::Function, info: &valid::FunctionInfo) {
829 use crate::Expression;
830 self.need_bake_expressions.clear();
831 for (fun_handle, expr) in func.expressions.iter() {
832 let expr_info = &info[fun_handle];
833 let min_ref_count = func.expressions[fun_handle].bake_ref_count();
834 if min_ref_count <= expr_info.ref_count {
835 self.need_bake_expressions.insert(fun_handle);
836 }
837
838 let inner = expr_info.ty.inner_with(&self.module.types);
839
840 if let Expression::Math {
841 fun,
842 arg,
843 arg1,
844 arg2,
845 ..
846 } = *expr
847 {
848 match fun {
849 crate::MathFunction::Dot => {
850 // if the expression is a Dot product with integer arguments,
851 // then the args needs baking as well
852 if let TypeInner::Scalar(crate::Scalar {
853 kind: crate::ScalarKind::Sint | crate::ScalarKind::Uint,
854 ..
855 }) = *inner
856 {
857 self.need_bake_expressions.insert(arg);
858 self.need_bake_expressions.insert(arg1.unwrap());
859 }
860 }
861 crate::MathFunction::Dot4U8Packed | crate::MathFunction::Dot4I8Packed => {
862 self.need_bake_expressions.insert(arg);
863 self.need_bake_expressions.insert(arg1.unwrap());
864 }
865 crate::MathFunction::Pack4xI8
866 | crate::MathFunction::Pack4xU8
867 | crate::MathFunction::Pack4xI8Clamp
868 | crate::MathFunction::Pack4xU8Clamp
869 | crate::MathFunction::Unpack4xI8
870 | crate::MathFunction::Unpack4xU8
871 | crate::MathFunction::QuantizeToF16 => {
872 self.need_bake_expressions.insert(arg);
873 }
874 /* crate::MathFunction::Pack4x8unorm | */
875 crate::MathFunction::Unpack4x8snorm
876 if !self.options.version.supports_pack_unpack_4x8() =>
877 {
878 // We have a fallback if the platform doesn't natively support these
879 self.need_bake_expressions.insert(arg);
880 }
881 /* crate::MathFunction::Pack4x8unorm | */
882 crate::MathFunction::Unpack4x8unorm
883 if !self.options.version.supports_pack_unpack_4x8() =>
884 {
885 self.need_bake_expressions.insert(arg);
886 }
887 /* crate::MathFunction::Pack2x16snorm | */
888 crate::MathFunction::Unpack2x16snorm
889 if !self.options.version.supports_pack_unpack_snorm_2x16() =>
890 {
891 self.need_bake_expressions.insert(arg);
892 }
893 /* crate::MathFunction::Pack2x16unorm | */
894 crate::MathFunction::Unpack2x16unorm
895 if !self.options.version.supports_pack_unpack_unorm_2x16() =>
896 {
897 self.need_bake_expressions.insert(arg);
898 }
899 crate::MathFunction::ExtractBits => {
900 // Only argument 1 is re-used.
901 self.need_bake_expressions.insert(arg1.unwrap());
902 }
903 crate::MathFunction::InsertBits => {
904 // Only argument 2 is re-used.
905 self.need_bake_expressions.insert(arg2.unwrap());
906 }
907 crate::MathFunction::CountLeadingZeros => {
908 if let Some(crate::ScalarKind::Sint) = inner.scalar_kind() {
909 self.need_bake_expressions.insert(arg);
910 }
911 }
912 _ => {}
913 }
914 }
915 }
916
917 for statement in func.body.iter() {
918 match *statement {
919 crate::Statement::Atomic {
920 fun: crate::AtomicFunction::Exchange { compare: Some(cmp) },
921 ..
922 } => {
923 self.need_bake_expressions.insert(cmp);
924 }
925 _ => {}
926 }
927 }
928 }
929
930 /// Helper method used to get a name for a global
931 ///
932 /// Globals have different naming schemes depending on their binding:
933 /// - Globals without bindings use the name from the [`Namer`](crate::proc::Namer)
934 /// - Globals with resource binding are named `_group_X_binding_Y` where `X`
935 /// is the group and `Y` is the binding
936 fn get_global_name(
937 &self,
938 handle: Handle<crate::GlobalVariable>,
939 global: &crate::GlobalVariable,
940 ) -> String {
941 match (&global.binding, global.space) {
942 (&Some(ref br), _) => {
943 format!(
944 "_group_{}_binding_{}_{}",
945 br.group,
946 br.binding,
947 self.entry_point.stage.to_str()
948 )
949 }
950 (&None, crate::AddressSpace::Immediate) => {
951 format!("_immediates_binding_{}", self.entry_point.stage.to_str())
952 }
953 (&None, _) => self.names[&NameKey::GlobalVariable(handle)].clone(),
954 }
955 }
956
957 /// Helper method used to write a name for a global without additional heap allocation
958 fn write_global_name(
959 &mut self,
960 handle: Handle<crate::GlobalVariable>,
961 global: &crate::GlobalVariable,
962 ) -> BackendResult {
963 match (&global.binding, global.space) {
964 (&Some(ref br), _) => write!(
965 self.out,
966 "_group_{}_binding_{}_{}",
967 br.group,
968 br.binding,
969 self.entry_point.stage.to_str()
970 )?,
971 (&None, crate::AddressSpace::Immediate) => write!(
972 self.out,
973 "_immediates_binding_{}",
974 self.entry_point.stage.to_str()
975 )?,
976 (&None, _) => write!(
977 self.out,
978 "{}",
979 &self.names[&NameKey::GlobalVariable(handle)]
980 )?,
981 }
982
983 Ok(())
984 }
985
    /// Write a GLSL global that will carry a Naga entry point's argument or return value.
    ///
    /// A Naga entry point's arguments and return value are rendered in GLSL as
    /// variables at global scope with the `in` and `out` storage qualifiers.
    /// The code we generate for `main` loads from all the `in` globals into
    /// appropriately named locals. Before it returns, `main` assigns the
    /// components of its return value into all the `out` globals.
    ///
    /// This function writes a declaration for one such GLSL global,
    /// representing a value passed into or returned from [`self.entry_point`]
    /// that has a [`Location`] binding. The global's name is generated based on
    /// the location index and the shader stages being connected; see
    /// [`VaryingName`]. This means we don't need to know the names of
    /// arguments, just their types and bindings.
    ///
    /// Emit nothing for entry point arguments or return values with [`BuiltIn`]
    /// bindings; `main` will read from or assign to the appropriate GLSL
    /// special variable; these are pre-declared. As an exception, we do declare
    /// `gl_Position` or `gl_FragCoord` with the `invariant` qualifier if
    /// needed.
    ///
    /// Use `output` together with [`self.entry_point.stage`] to determine which
    /// shader stages are being connected, and choose the `in` or `out` storage
    /// qualifier.
    ///
    /// [`self.entry_point`]: Writer::entry_point
    /// [`self.entry_point.stage`]: crate::EntryPoint::stage
    /// [`Location`]: crate::Binding::Location
    /// [`BuiltIn`]: crate::Binding::BuiltIn
    fn write_varying(
        &mut self,
        binding: Option<&crate::Binding>,
        ty: Handle<crate::Type>,
        output: bool,
    ) -> Result<(), Error> {
        // For a struct, emit a separate global for each member with a binding.
        if let TypeInner::Struct { ref members, .. } = self.module.types[ty].inner {
            for member in members {
                self.write_varying(member.binding.as_ref(), member.ty, output)?;
            }
            return Ok(());
        }

        // A non-struct value without a binding produces no global at all.
        let binding = match binding {
            None => return Ok(()),
            Some(binding) => binding,
        };

        let (location, interpolation, sampling, blend_src) = match *binding {
            crate::Binding::Location {
                location,
                interpolation,
                sampling,
                blend_src,
                per_primitive: _,
            } => (location, interpolation, sampling, blend_src),
            crate::Binding::BuiltIn(built_in) => {
                match built_in {
                    crate::BuiltIn::Position { invariant: true } => {
                        match (self.options.version, self.entry_point.stage) {
                            (
                                Version::Embedded {
                                    version: 300,
                                    is_webgl: true,
                                },
                                ShaderStage::Fragment,
                            ) => {
                                // `invariant gl_FragCoord` is not allowed in WebGL2 and possibly
                                // OpenGL ES in general (waiting on confirmation).
                                //
                                // See https://github.com/KhronosGroup/WebGL/issues/3518
                            }
                            _ => {
                                writeln!(
                                    self.out,
                                    "invariant {};",
                                    glsl_built_in(
                                        built_in,
                                        VaryingOptions::from_writer_options(self.options, output)
                                    )
                                )?;
                            }
                        }
                    }
                    crate::BuiltIn::ClipDistance => {
                        // Re-declare `gl_ClipDistance` with number of clip planes.
                        // The validator guarantees this binding is a fixed-size
                        // array, hence the `unreachable!`s below.
                        let TypeInner::Array { size, .. } = self.module.types[ty].inner else {
                            unreachable!();
                        };
                        let proc::IndexableLength::Known(size) =
                            size.resolve(self.module.to_ctx())?
                        else {
                            unreachable!();
                        };
                        self.clip_distance_count = size;
                        writeln!(self.out, "out float gl_ClipDistance[{size}];")?;
                    }
                    _ => {}
                }
                // Other built-ins are pre-declared by GLSL; nothing to emit.
                return Ok(());
            }
        };

        // Write the interpolation modifier if needed
        //
        // We ignore all interpolation and auxiliary modifiers that aren't used in fragment
        // shaders' input globals or vertex shaders' output globals.
        let emit_interpolation_and_auxiliary = match self.entry_point.stage {
            ShaderStage::Vertex => output,
            ShaderStage::Fragment => !output,
            ShaderStage::Compute => false,
            ShaderStage::Task | ShaderStage::Mesh => unreachable!(),
        };

        // Write the I/O locations, if allowed.
        //
        // `io_location` is `Some` only when the GLSL version cannot express
        // `layout(location = ...)`; in that case the location is recorded in
        // `self.varying` below so reflection can still report it.
        let io_location = if self.options.version.supports_explicit_locations()
            || !emit_interpolation_and_auxiliary
        {
            if self.options.version.supports_io_locations() {
                if let Some(blend_src) = blend_src {
                    write!(
                        self.out,
                        "layout(location = {location}, index = {blend_src}) "
                    )?;
                } else {
                    write!(self.out, "layout(location = {location}) ")?;
                }
                None
            } else {
                Some(VaryingLocation {
                    location,
                    index: blend_src.unwrap_or(0),
                })
            }
        } else {
            None
        };

        // Write the interpolation qualifier.
        if let Some(interp) = interpolation {
            if emit_interpolation_and_auxiliary {
                write!(self.out, "{} ", glsl_interpolation(interp))?;
            }
        }

        // Write the sampling auxiliary qualifier.
        //
        // Before GLSL 4.2, the `centroid` and `sample` qualifiers were required to appear
        // immediately before the `in` / `out` qualifier, so we'll just follow that rule
        // here, regardless of the version.
        if let Some(sampling) = sampling {
            if emit_interpolation_and_auxiliary {
                if let Some(qualifier) = glsl_sampling(sampling)? {
                    write!(self.out, "{qualifier} ")?;
                }
            }
        }

        // Write the input/output qualifier.
        write!(self.out, "{} ", if output { "out" } else { "in" })?;

        // Write the type
        // `write_type` adds no leading or trailing spaces
        self.write_type(ty)?;

        // Finally write the global name and end the global with a `;` and a newline
        // Leading space is important
        //
        // Interpolation and sampling are deliberately dropped from the name:
        // `VaryingName` only depends on the location, blend index, stage and
        // writer options, so both sides of a stage interface agree on it.
        let vname = VaryingName {
            binding: &crate::Binding::Location {
                location,
                interpolation: None,
                sampling: None,
                blend_src,
                per_primitive: false,
            },
            stage: self.entry_point.stage,
            options: VaryingOptions::from_writer_options(self.options, output),
        };
        writeln!(self.out, " {vname};")?;

        // Record the implicit location for reflection (see `io_location` above).
        if let Some(location) = io_location {
            self.varying.insert(vname.to_string(), location);
        }

        Ok(())
    }
1172
    /// Helper method used to write functions (both entry points and regular functions)
    ///
    /// Writes the full definition: header, argument list, entry-point
    /// prologue (workgroup zero-init and varying loads), local variable
    /// declarations, and the statement list.
    ///
    /// # Notes
    /// Adds a newline
    fn write_function(
        &mut self,
        ty: back::FunctionType,
        func: &crate::Function,
        info: &valid::FunctionInfo,
    ) -> BackendResult {
        // Create a function context for the function being written
        let ctx = back::FunctionCtx {
            ty,
            info,
            expressions: &func.expressions,
            named_expressions: &func.named_expressions,
        };

        // Per-function state: drop names baked for the previous function and
        // recompute which expressions of this one need temporaries.
        self.named_expressions.clear();
        self.update_expressions_to_bake(func, info);

        // Write the function header
        //
        // glsl headers are the same as in c:
        // `ret_type name(args)`
        // `ret_type` is the return type
        // `name` is the function name
        // `args` is a comma separated list of `type name`
        // | - `type` is the argument type
        // | - `name` is the argument name

        // Start by writing the return type if any otherwise write void
        // This is the only place where `void` is a valid type
        // (though it's more a keyword than a type)
        if let back::FunctionType::EntryPoint(_) = ctx.ty {
            // Entry points always become `void main()`; their result is
            // written to `out` globals instead of being returned.
            write!(self.out, "void")?;
        } else if let Some(ref result) = func.result {
            self.write_type(result.ty)?;
            if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner {
                self.write_array_size(base, size)?
            }
        } else {
            write!(self.out, "void")?;
        }

        // Write the function name and open parentheses for the argument list
        let function_name = match ctx.ty {
            back::FunctionType::Function(handle) => &self.names[&NameKey::Function(handle)],
            back::FunctionType::EntryPoint(_) => "main",
        };
        write!(self.out, " {function_name}(")?;

        // Write the comma separated argument list
        //
        // We need access to `Self` here so we use the reference passed to the closure as an
        // argument instead of capturing as that would cause a borrow checker error
        let arguments = match ctx.ty {
            // Entry points take no GLSL parameters; their inputs arrive via globals.
            back::FunctionType::EntryPoint(_) => &[][..],
            back::FunctionType::Function(_) => &func.arguments,
        };
        // Sampler arguments are filtered out here; the `Statement::Call`
        // handling drops the matching call-site arguments the same way.
        let arguments: Vec<_> = arguments
            .iter()
            .enumerate()
            .filter(|&(_, arg)| match self.module.types[arg.ty].inner {
                TypeInner::Sampler { .. } => false,
                _ => true,
            })
            .collect();
        self.write_slice(&arguments, |this, _, &(i, arg)| {
            // Write the argument type
            match this.module.types[arg.ty].inner {
                // We treat images separately because they might require
                // writing the storage format
                TypeInner::Image {
                    dim,
                    arrayed,
                    class,
                } => {
                    // Write the storage format if needed
                    if let TypeInner::Image {
                        class: crate::ImageClass::Storage { format, .. },
                        ..
                    } = this.module.types[arg.ty].inner
                    {
                        write!(this.out, "layout({}) ", glsl_storage_format(format)?)?;
                    }

                    // write the type
                    //
                    // This is way we need the leading space because `write_image_type` doesn't add
                    // any spaces at the beginning or end
                    this.write_image_type(dim, arrayed, class)?;
                }
                TypeInner::Pointer { base, .. } => {
                    // write parameter qualifiers
                    // Pointers become `inout` parameters of the pointee type.
                    write!(this.out, "inout ")?;
                    this.write_type(base)?;
                }
                // All other types are written by `write_type`
                _ => {
                    this.write_type(arg.ty)?;
                }
            }

            // Write the argument name
            // The leading space is important
            write!(this.out, " {}", &this.names[&ctx.argument_key(i as u32)])?;

            // Write array size
            match this.module.types[arg.ty].inner {
                TypeInner::Array { base, size, .. } => {
                    this.write_array_size(base, size)?;
                }
                TypeInner::Pointer { base, .. } => {
                    // An `inout` array parameter still needs its size suffix.
                    if let TypeInner::Array { base, size, .. } = this.module.types[base].inner {
                        this.write_array_size(base, size)?;
                    }
                }
                _ => {}
            }

            Ok(())
        })?;

        // Close the parentheses and open braces to start the function body
        writeln!(self.out, ") {{")?;

        if self.options.zero_initialize_workgroup_memory
            && ctx.ty.is_compute_like_entry_point(self.module)
        {
            self.write_workgroup_variables_initialization(&ctx)?;
        }

        // Compose the function arguments from globals, in case of an entry point.
        //
        // Each Naga entry-point argument becomes a local that is initialized
        // from the `in` varying globals declared by `write_varying`; struct
        // arguments are rebuilt with a constructor call, one varying per member.
        if let back::FunctionType::EntryPoint(ep_index) = ctx.ty {
            let stage = self.module.entry_points[ep_index as usize].stage;
            for (index, arg) in func.arguments.iter().enumerate() {
                write!(self.out, "{}", back::INDENT)?;
                self.write_type(arg.ty)?;
                let name = &self.names[&NameKey::EntryPointArgument(ep_index, index as u32)];
                write!(self.out, " {name}")?;
                write!(self.out, " = ")?;
                match self.module.types[arg.ty].inner {
                    TypeInner::Struct { ref members, .. } => {
                        self.write_type(arg.ty)?;
                        write!(self.out, "(")?;
                        for (index, member) in members.iter().enumerate() {
                            let varying_name = VaryingName {
                                binding: member.binding.as_ref().unwrap(),
                                stage,
                                options: VaryingOptions::from_writer_options(self.options, false),
                            };
                            if index != 0 {
                                write!(self.out, ", ")?;
                            }
                            write!(self.out, "{varying_name}")?;
                        }
                        writeln!(self.out, ");")?;
                    }
                    _ => {
                        let varying_name = VaryingName {
                            binding: arg.binding.as_ref().unwrap(),
                            stage,
                            options: VaryingOptions::from_writer_options(self.options, false),
                        };
                        writeln!(self.out, "{varying_name};")?;
                    }
                }
            }
        }

        // Write all function locals
        // Locals are `type name (= init)?;` where the init part (including the =) are optional
        //
        // Always adds a newline
        for (handle, local) in func.local_variables.iter() {
            // Write indentation (only for readability) and the type
            // `write_type` adds no trailing space
            write!(self.out, "{}", back::INDENT)?;
            self.write_type(local.ty)?;

            // Write the local name
            // The leading space is important
            write!(self.out, " {}", self.names[&ctx.name_key(handle)])?;
            // Write size for array type
            if let TypeInner::Array { base, size, .. } = self.module.types[local.ty].inner {
                self.write_array_size(base, size)?;
            }
            // Write the local initializer if needed
            if let Some(init) = local.init {
                // Put the equal signal only if there's a initializer
                // The leading and trailing spaces aren't needed but help with readability
                write!(self.out, " = ")?;

                // Write the constant
                // `write_constant` adds no trailing or leading space/newline
                self.write_expr(init, &ctx)?;
            } else if is_value_init_supported(self.module, local.ty) {
                // No explicit initializer: zero-initialize when the type supports it.
                write!(self.out, " = ")?;
                self.write_zero_init_value(local.ty)?;
            }

            // Finish the local with `;` and add a newline (only for readability)
            writeln!(self.out, ";")?
        }

        // Write the function body (statement list)
        for sta in func.body.iter() {
            // Write a statement, the indentation should always be 1 when writing the function body
            // `write_stmt` adds a newline
            self.write_stmt(sta, &ctx, back::Level(1))?;
        }

        // Close braces and add a newline
        writeln!(self.out, "}}")?;

        Ok(())
    }
1391
1392 fn write_workgroup_variables_initialization(
1393 &mut self,
1394 ctx: &back::FunctionCtx,
1395 ) -> BackendResult {
1396 let mut vars = self
1397 .module
1398 .global_variables
1399 .iter()
1400 .filter(|&(handle, var)| {
1401 !ctx.info[handle].is_empty() && var.space == crate::AddressSpace::WorkGroup
1402 })
1403 .peekable();
1404
1405 if vars.peek().is_some() {
1406 let level = back::Level(1);
1407
1408 writeln!(self.out, "{level}if (gl_LocalInvocationID == uvec3(0u)) {{")?;
1409
1410 for (handle, var) in vars {
1411 let name = &self.names[&NameKey::GlobalVariable(handle)];
1412 write!(self.out, "{}{} = ", level.next(), name)?;
1413 self.write_zero_init_value(var.ty)?;
1414 writeln!(self.out, ";")?;
1415 }
1416
1417 writeln!(self.out, "{level}}}")?;
1418 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1419 }
1420
1421 Ok(())
1422 }
1423
1424 /// Write a list of comma separated `T` values using a writer function `F`.
1425 ///
1426 /// The writer function `F` receives a mutable reference to `self` that if needed won't cause
1427 /// borrow checker issues (using for example a closure with `self` will cause issues), the
1428 /// second argument is the 0 based index of the element on the list, and the last element is
1429 /// a reference to the element `T` being written
1430 ///
1431 /// # Notes
1432 /// - Adds no newlines or leading/trailing whitespace
1433 /// - The last element won't have a trailing `,`
1434 fn write_slice<T, F: FnMut(&mut Self, u32, &T) -> BackendResult>(
1435 &mut self,
1436 data: &[T],
1437 mut f: F,
1438 ) -> BackendResult {
1439 // Loop through `data` invoking `f` for each element
1440 for (index, item) in data.iter().enumerate() {
1441 if index != 0 {
1442 write!(self.out, ", ")?;
1443 }
1444 f(self, index as u32, item)?;
1445 }
1446
1447 Ok(())
1448 }
1449
1450 /// Helper method used to write global constants
1451 fn write_global_constant(&mut self, handle: Handle<crate::Constant>) -> BackendResult {
1452 write!(self.out, "const ")?;
1453 let constant = &self.module.constants[handle];
1454 self.write_type(constant.ty)?;
1455 let name = &self.names[&NameKey::Constant(handle)];
1456 write!(self.out, " {name}")?;
1457 if let TypeInner::Array { base, size, .. } = self.module.types[constant.ty].inner {
1458 self.write_array_size(base, size)?;
1459 }
1460 write!(self.out, " = ")?;
1461 self.write_const_expr(constant.init, &self.module.global_expressions)?;
1462 writeln!(self.out, ";")?;
1463 Ok(())
1464 }
1465
1466 /// Helper method used to output a dot product as an arithmetic expression
1467 ///
1468 fn write_dot_product(
1469 &mut self,
1470 arg: Handle<crate::Expression>,
1471 arg1: Handle<crate::Expression>,
1472 size: usize,
1473 ctx: &back::FunctionCtx,
1474 ) -> BackendResult {
1475 // Write parentheses around the dot product expression to prevent operators
1476 // with different precedences from applying earlier.
1477 write!(self.out, "(")?;
1478
1479 // Cycle through all the components of the vector
1480 for index in 0..size {
1481 let component = back::COMPONENTS[index];
1482 // Write the addition to the previous product
1483 // This will print an extra '+' at the beginning but that is fine in glsl
1484 write!(self.out, " + ")?;
1485 // Write the first vector expression, this expression is marked to be
1486 // cached so unless it can't be cached (for example, it's a Constant)
1487 // it shouldn't produce large expressions.
1488 self.write_expr(arg, ctx)?;
1489 // Access the current component on the first vector
1490 write!(self.out, ".{component} * ")?;
1491 // Write the second vector expression, this expression is marked to be
1492 // cached so unless it can't be cached (for example, it's a Constant)
1493 // it shouldn't produce large expressions.
1494 self.write_expr(arg1, ctx)?;
1495 // Access the current component on the second vector
1496 write!(self.out, ".{component}")?;
1497 }
1498
1499 write!(self.out, ")")?;
1500 Ok(())
1501 }
1502
1503 /// Helper method used to write structs
1504 ///
1505 /// # Notes
1506 /// Ends in a newline
1507 fn write_struct_body(
1508 &mut self,
1509 handle: Handle<crate::Type>,
1510 members: &[crate::StructMember],
1511 ) -> BackendResult {
1512 // glsl structs are written as in C
1513 // `struct name() { members };`
1514 // | `struct` is a keyword
1515 // | `name` is the struct name
1516 // | `members` is a semicolon separated list of `type name`
1517 // | `type` is the member type
1518 // | `name` is the member name
1519 writeln!(self.out, "{{")?;
1520
1521 for (idx, member) in members.iter().enumerate() {
1522 // The indentation is only for readability
1523 write!(self.out, "{}", back::INDENT)?;
1524
1525 match self.module.types[member.ty].inner {
1526 TypeInner::Array {
1527 base,
1528 size,
1529 stride: _,
1530 } => {
1531 self.write_type(base)?;
1532 write!(
1533 self.out,
1534 " {}",
1535 &self.names[&NameKey::StructMember(handle, idx as u32)]
1536 )?;
1537 // Write [size]
1538 self.write_array_size(base, size)?;
1539 // Newline is important
1540 writeln!(self.out, ";")?;
1541 }
1542 _ => {
1543 // Write the member type
1544 // Adds no trailing space
1545 self.write_type(member.ty)?;
1546
1547 // Write the member name and put a semicolon
1548 // The leading space is important
1549 // All members must have a semicolon even the last one
1550 writeln!(
1551 self.out,
1552 " {};",
1553 &self.names[&NameKey::StructMember(handle, idx as u32)]
1554 )?;
1555 }
1556 }
1557 }
1558
1559 write!(self.out, "}}")?;
1560 Ok(())
1561 }
1562
1563 /// Helper method used to write statements
1564 ///
1565 /// # Notes
1566 /// Always adds a newline
1567 fn write_stmt(
1568 &mut self,
1569 sta: &crate::Statement,
1570 ctx: &back::FunctionCtx,
1571 level: back::Level,
1572 ) -> BackendResult {
1573 use crate::Statement;
1574
1575 match *sta {
1576 // This is where we can generate intermediate constants for some expression types.
1577 Statement::Emit(ref range) => {
1578 for handle in range.clone() {
1579 let ptr_class = ctx.resolve_type(handle, &self.module.types).pointer_space();
1580 let expr_name = if ptr_class.is_some() {
1581 // GLSL can't save a pointer-valued expression in a variable,
1582 // but we shouldn't ever need to: they should never be named expressions,
1583 // and none of the expression types flagged by bake_ref_count can be pointer-valued.
1584 None
1585 } else if let Some(name) = ctx.named_expressions.get(&handle) {
1586 // Front end provides names for all variables at the start of writing.
1587 // But we write them to step by step. We need to recache them
1588 // Otherwise, we could accidentally write variable name instead of full expression.
1589 // Also, we use sanitized names! It defense backend from generating variable with name from reserved keywords.
1590 Some(self.namer.call(name))
1591 } else if self.need_bake_expressions.contains(&handle) {
1592 Some(Baked(handle).to_string())
1593 } else {
1594 None
1595 };
1596
1597 // If we are going to write an `ImageLoad` next and the target image
1598 // is sampled and we are using the `Restrict` policy for bounds
1599 // checking images we need to write a local holding the clamped lod.
1600 if let crate::Expression::ImageLoad {
1601 image,
1602 level: Some(level_expr),
1603 ..
1604 } = ctx.expressions[handle]
1605 {
1606 if let TypeInner::Image {
1607 class: crate::ImageClass::Sampled { .. },
1608 ..
1609 } = *ctx.resolve_type(image, &self.module.types)
1610 {
1611 if let proc::BoundsCheckPolicy::Restrict = self.policies.image_load {
1612 write!(self.out, "{level}")?;
1613 self.write_clamped_lod(ctx, handle, image, level_expr)?
1614 }
1615 }
1616 }
1617
1618 if let Some(name) = expr_name {
1619 write!(self.out, "{level}")?;
1620 self.write_named_expr(handle, name, handle, ctx)?;
1621 }
1622 }
1623 }
1624 // Blocks are simple we just need to write the block statements between braces
1625 // We could also just print the statements but this is more readable and maps more
1626 // closely to the IR
1627 Statement::Block(ref block) => {
1628 write!(self.out, "{level}")?;
1629 writeln!(self.out, "{{")?;
1630 for sta in block.iter() {
1631 // Increase the indentation to help with readability
1632 self.write_stmt(sta, ctx, level.next())?
1633 }
1634 writeln!(self.out, "{level}}}")?
1635 }
1636 // Ifs are written as in C:
1637 // ```
1638 // if(condition) {
1639 // accept
1640 // } else {
1641 // reject
1642 // }
1643 // ```
1644 Statement::If {
1645 condition,
1646 ref accept,
1647 ref reject,
1648 } => {
1649 write!(self.out, "{level}")?;
1650 write!(self.out, "if (")?;
1651 self.write_expr(condition, ctx)?;
1652 writeln!(self.out, ") {{")?;
1653
1654 for sta in accept {
1655 // Increase indentation to help with readability
1656 self.write_stmt(sta, ctx, level.next())?;
1657 }
1658
1659 // If there are no statements in the reject block we skip writing it
1660 // This is only for readability
1661 if !reject.is_empty() {
1662 writeln!(self.out, "{level}}} else {{")?;
1663
1664 for sta in reject {
1665 // Increase indentation to help with readability
1666 self.write_stmt(sta, ctx, level.next())?;
1667 }
1668 }
1669
1670 writeln!(self.out, "{level}}}")?
1671 }
1672 // Switch are written as in C:
1673 // ```
1674 // switch (selector) {
1675 // // Fallthrough
1676 // case label:
1677 // block
1678 // // Non fallthrough
1679 // case label:
1680 // block
1681 // break;
1682 // default:
1683 // block
1684 // }
1685 // ```
1686 // Where the `default` case happens isn't important but we put it last
1687 // so that we don't need to print a `break` for it
1688 Statement::Switch {
1689 selector,
1690 ref cases,
1691 } => {
1692 let l2 = level.next();
1693 // Some GLSL consumers may not handle switches with a single
1694 // body correctly: See wgpu#4514. Write such switch statements
1695 // as a `do {} while(false);` loop instead.
1696 //
1697 // Since doing so may inadvertently capture `continue`
1698 // statements in the switch body, we must apply continue
1699 // forwarding. See the `naga::back::continue_forward` module
1700 // docs for details.
1701 let one_body = cases
1702 .iter()
1703 .rev()
1704 .skip(1)
1705 .all(|case| case.fall_through && case.body.is_empty());
1706 if one_body {
1707 // Unlike HLSL, in GLSL `continue_ctx` only needs to know
1708 // about [`Switch`] statements that are being rendered as
1709 // `do-while` loops.
1710 if let Some(variable) = self.continue_ctx.enter_switch(&mut self.namer) {
1711 writeln!(self.out, "{level}bool {variable} = false;",)?;
1712 };
1713 writeln!(self.out, "{level}do {{")?;
1714 // Note: Expressions have no side-effects so we don't need to emit selector expression.
1715
1716 // Body
1717 if let Some(case) = cases.last() {
1718 for sta in case.body.iter() {
1719 self.write_stmt(sta, ctx, l2)?;
1720 }
1721 }
1722 // End do-while
1723 writeln!(self.out, "{level}}} while(false);")?;
1724
1725 // Handle any forwarded continue statements.
1726 use back::continue_forward::ExitControlFlow;
1727 let op = match self.continue_ctx.exit_switch() {
1728 ExitControlFlow::None => None,
1729 ExitControlFlow::Continue { variable } => Some(("continue", variable)),
1730 ExitControlFlow::Break { variable } => Some(("break", variable)),
1731 };
1732 if let Some((control_flow, variable)) = op {
1733 writeln!(self.out, "{level}if ({variable}) {{")?;
1734 writeln!(self.out, "{l2}{control_flow};")?;
1735 writeln!(self.out, "{level}}}")?;
1736 }
1737 } else {
1738 // Start the switch
1739 write!(self.out, "{level}")?;
1740 write!(self.out, "switch(")?;
1741 self.write_expr(selector, ctx)?;
1742 writeln!(self.out, ") {{")?;
1743
1744 // Write all cases
1745 for case in cases {
1746 match case.value {
1747 crate::SwitchValue::I32(value) => {
1748 write!(self.out, "{l2}case {value}:")?
1749 }
1750 crate::SwitchValue::U32(value) => {
1751 write!(self.out, "{l2}case {value}u:")?
1752 }
1753 crate::SwitchValue::Default => write!(self.out, "{l2}default:")?,
1754 }
1755
1756 let write_block_braces = !(case.fall_through && case.body.is_empty());
1757 if write_block_braces {
1758 writeln!(self.out, " {{")?;
1759 } else {
1760 writeln!(self.out)?;
1761 }
1762
1763 for sta in case.body.iter() {
1764 self.write_stmt(sta, ctx, l2.next())?;
1765 }
1766
1767 if !case.fall_through && case.body.last().is_none_or(|s| !s.is_terminator())
1768 {
1769 writeln!(self.out, "{}break;", l2.next())?;
1770 }
1771
1772 if write_block_braces {
1773 writeln!(self.out, "{l2}}}")?;
1774 }
1775 }
1776
1777 writeln!(self.out, "{level}}}")?
1778 }
1779 }
1780 // Loops in naga IR are based on wgsl loops, glsl can emulate the behaviour by using a
1781 // while true loop and appending the continuing block to the body resulting on:
1782 // ```
1783 // bool loop_init = true;
1784 // while(true) {
1785 // if (!loop_init) { <continuing> }
1786 // loop_init = false;
1787 // <body>
1788 // }
1789 // ```
1790 Statement::Loop {
1791 ref body,
1792 ref continuing,
1793 break_if,
1794 } => {
1795 self.continue_ctx.enter_loop();
1796 if !continuing.is_empty() || break_if.is_some() {
1797 let gate_name = self.namer.call("loop_init");
1798 writeln!(self.out, "{level}bool {gate_name} = true;")?;
1799 writeln!(self.out, "{level}while(true) {{")?;
1800 let l2 = level.next();
1801 let l3 = l2.next();
1802 writeln!(self.out, "{l2}if (!{gate_name}) {{")?;
1803 for sta in continuing {
1804 self.write_stmt(sta, ctx, l3)?;
1805 }
1806 if let Some(condition) = break_if {
1807 write!(self.out, "{l3}if (")?;
1808 self.write_expr(condition, ctx)?;
1809 writeln!(self.out, ") {{")?;
1810 writeln!(self.out, "{}break;", l3.next())?;
1811 writeln!(self.out, "{l3}}}")?;
1812 }
1813 writeln!(self.out, "{l2}}}")?;
1814 writeln!(self.out, "{}{} = false;", level.next(), gate_name)?;
1815 } else {
1816 writeln!(self.out, "{level}while(true) {{")?;
1817 }
1818 for sta in body {
1819 self.write_stmt(sta, ctx, level.next())?;
1820 }
1821 writeln!(self.out, "{level}}}")?;
1822 self.continue_ctx.exit_loop();
1823 }
1824 // Break, continue and return as written as in C
1825 // `break;`
1826 Statement::Break => {
1827 write!(self.out, "{level}")?;
1828 writeln!(self.out, "break;")?
1829 }
1830 // `continue;`
1831 Statement::Continue => {
1832 // Sometimes we must render a `Continue` statement as a `break`.
1833 // See the docs for the `back::continue_forward` module.
1834 if let Some(variable) = self.continue_ctx.continue_encountered() {
1835 writeln!(self.out, "{level}{variable} = true;",)?;
1836 writeln!(self.out, "{level}break;")?
1837 } else {
1838 writeln!(self.out, "{level}continue;")?
1839 }
1840 }
1841 // `return expr;`, `expr` is optional
1842 Statement::Return { value } => {
1843 write!(self.out, "{level}")?;
1844 match ctx.ty {
1845 back::FunctionType::Function(_) => {
1846 write!(self.out, "return")?;
1847 // Write the expression to be returned if needed
1848 if let Some(expr) = value {
1849 write!(self.out, " ")?;
1850 self.write_expr(expr, ctx)?;
1851 }
1852 writeln!(self.out, ";")?;
1853 }
1854 back::FunctionType::EntryPoint(ep_index) => {
1855 let mut has_point_size = false;
1856 let ep = &self.module.entry_points[ep_index as usize];
1857 if let Some(ref result) = ep.function.result {
1858 let value = value.unwrap();
1859 match self.module.types[result.ty].inner {
1860 TypeInner::Struct { ref members, .. } => {
1861 let temp_struct_name = match ctx.expressions[value] {
1862 crate::Expression::Compose { .. } => {
1863 let return_struct = "_tmp_return";
1864 write!(
1865 self.out,
1866 "{} {} = ",
1867 &self.names[&NameKey::Type(result.ty)],
1868 return_struct
1869 )?;
1870 self.write_expr(value, ctx)?;
1871 writeln!(self.out, ";")?;
1872 write!(self.out, "{level}")?;
1873 Some(return_struct)
1874 }
1875 _ => None,
1876 };
1877
1878 for (index, member) in members.iter().enumerate() {
1879 if let Some(crate::Binding::BuiltIn(
1880 crate::BuiltIn::PointSize,
1881 )) = member.binding
1882 {
1883 has_point_size = true;
1884 }
1885
1886 let varying_name = VaryingName {
1887 binding: member.binding.as_ref().unwrap(),
1888 stage: ep.stage,
1889 options: VaryingOptions::from_writer_options(
1890 self.options,
1891 true,
1892 ),
1893 };
1894 write!(self.out, "{varying_name} = ")?;
1895
1896 if let Some(struct_name) = temp_struct_name {
1897 write!(self.out, "{struct_name}")?;
1898 } else {
1899 self.write_expr(value, ctx)?;
1900 }
1901
1902 // Write field name
1903 writeln!(
1904 self.out,
1905 ".{};",
1906 &self.names
1907 [&NameKey::StructMember(result.ty, index as u32)]
1908 )?;
1909 write!(self.out, "{level}")?;
1910 }
1911 }
1912 _ => {
1913 let name = VaryingName {
1914 binding: result.binding.as_ref().unwrap(),
1915 stage: ep.stage,
1916 options: VaryingOptions::from_writer_options(
1917 self.options,
1918 true,
1919 ),
1920 };
1921 write!(self.out, "{name} = ")?;
1922 self.write_expr(value, ctx)?;
1923 writeln!(self.out, ";")?;
1924 write!(self.out, "{level}")?;
1925 }
1926 }
1927 }
1928
1929 let is_vertex_stage = self.module.entry_points[ep_index as usize].stage
1930 == ShaderStage::Vertex;
1931 if is_vertex_stage
1932 && self
1933 .options
1934 .writer_flags
1935 .contains(WriterFlags::ADJUST_COORDINATE_SPACE)
1936 {
1937 writeln!(
1938 self.out,
1939 "gl_Position.yz = vec2(-gl_Position.y, gl_Position.z * 2.0 - gl_Position.w);",
1940 )?;
1941 write!(self.out, "{level}")?;
1942 }
1943
1944 if is_vertex_stage
1945 && self
1946 .options
1947 .writer_flags
1948 .contains(WriterFlags::FORCE_POINT_SIZE)
1949 && !has_point_size
1950 {
1951 writeln!(self.out, "gl_PointSize = 1.0;")?;
1952 write!(self.out, "{level}")?;
1953 }
1954 writeln!(self.out, "return;")?;
1955 }
1956 }
1957 }
1958 // This is one of the places were glsl adds to the syntax of C in this case the discard
1959 // keyword which ceases all further processing in a fragment shader, it's called OpKill
1960 // in spir-v that's why it's called `Statement::Kill`
1961 Statement::Kill => writeln!(self.out, "{level}discard;")?,
1962 Statement::ControlBarrier(flags) => {
1963 self.write_control_barrier(flags, level)?;
1964 }
1965 Statement::MemoryBarrier(flags) => {
1966 self.write_memory_barrier(flags, level)?;
1967 }
1968 // Stores in glsl are just variable assignments written as `pointer = value;`
1969 Statement::Store { pointer, value } => {
1970 write!(self.out, "{level}")?;
1971 self.write_expr(pointer, ctx)?;
1972 write!(self.out, " = ")?;
1973 self.write_expr(value, ctx)?;
1974 writeln!(self.out, ";")?
1975 }
1976 Statement::WorkGroupUniformLoad { pointer, result } => {
1977 // GLSL doesn't have pointers, which means that this backend needs to ensure that
1978 // the actual "loading" is happening between the two barriers.
1979 // This is done in `Emit` by never emitting a variable name for pointer variables
1980 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1981
1982 let result_name = Baked(result).to_string();
1983 write!(self.out, "{level}")?;
1984 // Expressions cannot have side effects, so just writing the expression here is fine.
1985 self.write_named_expr(pointer, result_name, result, ctx)?;
1986
1987 self.write_control_barrier(crate::Barrier::WORK_GROUP, level)?;
1988 }
1989 // Stores a value into an image.
1990 Statement::ImageStore {
1991 image,
1992 coordinate,
1993 array_index,
1994 value,
1995 } => {
1996 write!(self.out, "{level}")?;
1997 self.write_image_store(ctx, image, coordinate, array_index, value)?
1998 }
1999 // A `Call` is written `name(arguments)` where `arguments` is a comma separated expressions list
2000 Statement::Call {
2001 function,
2002 ref arguments,
2003 result,
2004 } => {
2005 write!(self.out, "{level}")?;
2006 if let Some(expr) = result {
2007 let name = Baked(expr).to_string();
2008 let result = self.module.functions[function].result.as_ref().unwrap();
2009 self.write_type(result.ty)?;
2010 write!(self.out, " {name}")?;
2011 if let TypeInner::Array { base, size, .. } = self.module.types[result.ty].inner
2012 {
2013 self.write_array_size(base, size)?
2014 }
2015 write!(self.out, " = ")?;
2016 self.named_expressions.insert(expr, name);
2017 }
2018 write!(self.out, "{}(", &self.names[&NameKey::Function(function)])?;
2019 let arguments: Vec<_> = arguments
2020 .iter()
2021 .enumerate()
2022 .filter_map(|(i, arg)| {
2023 let arg_ty = self.module.functions[function].arguments[i].ty;
2024 match self.module.types[arg_ty].inner {
2025 TypeInner::Sampler { .. } => None,
2026 _ => Some(*arg),
2027 }
2028 })
2029 .collect();
2030 self.write_slice(&arguments, |this, _, arg| this.write_expr(*arg, ctx))?;
2031 writeln!(self.out, ");")?
2032 }
2033 Statement::Atomic {
2034 pointer,
2035 ref fun,
2036 value,
2037 result,
2038 } => {
2039 write!(self.out, "{level}")?;
2040
2041 match *fun {
2042 crate::AtomicFunction::Exchange {
2043 compare: Some(compare_expr),
2044 } => {
2045 let result_handle = result.expect("CompareExchange must have a result");
2046 let res_name = Baked(result_handle).to_string();
2047 self.write_type(ctx.info[result_handle].ty.handle().unwrap())?;
2048 write!(self.out, " {res_name};")?;
2049 write!(self.out, " {res_name}.old_value = atomicCompSwap(")?;
2050 self.write_expr(pointer, ctx)?;
2051 write!(self.out, ", ")?;
2052 self.write_expr(compare_expr, ctx)?;
2053 write!(self.out, ", ")?;
2054 self.write_expr(value, ctx)?;
2055 writeln!(self.out, ");")?;
2056
2057 write!(
2058 self.out,
2059 "{level}{res_name}.exchanged = ({res_name}.old_value == "
2060 )?;
2061 self.write_expr(compare_expr, ctx)?;
2062 writeln!(self.out, ");")?;
2063 self.named_expressions.insert(result_handle, res_name);
2064 }
2065 _ => {
2066 if let Some(result) = result {
2067 let res_name = Baked(result).to_string();
2068 self.write_type(ctx.info[result].ty.handle().unwrap())?;
2069 write!(self.out, " {res_name} = ")?;
2070 self.named_expressions.insert(result, res_name);
2071 }
2072 let fun_str = fun.to_glsl();
2073 write!(self.out, "atomic{fun_str}(")?;
2074 self.write_expr(pointer, ctx)?;
2075 write!(self.out, ", ")?;
2076 if let crate::AtomicFunction::Subtract = *fun {
2077 // Emulate `atomicSub` with `atomicAdd` by negating the value.
2078 write!(self.out, "-")?;
2079 }
2080 self.write_expr(value, ctx)?;
2081 writeln!(self.out, ");")?;
2082 }
2083 }
2084 }
2085 // Stores a value into an image.
2086 Statement::ImageAtomic {
2087 image,
2088 coordinate,
2089 array_index,
2090 fun,
2091 value,
2092 } => {
2093 write!(self.out, "{level}")?;
2094 self.write_image_atomic(ctx, image, coordinate, array_index, fun, value)?
2095 }
2096 Statement::RayQuery { .. } => unreachable!(),
2097 Statement::SubgroupBallot { result, predicate } => {
2098 write!(self.out, "{level}")?;
2099 let res_name = Baked(result).to_string();
2100 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2101 self.write_value_type(res_ty)?;
2102 write!(self.out, " {res_name} = ")?;
2103 self.named_expressions.insert(result, res_name);
2104
2105 write!(self.out, "subgroupBallot(")?;
2106 match predicate {
2107 Some(predicate) => self.write_expr(predicate, ctx)?,
2108 None => write!(self.out, "true")?,
2109 }
2110 writeln!(self.out, ");")?;
2111 }
2112 Statement::SubgroupCollectiveOperation {
2113 op,
2114 collective_op,
2115 argument,
2116 result,
2117 } => {
2118 write!(self.out, "{level}")?;
2119 let res_name = Baked(result).to_string();
2120 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2121 self.write_value_type(res_ty)?;
2122 write!(self.out, " {res_name} = ")?;
2123 self.named_expressions.insert(result, res_name);
2124
2125 match (collective_op, op) {
2126 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::All) => {
2127 write!(self.out, "subgroupAll(")?
2128 }
2129 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Any) => {
2130 write!(self.out, "subgroupAny(")?
2131 }
2132 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Add) => {
2133 write!(self.out, "subgroupAdd(")?
2134 }
2135 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Mul) => {
2136 write!(self.out, "subgroupMul(")?
2137 }
2138 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Max) => {
2139 write!(self.out, "subgroupMax(")?
2140 }
2141 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Min) => {
2142 write!(self.out, "subgroupMin(")?
2143 }
2144 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::And) => {
2145 write!(self.out, "subgroupAnd(")?
2146 }
2147 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Or) => {
2148 write!(self.out, "subgroupOr(")?
2149 }
2150 (crate::CollectiveOperation::Reduce, crate::SubgroupOperation::Xor) => {
2151 write!(self.out, "subgroupXor(")?
2152 }
2153 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Add) => {
2154 write!(self.out, "subgroupExclusiveAdd(")?
2155 }
2156 (crate::CollectiveOperation::ExclusiveScan, crate::SubgroupOperation::Mul) => {
2157 write!(self.out, "subgroupExclusiveMul(")?
2158 }
2159 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Add) => {
2160 write!(self.out, "subgroupInclusiveAdd(")?
2161 }
2162 (crate::CollectiveOperation::InclusiveScan, crate::SubgroupOperation::Mul) => {
2163 write!(self.out, "subgroupInclusiveMul(")?
2164 }
2165 _ => unimplemented!(),
2166 }
2167 self.write_expr(argument, ctx)?;
2168 writeln!(self.out, ");")?;
2169 }
2170 Statement::SubgroupGather {
2171 mode,
2172 argument,
2173 result,
2174 } => {
2175 write!(self.out, "{level}")?;
2176 let res_name = Baked(result).to_string();
2177 let res_ty = ctx.info[result].ty.inner_with(&self.module.types);
2178 self.write_value_type(res_ty)?;
2179 write!(self.out, " {res_name} = ")?;
2180 self.named_expressions.insert(result, res_name);
2181
2182 match mode {
2183 crate::GatherMode::BroadcastFirst => {
2184 write!(self.out, "subgroupBroadcastFirst(")?;
2185 }
2186 crate::GatherMode::Broadcast(_) => {
2187 write!(self.out, "subgroupBroadcast(")?;
2188 }
2189 crate::GatherMode::Shuffle(_) => {
2190 write!(self.out, "subgroupShuffle(")?;
2191 }
2192 crate::GatherMode::ShuffleDown(_) => {
2193 write!(self.out, "subgroupShuffleDown(")?;
2194 }
2195 crate::GatherMode::ShuffleUp(_) => {
2196 write!(self.out, "subgroupShuffleUp(")?;
2197 }
2198 crate::GatherMode::ShuffleXor(_) => {
2199 write!(self.out, "subgroupShuffleXor(")?;
2200 }
2201 crate::GatherMode::QuadBroadcast(_) => {
2202 write!(self.out, "subgroupQuadBroadcast(")?;
2203 }
2204 crate::GatherMode::QuadSwap(direction) => match direction {
2205 crate::Direction::X => {
2206 write!(self.out, "subgroupQuadSwapHorizontal(")?;
2207 }
2208 crate::Direction::Y => {
2209 write!(self.out, "subgroupQuadSwapVertical(")?;
2210 }
2211 crate::Direction::Diagonal => {
2212 write!(self.out, "subgroupQuadSwapDiagonal(")?;
2213 }
2214 },
2215 }
2216 self.write_expr(argument, ctx)?;
2217 match mode {
2218 crate::GatherMode::BroadcastFirst => {}
2219 crate::GatherMode::Broadcast(index)
2220 | crate::GatherMode::Shuffle(index)
2221 | crate::GatherMode::ShuffleDown(index)
2222 | crate::GatherMode::ShuffleUp(index)
2223 | crate::GatherMode::ShuffleXor(index)
2224 | crate::GatherMode::QuadBroadcast(index) => {
2225 write!(self.out, ", ")?;
2226 self.write_expr(index, ctx)?;
2227 }
2228 crate::GatherMode::QuadSwap(_) => {}
2229 }
2230 writeln!(self.out, ");")?;
2231 }
2232 Statement::CooperativeStore { .. } => unimplemented!(),
2233 }
2234
2235 Ok(())
2236 }
2237
2238 /// Write a const expression.
2239 ///
2240 /// Write `expr`, a handle to an [`Expression`] in the current [`Module`]'s
2241 /// constant expression arena, as GLSL expression.
2242 ///
2243 /// # Notes
2244 /// Adds no newlines or leading/trailing whitespace
2245 ///
2246 /// [`Expression`]: crate::Expression
2247 /// [`Module`]: crate::Module
2248 fn write_const_expr(
2249 &mut self,
2250 expr: Handle<crate::Expression>,
2251 arena: &crate::Arena<crate::Expression>,
2252 ) -> BackendResult {
2253 self.write_possibly_const_expr(
2254 expr,
2255 arena,
2256 |expr| &self.info[expr],
2257 |writer, expr| writer.write_const_expr(expr, arena),
2258 )
2259 }
2260
2261 /// Write [`Expression`] variants that can occur in both runtime and const expressions.
2262 ///
2263 /// Write `expr`, a handle to an [`Expression`] in the arena `expressions`,
2264 /// as as GLSL expression. This must be one of the [`Expression`] variants
2265 /// that is allowed to occur in constant expressions.
2266 ///
2267 /// Use `write_expression` to write subexpressions.
2268 ///
2269 /// This is the common code for `write_expr`, which handles arbitrary
2270 /// runtime expressions, and `write_const_expr`, which only handles
2271 /// const-expressions. Each of those callers passes itself (essentially) as
2272 /// the `write_expression` callback, so that subexpressions are restricted
2273 /// to the appropriate variants.
2274 ///
2275 /// # Notes
2276 /// Adds no newlines or leading/trailing whitespace
2277 ///
2278 /// [`Expression`]: crate::Expression
2279 fn write_possibly_const_expr<'w, I, E>(
2280 &'w mut self,
2281 expr: Handle<crate::Expression>,
2282 expressions: &crate::Arena<crate::Expression>,
2283 info: I,
2284 write_expression: E,
2285 ) -> BackendResult
2286 where
2287 I: Fn(Handle<crate::Expression>) -> &'w proc::TypeResolution,
2288 E: Fn(&mut Self, Handle<crate::Expression>) -> BackendResult,
2289 {
2290 use crate::Expression;
2291
2292 match expressions[expr] {
2293 Expression::Literal(literal) => {
2294 match literal {
2295 // Floats are written using `Debug` instead of `Display` because it always appends the
2296 // decimal part even it's zero which is needed for a valid glsl float constant
2297 crate::Literal::F64(value) => write!(self.out, "{value:?}LF")?,
2298 crate::Literal::F32(value) => write!(self.out, "{value:?}")?,
2299 crate::Literal::F16(_) => {
2300 return Err(Error::Custom("GLSL has no 16-bit float type".into()));
2301 }
2302 // Unsigned integers need a `u` at the end
2303 //
2304 // While `core` doesn't necessarily need it, it's allowed and since `es` needs it we
2305 // always write it as the extra branch wouldn't have any benefit in readability
2306 crate::Literal::U32(value) => write!(self.out, "{value}u")?,
2307 crate::Literal::I32(value) => write!(self.out, "{value}")?,
2308 crate::Literal::Bool(value) => write!(self.out, "{value}")?,
2309 crate::Literal::I64(_) => {
2310 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2311 }
2312 crate::Literal::U64(_) => {
2313 return Err(Error::Custom("GLSL has no 64-bit integer type".into()));
2314 }
2315 crate::Literal::AbstractInt(_) | crate::Literal::AbstractFloat(_) => {
2316 return Err(Error::Custom(
2317 "Abstract types should not appear in IR presented to backends".into(),
2318 ));
2319 }
2320 }
2321 }
2322 Expression::Constant(handle) => {
2323 let constant = &self.module.constants[handle];
2324 if constant.name.is_some() {
2325 write!(self.out, "{}", self.names[&NameKey::Constant(handle)])?;
2326 } else {
2327 self.write_const_expr(constant.init, &self.module.global_expressions)?;
2328 }
2329 }
2330 Expression::ZeroValue(ty) => {
2331 self.write_zero_init_value(ty)?;
2332 }
2333 Expression::Compose { ty, ref components } => {
2334 self.write_type(ty)?;
2335
2336 if let TypeInner::Array { base, size, .. } = self.module.types[ty].inner {
2337 self.write_array_size(base, size)?;
2338 }
2339
2340 write!(self.out, "(")?;
2341 for (index, component) in components.iter().enumerate() {
2342 if index != 0 {
2343 write!(self.out, ", ")?;
2344 }
2345 write_expression(self, *component)?;
2346 }
2347 write!(self.out, ")")?
2348 }
2349 // `Splat` needs to actually write down a vector, it's not always inferred in GLSL.
2350 Expression::Splat { size: _, value } => {
2351 let resolved = info(expr).inner_with(&self.module.types);
2352 self.write_value_type(resolved)?;
2353 write!(self.out, "(")?;
2354 write_expression(self, value)?;
2355 write!(self.out, ")")?
2356 }
2357 _ => {
2358 return Err(Error::Override);
2359 }
2360 }
2361
2362 Ok(())
2363 }
2364
2365 /// Helper method to write expressions
2366 ///
2367 /// # Notes
2368 /// Doesn't add any newlines or leading/trailing spaces
2369 fn write_expr(
2370 &mut self,
2371 expr: Handle<crate::Expression>,
2372 ctx: &back::FunctionCtx,
2373 ) -> BackendResult {
2374 use crate::Expression;
2375
2376 if let Some(name) = self.named_expressions.get(&expr) {
2377 write!(self.out, "{name}")?;
2378 return Ok(());
2379 }
2380
2381 match ctx.expressions[expr] {
2382 Expression::Literal(_)
2383 | Expression::Constant(_)
2384 | Expression::ZeroValue(_)
2385 | Expression::Compose { .. }
2386 | Expression::Splat { .. } => {
2387 self.write_possibly_const_expr(
2388 expr,
2389 ctx.expressions,
2390 |expr| &ctx.info[expr].ty,
2391 |writer, expr| writer.write_expr(expr, ctx),
2392 )?;
2393 }
2394 Expression::Override(_) => return Err(Error::Override),
2395 // `Access` is applied to arrays, vectors and matrices and is written as indexing
2396 Expression::Access { base, index } => {
2397 self.write_expr(base, ctx)?;
2398 write!(self.out, "[")?;
2399 self.write_expr(index, ctx)?;
2400 write!(self.out, "]")?
2401 }
2402 // `AccessIndex` is the same as `Access` except that the index is a constant and it can
2403 // be applied to structs, in this case we need to find the name of the field at that
2404 // index and write `base.field_name`
2405 Expression::AccessIndex { base, index } => {
2406 self.write_expr(base, ctx)?;
2407
2408 let base_ty_res = &ctx.info[base].ty;
2409 let mut resolved = base_ty_res.inner_with(&self.module.types);
2410 let base_ty_handle = match *resolved {
2411 TypeInner::Pointer { base, space: _ } => {
2412 resolved = &self.module.types[base].inner;
2413 Some(base)
2414 }
2415 _ => base_ty_res.handle(),
2416 };
2417
2418 match *resolved {
2419 TypeInner::Vector { .. } => {
2420 // Write vector access as a swizzle
2421 write!(self.out, ".{}", back::COMPONENTS[index as usize])?
2422 }
2423 TypeInner::Matrix { .. }
2424 | TypeInner::Array { .. }
2425 | TypeInner::ValuePointer { .. } => write!(self.out, "[{index}]")?,
2426 TypeInner::Struct { .. } => {
2427 // This will never panic in case the type is a `Struct`, this is not true
2428 // for other types so we can only check while inside this match arm
2429 let ty = base_ty_handle.unwrap();
2430
2431 write!(
2432 self.out,
2433 ".{}",
2434 &self.names[&NameKey::StructMember(ty, index)]
2435 )?
2436 }
2437 ref other => return Err(Error::Custom(format!("Cannot index {other:?}"))),
2438 }
2439 }
2440 // `Swizzle` adds a few letters behind the dot.
2441 Expression::Swizzle {
2442 size,
2443 vector,
2444 pattern,
2445 } => {
2446 self.write_expr(vector, ctx)?;
2447 write!(self.out, ".")?;
2448 for &sc in pattern[..size as usize].iter() {
2449 self.out.write_char(back::COMPONENTS[sc as usize])?;
2450 }
2451 }
2452 // Function arguments are written as the argument name
2453 Expression::FunctionArgument(pos) => {
2454 write!(self.out, "{}", &self.names[&ctx.argument_key(pos)])?
2455 }
2456 // Global variables need some special work for their name but
2457 // `get_global_name` does the work for us
2458 Expression::GlobalVariable(handle) => {
2459 let global = &self.module.global_variables[handle];
2460 self.write_global_name(handle, global)?
2461 }
2462 // A local is written as it's name
2463 Expression::LocalVariable(handle) => {
2464 write!(self.out, "{}", self.names[&ctx.name_key(handle)])?
2465 }
2466 // glsl has no pointers so there's no load operation, just write the pointer expression
2467 Expression::Load { pointer } => self.write_expr(pointer, ctx)?,
2468 // `ImageSample` is a bit complicated compared to the rest of the IR.
2469 //
2470 // First there are three variations depending whether the sample level is explicitly set,
2471 // if it's automatic or it it's bias:
2472 // `texture(image, coordinate)` - Automatic sample level
2473 // `texture(image, coordinate, bias)` - Bias sample level
2474 // `textureLod(image, coordinate, level)` - Zero or Exact sample level
2475 //
2476 // Furthermore if `depth_ref` is some we need to append it to the coordinate vector
2477 Expression::ImageSample {
2478 image,
2479 sampler: _, //TODO?
2480 gather,
2481 coordinate,
2482 array_index,
2483 offset,
2484 level,
2485 depth_ref,
2486 clamp_to_edge: _,
2487 } => {
2488 let (dim, class, arrayed) = match *ctx.resolve_type(image, &self.module.types) {
2489 TypeInner::Image {
2490 dim,
2491 class,
2492 arrayed,
2493 ..
2494 } => (dim, class, arrayed),
2495 _ => unreachable!(),
2496 };
2497 let mut err = None;
2498 if dim == crate::ImageDimension::Cube {
2499 if offset.is_some() {
2500 err = Some("gsamplerCube[Array][Shadow] doesn't support texture sampling with offsets");
2501 }
2502 if arrayed
2503 && matches!(class, crate::ImageClass::Depth { .. })
2504 && matches!(level, crate::SampleLevel::Gradient { .. })
2505 {
2506 err = Some("samplerCubeArrayShadow don't support textureGrad");
2507 }
2508 }
2509 if gather.is_some() && level != crate::SampleLevel::Zero {
2510 err = Some("textureGather doesn't support LOD parameters");
2511 }
2512 if let Some(err) = err {
2513 return Err(Error::Custom(String::from(err)));
2514 }
2515
2516 // `textureLod[Offset]` on `sampler2DArrayShadow` and `samplerCubeShadow` does not exist in GLSL,
2517 // unless `GL_EXT_texture_shadow_lod` is present.
2518 // But if the target LOD is zero, we can emulate that by using `textureGrad[Offset]` with a constant gradient of 0.
2519 let workaround_lod_with_grad = ((dim == crate::ImageDimension::Cube && !arrayed)
2520 || (dim == crate::ImageDimension::D2 && arrayed))
2521 && level == crate::SampleLevel::Zero
2522 && matches!(class, crate::ImageClass::Depth { .. })
2523 && !self.features.contains(Features::TEXTURE_SHADOW_LOD);
2524
2525 // Write the function to be used depending on the sample level
2526 let fun_name = match level {
2527 crate::SampleLevel::Zero if gather.is_some() => "textureGather",
2528 crate::SampleLevel::Zero if workaround_lod_with_grad => "textureGrad",
2529 crate::SampleLevel::Auto | crate::SampleLevel::Bias(_) => "texture",
2530 crate::SampleLevel::Zero | crate::SampleLevel::Exact(_) => "textureLod",
2531 crate::SampleLevel::Gradient { .. } => "textureGrad",
2532 };
2533 let offset_name = match offset {
2534 Some(_) => "Offset",
2535 None => "",
2536 };
2537
2538 write!(self.out, "{fun_name}{offset_name}(")?;
2539
2540 // Write the image that will be used
2541 self.write_expr(image, ctx)?;
2542 // The space here isn't required but it helps with readability
2543 write!(self.out, ", ")?;
2544
2545 // TODO: handle clamp_to_edge
2546 // https://github.com/gfx-rs/wgpu/issues/7791
2547
2548 // We need to get the coordinates vector size to later build a vector that's `size + 1`
2549 // if `depth_ref` is some, if it isn't a vector we panic as that's not a valid expression
2550 let mut coord_dim = match *ctx.resolve_type(coordinate, &self.module.types) {
2551 TypeInner::Vector { size, .. } => size as u8,
2552 TypeInner::Scalar { .. } => 1,
2553 _ => unreachable!(),
2554 };
2555
2556 if array_index.is_some() {
2557 coord_dim += 1;
2558 }
2559 let merge_depth_ref = depth_ref.is_some() && gather.is_none() && coord_dim < 4;
2560 if merge_depth_ref {
2561 coord_dim += 1;
2562 }
2563
2564 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
2565 let is_vec = tex_1d_hack || coord_dim != 1;
2566 // Compose a new texture coordinates vector
2567 if is_vec {
2568 write!(self.out, "vec{}(", coord_dim + tex_1d_hack as u8)?;
2569 }
2570 self.write_expr(coordinate, ctx)?;
2571 if tex_1d_hack {
2572 write!(self.out, ", 0.0")?;
2573 }
2574 if let Some(expr) = array_index {
2575 write!(self.out, ", ")?;
2576 self.write_expr(expr, ctx)?;
2577 }
2578 if merge_depth_ref {
2579 write!(self.out, ", ")?;
2580 self.write_expr(depth_ref.unwrap(), ctx)?;
2581 }
2582 if is_vec {
2583 write!(self.out, ")")?;
2584 }
2585
2586 if let (Some(expr), false) = (depth_ref, merge_depth_ref) {
2587 write!(self.out, ", ")?;
2588 self.write_expr(expr, ctx)?;
2589 }
2590
2591 match level {
2592 // Auto needs no more arguments
2593 crate::SampleLevel::Auto => (),
2594 // Zero needs level set to 0
2595 crate::SampleLevel::Zero => {
2596 if workaround_lod_with_grad {
2597 let vec_dim = match dim {
2598 crate::ImageDimension::Cube => 3,
2599 _ => 2,
2600 };
2601 write!(self.out, ", vec{vec_dim}(0.0), vec{vec_dim}(0.0)")?;
2602 } else if gather.is_none() {
2603 write!(self.out, ", 0.0")?;
2604 }
2605 }
2606 // Exact and bias require another argument
2607 crate::SampleLevel::Exact(expr) => {
2608 write!(self.out, ", ")?;
2609 self.write_expr(expr, ctx)?;
2610 }
2611 crate::SampleLevel::Bias(_) => {
2612 // This needs to be done after the offset writing
2613 }
2614 crate::SampleLevel::Gradient { x, y } => {
2615 // If we are using sampler2D to replace sampler1D, we also
2616 // need to make sure to use vec2 gradients
2617 if tex_1d_hack {
2618 write!(self.out, ", vec2(")?;
2619 self.write_expr(x, ctx)?;
2620 write!(self.out, ", 0.0)")?;
2621 write!(self.out, ", vec2(")?;
2622 self.write_expr(y, ctx)?;
2623 write!(self.out, ", 0.0)")?;
2624 } else {
2625 write!(self.out, ", ")?;
2626 self.write_expr(x, ctx)?;
2627 write!(self.out, ", ")?;
2628 self.write_expr(y, ctx)?;
2629 }
2630 }
2631 }
2632
2633 if let Some(constant) = offset {
2634 write!(self.out, ", ")?;
2635 if tex_1d_hack {
2636 write!(self.out, "ivec2(")?;
2637 }
2638 self.write_const_expr(constant, ctx.expressions)?;
2639 if tex_1d_hack {
2640 write!(self.out, ", 0)")?;
2641 }
2642 }
2643
2644 // Bias is always the last argument
2645 if let crate::SampleLevel::Bias(expr) = level {
2646 write!(self.out, ", ")?;
2647 self.write_expr(expr, ctx)?;
2648 }
2649
2650 if let (Some(component), None) = (gather, depth_ref) {
2651 write!(self.out, ", {}", component as usize)?;
2652 }
2653
2654 // End the function
2655 write!(self.out, ")")?
2656 }
2657 Expression::ImageLoad {
2658 image,
2659 coordinate,
2660 array_index,
2661 sample,
2662 level,
2663 } => self.write_image_load(expr, ctx, image, coordinate, array_index, sample, level)?,
2664 // Query translates into one of the:
2665 // - textureSize/imageSize
2666 // - textureQueryLevels
2667 // - textureSamples/imageSamples
2668 Expression::ImageQuery { image, query } => {
2669 use crate::ImageClass;
2670
2671 // This will only panic if the module is invalid
2672 let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
2673 TypeInner::Image {
2674 dim,
2675 arrayed: _,
2676 class,
2677 } => (dim, class),
2678 _ => unreachable!(),
2679 };
2680 let components = match dim {
2681 crate::ImageDimension::D1 => 1,
2682 crate::ImageDimension::D2 => 2,
2683 crate::ImageDimension::D3 => 3,
2684 crate::ImageDimension::Cube => 2,
2685 };
2686
2687 if let crate::ImageQuery::Size { .. } = query {
2688 match components {
2689 1 => write!(self.out, "uint(")?,
2690 _ => write!(self.out, "uvec{components}(")?,
2691 }
2692 } else {
2693 write!(self.out, "uint(")?;
2694 }
2695
2696 match query {
2697 crate::ImageQuery::Size { level } => {
2698 match class {
2699 ImageClass::Sampled { multi, .. } | ImageClass::Depth { multi } => {
2700 write!(self.out, "textureSize(")?;
2701 self.write_expr(image, ctx)?;
2702 if let Some(expr) = level {
2703 let cast_to_int = matches!(
2704 *ctx.resolve_type(expr, &self.module.types),
2705 TypeInner::Scalar(crate::Scalar {
2706 kind: crate::ScalarKind::Uint,
2707 ..
2708 })
2709 );
2710
2711 write!(self.out, ", ")?;
2712
2713 if cast_to_int {
2714 write!(self.out, "int(")?;
2715 }
2716
2717 self.write_expr(expr, ctx)?;
2718
2719 if cast_to_int {
2720 write!(self.out, ")")?;
2721 }
2722 } else if !multi {
2723 // All textureSize calls requires an lod argument
2724 // except for multisampled samplers
2725 write!(self.out, ", 0")?;
2726 }
2727 }
2728 ImageClass::Storage { .. } => {
2729 write!(self.out, "imageSize(")?;
2730 self.write_expr(image, ctx)?;
2731 }
2732 ImageClass::External => unimplemented!(),
2733 }
2734 write!(self.out, ")")?;
2735 if components != 1 || self.options.version.is_es() {
2736 write!(self.out, ".{}", &"xyz"[..components])?;
2737 }
2738 }
2739 crate::ImageQuery::NumLevels => {
2740 write!(self.out, "textureQueryLevels(",)?;
2741 self.write_expr(image, ctx)?;
2742 write!(self.out, ")",)?;
2743 }
2744 crate::ImageQuery::NumLayers => {
2745 let fun_name = match class {
2746 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => "textureSize",
2747 ImageClass::Storage { .. } => "imageSize",
2748 ImageClass::External => unimplemented!(),
2749 };
2750 write!(self.out, "{fun_name}(")?;
2751 self.write_expr(image, ctx)?;
2752 // All textureSize calls requires an lod argument
2753 // except for multisampled samplers
2754 if !class.is_multisampled() {
2755 write!(self.out, ", 0")?;
2756 }
2757 write!(self.out, ")")?;
2758 if components != 1 || self.options.version.is_es() {
2759 write!(self.out, ".{}", back::COMPONENTS[components])?;
2760 }
2761 }
2762 crate::ImageQuery::NumSamples => {
2763 let fun_name = match class {
2764 ImageClass::Sampled { .. } | ImageClass::Depth { .. } => {
2765 "textureSamples"
2766 }
2767 ImageClass::Storage { .. } => "imageSamples",
2768 ImageClass::External => unimplemented!(),
2769 };
2770 write!(self.out, "{fun_name}(")?;
2771 self.write_expr(image, ctx)?;
2772 write!(self.out, ")",)?;
2773 }
2774 }
2775
2776 write!(self.out, ")")?;
2777 }
2778 Expression::Unary { op, expr } => {
2779 let operator_or_fn = match op {
2780 crate::UnaryOperator::Negate => "-",
2781 crate::UnaryOperator::LogicalNot => {
2782 match *ctx.resolve_type(expr, &self.module.types) {
2783 TypeInner::Vector { .. } => "not",
2784 _ => "!",
2785 }
2786 }
2787 crate::UnaryOperator::BitwiseNot => "~",
2788 };
2789 write!(self.out, "{operator_or_fn}(")?;
2790
2791 self.write_expr(expr, ctx)?;
2792
2793 write!(self.out, ")")?
2794 }
2795 // `Binary` we just write `left op right`, except when dealing with
2796 // comparison operations on vectors as they are implemented with
2797 // builtin functions.
2798 // Once again we wrap everything in parentheses to avoid precedence issues
2799 Expression::Binary {
2800 mut op,
2801 left,
2802 right,
2803 } => {
2804 // Holds `Some(function_name)` if the binary operation is
2805 // implemented as a function call
2806 use crate::{BinaryOperator as Bo, ScalarKind as Sk, TypeInner as Ti};
2807
2808 let left_inner = ctx.resolve_type(left, &self.module.types);
2809 let right_inner = ctx.resolve_type(right, &self.module.types);
2810
2811 let function = match (left_inner, right_inner) {
2812 (&Ti::Vector { scalar, .. }, &Ti::Vector { .. }) => match op {
2813 Bo::Less
2814 | Bo::LessEqual
2815 | Bo::Greater
2816 | Bo::GreaterEqual
2817 | Bo::Equal
2818 | Bo::NotEqual => BinaryOperation::VectorCompare,
2819 Bo::Modulo if scalar.kind == Sk::Float => BinaryOperation::Modulo,
2820 Bo::And if scalar.kind == Sk::Bool => {
2821 op = crate::BinaryOperator::LogicalAnd;
2822 BinaryOperation::VectorComponentWise
2823 }
2824 Bo::InclusiveOr if scalar.kind == Sk::Bool => {
2825 op = crate::BinaryOperator::LogicalOr;
2826 BinaryOperation::VectorComponentWise
2827 }
2828 _ => BinaryOperation::Other,
2829 },
2830 _ => match (left_inner.scalar_kind(), right_inner.scalar_kind()) {
2831 (Some(Sk::Float), _) | (_, Some(Sk::Float)) => match op {
2832 Bo::Modulo => BinaryOperation::Modulo,
2833 _ => BinaryOperation::Other,
2834 },
2835 (Some(Sk::Bool), Some(Sk::Bool)) => match op {
2836 Bo::InclusiveOr => {
2837 op = crate::BinaryOperator::LogicalOr;
2838 BinaryOperation::Other
2839 }
2840 Bo::And => {
2841 op = crate::BinaryOperator::LogicalAnd;
2842 BinaryOperation::Other
2843 }
2844 _ => BinaryOperation::Other,
2845 },
2846 _ => BinaryOperation::Other,
2847 },
2848 };
2849
2850 match function {
2851 BinaryOperation::VectorCompare => {
2852 let op_str = match op {
2853 Bo::Less => "lessThan(",
2854 Bo::LessEqual => "lessThanEqual(",
2855 Bo::Greater => "greaterThan(",
2856 Bo::GreaterEqual => "greaterThanEqual(",
2857 Bo::Equal => "equal(",
2858 Bo::NotEqual => "notEqual(",
2859 _ => unreachable!(),
2860 };
2861 write!(self.out, "{op_str}")?;
2862 self.write_expr(left, ctx)?;
2863 write!(self.out, ", ")?;
2864 self.write_expr(right, ctx)?;
2865 write!(self.out, ")")?;
2866 }
2867 BinaryOperation::VectorComponentWise => {
2868 self.write_value_type(left_inner)?;
2869 write!(self.out, "(")?;
2870
2871 let size = match *left_inner {
2872 Ti::Vector { size, .. } => size,
2873 _ => unreachable!(),
2874 };
2875
2876 for i in 0..size as usize {
2877 if i != 0 {
2878 write!(self.out, ", ")?;
2879 }
2880
2881 self.write_expr(left, ctx)?;
2882 write!(self.out, ".{}", back::COMPONENTS[i])?;
2883
2884 write!(self.out, " {} ", back::binary_operation_str(op))?;
2885
2886 self.write_expr(right, ctx)?;
2887 write!(self.out, ".{}", back::COMPONENTS[i])?;
2888 }
2889
2890 write!(self.out, ")")?;
2891 }
2892 // TODO: handle undefined behavior of BinaryOperator::Modulo
2893 //
2894 // sint:
2895 // if right == 0 return 0
2896 // if left == min(type_of(left)) && right == -1 return 0
2897 // if sign(left) == -1 || sign(right) == -1 return result as defined by WGSL
2898 //
2899 // uint:
2900 // if right == 0 return 0
2901 //
2902 // float:
2903 // if right == 0 return ? see https://github.com/gpuweb/gpuweb/issues/2798
2904 BinaryOperation::Modulo => {
2905 write!(self.out, "(")?;
2906
2907 // write `e1 - e2 * trunc(e1 / e2)`
2908 self.write_expr(left, ctx)?;
2909 write!(self.out, " - ")?;
2910 self.write_expr(right, ctx)?;
2911 write!(self.out, " * ")?;
2912 write!(self.out, "trunc(")?;
2913 self.write_expr(left, ctx)?;
2914 write!(self.out, " / ")?;
2915 self.write_expr(right, ctx)?;
2916 write!(self.out, ")")?;
2917
2918 write!(self.out, ")")?;
2919 }
2920 BinaryOperation::Other => {
2921 write!(self.out, "(")?;
2922
2923 self.write_expr(left, ctx)?;
2924 write!(self.out, " {} ", back::binary_operation_str(op))?;
2925 self.write_expr(right, ctx)?;
2926
2927 write!(self.out, ")")?;
2928 }
2929 }
2930 }
2931 // `Select` is written as `condition ? accept : reject`
2932 // We wrap everything in parentheses to avoid precedence issues
2933 Expression::Select {
2934 condition,
2935 accept,
2936 reject,
2937 } => {
2938 let cond_ty = ctx.resolve_type(condition, &self.module.types);
2939 let vec_select = if let TypeInner::Vector { .. } = *cond_ty {
2940 true
2941 } else {
2942 false
2943 };
2944
2945 // TODO: Boolean mix on desktop required GL_EXT_shader_integer_mix
2946 if vec_select {
2947 // Glsl defines that for mix when the condition is a boolean the first element
2948 // is picked if condition is false and the second if condition is true
2949 write!(self.out, "mix(")?;
2950 self.write_expr(reject, ctx)?;
2951 write!(self.out, ", ")?;
2952 self.write_expr(accept, ctx)?;
2953 write!(self.out, ", ")?;
2954 self.write_expr(condition, ctx)?;
2955 } else {
2956 write!(self.out, "(")?;
2957 self.write_expr(condition, ctx)?;
2958 write!(self.out, " ? ")?;
2959 self.write_expr(accept, ctx)?;
2960 write!(self.out, " : ")?;
2961 self.write_expr(reject, ctx)?;
2962 }
2963
2964 write!(self.out, ")")?
2965 }
2966 // `Derivative` is a function call to a glsl provided function
2967 Expression::Derivative { axis, ctrl, expr } => {
2968 use crate::{DerivativeAxis as Axis, DerivativeControl as Ctrl};
2969 let fun_name = if self.options.version.supports_derivative_control() {
2970 match (axis, ctrl) {
2971 (Axis::X, Ctrl::Coarse) => "dFdxCoarse",
2972 (Axis::X, Ctrl::Fine) => "dFdxFine",
2973 (Axis::X, Ctrl::None) => "dFdx",
2974 (Axis::Y, Ctrl::Coarse) => "dFdyCoarse",
2975 (Axis::Y, Ctrl::Fine) => "dFdyFine",
2976 (Axis::Y, Ctrl::None) => "dFdy",
2977 (Axis::Width, Ctrl::Coarse) => "fwidthCoarse",
2978 (Axis::Width, Ctrl::Fine) => "fwidthFine",
2979 (Axis::Width, Ctrl::None) => "fwidth",
2980 }
2981 } else {
2982 match axis {
2983 Axis::X => "dFdx",
2984 Axis::Y => "dFdy",
2985 Axis::Width => "fwidth",
2986 }
2987 };
2988 write!(self.out, "{fun_name}(")?;
2989 self.write_expr(expr, ctx)?;
2990 write!(self.out, ")")?
2991 }
2992 // `Relational` is a normal function call to some glsl provided functions
2993 Expression::Relational { fun, argument } => {
2994 use crate::RelationalFunction as Rf;
2995
2996 let fun_name = match fun {
2997 Rf::IsInf => "isinf",
2998 Rf::IsNan => "isnan",
2999 Rf::All => "all",
3000 Rf::Any => "any",
3001 };
3002 write!(self.out, "{fun_name}(")?;
3003
3004 self.write_expr(argument, ctx)?;
3005
3006 write!(self.out, ")")?
3007 }
3008 Expression::Math {
3009 fun,
3010 arg,
3011 arg1,
3012 arg2,
3013 arg3,
3014 } => {
3015 use crate::MathFunction as Mf;
3016
3017 let fun_name = match fun {
3018 // comparison
3019 Mf::Abs => "abs",
3020 Mf::Min => "min",
3021 Mf::Max => "max",
3022 Mf::Clamp => {
3023 let scalar_kind = ctx
3024 .resolve_type(arg, &self.module.types)
3025 .scalar_kind()
3026 .unwrap();
3027 match scalar_kind {
3028 crate::ScalarKind::Float => "clamp",
3029 // Clamp is undefined if min > max. In practice this means it can use a median-of-three
3030 // instruction to determine the value. This is fine according to the WGSL spec for float
3031 // clamp, but integer clamp _must_ use min-max. As such we write out min/max.
3032 _ => {
3033 write!(self.out, "min(max(")?;
3034 self.write_expr(arg, ctx)?;
3035 write!(self.out, ", ")?;
3036 self.write_expr(arg1.unwrap(), ctx)?;
3037 write!(self.out, "), ")?;
3038 self.write_expr(arg2.unwrap(), ctx)?;
3039 write!(self.out, ")")?;
3040
3041 return Ok(());
3042 }
3043 }
3044 }
3045 Mf::Saturate => {
3046 write!(self.out, "clamp(")?;
3047
3048 self.write_expr(arg, ctx)?;
3049
3050 match *ctx.resolve_type(arg, &self.module.types) {
3051 TypeInner::Vector { size, .. } => write!(
3052 self.out,
3053 ", vec{}(0.0), vec{0}(1.0)",
3054 common::vector_size_str(size)
3055 )?,
3056 _ => write!(self.out, ", 0.0, 1.0")?,
3057 }
3058
3059 write!(self.out, ")")?;
3060
3061 return Ok(());
3062 }
3063 // trigonometry
3064 Mf::Cos => "cos",
3065 Mf::Cosh => "cosh",
3066 Mf::Sin => "sin",
3067 Mf::Sinh => "sinh",
3068 Mf::Tan => "tan",
3069 Mf::Tanh => "tanh",
3070 Mf::Acos => "acos",
3071 Mf::Asin => "asin",
3072 Mf::Atan => "atan",
3073 Mf::Asinh => "asinh",
3074 Mf::Acosh => "acosh",
3075 Mf::Atanh => "atanh",
3076 Mf::Radians => "radians",
3077 Mf::Degrees => "degrees",
3078 // glsl doesn't have atan2 function
3079 // use two-argument variation of the atan function
3080 Mf::Atan2 => "atan",
3081 // decomposition
3082 Mf::Ceil => "ceil",
3083 Mf::Floor => "floor",
3084 Mf::Round => "roundEven",
3085 Mf::Fract => "fract",
3086 Mf::Trunc => "trunc",
3087 Mf::Modf => MODF_FUNCTION,
3088 Mf::Frexp => FREXP_FUNCTION,
3089 Mf::Ldexp => "ldexp",
3090 // exponent
3091 Mf::Exp => "exp",
3092 Mf::Exp2 => "exp2",
3093 Mf::Log => "log",
3094 Mf::Log2 => "log2",
3095 Mf::Pow => "pow",
3096 // geometry
3097 Mf::Dot => match *ctx.resolve_type(arg, &self.module.types) {
3098 TypeInner::Vector {
3099 scalar:
3100 crate::Scalar {
3101 kind: crate::ScalarKind::Float,
3102 ..
3103 },
3104 ..
3105 } => "dot",
3106 TypeInner::Vector { size, .. } => {
3107 return self.write_dot_product(arg, arg1.unwrap(), size as usize, ctx)
3108 }
3109 _ => unreachable!(
3110 "Correct TypeInner for dot product should be already validated"
3111 ),
3112 },
3113 fun @ (Mf::Dot4I8Packed | Mf::Dot4U8Packed) => {
3114 let conversion = match fun {
3115 Mf::Dot4I8Packed => "int",
3116 Mf::Dot4U8Packed => "",
3117 _ => unreachable!(),
3118 };
3119
3120 let arg1 = arg1.unwrap();
3121
3122 // Write parentheses around the dot product expression to prevent operators
3123 // with different precedences from applying earlier.
3124 write!(self.out, "(")?;
3125 for i in 0..4 {
3126 // Since `bitfieldExtract` only sign extends if the value is signed, we
3127 // need to convert the inputs to `int` in case of `Dot4I8Packed`. For
3128 // `Dot4U8Packed`, the code below only introduces parenthesis around
3129 // each factor, which aren't strictly needed because both operands are
3130 // baked, but which don't hurt either.
3131 write!(self.out, "bitfieldExtract({conversion}(")?;
3132 self.write_expr(arg, ctx)?;
3133 write!(self.out, "), {}, 8)", i * 8)?;
3134
3135 write!(self.out, " * bitfieldExtract({conversion}(")?;
3136 self.write_expr(arg1, ctx)?;
3137 write!(self.out, "), {}, 8)", i * 8)?;
3138
3139 if i != 3 {
3140 write!(self.out, " + ")?;
3141 }
3142 }
3143 write!(self.out, ")")?;
3144
3145 return Ok(());
3146 }
3147 Mf::Outer => "outerProduct",
3148 Mf::Cross => "cross",
3149 Mf::Distance => "distance",
3150 Mf::Length => "length",
3151 Mf::Normalize => "normalize",
3152 Mf::FaceForward => "faceforward",
3153 Mf::Reflect => "reflect",
3154 Mf::Refract => "refract",
3155 // computational
3156 Mf::Sign => "sign",
3157 Mf::Fma => {
3158 if self.options.version.supports_fma_function() {
3159 // Use the fma function when available
3160 "fma"
3161 } else {
3162 // No fma support. Transform the function call into an arithmetic expression
3163 write!(self.out, "(")?;
3164
3165 self.write_expr(arg, ctx)?;
3166 write!(self.out, " * ")?;
3167
3168 let arg1 =
3169 arg1.ok_or_else(|| Error::Custom("Missing fma arg1".to_owned()))?;
3170 self.write_expr(arg1, ctx)?;
3171 write!(self.out, " + ")?;
3172
3173 let arg2 =
3174 arg2.ok_or_else(|| Error::Custom("Missing fma arg2".to_owned()))?;
3175 self.write_expr(arg2, ctx)?;
3176 write!(self.out, ")")?;
3177
3178 return Ok(());
3179 }
3180 }
3181 Mf::Mix => "mix",
3182 Mf::Step => "step",
3183 Mf::SmoothStep => "smoothstep",
3184 Mf::Sqrt => "sqrt",
3185 Mf::InverseSqrt => "inversesqrt",
3186 Mf::Inverse => "inverse",
3187 Mf::Transpose => "transpose",
3188 Mf::Determinant => "determinant",
3189 Mf::QuantizeToF16 => match *ctx.resolve_type(arg, &self.module.types) {
3190 TypeInner::Scalar { .. } => {
3191 write!(self.out, "unpackHalf2x16(packHalf2x16(vec2(")?;
3192 self.write_expr(arg, ctx)?;
3193 write!(self.out, "))).x")?;
3194 return Ok(());
3195 }
3196 TypeInner::Vector {
3197 size: crate::VectorSize::Bi,
3198 ..
3199 } => {
3200 write!(self.out, "unpackHalf2x16(packHalf2x16(")?;
3201 self.write_expr(arg, ctx)?;
3202 write!(self.out, "))")?;
3203 return Ok(());
3204 }
3205 TypeInner::Vector {
3206 size: crate::VectorSize::Tri,
3207 ..
3208 } => {
3209 write!(self.out, "vec3(unpackHalf2x16(packHalf2x16(")?;
3210 self.write_expr(arg, ctx)?;
3211 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3212 self.write_expr(arg, ctx)?;
3213 write!(self.out, ".zz)).x)")?;
3214 return Ok(());
3215 }
3216 TypeInner::Vector {
3217 size: crate::VectorSize::Quad,
3218 ..
3219 } => {
3220 write!(self.out, "vec4(unpackHalf2x16(packHalf2x16(")?;
3221 self.write_expr(arg, ctx)?;
3222 write!(self.out, ".xy)), unpackHalf2x16(packHalf2x16(")?;
3223 self.write_expr(arg, ctx)?;
3224 write!(self.out, ".zw)))")?;
3225 return Ok(());
3226 }
3227 _ => unreachable!(
3228 "Correct TypeInner for QuantizeToF16 should be already validated"
3229 ),
3230 },
3231 // bits
3232 Mf::CountTrailingZeros => {
3233 match *ctx.resolve_type(arg, &self.module.types) {
3234 TypeInner::Vector { size, scalar, .. } => {
3235 let s = common::vector_size_str(size);
3236 if let crate::ScalarKind::Uint = scalar.kind {
3237 write!(self.out, "min(uvec{s}(findLSB(")?;
3238 self.write_expr(arg, ctx)?;
3239 write!(self.out, ")), uvec{s}(32u))")?;
3240 } else {
3241 write!(self.out, "ivec{s}(min(uvec{s}(findLSB(")?;
3242 self.write_expr(arg, ctx)?;
3243 write!(self.out, ")), uvec{s}(32u)))")?;
3244 }
3245 }
3246 TypeInner::Scalar(scalar) => {
3247 if let crate::ScalarKind::Uint = scalar.kind {
3248 write!(self.out, "min(uint(findLSB(")?;
3249 self.write_expr(arg, ctx)?;
3250 write!(self.out, ")), 32u)")?;
3251 } else {
3252 write!(self.out, "int(min(uint(findLSB(")?;
3253 self.write_expr(arg, ctx)?;
3254 write!(self.out, ")), 32u))")?;
3255 }
3256 }
3257 _ => unreachable!(),
3258 };
3259 return Ok(());
3260 }
3261 Mf::CountLeadingZeros => {
3262 if self.options.version.supports_integer_functions() {
3263 match *ctx.resolve_type(arg, &self.module.types) {
3264 TypeInner::Vector { size, scalar } => {
3265 let s = common::vector_size_str(size);
3266
3267 if let crate::ScalarKind::Uint = scalar.kind {
3268 write!(self.out, "uvec{s}(ivec{s}(31) - findMSB(")?;
3269 self.write_expr(arg, ctx)?;
3270 write!(self.out, "))")?;
3271 } else {
3272 write!(self.out, "mix(ivec{s}(31) - findMSB(")?;
3273 self.write_expr(arg, ctx)?;
3274 write!(self.out, "), ivec{s}(0), lessThan(")?;
3275 self.write_expr(arg, ctx)?;
3276 write!(self.out, ", ivec{s}(0)))")?;
3277 }
3278 }
3279 TypeInner::Scalar(scalar) => {
3280 if let crate::ScalarKind::Uint = scalar.kind {
3281 write!(self.out, "uint(31 - findMSB(")?;
3282 } else {
3283 write!(self.out, "(")?;
3284 self.write_expr(arg, ctx)?;
3285 write!(self.out, " < 0 ? 0 : 31 - findMSB(")?;
3286 }
3287
3288 self.write_expr(arg, ctx)?;
3289 write!(self.out, "))")?;
3290 }
3291 _ => unreachable!(),
3292 };
3293 } else {
3294 match *ctx.resolve_type(arg, &self.module.types) {
3295 TypeInner::Vector { size, scalar } => {
3296 let s = common::vector_size_str(size);
3297
3298 if let crate::ScalarKind::Uint = scalar.kind {
3299 write!(self.out, "uvec{s}(")?;
3300 write!(self.out, "vec{s}(31.0) - floor(log2(vec{s}(")?;
3301 self.write_expr(arg, ctx)?;
3302 write!(self.out, ") + 0.5)))")?;
3303 } else {
3304 write!(self.out, "ivec{s}(")?;
3305 write!(self.out, "mix(vec{s}(31.0) - floor(log2(vec{s}(")?;
3306 self.write_expr(arg, ctx)?;
3307 write!(self.out, ") + 0.5)), ")?;
3308 write!(self.out, "vec{s}(0.0), lessThan(")?;
3309 self.write_expr(arg, ctx)?;
3310 write!(self.out, ", ivec{s}(0u))))")?;
3311 }
3312 }
3313 TypeInner::Scalar(scalar) => {
3314 if let crate::ScalarKind::Uint = scalar.kind {
3315 write!(self.out, "uint(31.0 - floor(log2(float(")?;
3316 self.write_expr(arg, ctx)?;
3317 write!(self.out, ") + 0.5)))")?;
3318 } else {
3319 write!(self.out, "(")?;
3320 self.write_expr(arg, ctx)?;
3321 write!(self.out, " < 0 ? 0 : int(")?;
3322 write!(self.out, "31.0 - floor(log2(float(")?;
3323 self.write_expr(arg, ctx)?;
3324 write!(self.out, ") + 0.5))))")?;
3325 }
3326 }
3327 _ => unreachable!(),
3328 };
3329 }
3330
3331 return Ok(());
3332 }
3333 Mf::CountOneBits => "bitCount",
3334 Mf::ReverseBits => "bitfieldReverse",
3335 Mf::ExtractBits => {
3336 // The behavior of ExtractBits is undefined when offset + count > bit_width. We need
3337 // to first sanitize the offset and count first. If we don't do this, AMD and Intel chips
3338 // will return out-of-spec values if the extracted range is not within the bit width.
3339 //
3340 // This encodes the exact formula specified by the wgsl spec, without temporary values:
3341 // https://gpuweb.github.io/gpuweb/wgsl/#extractBits-unsigned-builtin
3342 //
3343 // w = sizeof(x) * 8
3344 // o = min(offset, w)
3345 // c = min(count, w - o)
3346 //
3347 // bitfieldExtract(x, o, c)
3348 //
3349 // extract_bits(e, min(offset, w), min(count, w - min(offset, w))))
3350 let scalar_bits = ctx
3351 .resolve_type(arg, &self.module.types)
3352 .scalar_width()
3353 .unwrap()
3354 * 8;
3355
3356 write!(self.out, "bitfieldExtract(")?;
3357 self.write_expr(arg, ctx)?;
3358 write!(self.out, ", int(min(")?;
3359 self.write_expr(arg1.unwrap(), ctx)?;
3360 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3361 self.write_expr(arg2.unwrap(), ctx)?;
3362 write!(self.out, ", {scalar_bits}u - min(")?;
3363 self.write_expr(arg1.unwrap(), ctx)?;
3364 write!(self.out, ", {scalar_bits}u))))")?;
3365
3366 return Ok(());
3367 }
3368 Mf::InsertBits => {
3369 // InsertBits has the same considerations as ExtractBits above
3370 let scalar_bits = ctx
3371 .resolve_type(arg, &self.module.types)
3372 .scalar_width()
3373 .unwrap()
3374 * 8;
3375
3376 write!(self.out, "bitfieldInsert(")?;
3377 self.write_expr(arg, ctx)?;
3378 write!(self.out, ", ")?;
3379 self.write_expr(arg1.unwrap(), ctx)?;
3380 write!(self.out, ", int(min(")?;
3381 self.write_expr(arg2.unwrap(), ctx)?;
3382 write!(self.out, ", {scalar_bits}u)), int(min(",)?;
3383 self.write_expr(arg3.unwrap(), ctx)?;
3384 write!(self.out, ", {scalar_bits}u - min(")?;
3385 self.write_expr(arg2.unwrap(), ctx)?;
3386 write!(self.out, ", {scalar_bits}u))))")?;
3387
3388 return Ok(());
3389 }
3390 Mf::FirstTrailingBit => "findLSB",
3391 Mf::FirstLeadingBit => "findMSB",
3392 // data packing
3393 Mf::Pack4x8snorm => {
3394 if self.options.version.supports_pack_unpack_4x8() {
3395 "packSnorm4x8"
3396 } else {
3397 // polyfill should go here. Needs a corresponding entry in `need_bake_expression`
3398 return Err(Error::UnsupportedExternal("packSnorm4x8".into()));
3399 }
3400 }
3401 Mf::Pack4x8unorm => {
3402 if self.options.version.supports_pack_unpack_4x8() {
3403 "packUnorm4x8"
3404 } else {
3405 return Err(Error::UnsupportedExternal("packUnorm4x8".to_owned()));
3406 }
3407 }
3408 Mf::Pack2x16snorm => {
3409 if self.options.version.supports_pack_unpack_snorm_2x16() {
3410 "packSnorm2x16"
3411 } else {
3412 return Err(Error::UnsupportedExternal("packSnorm2x16".to_owned()));
3413 }
3414 }
3415 Mf::Pack2x16unorm => {
3416 if self.options.version.supports_pack_unpack_unorm_2x16() {
3417 "packUnorm2x16"
3418 } else {
3419 return Err(Error::UnsupportedExternal("packUnorm2x16".to_owned()));
3420 }
3421 }
3422 Mf::Pack2x16float => {
3423 if self.options.version.supports_pack_unpack_half_2x16() {
3424 "packHalf2x16"
3425 } else {
3426 return Err(Error::UnsupportedExternal("packHalf2x16".to_owned()));
3427 }
3428 }
3429
3430 fun @ (Mf::Pack4xI8 | Mf::Pack4xU8 | Mf::Pack4xI8Clamp | Mf::Pack4xU8Clamp) => {
3431 let was_signed = matches!(fun, Mf::Pack4xI8 | Mf::Pack4xI8Clamp);
3432 let clamp_bounds = match fun {
3433 Mf::Pack4xI8Clamp => Some(("-128", "127")),
3434 Mf::Pack4xU8Clamp => Some(("0", "255")),
3435 _ => None,
3436 };
3437 let const_suffix = if was_signed { "" } else { "u" };
3438 if was_signed {
3439 write!(self.out, "uint(")?;
3440 }
3441 let write_arg = |this: &mut Self| -> BackendResult {
3442 if let Some((min, max)) = clamp_bounds {
3443 write!(this.out, "clamp(")?;
3444 this.write_expr(arg, ctx)?;
3445 write!(this.out, ", {min}{const_suffix}, {max}{const_suffix})")?;
3446 } else {
3447 this.write_expr(arg, ctx)?;
3448 }
3449 Ok(())
3450 };
3451 write!(self.out, "(")?;
3452 write_arg(self)?;
3453 write!(self.out, "[0] & 0xFF{const_suffix}) | ((")?;
3454 write_arg(self)?;
3455 write!(self.out, "[1] & 0xFF{const_suffix}) << 8) | ((")?;
3456 write_arg(self)?;
3457 write!(self.out, "[2] & 0xFF{const_suffix}) << 16) | ((")?;
3458 write_arg(self)?;
3459 write!(self.out, "[3] & 0xFF{const_suffix}) << 24)")?;
3460 if was_signed {
3461 write!(self.out, ")")?;
3462 }
3463
3464 return Ok(());
3465 }
3466 // data unpacking
3467 Mf::Unpack2x16float => {
3468 if self.options.version.supports_pack_unpack_half_2x16() {
3469 "unpackHalf2x16"
3470 } else {
3471 return Err(Error::UnsupportedExternal("unpackHalf2x16".into()));
3472 }
3473 }
3474 Mf::Unpack2x16snorm => {
3475 if self.options.version.supports_pack_unpack_snorm_2x16() {
3476 "unpackSnorm2x16"
3477 } else {
3478 let scale = 32767;
3479
3480 write!(self.out, "(vec2(ivec2(")?;
3481 self.write_expr(arg, ctx)?;
3482 write!(self.out, " << 16, ")?;
3483 self.write_expr(arg, ctx)?;
3484 write!(self.out, ") >> 16) / {scale}.0)")?;
3485 return Ok(());
3486 }
3487 }
3488 Mf::Unpack2x16unorm => {
3489 if self.options.version.supports_pack_unpack_unorm_2x16() {
3490 "unpackUnorm2x16"
3491 } else {
3492 let scale = 65535;
3493
3494 write!(self.out, "(vec2(")?;
3495 self.write_expr(arg, ctx)?;
3496 write!(self.out, " & 0xFFFFu, ")?;
3497 self.write_expr(arg, ctx)?;
3498 write!(self.out, " >> 16) / {scale}.0)")?;
3499 return Ok(());
3500 }
3501 }
3502 Mf::Unpack4x8snorm => {
3503 if self.options.version.supports_pack_unpack_4x8() {
3504 "unpackSnorm4x8"
3505 } else {
3506 let scale = 127;
3507
3508 write!(self.out, "(vec4(ivec4(")?;
3509 self.write_expr(arg, ctx)?;
3510 write!(self.out, " << 24, ")?;
3511 self.write_expr(arg, ctx)?;
3512 write!(self.out, " << 16, ")?;
3513 self.write_expr(arg, ctx)?;
3514 write!(self.out, " << 8, ")?;
3515 self.write_expr(arg, ctx)?;
3516 write!(self.out, ") >> 24) / {scale}.0)")?;
3517 return Ok(());
3518 }
3519 }
3520 Mf::Unpack4x8unorm => {
3521 if self.options.version.supports_pack_unpack_4x8() {
3522 "unpackUnorm4x8"
3523 } else {
3524 let scale = 255;
3525
3526 write!(self.out, "(vec4(")?;
3527 self.write_expr(arg, ctx)?;
3528 write!(self.out, " & 0xFFu, ")?;
3529 self.write_expr(arg, ctx)?;
3530 write!(self.out, " >> 8 & 0xFFu, ")?;
3531 self.write_expr(arg, ctx)?;
3532 write!(self.out, " >> 16 & 0xFFu, ")?;
3533 self.write_expr(arg, ctx)?;
3534 write!(self.out, " >> 24) / {scale}.0)")?;
3535 return Ok(());
3536 }
3537 }
3538 fun @ (Mf::Unpack4xI8 | Mf::Unpack4xU8) => {
3539 let sign_prefix = match fun {
3540 Mf::Unpack4xI8 => 'i',
3541 Mf::Unpack4xU8 => 'u',
3542 _ => unreachable!(),
3543 };
3544 write!(self.out, "{sign_prefix}vec4(")?;
3545 for i in 0..4 {
3546 write!(self.out, "bitfieldExtract(")?;
3547 // Since bitfieldExtract only sign extends if the value is signed, this
3548 // cast is needed
3549 match fun {
3550 Mf::Unpack4xI8 => {
3551 write!(self.out, "int(")?;
3552 self.write_expr(arg, ctx)?;
3553 write!(self.out, ")")?;
3554 }
3555 Mf::Unpack4xU8 => self.write_expr(arg, ctx)?,
3556 _ => unreachable!(),
3557 };
3558 write!(self.out, ", {}, 8)", i * 8)?;
3559 if i != 3 {
3560 write!(self.out, ", ")?;
3561 }
3562 }
3563 write!(self.out, ")")?;
3564
3565 return Ok(());
3566 }
3567 };
3568
3569 let extract_bits = fun == Mf::ExtractBits;
3570 let insert_bits = fun == Mf::InsertBits;
3571
3572 // Some GLSL functions always return signed integers (like findMSB),
3573 // so they need to be cast to uint if the argument is also an uint.
3574 let ret_might_need_int_to_uint = matches!(
3575 fun,
3576 Mf::FirstTrailingBit | Mf::FirstLeadingBit | Mf::CountOneBits | Mf::Abs
3577 );
3578
3579 // Some GLSL functions only accept signed integers (like abs),
3580 // so they need their argument cast from uint to int.
3581 let arg_might_need_uint_to_int = matches!(fun, Mf::Abs);
3582
3583 // Check if the argument is an unsigned integer and return the vector size
3584 // in case it's a vector
3585 let maybe_uint_size = match *ctx.resolve_type(arg, &self.module.types) {
3586 TypeInner::Scalar(crate::Scalar {
3587 kind: crate::ScalarKind::Uint,
3588 ..
3589 }) => Some(None),
3590 TypeInner::Vector {
3591 scalar:
3592 crate::Scalar {
3593 kind: crate::ScalarKind::Uint,
3594 ..
3595 },
3596 size,
3597 } => Some(Some(size)),
3598 _ => None,
3599 };
3600
3601 // Cast to uint if the function needs it
3602 if ret_might_need_int_to_uint {
3603 if let Some(maybe_size) = maybe_uint_size {
3604 match maybe_size {
3605 Some(size) => write!(self.out, "uvec{}(", size as u8)?,
3606 None => write!(self.out, "uint(")?,
3607 }
3608 }
3609 }
3610
3611 write!(self.out, "{fun_name}(")?;
3612
3613 // Cast to int if the function needs it
3614 if arg_might_need_uint_to_int {
3615 if let Some(maybe_size) = maybe_uint_size {
3616 match maybe_size {
3617 Some(size) => write!(self.out, "ivec{}(", size as u8)?,
3618 None => write!(self.out, "int(")?,
3619 }
3620 }
3621 }
3622
3623 self.write_expr(arg, ctx)?;
3624
3625 // Close the cast from uint to int
3626 if arg_might_need_uint_to_int && maybe_uint_size.is_some() {
3627 write!(self.out, ")")?
3628 }
3629
3630 if let Some(arg) = arg1 {
3631 write!(self.out, ", ")?;
3632 if extract_bits {
3633 write!(self.out, "int(")?;
3634 self.write_expr(arg, ctx)?;
3635 write!(self.out, ")")?;
3636 } else {
3637 self.write_expr(arg, ctx)?;
3638 }
3639 }
3640 if let Some(arg) = arg2 {
3641 write!(self.out, ", ")?;
3642 if extract_bits || insert_bits {
3643 write!(self.out, "int(")?;
3644 self.write_expr(arg, ctx)?;
3645 write!(self.out, ")")?;
3646 } else {
3647 self.write_expr(arg, ctx)?;
3648 }
3649 }
3650 if let Some(arg) = arg3 {
3651 write!(self.out, ", ")?;
3652 if insert_bits {
3653 write!(self.out, "int(")?;
3654 self.write_expr(arg, ctx)?;
3655 write!(self.out, ")")?;
3656 } else {
3657 self.write_expr(arg, ctx)?;
3658 }
3659 }
3660 write!(self.out, ")")?;
3661
3662 // Close the cast from int to uint
3663 if ret_might_need_int_to_uint && maybe_uint_size.is_some() {
3664 write!(self.out, ")")?
3665 }
3666 }
3667 // `As` is always a call.
3668 // If `convert` is true the function name is the type
3669 // Else the function name is one of the glsl provided bitcast functions
3670 Expression::As {
3671 expr,
3672 kind: target_kind,
3673 convert,
3674 } => {
3675 let inner = ctx.resolve_type(expr, &self.module.types);
3676 match convert {
3677 Some(width) => {
3678 // this is similar to `write_type`, but with the target kind
3679 let scalar = glsl_scalar(crate::Scalar {
3680 kind: target_kind,
3681 width,
3682 })?;
3683 match *inner {
3684 TypeInner::Matrix { columns, rows, .. } => write!(
3685 self.out,
3686 "{}mat{}x{}",
3687 scalar.prefix, columns as u8, rows as u8
3688 )?,
3689 TypeInner::Vector { size, .. } => {
3690 write!(self.out, "{}vec{}", scalar.prefix, size as u8)?
3691 }
3692 _ => write!(self.out, "{}", scalar.full)?,
3693 }
3694
3695 write!(self.out, "(")?;
3696 self.write_expr(expr, ctx)?;
3697 write!(self.out, ")")?
3698 }
3699 None => {
3700 use crate::ScalarKind as Sk;
3701
3702 let target_vector_type = match *inner {
3703 TypeInner::Vector { size, scalar } => Some(TypeInner::Vector {
3704 size,
3705 scalar: crate::Scalar {
3706 kind: target_kind,
3707 width: scalar.width,
3708 },
3709 }),
3710 _ => None,
3711 };
3712
3713 let source_kind = inner.scalar_kind().unwrap();
3714
3715 match (source_kind, target_kind, target_vector_type) {
3716 // No conversion needed
3717 (Sk::Sint, Sk::Sint, _)
3718 | (Sk::Uint, Sk::Uint, _)
3719 | (Sk::Float, Sk::Float, _)
3720 | (Sk::Bool, Sk::Bool, _) => {
3721 self.write_expr(expr, ctx)?;
3722 return Ok(());
3723 }
3724
3725 // Cast to/from floats
3726 (Sk::Float, Sk::Sint, _) => write!(self.out, "floatBitsToInt")?,
3727 (Sk::Float, Sk::Uint, _) => write!(self.out, "floatBitsToUint")?,
3728 (Sk::Sint, Sk::Float, _) => write!(self.out, "intBitsToFloat")?,
3729 (Sk::Uint, Sk::Float, _) => write!(self.out, "uintBitsToFloat")?,
3730
3731 // Cast between vector types
3732 (_, _, Some(vector)) => {
3733 self.write_value_type(&vector)?;
3734 }
3735
3736 // There is no way to bitcast between Uint/Sint in glsl. Use constructor conversion
3737 (Sk::Uint | Sk::Bool, Sk::Sint, None) => write!(self.out, "int")?,
3738 (Sk::Sint | Sk::Bool, Sk::Uint, None) => write!(self.out, "uint")?,
3739 (Sk::Bool, Sk::Float, None) => write!(self.out, "float")?,
3740 (Sk::Sint | Sk::Uint | Sk::Float, Sk::Bool, None) => {
3741 write!(self.out, "bool")?
3742 }
3743
3744 (Sk::AbstractInt | Sk::AbstractFloat, _, _)
3745 | (_, Sk::AbstractInt | Sk::AbstractFloat, _) => unreachable!(),
3746 };
3747
3748 write!(self.out, "(")?;
3749 self.write_expr(expr, ctx)?;
3750 write!(self.out, ")")?;
3751 }
3752 }
3753 }
3754 // These expressions never show up in `Emit`.
3755 Expression::CallResult(_)
3756 | Expression::AtomicResult { .. }
3757 | Expression::RayQueryProceedResult
3758 | Expression::WorkGroupUniformLoadResult { .. }
3759 | Expression::SubgroupOperationResult { .. }
3760 | Expression::SubgroupBallotResult => unreachable!(),
3761 // `ArrayLength` is written as `expr.length()` and we convert it to a uint
3762 Expression::ArrayLength(expr) => {
3763 write!(self.out, "uint(")?;
3764 self.write_expr(expr, ctx)?;
3765 write!(self.out, ".length())")?
3766 }
3767 // not supported yet
3768 Expression::RayQueryGetIntersection { .. }
3769 | Expression::RayQueryVertexPositions { .. }
3770 | Expression::CooperativeLoad { .. }
3771 | Expression::CooperativeMultiplyAdd { .. } => unreachable!(),
3772 }
3773
3774 Ok(())
3775 }
3776
3777 /// Helper function to write the local holding the clamped lod
3778 fn write_clamped_lod(
3779 &mut self,
3780 ctx: &back::FunctionCtx,
3781 expr: Handle<crate::Expression>,
3782 image: Handle<crate::Expression>,
3783 level_expr: Handle<crate::Expression>,
3784 ) -> Result<(), Error> {
3785 // Define our local and start a call to `clamp`
3786 write!(
3787 self.out,
3788 "int {}{} = clamp(",
3789 Baked(expr),
3790 CLAMPED_LOD_SUFFIX
3791 )?;
3792 // Write the lod that will be clamped
3793 self.write_expr(level_expr, ctx)?;
3794 // Set the min value to 0 and start a call to `textureQueryLevels` to get
3795 // the maximum value
3796 write!(self.out, ", 0, textureQueryLevels(")?;
3797 // Write the target image as an argument to `textureQueryLevels`
3798 self.write_expr(image, ctx)?;
3799 // Close the call to `textureQueryLevels` subtract 1 from it since
3800 // the lod argument is 0 based, close the `clamp` call and end the
3801 // local declaration statement.
3802 writeln!(self.out, ") - 1);")?;
3803
3804 Ok(())
3805 }
3806
3807 // Helper method used to retrieve how many elements a coordinate vector
3808 // for the images operations need.
3809 fn get_coordinate_vector_size(&self, dim: crate::ImageDimension, arrayed: bool) -> u8 {
3810 // openGL es doesn't have 1D images so we need workaround it
3811 let tex_1d_hack = dim == crate::ImageDimension::D1 && self.options.version.is_es();
3812 // Get how many components the coordinate vector needs for the dimensions only
3813 let tex_coord_size = match dim {
3814 crate::ImageDimension::D1 => 1,
3815 crate::ImageDimension::D2 => 2,
3816 crate::ImageDimension::D3 => 3,
3817 crate::ImageDimension::Cube => 2,
3818 };
3819 // Calculate the true size of the coordinate vector by adding 1 for arrayed images
3820 // and another 1 if we need to workaround 1D images by making them 2D
3821 tex_coord_size + tex_1d_hack as u8 + arrayed as u8
3822 }
3823
3824 /// Helper method to write the coordinate vector for image operations
3825 fn write_texture_coord(
3826 &mut self,
3827 ctx: &back::FunctionCtx,
3828 vector_size: u8,
3829 coordinate: Handle<crate::Expression>,
3830 array_index: Option<Handle<crate::Expression>>,
3831 // Emulate 1D images as 2D for profiles that don't support it (glsl es)
3832 tex_1d_hack: bool,
3833 ) -> Result<(), Error> {
3834 match array_index {
3835 // If the image needs an array indice we need to add it to the end of our
3836 // coordinate vector, to do so we will use the `ivec(ivec, scalar)`
3837 // constructor notation (NOTE: the inner `ivec` can also be a scalar, this
3838 // is important for 1D arrayed images).
3839 Some(layer_expr) => {
3840 write!(self.out, "ivec{vector_size}(")?;
3841 self.write_expr(coordinate, ctx)?;
3842 write!(self.out, ", ")?;
3843 // If we are replacing sampler1D with sampler2D we also need
3844 // to add another zero to the coordinates vector for the y component
3845 if tex_1d_hack {
3846 write!(self.out, "0, ")?;
3847 }
3848 self.write_expr(layer_expr, ctx)?;
3849 write!(self.out, ")")?;
3850 }
3851 // Otherwise write just the expression (and the 1D hack if needed)
3852 None => {
3853 let uvec_size = match *ctx.resolve_type(coordinate, &self.module.types) {
3854 TypeInner::Scalar(crate::Scalar {
3855 kind: crate::ScalarKind::Uint,
3856 ..
3857 }) => Some(None),
3858 TypeInner::Vector {
3859 size,
3860 scalar:
3861 crate::Scalar {
3862 kind: crate::ScalarKind::Uint,
3863 ..
3864 },
3865 } => Some(Some(size as u32)),
3866 _ => None,
3867 };
3868 if tex_1d_hack {
3869 write!(self.out, "ivec2(")?;
3870 } else if uvec_size.is_some() {
3871 match uvec_size {
3872 Some(None) => write!(self.out, "int(")?,
3873 Some(Some(size)) => write!(self.out, "ivec{size}(")?,
3874 _ => {}
3875 }
3876 }
3877 self.write_expr(coordinate, ctx)?;
3878 if tex_1d_hack {
3879 write!(self.out, ", 0)")?;
3880 } else if uvec_size.is_some() {
3881 write!(self.out, ")")?;
3882 }
3883 }
3884 }
3885
3886 Ok(())
3887 }
3888
3889 /// Helper method to write the `ImageStore` statement
3890 fn write_image_store(
3891 &mut self,
3892 ctx: &back::FunctionCtx,
3893 image: Handle<crate::Expression>,
3894 coordinate: Handle<crate::Expression>,
3895 array_index: Option<Handle<crate::Expression>>,
3896 value: Handle<crate::Expression>,
3897 ) -> Result<(), Error> {
3898 use crate::ImageDimension as IDim;
3899
3900 // NOTE: openGL requires that `imageStore`s have no effects when the texel is invalid
3901 // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)
3902
3903 // This will only panic if the module is invalid
3904 let dim = match *ctx.resolve_type(image, &self.module.types) {
3905 TypeInner::Image { dim, .. } => dim,
3906 _ => unreachable!(),
3907 };
3908
3909 // Begin our call to `imageStore`
3910 write!(self.out, "imageStore(")?;
3911 self.write_expr(image, ctx)?;
3912 // Separate the image argument from the coordinates
3913 write!(self.out, ", ")?;
3914
3915 // openGL es doesn't have 1D images so we need workaround it
3916 let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
3917 // Write the coordinate vector
3918 self.write_texture_coord(
3919 ctx,
3920 // Get the size of the coordinate vector
3921 self.get_coordinate_vector_size(dim, array_index.is_some()),
3922 coordinate,
3923 array_index,
3924 tex_1d_hack,
3925 )?;
3926
3927 // Separate the coordinate from the value to write and write the expression
3928 // of the value to write.
3929 write!(self.out, ", ")?;
3930 self.write_expr(value, ctx)?;
3931 // End the call to `imageStore` and the statement.
3932 writeln!(self.out, ");")?;
3933
3934 Ok(())
3935 }
3936
3937 /// Helper method to write the `ImageAtomic` statement
    fn write_image_atomic(
        &mut self,
        ctx: &back::FunctionCtx,
        image: Handle<crate::Expression>,
        coordinate: Handle<crate::Expression>,
        array_index: Option<Handle<crate::Expression>>,
        fun: crate::AtomicFunction,
        value: Handle<crate::Expression>,
    ) -> Result<(), Error> {
        use crate::ImageDimension as IDim;

        // Emits `imageAtomic<Fun>(<image>, <coord>, <value>);` for the
        // `ImageAtomic` statement.

        // NOTE: openGL requires that `imageAtomic`s have no effects when the texel is invalid
        // so we don't need to generate bounds checks (OpenGL 4.2 Core §3.9.20)

        // Extract the image dimensionality.
        // This will only panic if the module is invalid
        let dim = match *ctx.resolve_type(image, &self.module.types) {
            TypeInner::Image { dim, .. } => dim,
            _ => unreachable!(),
        };

        // Begin our call to `imageAtomic`. The specific variant (Add, Min,
        // Max, And, Or, Xor, Exchange, ...) is selected by the function name
        // suffix produced by `to_glsl`.
        let fun_str = fun.to_glsl();
        write!(self.out, "imageAtomic{fun_str}(")?;
        self.write_expr(image, ctx)?;
        // Separate the image argument from the coordinates
        write!(self.out, ", ")?;

        // openGL es doesn't have 1D images so we need workaround it
        let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
        // Write the coordinate vector
        self.write_texture_coord(
            ctx,
            // Get the size of the coordinate vector.
            // NOTE(review): unlike `write_image_store`, the size here is
            // computed with `arrayed == false` even though `array_index` is
            // still forwarded below; for an arrayed image this would emit an
            // `ivec{n}` constructor with one component too few. Presumably
            // arrayed image atomics are rejected before reaching this point —
            // TODO confirm against validation/caller.
            self.get_coordinate_vector_size(dim, false),
            coordinate,
            array_index,
            tex_1d_hack,
        )?;

        // Separate the coordinate from the value to write and write the expression
        // of the value to write.
        write!(self.out, ", ")?;
        self.write_expr(value, ctx)?;
        // End the call to `imageAtomic` and the statement.
        writeln!(self.out, ");")?;

        Ok(())
    }
3986
    /// Helper method for writing an `ImageLoad` expression.
    ///
    /// `handle` is the handle of the `ImageLoad` expression itself. Under the
    /// `Restrict` bounds-check policy it is used (via [`Baked`] plus
    /// [`CLAMPED_LOD_SUFFIX`]) to name a previously emitted local that holds
    /// the clamped lod.
    ///
    /// # Errors
    ///
    /// Returns [`Error::Custom`] when loading from a depth texture, which has
    /// no corresponding GLSL builtin.
    #[allow(clippy::too_many_arguments)]
    fn write_image_load(
        &mut self,
        handle: Handle<crate::Expression>,
        ctx: &back::FunctionCtx,
        image: Handle<crate::Expression>,
        coordinate: Handle<crate::Expression>,
        array_index: Option<Handle<crate::Expression>>,
        sample: Option<Handle<crate::Expression>>,
        level: Option<Handle<crate::Expression>>,
    ) -> Result<(), Error> {
        use crate::ImageDimension as IDim;

        // `ImageLoad` is a bit complicated.
        // There are two functions one for sampled
        // images another for storage images, the former uses `texelFetch` and the
        // latter uses `imageLoad`.
        //
        // Furthermore we have `level` which is always `Some` for sampled images
        // and `None` for storage images, so we end up with two functions:
        // - `texelFetch(image, coordinate, level)` for sampled images
        // - `imageLoad(image, coordinate)` for storage images
        //
        // Finally we also have to consider bounds checking, for storage images
        // this is easy since openGL requires that invalid texels always return
        // 0, for sampled images we need to either verify that all arguments are
        // in bounds (`ReadZeroSkipWrite`) or make them a valid texel (`Restrict`).

        // This will only panic if the module is invalid
        let (dim, class) = match *ctx.resolve_type(image, &self.module.types) {
            TypeInner::Image {
                dim,
                arrayed: _,
                class,
            } => (dim, class),
            _ => unreachable!(),
        };

        // Get the name of the function to be used for the load operation
        // and the policy to be used with it.
        let (fun_name, policy) = match class {
            // Sampled images inherit the policy from the user passed policies
            crate::ImageClass::Sampled { .. } => ("texelFetch", self.policies.image_load),
            crate::ImageClass::Storage { .. } => {
                // OpenGL ES 3.1 mentions in Chapter "8.22 Texture Image Loads and Stores" that:
                // "Invalid image loads will return a vector where the value of R, G, and B components
                // is 0 and the value of the A component is undefined."
                //
                // OpenGL 4.2 Core mentions in Chapter "3.9.20 Texture Image Loads and Stores" that:
                // "Invalid image loads will return zero."
                //
                // So, we only inject bounds checks for ES
                let policy = if self.options.version.is_es() {
                    self.policies.image_load
                } else {
                    proc::BoundsCheckPolicy::Unchecked
                };
                ("imageLoad", policy)
            }
            // TODO: Is there even a function for this?
            crate::ImageClass::Depth { multi: _ } => {
                return Err(Error::Custom(
                    "WGSL `textureLoad` from depth textures is not supported in GLSL".to_string(),
                ))
            }
            // External images have no GLSL lowering here yet.
            crate::ImageClass::External => unimplemented!(),
        };

        // openGL es doesn't have 1D images so we need workaround it
        let tex_1d_hack = dim == IDim::D1 && self.options.version.is_es();
        // Get the size of the coordinate vector
        let vector_size = self.get_coordinate_vector_size(dim, array_index.is_some());

        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // To write the bounds checks for `ReadZeroSkipWrite` we will use a
            // ternary operator since we are in the middle of an expression and
            // need to return a value.
            //
            // NOTE: glsl does short circuit when evaluating logical
            // expressions so we can be sure that after we test a
            // condition it will be true for the next ones

            // Write parentheses around the ternary operator to prevent problems with
            // expressions emitted before or after it having more precedence
            write!(self.out, "(",)?;

            // The lod check needs to precede the size check since we need
            // to use the lod to get the size of the image at that level.
            if let Some(level_expr) = level {
                self.write_expr(level_expr, ctx)?;
                write!(self.out, " < textureQueryLevels(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // Check that the sample arguments doesn't exceed the number of samples
            if let Some(sample_expr) = sample {
                self.write_expr(sample_expr, ctx)?;
                write!(self.out, " < textureSamples(",)?;
                self.write_expr(image, ctx)?;
                // Chain the next check
                write!(self.out, ") && ")?;
            }

            // We now need to write the size checks for the coordinates and array index
            // first we write the comparison function in case the image is 1D non arrayed
            // (and no 1D to 2D hack was needed) we are comparing scalars so the less than
            // operator will suffice, but otherwise we'll be comparing two vectors so we'll
            // need to use the `lessThan` function but it returns a vector of booleans (one
            // for each comparison) so we need to fold it all in one scalar boolean, since
            // we want all comparisons to pass we use the `all` function which will only
            // return `true` if all the elements of the boolean vector are also `true`.
            //
            // So we'll end with one of the following forms
            // - `coord < textureSize(image, lod)` for 1D images
            // - `all(lessThan(coord, textureSize(image, lod)))` for normal images
            // - `all(lessThan(ivec(coord, array_index), textureSize(image, lod)))`
            //   for arrayed images
            // - `all(lessThan(coord, textureSize(image)))` for multi sampled images

            if vector_size != 1 {
                write!(self.out, "all(lessThan(")?;
            }

            // Write the coordinate vector
            self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

            if vector_size != 1 {
                // If we used the `lessThan` function we need to separate the
                // coordinates from the image size.
                write!(self.out, ", ")?;
            } else {
                // If we didn't use it (ie. 1D images) we perform the comparison
                // using the less than operator.
                write!(self.out, " < ")?;
            }

            // Call `textureSize` to get our image size
            write!(self.out, "textureSize(")?;
            self.write_expr(image, ctx)?;
            // `textureSize` uses the lod as a second argument for mipmapped images
            if let Some(level_expr) = level {
                // Separate the image from the lod
                write!(self.out, ", ")?;
                self.write_expr(level_expr, ctx)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            if vector_size != 1 {
                // Close the `all` and `lessThan` calls
                write!(self.out, "))")?;
            }

            // Finally end the condition part of the ternary operator
            write!(self.out, " ? ")?;
        }

        // Begin the call to the function used to load the texel
        write!(self.out, "{fun_name}(")?;
        self.write_expr(image, ctx)?;
        write!(self.out, ", ")?;

        // If we are using `Restrict` bounds checking we need to pass valid texel
        // coordinates, to do so we use the `clamp` function to get a value between
        // 0 and the image size - 1 (indexing begins at 0)
        if let proc::BoundsCheckPolicy::Restrict = policy {
            write!(self.out, "clamp(")?;
        }

        // Write the coordinate vector
        self.write_texture_coord(ctx, vector_size, coordinate, array_index, tex_1d_hack)?;

        // If we are using `Restrict` bounds checking we need to write the rest of the
        // clamp we initiated before writing the coordinates.
        if let proc::BoundsCheckPolicy::Restrict = policy {
            // Write the min value 0
            if vector_size == 1 {
                write!(self.out, ", 0")?;
            } else {
                write!(self.out, ", ivec{vector_size}(0)")?;
            }
            // Start the `textureSize` call to use as the max value.
            write!(self.out, ", textureSize(")?;
            self.write_expr(image, ctx)?;
            // If the image is mipmapped we need to add the lod argument to the
            // `textureSize` call, but this needs to be the clamped lod, this should
            // have been generated earlier and put in a local.
            if class.is_mipmapped() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }
            // Close the `textureSize` call
            write!(self.out, ")")?;

            // Subtract 1 from the `textureSize` call since the coordinates are zero based.
            if vector_size == 1 {
                write!(self.out, " - 1")?;
            } else {
                write!(self.out, " - ivec{vector_size}(1)")?;
            }

            // Close the `clamp` call
            write!(self.out, ")")?;

            // Add the clamped lod (if present) as the second argument to the
            // image load function.
            if level.is_some() {
                write!(self.out, ", {}{}", Baked(handle), CLAMPED_LOD_SUFFIX)?;
            }

            // If a sample argument is needed we need to clamp it between 0 and
            // the number of samples the image has.
            if let Some(sample_expr) = sample {
                write!(self.out, ", clamp(")?;
                self.write_expr(sample_expr, ctx)?;
                // Set the min value to 0 and start the call to `textureSamples`
                write!(self.out, ", 0, textureSamples(")?;
                self.write_expr(image, ctx)?;
                // Close the `textureSamples` call, subtract 1 from it since the sample
                // argument is zero based, and close the `clamp` call
                writeln!(self.out, ") - 1)")?;
            }
        } else if let Some(sample_or_level) = sample.or(level) {
            // GLSL only support SInt on this field while WGSL support also UInt
            let cast_to_int = matches!(
                *ctx.resolve_type(sample_or_level, &self.module.types),
                TypeInner::Scalar(crate::Scalar {
                    kind: crate::ScalarKind::Uint,
                    ..
                })
            );

            // If no bounds checking is need just add the sample or level argument
            // after the coordinates
            write!(self.out, ", ")?;

            if cast_to_int {
                write!(self.out, "int(")?;
            }

            self.write_expr(sample_or_level, ctx)?;

            if cast_to_int {
                write!(self.out, ")")?;
            }
        }

        // Close the image load function.
        write!(self.out, ")")?;

        // If we were using the `ReadZeroSkipWrite` policy we need to end the first branch
        // (which is taken if the condition is `true`) with a colon (`:`) and write the
        // second branch which is just a 0 value.
        if let proc::BoundsCheckPolicy::ReadZeroSkipWrite = policy {
            // Get the kind of the output value.
            let kind = match class {
                // Only sampled images can reach here since storage images
                // don't need bounds checks and depth images aren't implemented
                crate::ImageClass::Sampled { kind, .. } => kind,
                _ => unreachable!(),
            };

            // End the first branch
            write!(self.out, " : ")?;
            // Write the 0 value
            write!(
                self.out,
                "{}vec4(",
                glsl_scalar(crate::Scalar { kind, width: 4 })?.prefix,
            )?;
            self.write_zero_init_scalar(kind)?;
            // Close the zero value constructor
            write!(self.out, ")")?;
            // Close the parentheses surrounding our ternary
            write!(self.out, ")")?;
        }

        Ok(())
    }
4268
4269 fn write_named_expr(
4270 &mut self,
4271 handle: Handle<crate::Expression>,
4272 name: String,
4273 // The expression which is being named.
4274 // Generally, this is the same as handle, except in WorkGroupUniformLoad
4275 named: Handle<crate::Expression>,
4276 ctx: &back::FunctionCtx,
4277 ) -> BackendResult {
4278 match ctx.info[named].ty {
4279 proc::TypeResolution::Handle(ty_handle) => match self.module.types[ty_handle].inner {
4280 TypeInner::Struct { .. } => {
4281 let ty_name = &self.names[&NameKey::Type(ty_handle)];
4282 write!(self.out, "{ty_name}")?;
4283 }
4284 _ => {
4285 self.write_type(ty_handle)?;
4286 }
4287 },
4288 proc::TypeResolution::Value(ref inner) => {
4289 self.write_value_type(inner)?;
4290 }
4291 }
4292
4293 let resolved = ctx.resolve_type(named, &self.module.types);
4294
4295 write!(self.out, " {name}")?;
4296 if let TypeInner::Array { base, size, .. } = *resolved {
4297 self.write_array_size(base, size)?;
4298 }
4299 write!(self.out, " = ")?;
4300 self.write_expr(handle, ctx)?;
4301 writeln!(self.out, ";")?;
4302 self.named_expressions.insert(named, name);
4303
4304 Ok(())
4305 }
4306
4307 /// Helper function that write string with default zero initialization for supported types
4308 fn write_zero_init_value(&mut self, ty: Handle<crate::Type>) -> BackendResult {
4309 let inner = &self.module.types[ty].inner;
4310 match *inner {
4311 TypeInner::Scalar(scalar) | TypeInner::Atomic(scalar) => {
4312 self.write_zero_init_scalar(scalar.kind)?;
4313 }
4314 TypeInner::Vector { scalar, .. } => {
4315 self.write_value_type(inner)?;
4316 write!(self.out, "(")?;
4317 self.write_zero_init_scalar(scalar.kind)?;
4318 write!(self.out, ")")?;
4319 }
4320 TypeInner::Matrix { .. } => {
4321 self.write_value_type(inner)?;
4322 write!(self.out, "(")?;
4323 self.write_zero_init_scalar(crate::ScalarKind::Float)?;
4324 write!(self.out, ")")?;
4325 }
4326 TypeInner::Array { base, size, .. } => {
4327 let count = match size.resolve(self.module.to_ctx())? {
4328 proc::IndexableLength::Known(count) => count,
4329 proc::IndexableLength::Dynamic => return Ok(()),
4330 };
4331 self.write_type(base)?;
4332 self.write_array_size(base, size)?;
4333 write!(self.out, "(")?;
4334 for _ in 1..count {
4335 self.write_zero_init_value(base)?;
4336 write!(self.out, ", ")?;
4337 }
4338 // write last parameter without comma and space
4339 self.write_zero_init_value(base)?;
4340 write!(self.out, ")")?;
4341 }
4342 TypeInner::Struct { ref members, .. } => {
4343 let name = &self.names[&NameKey::Type(ty)];
4344 write!(self.out, "{name}(")?;
4345 for (index, member) in members.iter().enumerate() {
4346 if index != 0 {
4347 write!(self.out, ", ")?;
4348 }
4349 self.write_zero_init_value(member.ty)?;
4350 }
4351 write!(self.out, ")")?;
4352 }
4353 _ => unreachable!(),
4354 }
4355
4356 Ok(())
4357 }
4358
4359 /// Helper function that write string with zero initialization for scalar
4360 fn write_zero_init_scalar(&mut self, kind: crate::ScalarKind) -> BackendResult {
4361 match kind {
4362 crate::ScalarKind::Bool => write!(self.out, "false")?,
4363 crate::ScalarKind::Uint => write!(self.out, "0u")?,
4364 crate::ScalarKind::Float => write!(self.out, "0.0")?,
4365 crate::ScalarKind::Sint => write!(self.out, "0")?,
4366 crate::ScalarKind::AbstractInt | crate::ScalarKind::AbstractFloat => {
4367 return Err(Error::Custom(
4368 "Abstract types should not appear in IR presented to backends".to_string(),
4369 ))
4370 }
4371 }
4372
4373 Ok(())
4374 }
4375
    /// Issue a control barrier.
    ///
    /// A control barrier also implies memory synchronization, so the memory
    /// barriers selected by `flags` are emitted first, followed by GLSL's
    /// `barrier()` call, all indented at `level`.
    fn write_control_barrier(
        &mut self,
        flags: crate::Barrier,
        level: back::Level,
    ) -> BackendResult {
        self.write_memory_barrier(flags, level)?;
        writeln!(self.out, "{level}barrier();")?;
        Ok(())
    }
4386
4387 /// Issue a memory barrier.
4388 fn write_memory_barrier(&mut self, flags: crate::Barrier, level: back::Level) -> BackendResult {
4389 if flags.contains(crate::Barrier::STORAGE) {
4390 writeln!(self.out, "{level}memoryBarrierBuffer();")?;
4391 }
4392 if flags.contains(crate::Barrier::WORK_GROUP) {
4393 writeln!(self.out, "{level}memoryBarrierShared();")?;
4394 }
4395 if flags.contains(crate::Barrier::SUB_GROUP) {
4396 writeln!(self.out, "{level}subgroupMemoryBarrier();")?;
4397 }
4398 if flags.contains(crate::Barrier::TEXTURE) {
4399 writeln!(self.out, "{level}memoryBarrierImage();")?;
4400 }
4401 Ok(())
4402 }
4403
4404 /// Helper function that return the glsl storage access string of [`StorageAccess`](crate::StorageAccess)
4405 ///
4406 /// glsl allows adding both `readonly` and `writeonly` but this means that
4407 /// they can only be used to query information about the resource which isn't what
4408 /// we want here so when storage access is both `LOAD` and `STORE` add no modifiers
4409 fn write_storage_access(&mut self, storage_access: crate::StorageAccess) -> BackendResult {
4410 if storage_access.contains(crate::StorageAccess::ATOMIC) {
4411 return Ok(());
4412 }
4413 if !storage_access.contains(crate::StorageAccess::STORE) {
4414 write!(self.out, "readonly ")?;
4415 }
4416 if !storage_access.contains(crate::StorageAccess::LOAD) {
4417 write!(self.out, "writeonly ")?;
4418 }
4419 Ok(())
4420 }
4421
    /// Helper method used to produce the reflection info that's returned to the user
    ///
    /// Gathers, for the selected entry point: texture/sampler pairings,
    /// uniform and storage buffer names, immediate-data items, and the
    /// varyings and clip distance count recorded earlier during writing.
    /// Note that this takes `self.varying` (leaving it empty).
    ///
    /// # Errors
    ///
    /// Returns [`Error::ImageMultipleSamplers`] if a texture is used with
    /// more than one sampler, since GLSL combines the two into one object.
    fn collect_reflection_info(&mut self) -> Result<ReflectionInfo, Error> {
        let info = self.info.get_entry_point(self.entry_point_idx as usize);
        let mut texture_mapping = crate::FastHashMap::default();
        let mut uniforms = crate::FastHashMap::default();

        // Pair every sampled texture with the sampler it is used with.
        for sampling in info.sampling_set.iter() {
            let tex_name = self.reflection_names_globals[&sampling.image].clone();

            match texture_mapping.entry(tex_name) {
                hash_map::Entry::Vacant(v) => {
                    v.insert(TextureMapping {
                        texture: sampling.image,
                        sampler: Some(sampling.sampler),
                    });
                }
                hash_map::Entry::Occupied(e) => {
                    // The same texture reached through two different samplers
                    // cannot be represented in GLSL reflection info.
                    if e.get().sampler != Some(sampling.sampler) {
                        log::error!("Conflicting samplers for {}", e.key());
                        return Err(Error::ImageMultipleSamplers);
                    }
                }
            }
        }

        // Walk the globals actually used by the entry point, classifying them
        // into textures, uniforms/storage buffers, and immediate data.
        let mut immediates_info = None;
        for (handle, var) in self.module.global_variables.iter() {
            if info[handle].is_empty() {
                // Skip globals the entry point never touches.
                continue;
            }
            match self.module.types[var.ty].inner {
                TypeInner::Image { .. } => {
                    let tex_name = self.reflection_names_globals[&handle].clone();
                    match texture_mapping.entry(tex_name) {
                        hash_map::Entry::Vacant(v) => {
                            // Image accessed without a sampler (e.g. storage image).
                            v.insert(TextureMapping {
                                texture: handle,
                                sampler: None,
                            });
                        }
                        hash_map::Entry::Occupied(_) => {
                            // already used with a sampler, do nothing
                        }
                    }
                }
                _ => match var.space {
                    crate::AddressSpace::Uniform | crate::AddressSpace::Storage { .. } => {
                        let name = self.reflection_names_globals[&handle].clone();
                        uniforms.insert(handle, name);
                    }
                    crate::AddressSpace::Immediate => {
                        let name = self.reflection_names_globals[&handle].clone();
                        immediates_info = Some((name, var.ty));
                    }
                    _ => (),
                },
            }
        }

        let mut immediates_segments = Vec::new();
        let mut immediates_items = vec![];

        if let Some((name, ty)) = immediates_info {
            // We don't have a layouter available to us, so we need to create one.
            //
            // This is potentially a bit wasteful, but the set of types in the program
            // shouldn't be too large.
            let mut layouter = proc::Layouter::default();
            layouter.update(self.module.to_ctx()).unwrap();

            // We start with the name of the binding itself.
            immediates_segments.push(name);

            // We then recursively collect all the uniform fields of the immediate data.
            self.collect_immediates_items(
                ty,
                &mut immediates_segments,
                &layouter,
                &mut 0,
                &mut immediates_items,
            );
        }

        Ok(ReflectionInfo {
            texture_mapping,
            uniforms,
            varying: mem::take(&mut self.varying),
            immediates_items,
            clip_distance_count: self.clip_distance_count,
        })
    }
4513
    /// Recursively collects one [`ImmediateItem`] per leaf uniform
    /// (scalar/vector/matrix) reachable from `ty`, appending them to `items`.
    ///
    /// `segments` holds the access-path pieces (binding name, `[i]` array
    /// accessors, `.field` struct accessors) needed to reach `ty` from the
    /// root; they are concatenated to form each item's `access_path`.
    /// `offset` is the running byte offset, advanced and alignment-rounded
    /// in step with the layout computed by `layouter`.
    fn collect_immediates_items(
        &mut self,
        ty: Handle<crate::Type>,
        segments: &mut Vec<String>,
        layouter: &proc::Layouter,
        offset: &mut u32,
        items: &mut Vec<ImmediateItem>,
    ) {
        // At this point in the recursion, `segments` contains the path
        // needed to access `ty` from the root.

        let layout = &layouter[ty];
        // Round up to this type's alignment before placing it.
        *offset = layout.alignment.round_up(*offset);
        match self.module.types[ty].inner {
            // All these types map directly to GL uniforms.
            TypeInner::Scalar { .. } | TypeInner::Vector { .. } | TypeInner::Matrix { .. } => {
                // Build the full name, by combining all current segments.
                let name: String = segments.iter().map(String::as_str).collect();
                items.push(ImmediateItem {
                    access_path: name,
                    offset: *offset,
                    ty,
                });
                *offset += layout.size;
            }
            // Arrays are recursed into.
            TypeInner::Array { base, size, .. } => {
                let crate::ArraySize::Constant(count) = size else {
                    unreachable!("Cannot have dynamic arrays in immediates");
                };

                for i in 0..count.get() {
                    // Add the array accessor and recurse.
                    segments.push(format!("[{i}]"));
                    self.collect_immediates_items(base, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure the stride is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            TypeInner::Struct { ref members, .. } => {
                for (index, member) in members.iter().enumerate() {
                    // Add struct accessor and recurse.
                    segments.push(format!(
                        ".{}",
                        self.names[&NameKey::StructMember(ty, index as u32)]
                    ));
                    self.collect_immediates_items(member.ty, segments, layouter, offset, items);
                    segments.pop();
                }

                // Ensure ending padding is kept by rounding up to the alignment.
                *offset = layout.alignment.round_up(*offset)
            }
            // Other types (images, samplers, pointers, ...) cannot appear in
            // immediate data.
            _ => unreachable!(),
        }
    }
4572}