// wgpu_benchmark/lib.rs

1#![cfg(not(target_arch = "wasm32"))]
2#![expect(
3    clippy::disallowed_types,
4    reason = "We're outside of the main wgpu codebase"
5)]
6
7//! Benchmarking framework for `wgpu`.
8//!
9//! This crate is a basic framework for benchmarking. Its design is guided
10//! by a few goals:
11//!
12//! - Enumerating tests should be extremely cheap. `criterion` needs
13//!   to run all of your benchmark functions to enumerate them during
14//!   testing. This requires your code to contort itself to avoid doing
15//!   any work until you enter a benchmark callback. This framework
//!   avoids that by having an explicit list of benchmark functions.
17//! - It must be compatible with `cargo-nextest` and have a compatible
18//!   "test" mode that runs each benchmark exactly once.
//! - It should be able to have intuitive test grouping, allowing for
//!   quick execution of a reasonable baseline set of benchmarks
21//!   during development, while still allowing for a more exhaustive
22//!   benchmark suite to be run if desired.
23//!
24//! By default all tests run for 2 seconds, but this can be overridden
25//! by individual tests.
26
27use std::{collections::HashMap, io::IsTerminal, time::Duration};
28
29use anyhow::Result;
30use pico_args::Arguments;
31use serde::{Deserialize, Serialize};
32use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};
33
34mod context;
35mod file;
36mod iter;
37mod print;
38
39pub use context::*;
40pub use iter::*;
41
42use crate::file::PREVIOUS;
43
/// Persisted benchmark results, keyed by benchmark name.
///
/// Serialized to disk so that later runs can compare against the previous
/// run or against a named baseline file.
#[derive(Serialize, Deserialize, Default)]
pub struct BenchmarkFile {
    /// Map from benchmark name to the results of each of its sub-benchmarks.
    pub results: HashMap<String, Vec<SubBenchResult>>,
}
48
49impl BenchmarkFile {
50    pub fn get_result(
51        &self,
52        benchmark_name: &str,
53        sub_benchmark_name: &str,
54    ) -> Option<&SubBenchResult> {
55        self.results
56            .get(benchmark_name)?
57            .iter()
58            .find(|r| r.name == sub_benchmark_name)
59    }
60}
61
/// Timing and throughput measurements for a single sub-benchmark run.
#[derive(Serialize, Deserialize)]
pub struct SubBenchResult {
    /// Name of the subbenchmark.
    pub name: String,
    /// Average duration per iteration of the subbenchmark.
    pub avg_duration_per_iteration: Duration,
    /// Total number of iterations executed.
    pub iterations: u32,
    /// Throughput unit description. e.g., "bytes", "elements", etc.
    pub throughput_unit: String,
    /// Number of throughput units processed per iteration.
    pub throughput_count_per_iteration: u32,
}
75
76impl SubBenchResult {
77    pub fn throughput_per_second(&self) -> f64 {
78        let secs_f64 = self.avg_duration_per_iteration.as_secs_f64();
79        if secs_f64 == 0.0 {
80            return 0.0;
81        }
82        self.throughput_count_per_iteration as f64 / secs_f64
83    }
84}
85
/// A single entry in the harness's explicit list of benchmarks.
pub struct Benchmark {
    /// Unique name used for filtering, display, and result storage.
    pub name: &'static str,
    /// Function that runs the benchmark and returns its sub-benchmark results.
    pub func: fn(BenchmarkContext) -> Result<Vec<SubBenchResult>>,
}
90
/// Usage text printed for `-h`/`--help`.
const HELP: &str = "\
Usage: wgpu-benchmark [OPTIONS] [BENCHMARK_NAME]

Modes:
    --bench                     Run in benchmark mode, comparing against previous results.
    --list                      List available benchmarks.
    <no flag>                   Run in test mode, executing each benchmark exactly once.

Test Matching:
    --exact                     When specifying BENCHMARK_NAME, only run exact matches.
    BENCHMARK_NAME              Only run benchmarks whose names contain this substring.

Comparison:
    -b, --baseline NAME         Specify a baseline file for comparison.
    -s, --save-baseline NAME    Save the results as a baseline file.

Timings:
    --iters N                   Override number of iterations per benchmark.
    --time SECONDS              Override time per benchmark in seconds.

Other:
    --color                     Set colored output (always,always-ansi,auto,never).
    --format terse              Specify --list output format (only 'terse' is supported).
    --no-capture                (Ignored)
";
116
117pub fn main(benchmarks: Vec<Benchmark>) {
118    let mut args = Arguments::from_env();
119
120    let help = args.contains(["-h", "--help"]);
121
122    if help {
123        println!("{HELP}");
124        return;
125    }
126
127    let mut color: ColorChoice = args
128        .opt_value_from_str("--color")
129        .unwrap_or(None)
130        .unwrap_or(ColorChoice::Auto);
131    if color == ColorChoice::Auto && !std::io::stdin().is_terminal() {
132        color = ColorChoice::Never;
133    }
134
135    let exact = args.contains("--exact");
136    // We don't actually need this flag, but cargo-nextest passes it in
137    // test mode, so we need to accept it.
138    let _no_capture = args.contains("--no-capture");
139
140    #[expect(clippy::manual_map, reason = "So much clearer this way")]
141    let mut override_iterations = if let Some(iters) = args.opt_value_from_str("--iters").unwrap() {
142        Some(LoopControl::Iterations(iters))
143    } else if let Some(seconds) = args.opt_value_from_str("--time").unwrap() {
144        Some(LoopControl::Time(Duration::from_secs_f64(seconds)))
145    } else {
146        None
147    };
148
149    let baseline_name: Option<String> = args.opt_value_from_str(["-b", "--baseline"]).unwrap();
150    let write_baseline: Option<String> =
151        args.opt_value_from_str(["-s", "--save-baseline"]).unwrap();
152
153    let is_bench = args.contains("--bench");
154    let is_list = args.contains("--list");
155    let is_test = !is_bench && !is_list;
156
157    let format: Option<String> = args.opt_value_from_str("--format").unwrap();
158
159    if let Some(fmt) = format {
160        assert_eq!(fmt, "terse", "Only 'terse' format is supported.");
161    }
162    if let Some(ref baseline) = baseline_name {
163        if baseline == PREVIOUS {
164            eprintln!("Cannot use '{PREVIOUS}' as a baseline name.");
165            return;
166        }
167    }
168    if let Some(ref write_baseline) = write_baseline {
169        if write_baseline == PREVIOUS {
170            eprintln!("Cannot use '{PREVIOUS}' as a baseline name.");
171            return;
172        }
173    }
174
175    if override_iterations.is_none() && is_test {
176        override_iterations = Some(LoopControl::Iterations(1));
177    }
178
179    let name = args.free_from_str::<String>().ok();
180
181    let baseline = if is_bench {
182        let res = file::get_comparison_file(baseline_name.as_deref());
183
184        match (&res, baseline_name.as_deref()) {
185            (Some(_), Some(baseline)) => {
186                println!("Using baseline \"{baseline}\" for comparison.\n")
187            }
188            (None, Some(baseline)) => {
189                eprintln!("Could not find baseline named {baseline:?}.\n");
190                return;
191            }
192            (Some(_), None) => {
193                println!("Using previous benchmark results for comparison.\n");
194            }
195            (None, None) => {
196                println!("No previous benchmark results found for comparison.\n");
197            }
198        }
199
200        res
201    } else {
202        None
203    };
204
205    let mut output_file = BenchmarkFile::default();
206
207    let mut stdout = StandardStream::stdout(color);
208
209    for bench in benchmarks {
210        if let Some(ref bench_name) = name {
211            if exact {
212                if bench.name != bench_name {
213                    continue;
214                }
215            } else if !bench.name.contains(bench_name) {
216                continue;
217            }
218        }
219
220        if is_list {
221            println!("{}: benchmark", bench.name);
222            continue;
223        }
224
225        let ctx = BenchmarkContext {
226            override_iters: override_iterations,
227            default_iterations: LoopControl::default(),
228            is_test,
229        };
230
231        stdout
232            .set_color(ColorSpec::new().set_fg(Some(Color::Blue)))
233            .unwrap();
234        println!("Running benchmark: {}", bench.name);
235        stdout.reset().unwrap();
236
237        let results = {
238            profiling::scope!("bench", bench.name);
239            let r = (bench.func)(ctx);
240            match r {
241                Ok(r) => r,
242                Err(e) => {
243                    eprintln!("  Error running benchmark '{}': {:?}", bench.name, e);
244                    continue;
245                }
246            }
247        };
248
249        let previous_results = if let Some(ref baseline) = baseline {
250            baseline.results.get(bench.name).map(|r| r.as_slice())
251        } else {
252            None
253        };
254
255        print::print_results(&mut stdout, &results, previous_results);
256
257        output_file.results.insert(bench.name.to_string(), results);
258    }
259
260    file::write_results_file(PREVIOUS, &output_file).unwrap();
261    if let Some(output_baseline) = write_baseline {
262        file::write_results_file(&output_baseline, &output_file).unwrap();
263    }
264}