wgpu_benchmark/
lib.rs

#![cfg(not(target_arch = "wasm32"))]
#![expect(clippy::disallowed_types)] // We're outside of the main wgpu codebase

//! Benchmarking framework for `wgpu`.
//!
//! This crate is a basic framework for benchmarking. Its design is guided
//! by a few goals:
//!
//! - Enumerating tests should be extremely cheap. `criterion` needs
//!   to run all of your benchmark functions to enumerate them during
//!   testing, which forces your code to contort itself to avoid doing
//!   any work until you enter a benchmark callback. This framework
//!   avoids that by taking an explicit list of benchmark functions.
//! - It must be compatible with `cargo-nextest` and provide a "test"
//!   mode that runs each benchmark exactly once.
//! - It should offer intuitive test grouping, allowing quick execution
//!   of a reasonable baseline set of benchmarks during development,
//!   while still allowing a more exhaustive benchmark suite to be run
//!   if desired.
//!
//! By default, each benchmark runs for 2 seconds, but individual benchmarks
//! can override this.
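//!
//! As a sketch of the intended usage (the `wgpu_benchmark` crate path and the
//! benchmark body are illustrative assumptions; only the `Benchmark` struct
//! and the `main` entry point are defined in this crate):
//!
//! ```ignore
//! use wgpu_benchmark::{Benchmark, BenchmarkContext, SubBenchResult};
//!
//! fn noop(_ctx: BenchmarkContext) -> anyhow::Result<Vec<SubBenchResult>> {
//!     // A real benchmark would use the context to drive its measurement
//!     // loop and return one `SubBenchResult` per sub-benchmark.
//!     Ok(Vec::new())
//! }
//!
//! fn main() {
//!     wgpu_benchmark::main(vec![Benchmark {
//!         name: "noop",
//!         func: noop,
//!     }]);
//! }
//! ```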

use std::{collections::HashMap, io::IsTerminal, time::Duration};

use anyhow::Result;
use pico_args::Arguments;
use serde::{Deserialize, Serialize};
use termcolor::{Color, ColorChoice, ColorSpec, StandardStream, WriteColor};

mod context;
mod file;
mod iter;
mod print;

pub use context::*;
pub use iter::*;

use crate::file::PREVIOUS;

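/// A set of benchmark results, keyed by benchmark name, as read from or
/// written to a results file.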
#[derive(Serialize, Deserialize, Default)]
pub struct BenchmarkFile {
    pub results: HashMap<String, Vec<SubBenchResult>>,
}

impl BenchmarkFile {
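    /// Looks up the result for `sub_benchmark_name` within the benchmark named
    /// `benchmark_name`, if both exist.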
    pub fn get_result(
        &self,
        benchmark_name: &str,
        sub_benchmark_name: &str,
    ) -> Option<&SubBenchResult> {
        self.results
            .get(benchmark_name)?
            .iter()
            .find(|r| r.name == sub_benchmark_name)
    }
}

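/// The measured result of a single sub-benchmark.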
#[derive(Serialize, Deserialize)]
pub struct SubBenchResult {
    /// Name of the sub-benchmark.
    pub name: String,
    /// Average duration per iteration of the sub-benchmark.
    pub avg_duration_per_iteration: Duration,
    /// Total number of iterations executed.
    pub iterations: u32,
    /// Throughput unit description, e.g. "bytes" or "elements".
    pub throughput_unit: String,
    /// Number of throughput units processed per iteration.
    pub throughput_count_per_iteration: u32,
}

impl SubBenchResult {
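    /// Number of throughput units processed per second, derived from the
    /// average duration per iteration. Returns `0.0` when the average
    /// duration is zero, avoiding a division by zero.
    ///
    /// A minimal sketch with made-up values (the crate path is an assumption):
    ///
    /// ```ignore
    /// use std::time::Duration;
    /// use wgpu_benchmark::SubBenchResult;
    ///
    /// let result = SubBenchResult {
    ///     name: "upload".to_string(),
    ///     avg_duration_per_iteration: Duration::from_millis(500),
    ///     iterations: 100,
    ///     throughput_unit: "bytes".to_string(),
    ///     throughput_count_per_iteration: 1_000_000,
    /// };
    /// // 1,000,000 bytes every 0.5 s => 2,000,000 bytes per second.
    /// assert_eq!(result.throughput_per_second(), 2_000_000.0);
    /// ```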
    pub fn throughput_per_second(&self) -> f64 {
        let secs_f64 = self.avg_duration_per_iteration.as_secs_f64();
        if secs_f64 == 0.0 {
            return 0.0;
        }
        self.throughput_count_per_iteration as f64 / secs_f64
    }
}

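/// A single registered benchmark: a name plus the function that runs it and
/// returns its sub-benchmark results.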
pub struct Benchmark {
    pub name: &'static str,
    pub func: fn(BenchmarkContext) -> Result<Vec<SubBenchResult>>,
}

const HELP: &str = "\
Usage: wgpu-benchmark [OPTIONS] [BENCHMARK_NAME]

Modes:
    --bench                     Run in benchmark mode, comparing against previous results.
    --list                      List available benchmarks.
    <no flag>                   Run in test mode, executing each benchmark exactly once.

Test Matching:
    --exact                     When specifying BENCHMARK_NAME, only run exact matches.
    BENCHMARK_NAME              Only run benchmarks whose names contain this substring.

Comparison:
    -b, --baseline NAME         Specify a baseline file for comparison.
    -s, --save-baseline NAME    Save the results as a baseline file.

Timings:
    --iters N                   Override number of iterations per benchmark.
    --time SECONDS              Override time per benchmark in seconds.

Other:
    --color                     Set colored output (always, always-ansi, auto, never).
    --format terse              Specify --list output format (only 'terse' is supported).
    --no-capture                (Ignored)
";

pub fn main(benchmarks: Vec<Benchmark>) {
    let mut args = Arguments::from_env();

    let help = args.contains(["-h", "--help"]);

    if help {
        println!("{HELP}");
        return;
    }

    let mut color: ColorChoice = args
        .opt_value_from_str("--color")
        .unwrap_or(None)
        .unwrap_or(ColorChoice::Auto);
    if color == ColorChoice::Auto && !std::io::stdin().is_terminal() {
        color = ColorChoice::Never;
    }

    let exact = args.contains("--exact");
    // We don't actually need this flag, but cargo-nextest passes it in
    // test mode, so we need to accept it.
    let _no_capture = args.contains("--no-capture");

    #[expect(clippy::manual_map)] // So much clearer this way
    let mut override_iterations = if let Some(iters) = args.opt_value_from_str("--iters").unwrap() {
        Some(LoopControl::Iterations(iters))
    } else if let Some(seconds) = args.opt_value_from_str("--time").unwrap() {
        Some(LoopControl::Time(Duration::from_secs_f64(seconds)))
    } else {
        None
    };

    let baseline_name: Option<String> = args.opt_value_from_str(["-b", "--baseline"]).unwrap();
    let write_baseline: Option<String> =
        args.opt_value_from_str(["-s", "--save-baseline"]).unwrap();

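    // Mode selection: running without `--bench` or `--list` is "test mode",
    // which is what cargo-nextest invokes.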
    let is_bench = args.contains("--bench");
    let is_list = args.contains("--list");
    let is_test = !is_bench && !is_list;

    let format: Option<String> = args.opt_value_from_str("--format").unwrap();

    if let Some(fmt) = format {
        assert_eq!(fmt, "terse", "Only 'terse' format is supported.");
    }
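    // `PREVIOUS` is the reserved name under which the results of the last run
    // are saved automatically, so user-supplied baseline names must not use it.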
    if let Some(ref baseline) = baseline_name {
        if baseline == PREVIOUS {
            eprintln!("Cannot use '{PREVIOUS}' as a baseline name.");
            return;
        }
    }
    if let Some(ref write_baseline) = write_baseline {
        if write_baseline == PREVIOUS {
            eprintln!("Cannot use '{PREVIOUS}' as a baseline name.");
            return;
        }
    }

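    // In test mode, run each benchmark exactly once unless the user explicitly
    // requested an iteration count or duration.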
    if override_iterations.is_none() && is_test {
        override_iterations = Some(LoopControl::Iterations(1));
    }

    let name = args.free_from_str::<String>().ok();

    let baseline = if is_bench {
        let res = file::get_comparison_file(baseline_name.as_deref());

        match (&res, baseline_name.as_deref()) {
            (Some(_), Some(baseline)) => {
                println!("Using baseline \"{baseline}\" for comparison.\n")
            }
            (None, Some(baseline)) => {
                eprintln!("Could not find baseline named {baseline:?}.\n");
                return;
            }
            (Some(_), None) => {
                println!("Using previous benchmark results for comparison.\n");
            }
            (None, None) => {
                println!("No previous benchmark results found for comparison.\n");
            }
        }

        res
    } else {
        None
    };

    let mut output_file = BenchmarkFile::default();

    let mut stdout = StandardStream::stdout(color);

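    // Run every benchmark that matches the (optional) name filter.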
    for bench in benchmarks {
        if let Some(ref bench_name) = name {
            if exact {
                if bench.name != bench_name {
                    continue;
                }
            } else if !bench.name.contains(bench_name) {
                continue;
            }
        }

        if is_list {
            println!("{}: benchmark", bench.name);
            continue;
        }

        let ctx = BenchmarkContext {
            override_iters: override_iterations,
            default_iterations: LoopControl::default(),
            is_test,
        };

        stdout
            .set_color(ColorSpec::new().set_fg(Some(Color::Blue)))
            .unwrap();
        println!("Running benchmark: {}", bench.name);
        stdout.reset().unwrap();

        let results = {
            profiling::scope!("bench", bench.name);
            let r = (bench.func)(ctx);
            match r {
                Ok(r) => r,
                Err(e) => {
                    eprintln!("  Error running benchmark '{}': {:?}", bench.name, e);
                    continue;
                }
            }
        };

        let previous_results = if let Some(ref baseline) = baseline {
            baseline.results.get(bench.name).map(|r| r.as_slice())
        } else {
            None
        };

        print::print_results(&mut stdout, &results, previous_results);

        output_file.results.insert(bench.name.to_string(), results);
    }

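    // Always record this run as the implicit "previous" results; optionally
    // also save it under a user-supplied baseline name.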
    file::write_results_file(PREVIOUS, &output_file).unwrap();
    if let Some(output_baseline) = write_baseline {
        file::write_results_file(&output_baseline, &output_file).unwrap();
    }
}