wgpu_test/native.rs

#![cfg(not(target_arch = "wasm32"))]
//! Infrastructure for the native, `cargo-nextest` based harness.
//!
//! This is largely used by [`gpu_test_main`](crate::gpu_test_main) and [`gpu_test`](crate::gpu_test).

use std::{future::Future, pin::Pin};

use parking_lot::Mutex;

use crate::{
    config::GpuTestConfiguration, params::TestInfo, report::AdapterReport, run::execute_test,
    GpuTestInitializer,
};

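/// Boxed, pinned future used to type-erase the async body of each test so it
/// can be stored and later driven to completion by the harness.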
type NativeTestFuture = Pin<Box<dyn Future<Output = ()> + Send>>;

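/// A single GPU test, bound to one specific adapter, ready to be converted
/// into a [`libtest_mimic::Trial`].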
struct NativeTest {
    name: String,
    future: NativeTestFuture,
}

impl NativeTest {
    /// The adapter index is only used for naming the test; adapters are matched based on the adapter info.
    fn from_configuration(
        config: GpuTestConfiguration,
        adapter_report: AdapterReport,
        adapter_index: usize,
    ) -> Self {
        let backend = adapter_report.info.backend;
        let device_name = &adapter_report.info.name;

        let test_info = TestInfo::from_configuration(&config, &adapter_report);

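        // Build a test name that encodes the run/skip status, backend, device
        // name, and adapter index alongside the base test name.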
        let full_name = format!(
            "[{running_msg}] [{backend:?}/{device_name}/{adapter_index}] {base_name}",
            running_msg = test_info.running_msg,
            base_name = config.name,
        );
        Self {
            name: full_name,
            future: Box::pin(async move {
                // Enable Metal validation layers if we're running on Metal.
                //
                // This is a process-wide setting because it's set via an environment
                // variable, but all tests are run in separate processes.
                //
                // We don't do this in the instance initializer because we don't want to
                // enable validation layers for the entire process, or for other instances.
                //
                // We do not enable Metal validation when running on MoltenVK.
                let metal_validation = backend == wgpu::Backend::Metal;

                let env_value = if metal_validation { "1" } else { "0" };
                std::env::set_var("MTL_DEBUG_LAYER", env_value);
                if std::env::var("GITHUB_ACTIONS").as_deref() != Ok("true") {
                    // Metal Shader Validation is entirely broken in the paravirtualized CI environment.
                    std::env::set_var("MTL_SHADER_VALIDATION", env_value);
                }

                execute_test(Some(&adapter_report), config, Some(test_info)).await;
            }),
        }
    }

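    /// Wrap this test in a [`libtest_mimic::Trial`] that blocks on the async
    /// test body when the trial is executed.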
    pub fn into_trial(self) -> libtest_mimic::Trial {
        libtest_mimic::Trial::test(self.name, || {
            pollster::block_on(self.future);
            Ok(())
        })
    }
}

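/// Shared, mutex-protected list of [`GpuTestConfiguration`]s; hidden from the
/// docs because it is an implementation detail of the test harness.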
#[doc(hidden)]
pub static TEST_LIST: Mutex<Vec<crate::GpuTestConfiguration>> = Mutex::new(Vec::new());

/// Return value for the main function.
pub type MainResult = anyhow::Result<()>;

/// Main function that runs every gpu test once for every adapter on the system.
pub fn main(tests: Vec<GpuTestInitializer>) -> MainResult {
    use anyhow::Context;

    use crate::report::GpuReport;

    // If this environment variable is set, we will only enumerate the noop backend. The
    // main use case is running tests with miri, where we can't even enumerate adapters,
    // as we cannot load DLLs or make any external calls.
    let use_noop = std::env::var("WGPU_GPU_TESTS_USE_NOOP_BACKEND").as_deref() == Ok("1");

    let report = if use_noop {
        GpuReport::noop_only()
    } else {
        let config_text = {
            profiling::scope!("Reading .gpuconfig");
            &std::fs::read_to_string(format!("{}/../.gpuconfig", env!("CARGO_MANIFEST_DIR")))
                .context(
                    "Failed to read .gpuconfig, did you run the tests via `cargo xtask test`?",
                )?
        };
        let mut report =
            GpuReport::from_json(config_text).context("Could not parse .gpuconfig JSON")?;

        // Filter out the adapters that are not part of WGPU_BACKEND.
        let wgpu_backends = wgpu::Backends::from_env().unwrap_or_default();
        report
            .devices
            .retain(|report| wgpu_backends.contains(wgpu::Backends::from(report.info.backend)));

        report
    };

    // Iterate through all the tests, creating a test per adapter.
    execute_native(tests.into_iter().flat_map(|initializer| {
        let test = initializer();
        report
            .devices
            .iter()
            .enumerate()
            .map(move |(adapter_index, adapter_report)| {
                NativeTest::from_configuration(test.clone(), adapter_report.clone(), adapter_index)
            })
    }));

    Ok(())
}

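/// Convert the tests into `libtest_mimic` trials and run them with the
/// arguments parsed from the command line.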
fn execute_native(tests: impl IntoIterator<Item = NativeTest>) {
    let args = libtest_mimic::Arguments::from_args();
    let trials = {
        profiling::scope!("collecting tests");
        tests.into_iter().map(NativeTest::into_trial).collect()
    };

    libtest_mimic::run(&args, trials).exit_if_failed();
}