use std::sync::Arc;
use crate::utils::get_adapter_with_capabilities_or_from_env;
use wgpu::{Instance, Surface};
use winit::{
dpi::PhysicalSize,
event::{Event, KeyEvent, StartCause, WindowEvent},
event_loop::{EventLoop, EventLoopWindowTarget},
keyboard::{Key, NamedKey},
window::Window,
};
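/// Behaviour shared by every example in this crate.
///
/// The framework owns the window, surface, adapter, device and queue; an
/// example only declares the features, limits and downlevel capabilities it
/// needs and supplies the `init`/`resize`/`update`/`render` hooks. `SRGB`
/// controls whether the surface view format gets the sRGB suffix added or
/// removed (see `SurfaceWrapper::resume`).
///
/// A minimal sketch of an implementation (the `Blank` type is hypothetical,
/// purely for illustration):
///
/// ```ignore
/// struct Blank;
///
/// impl Example for Blank {
///     fn init(
///         _config: &wgpu::SurfaceConfiguration,
///         _adapter: &wgpu::Adapter,
///         _device: &wgpu::Device,
///         _queue: &wgpu::Queue,
///     ) -> Self {
///         Blank
///     }
///     fn resize(
///         &mut self,
///         _config: &wgpu::SurfaceConfiguration,
///         _device: &wgpu::Device,
///         _queue: &wgpu::Queue,
///     ) {
///     }
///     fn update(&mut self, _event: WindowEvent) {}
///     fn render(&mut self, _view: &wgpu::TextureView, _device: &wgpu::Device, _queue: &wgpu::Queue) {}
/// }
/// ```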
pub trait Example: 'static + Sized {
const SRGB: bool = true;
fn optional_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_features() -> wgpu::Features {
wgpu::Features::empty()
}
fn required_downlevel_capabilities() -> wgpu::DownlevelCapabilities {
wgpu::DownlevelCapabilities {
flags: wgpu::DownlevelFlags::empty(),
shader_model: wgpu::ShaderModel::Sm5,
..wgpu::DownlevelCapabilities::default()
}
}
fn required_limits() -> wgpu::Limits {
wgpu::Limits::downlevel_webgl2_defaults()
}
fn init(
config: &wgpu::SurfaceConfiguration,
adapter: &wgpu::Adapter,
device: &wgpu::Device,
queue: &wgpu::Queue,
) -> Self;
fn resize(
&mut self,
config: &wgpu::SurfaceConfiguration,
device: &wgpu::Device,
queue: &wgpu::Queue,
);
fn update(&mut self, event: WindowEvent);
fn render(&mut self, view: &wgpu::TextureView, device: &wgpu::Device, queue: &wgpu::Queue);
}
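/// Initializes logging for the current platform.
///
/// On the web, output is routed to the browser console through `fern` and
/// `console_log`, the log level can be overridden with a `RUST_LOG` query
/// parameter in the page URL, and `console_error_panic_hook` is installed so
/// panics show up in the console. On native targets, `env_logger` is used
/// with an `Info` default, quieter levels for `wgpu_hal` and `naga`, and
/// `RUST_LOG` still respected via `parse_default_env`.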
fn init_logger() {
cfg_if::cfg_if! {
if #[cfg(target_arch = "wasm32")] {
let query_string = web_sys::window().unwrap().location().search().unwrap();
let query_level: Option<log::LevelFilter> = parse_url_query_string(&query_string, "RUST_LOG")
.and_then(|x| x.parse().ok());
let base_level = query_level.unwrap_or(log::LevelFilter::Info);
let wgpu_level = query_level.unwrap_or(log::LevelFilter::Error);
fern::Dispatch::new()
.level(base_level)
.level_for("wgpu_core", wgpu_level)
.level_for("wgpu_hal", wgpu_level)
.level_for("naga", wgpu_level)
.chain(fern::Output::call(console_log::log))
.apply()
.unwrap();
std::panic::set_hook(Box::new(console_error_panic_hook::hook));
} else {
env_logger::builder()
.filter_level(log::LevelFilter::Info)
.filter_module("wgpu_core", log::LevelFilter::Info)
.filter_module("wgpu_hal", log::LevelFilter::Error)
.filter_module("naga", log::LevelFilter::Error)
.parse_default_env()
.init();
}
}
}
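/// Owns the winit event loop together with the window it drives.
///
/// The window title is always set; on the web the window is additionally
/// attached to the HTML canvas element with the id `canvas`.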
struct EventLoopWrapper {
event_loop: EventLoop<()>,
window: Arc<Window>,
}
impl EventLoopWrapper {
pub fn new(title: &str) -> Self {
let event_loop = EventLoop::new().unwrap();
let mut builder = winit::window::WindowBuilder::new();
#[cfg(target_arch = "wasm32")]
{
use wasm_bindgen::JsCast;
use winit::platform::web::WindowBuilderExtWebSys;
let canvas = web_sys::window()
.unwrap()
.document()
.unwrap()
.get_element_by_id("canvas")
.unwrap()
.dyn_into::<web_sys::HtmlCanvasElement>()
.unwrap();
builder = builder.with_canvas(Some(canvas));
}
builder = builder.with_title(title);
let window = Arc::new(builder.build(&event_loop).unwrap());
Self { event_loop, window }
}
}
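/// Tracks the surface and its configuration across the platform-specific
/// parts of the window lifecycle.
///
/// On the web the surface must be created before the adapter is requested
/// (`pre_adapter`); everywhere else it is (re)created in `resume`. On Android
/// the surface is dropped again in `suspend`, because the native window goes
/// away while the app is in the background.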
struct SurfaceWrapper {
surface: Option<wgpu::Surface<'static>>,
config: Option<wgpu::SurfaceConfiguration>,
}
impl SurfaceWrapper {
fn new() -> Self {
Self {
surface: None,
config: None,
}
}
fn pre_adapter(&mut self, instance: &Instance, window: Arc<Window>) {
if cfg!(target_arch = "wasm32") {
self.surface = Some(instance.create_surface(window).unwrap());
}
}
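/// Returns true for the event on which it is safe to create the surface and
/// initialize the example: the first `NewEvents(StartCause::Init)` on most
/// platforms, but `Resumed` on Android, where a window only exists after the
/// app has been resumed.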
fn start_condition(e: &Event<()>) -> bool {
match e {
Event::NewEvents(StartCause::Init) => !cfg!(target_os = "android"),
Event::Resumed => cfg!(target_os = "android"),
_ => false,
}
}
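/// Creates (on native targets) and configures the surface for the given
/// window, clamping the size to at least 1x1. Depending on `srgb`, either an
/// sRGB view format is pushed into `view_formats` or the sRGB suffix is
/// stripped from the surface format, so acquired frames can always be viewed
/// through `view_formats[0]`.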
fn resume(&mut self, context: &ExampleContext, window: Arc<Window>, srgb: bool) {
let window_size = window.inner_size();
let width = window_size.width.max(1);
let height = window_size.height.max(1);
log::info!("Surface resume {window_size:?}");
if !cfg!(target_arch = "wasm32") {
self.surface = Some(context.instance.create_surface(window).unwrap());
}
let surface = self.surface.as_ref().unwrap();
let mut config = surface
.get_default_config(&context.adapter, width, height)
.expect("Surface isn't supported by the adapter.");
if srgb {
let view_format = config.format.add_srgb_suffix();
config.view_formats.push(view_format);
} else {
let format = config.format.remove_srgb_suffix();
config.format = format;
config.view_formats.push(format);
}
surface.configure(&context.device, &config);
self.config = Some(config);
}
fn resize(&mut self, context: &ExampleContext, size: PhysicalSize<u32>) {
log::info!("Surface resize {size:?}");
let config = self.config.as_mut().unwrap();
config.width = size.width.max(1);
config.height = size.height.max(1);
let surface = self.surface.as_ref().unwrap();
surface.configure(&context.device, config);
}
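/// Acquires the next frame from the surface. A `Timeout` is retried once;
/// `Outdated`, `Lost`, `Other` and `OutOfMemory` cause the surface to be
/// reconfigured before retrying.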
fn acquire(&mut self, context: &ExampleContext) -> wgpu::SurfaceTexture {
let surface = self.surface.as_ref().unwrap();
match surface.get_current_texture() {
Ok(frame) => frame,
Err(wgpu::SurfaceError::Timeout) => surface
.get_current_texture()
.expect("Failed to acquire next surface texture!"),
Err(
wgpu::SurfaceError::Outdated
| wgpu::SurfaceError::Lost
| wgpu::SurfaceError::Other
| wgpu::SurfaceError::OutOfMemory,
) => {
surface.configure(&context.device, self.config());
surface
.get_current_texture()
.expect("Failed to acquire next surface texture!")
}
}
}
fn suspend(&mut self) {
if cfg!(target_os = "android") {
self.surface = None;
}
}
fn get(&self) -> Option<&Surface> {
self.surface.as_ref()
}
fn config(&self) -> &wgpu::SurfaceConfiguration {
self.config.as_ref().unwrap()
}
}
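/// The wgpu objects shared by every example: instance, adapter, device and
/// queue. `init_async` selects the adapter (honoring environment overrides
/// via `get_adapter_with_capabilities_or_from_env`) and requests a device
/// with the example's required features and limits, optionally writing an
/// API trace if `WGPU_TRACE` is set.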
struct ExampleContext {
instance: wgpu::Instance,
adapter: wgpu::Adapter,
device: wgpu::Device,
queue: wgpu::Queue,
}
impl ExampleContext {
async fn init_async<E: Example>(surface: &mut SurfaceWrapper, window: Arc<Window>) -> Self {
log::info!("Initializing wgpu...");
let instance = wgpu::Instance::new(&wgpu::util::instance_descriptor_from_env());
surface.pre_adapter(&instance, window);
let adapter = get_adapter_with_capabilities_or_from_env(
&instance,
&E::required_features(),
&E::required_downlevel_capabilities(),
&surface.get(),
)
.await;
let needed_limits = E::required_limits().using_resolution(adapter.limits());
let trace_dir = std::env::var("WGPU_TRACE");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: (E::optional_features() & adapter.features())
| E::required_features(),
required_limits: needed_limits,
memory_hints: wgpu::MemoryHints::MemoryUsage,
},
trace_dir.ok().as_ref().map(std::path::Path::new),
)
.await
.expect("Unable to find a suitable GPU adapter!");
Self {
instance,
adapter,
device,
queue,
}
}
}
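/// Counts frames and logs the average frame time and FPS roughly once per
/// second.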
struct FrameCounter {
last_printed_instant: web_time::Instant,
frame_count: u32,
}
impl FrameCounter {
fn new() -> Self {
Self {
last_printed_instant: web_time::Instant::now(),
frame_count: 0,
}
}
fn update(&mut self) {
self.frame_count += 1;
let new_instant = web_time::Instant::now();
let elapsed_secs = (new_instant - self.last_printed_instant).as_secs_f32();
if elapsed_secs > 1.0 {
let elapsed_ms = elapsed_secs * 1000.0;
let frame_time = elapsed_ms / self.frame_count as f32;
let fps = self.frame_count as f32 / elapsed_secs;
log::info!("Frame time {:.2}ms ({:.1} FPS)", frame_time, fps);
self.last_printed_instant = new_instant;
self.frame_count = 0;
}
}
}
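/// Sets up logging, the window, the surface and the wgpu context, then drives
/// the winit event loop: the example is created once the start condition is
/// met, resized and redrawn on the corresponding window events, and the loop
/// exits when Escape is pressed or the window is closed. On native builds,
/// pressing `r` prints the instance's internal report.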
async fn start<E: Example>(title: &str) {
init_logger();
log::debug!(
"Enabled backends: {:?}",
wgpu::Instance::enabled_backend_features()
);
let window_loop = EventLoopWrapper::new(title);
let mut surface = SurfaceWrapper::new();
let context = ExampleContext::init_async::<E>(&mut surface, window_loop.window.clone()).await;
let mut frame_counter = FrameCounter::new();
let mut example = None;
cfg_if::cfg_if! {
if #[cfg(target_arch = "wasm32")] {
use winit::platform::web::EventLoopExtWebSys;
let event_loop_function = EventLoop::spawn;
} else {
let event_loop_function = EventLoop::run;
}
}
log::info!("Entering event loop...");
#[allow(clippy::let_unit_value)]
let _ = (event_loop_function)(
window_loop.event_loop,
move |event: Event<()>, target: &EventLoopWindowTarget<()>| {
match event {
ref e if SurfaceWrapper::start_condition(e) => {
surface.resume(&context, window_loop.window.clone(), E::SRGB);
if example.is_none() {
example = Some(E::init(
surface.config(),
&context.adapter,
&context.device,
&context.queue,
));
}
}
Event::Suspended => {
surface.suspend();
}
Event::WindowEvent { event, .. } => match event {
WindowEvent::Resized(size) => {
surface.resize(&context, size);
example.as_mut().unwrap().resize(
surface.config(),
&context.device,
&context.queue,
);
window_loop.window.request_redraw();
}
WindowEvent::KeyboardInput {
event:
KeyEvent {
logical_key: Key::Named(NamedKey::Escape),
..
},
..
}
| WindowEvent::CloseRequested => {
target.exit();
}
#[cfg(not(target_arch = "wasm32"))]
WindowEvent::KeyboardInput {
event:
KeyEvent {
logical_key: Key::Character(s),
..
},
..
} if s == "r" => {
println!("{:#?}", context.instance.generate_report());
}
WindowEvent::RedrawRequested => {
if example.is_none() {
return;
}
frame_counter.update();
let frame = surface.acquire(&context);
let view = frame.texture.create_view(&wgpu::TextureViewDescriptor {
format: Some(surface.config().view_formats[0]),
..wgpu::TextureViewDescriptor::default()
});
example
.as_mut()
.unwrap()
.render(&view, &context.device, &context.queue);
frame.present();
window_loop.window.request_redraw();
}
_ => example.as_mut().unwrap().update(event),
},
_ => {}
}
},
);
}
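/// Platform-agnostic entry point for an example: spawns `start` on the
/// browser's event loop on wasm, and blocks on it with `pollster` everywhere
/// else.
///
/// A minimal sketch of a caller, assuming a hypothetical `Blank` type that
/// implements [`Example`]:
///
/// ```ignore
/// fn main() {
///     run::<Blank>("blank example");
/// }
/// ```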
pub fn run<E: Example>(title: &'static str) {
cfg_if::cfg_if! {
if #[cfg(target_arch = "wasm32")] {
wasm_bindgen_futures::spawn_local(async move { start::<E>(title).await })
} else {
pollster::block_on(start::<E>(title));
}
}
}
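/// Returns the raw value associated with `search_key` in a URL query string
/// of the form `?key=value&key2=value2`, or `None` if the key is absent.
/// Used on the web to read the `RUST_LOG` override, e.g.
/// `parse_url_query_string("?RUST_LOG=debug", "RUST_LOG") == Some("debug")`.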
#[cfg(target_arch = "wasm32")]
pub fn parse_url_query_string<'a>(query: &'a str, search_key: &str) -> Option<&'a str> {
let query_string = query.strip_prefix('?')?;
for pair in query_string.split('&') {
let mut pair = pair.split('=');
let key = pair.next()?;
let value = pair.next()?;
if key == search_key {
return Some(value);
}
}
None
}
#[cfg(test)]
pub use wgpu_test::image::ComparisonType;
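// Test support: `ExampleTestParams` describes how to render an example
// headlessly and compare the result against a reference image. The `From`
// impl below builds a `GpuTestConfiguration` that initializes the example
// with an offscreen-sized `SurfaceConfiguration`, renders one frame into an
// Rgba8 texture, copies it into a mappable buffer, and compares the bytes
// against the image at `image_path`.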
#[cfg(test)]
#[derive(Clone)]
pub struct ExampleTestParams<E> {
pub name: &'static str,
pub image_path: &'static str,
pub width: u32,
pub height: u32,
pub optional_features: wgpu::Features,
pub base_test_parameters: wgpu_test::TestParameters,
pub comparisons: &'static [ComparisonType],
pub _phantom: std::marker::PhantomData<E>,
}
#[cfg(test)]
impl<E: Example + wgpu::WasmNotSendSync> From<ExampleTestParams<E>>
for wgpu_test::GpuTestConfiguration
{
fn from(params: ExampleTestParams<E>) -> Self {
wgpu_test::GpuTestConfiguration::new()
.name(params.name)
.parameters({
assert_eq!(params.width % 64, 0, "width must be a multiple of 64 so bytes_per_row satisfies wgpu's 256-byte copy alignment");
let features = E::required_features() | params.optional_features;
params.base_test_parameters.clone().features(features)
})
.run_async(move |ctx| async move {
let format = if E::SRGB {
wgpu::TextureFormat::Rgba8UnormSrgb
} else {
wgpu::TextureFormat::Rgba8Unorm
};
let dst_texture = ctx.device.create_texture(&wgpu::TextureDescriptor {
label: Some("destination"),
size: wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format,
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
view_formats: &[],
});
let dst_view = dst_texture.create_view(&wgpu::TextureViewDescriptor::default());
let dst_buffer = ctx.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("image map buffer"),
size: params.width as u64 * params.height as u64 * 4,
usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
mapped_at_creation: false,
});
let mut example = E::init(
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format,
width: params.width,
height: params.height,
desired_maximum_frame_latency: 2,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![format],
},
&ctx.adapter,
&ctx.device,
&ctx.queue,
);
example.render(&dst_view, &ctx.device, &ctx.queue);
let mut cmd_buf = ctx
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
cmd_buf.copy_texture_to_buffer(
wgpu::TexelCopyTextureInfo {
texture: &dst_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
wgpu::TexelCopyBufferInfo {
buffer: &dst_buffer,
layout: wgpu::TexelCopyBufferLayout {
offset: 0,
bytes_per_row: Some(params.width * 4),
rows_per_image: None,
},
},
wgpu::Extent3d {
width: params.width,
height: params.height,
depth_or_array_layers: 1,
},
);
ctx.queue.submit(Some(cmd_buf.finish()));
let dst_buffer_slice = dst_buffer.slice(..);
dst_buffer_slice.map_async(wgpu::MapMode::Read, |_| ());
ctx.async_poll(wgpu::Maintain::wait())
.await
.panic_on_timeout();
let bytes = dst_buffer_slice.get_mapped_range().to_vec();
wgpu_test::image::compare_image_output(
dbg!(env!("CARGO_MANIFEST_DIR").to_string() + "/../" + params.image_path),
&ctx.adapter_info,
params.width,
params.height,
&bytes,
params.comparisons,
)
.await;
})
}
}