wgpu_examples/repeated_compute/mod.rs

use nanorand::Rng;

// Sentinel marking results that overflowed; entries equal to this are
// printed as "(overflow)" in run().
const OVERFLOW: u32 = 0xffffffff;

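// Creates the GPU state once, then repeatedly fills `numbers` with random
// values and runs the compute shader over them, logging each batch of results.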
async fn run() {
    let mut numbers = [0u32; 256];
    let context = WgpuContext::new(size_of_val(&numbers)).await;

    let mut rand = nanorand::WyRand::new();

    // Ten rounds of: randomize the input, run the shader, log the output.
    for _ in 0..10 {
        for p in numbers.iter_mut() {
            *p = rand.generate::<u16>() as u32;
        }

        compute(&mut numbers, &context).await;

        let printed_numbers = numbers
            .iter()
            .map(|n| match n {
                &OVERFLOW => "(overflow)".to_string(),
                n => n.to_string(),
            })
            .collect::<Vec<String>>();
        log::info!("Results: {printed_numbers:?}");
    }
}

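// Uploads `local_buffer` to the GPU, dispatches the compute shader over it,
// and reads the results back into `local_buffer`.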
async fn compute(local_buffer: &mut [u32], context: &WgpuContext) {
    log::info!("Beginning GPU compute on data {local_buffer:?}.");
    // Upload the CPU-side data into the GPU storage buffer.
    context.queue.write_buffer(
        &context.storage_buffer,
        0,
        bytemuck::cast_slice(local_buffer),
    );
    log::info!("Wrote to buffer.");

    let mut command_encoder = context
        .device
        .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });

    // Record a compute pass that runs the pipeline over the whole buffer.
    {
        let mut compute_pass = command_encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
            label: None,
            timestamp_writes: None,
        });
        compute_pass.set_pipeline(&context.pipeline);
        compute_pass.set_bind_group(0, &context.bind_group, &[]);
        compute_pass.dispatch_workgroups(local_buffer.len() as u32, 1, 1);
    }
    // Copy the shader's output into the staging buffer so it can be mapped.
    command_encoder.copy_buffer_to_buffer(
        &context.storage_buffer,
        0,
        &context.output_staging_buffer,
        0,
        context.storage_buffer.size(),
    );

    context.queue.submit(Some(command_encoder.finish()));
    log::info!("Submitted commands.");

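    // Read the results back: mapping the staging buffer completes
    // asynchronously, so poll the device and then wait on a channel for the
    // result delivered by the map_async callback.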
    let buffer_slice = context.output_staging_buffer.slice(..);
    let (sender, receiver) = flume::bounded(1);
    buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
    context
        .device
        .poll(wgpu::PollType::wait_indefinitely())
        .unwrap();
    log::info!("Device polled.");
    receiver.recv_async().await.unwrap().unwrap();
    log::info!("Result received.");
    {
        // The mapped view must be dropped before the buffer is unmapped below,
        // hence the inner scope.
        let view = buffer_slice.get_mapped_range();
        local_buffer.copy_from_slice(bytemuck::cast_slice(&view));
    }
    log::info!("Results written to local buffer.");
    context.output_staging_buffer.unmap();
}

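// On native targets `run()` is driven to completion with pollster; on wasm32
// the future is spawned onto the browser's event loop instead.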
pub fn main() {
    #[cfg(not(target_arch = "wasm32"))]
    {
        env_logger::builder()
            .filter_level(log::LevelFilter::Info)
            .format_timestamp_nanos()
            .init();
        pollster::block_on(run());
    }
    #[cfg(target_arch = "wasm32")]
    {
        std::panic::set_hook(Box::new(console_error_panic_hook::hook));
        console_log::init_with_level(log::Level::Info).expect("could not initialize logger");

        crate::utils::add_web_nothing_to_see_msg();

        wasm_bindgen_futures::spawn_local(run());
    }
}

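// GPU state created once in `run()` and shared by every call to `compute()`.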
struct WgpuContext {
    device: wgpu::Device,
    queue: wgpu::Queue,
    pipeline: wgpu::ComputePipeline,
    bind_group: wgpu::BindGroup,
    storage_buffer: wgpu::Buffer,
    output_staging_buffer: wgpu::Buffer,
}

impl WgpuContext {
    async fn new(buffer_size: usize) -> WgpuContext {
        let instance = wgpu::Instance::default();
        let adapter = instance
            .request_adapter(&wgpu::RequestAdapterOptions::default())
            .await
            .unwrap();
        let (device, queue) = adapter
            .request_device(&wgpu::DeviceDescriptor {
                label: None,
                required_features: wgpu::Features::empty(),
                required_limits: wgpu::Limits::downlevel_defaults(),
                experimental_features: wgpu::ExperimentalFeatures::disabled(),
                memory_hints: wgpu::MemoryHints::Performance,
                trace: wgpu::Trace::Off,
            })
            .await
            .unwrap();

        let shader = device.create_shader_module(wgpu::include_wgsl!("shader.wgsl"));

        // Buffer the shader reads from and writes to; COPY_DST lets the CPU
        // upload into it and COPY_SRC lets the results be copied out of it.
        let storage_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: None,
            size: buffer_size as wgpu::BufferAddress,
            usage: wgpu::BufferUsages::STORAGE
                | wgpu::BufferUsages::COPY_DST
                | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: false,
        });
        // Separate buffer that can be mapped for reading the results on the CPU.
        let output_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: None,
            size: buffer_size as wgpu::BufferAddress,
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });

        // Expose the storage buffer to the compute stage as a read-write
        // storage binding at binding 0.
        let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
            label: None,
            entries: &[wgpu::BindGroupLayoutEntry {
                binding: 0,
                visibility: wgpu::ShaderStages::COMPUTE,
                ty: wgpu::BindingType::Buffer {
                    ty: wgpu::BufferBindingType::Storage { read_only: false },
                    has_dynamic_offset: false,
                    min_binding_size: None,
                },
                count: None,
            }],
        });
        let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            label: None,
            layout: &bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: storage_buffer.as_entire_binding(),
            }],
        });

        let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
            label: None,
            bind_group_layouts: &[&bind_group_layout],
            push_constant_ranges: &[],
        });
        let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
            label: None,
            layout: Some(&pipeline_layout),
            module: &shader,
            entry_point: Some("main"),
            compilation_options: Default::default(),
            cache: None,
        });

        WgpuContext {
            device,
            queue,
            pipeline,
            bind_group,
            storage_buffer,
            output_staging_buffer,
        }
    }
}