// wgpu_examples/repeated_compute/mod.rs
use nanorand::Rng;

/// Sentinel (`u32::MAX`) substituted in the log output for entries the GPU
/// marked as overflowed — presumably written by `shader.wgsl`; confirm there.
const OVERFLOW: u32 = 0xffffffff;
11
12async fn run() {
13 let mut numbers = [0u32; 256];
14 let context = WgpuContext::new(size_of_val(&numbers)).await;
15
16 let mut rand = nanorand::WyRand::new();
17
18 for _ in 0..10 {
19 for p in numbers.iter_mut() {
20 *p = rand.generate::<u16>() as u32;
21 }
22
23 compute(&mut numbers, &context).await;
24
25 let printed_numbers = numbers
26 .iter()
27 .map(|n| match n {
28 &OVERFLOW => "(overflow)".to_string(),
29 n => n.to_string(),
30 })
31 .collect::<Vec<String>>();
32 log::info!("Results: {printed_numbers:?}");
33 }
34}
35
36async fn compute(local_buffer: &mut [u32], context: &WgpuContext) {
37 log::info!("Beginning GPU compute on data {local_buffer:?}.");
38 context.queue.write_buffer(
42 &context.storage_buffer,
43 0,
44 bytemuck::cast_slice(local_buffer),
45 );
46 log::info!("Wrote to buffer.");
47
48 let mut command_encoder = context
49 .device
50 .create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
51
52 {
53 let mut compute_pass = command_encoder.begin_compute_pass(&wgpu::ComputePassDescriptor {
54 label: None,
55 timestamp_writes: None,
56 });
57 compute_pass.set_pipeline(&context.pipeline);
58 compute_pass.set_bind_group(0, &context.bind_group, &[]);
59 compute_pass.dispatch_workgroups(local_buffer.len() as u32, 1, 1);
60 }
61 command_encoder.copy_buffer_to_buffer(
65 &context.storage_buffer,
66 0,
67 &context.output_staging_buffer,
68 0,
69 context.storage_buffer.size(),
70 );
71
72 context.queue.submit(Some(command_encoder.finish()));
74 log::info!("Submitted commands.");
75
76 let buffer_slice = context.output_staging_buffer.slice(..);
81 let (sender, receiver) = flume::bounded(1);
103 buffer_slice.map_async(wgpu::MapMode::Read, move |r| sender.send(r).unwrap());
104 context
109 .device
110 .poll(wgpu::PollType::wait_indefinitely())
111 .unwrap();
112 log::info!("Device polled.");
113 receiver.recv_async().await.unwrap().unwrap();
115 log::info!("Result received.");
116 {
118 let view = buffer_slice.get_mapped_range().unwrap();
119 let data: Vec<u32> = bytemuck::allocation::pod_collect_to_vec(&view);
120 local_buffer.copy_from_slice(&data);
121 }
122 log::info!("Results written to local buffer.");
123 context.output_staging_buffer.unmap();
127}
128
129pub fn main() {
130 #[cfg(not(target_arch = "wasm32"))]
131 {
132 env_logger::builder()
133 .filter_level(log::LevelFilter::Info)
134 .format_timestamp_nanos()
135 .init();
136 pollster::block_on(run());
137 }
138 #[cfg(target_arch = "wasm32")]
139 {
140 std::panic::set_hook(Box::new(console_error_panic_hook::hook));
141 console_log::init_with_level(log::Level::Info).expect("could not initialize logger");
142
143 crate::utils::add_web_nothing_to_see_msg();
144
145 wasm_bindgen_futures::spawn_local(run());
146 }
147}
148
/// Long-lived GPU state shared across every `compute` call: the device,
/// its queue, the compiled pipeline, and the two buffers it operates on.
struct WgpuContext {
    device: wgpu::Device,
    queue: wgpu::Queue,
    pipeline: wgpu::ComputePipeline,
    // Binds `storage_buffer` to binding 0 of the compute shader.
    bind_group: wgpu::BindGroup,
    // Shader-visible buffer: inputs are written here, results read back from it.
    storage_buffer: wgpu::Buffer,
    // CPU-mappable buffer the results are copied into for readback.
    output_staging_buffer: wgpu::Buffer,
}
158
159impl WgpuContext {
160 async fn new(buffer_size: usize) -> WgpuContext {
161 let instance = wgpu::Instance::default();
162 let adapter = instance
163 .request_adapter(&wgpu::RequestAdapterOptions::default())
164 .await
165 .unwrap();
166 let (device, queue) = adapter
167 .request_device(&wgpu::DeviceDescriptor {
168 label: None,
169 required_features: wgpu::Features::empty(),
170 required_limits: wgpu::Limits::downlevel_defaults(),
171 experimental_features: wgpu::ExperimentalFeatures::disabled(),
172 memory_hints: wgpu::MemoryHints::Performance,
173 trace: wgpu::Trace::Off,
174 })
175 .await
176 .unwrap();
177
178 let shader = device.create_shader_module(wgpu::include_wgsl!("shader.wgsl"));
180
181 let storage_buffer = device.create_buffer(&wgpu::BufferDescriptor {
183 label: None,
184 size: buffer_size as wgpu::BufferAddress,
185 usage: wgpu::BufferUsages::STORAGE
186 | wgpu::BufferUsages::COPY_DST
187 | wgpu::BufferUsages::COPY_SRC,
188 mapped_at_creation: false,
189 });
190 let output_staging_buffer = device.create_buffer(&wgpu::BufferDescriptor {
198 label: None,
199 size: buffer_size as wgpu::BufferAddress,
200 usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
201 mapped_at_creation: false,
202 });
203
204 let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
206 label: None,
207 entries: &[wgpu::BindGroupLayoutEntry {
208 binding: 0,
209 visibility: wgpu::ShaderStages::COMPUTE,
210 ty: wgpu::BindingType::Buffer {
211 ty: wgpu::BufferBindingType::Storage { read_only: false },
212 has_dynamic_offset: false,
213 min_binding_size: None,
215 },
216 count: None,
217 }],
218 });
219 let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
222 label: None,
223 layout: &bind_group_layout,
224 entries: &[wgpu::BindGroupEntry {
225 binding: 0,
226 resource: storage_buffer.as_entire_binding(),
227 }],
228 });
229
230 let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
231 label: None,
232 bind_group_layouts: &[Some(&bind_group_layout)],
233 immediate_size: 0,
234 });
235 let pipeline = device.create_compute_pipeline(&wgpu::ComputePipelineDescriptor {
236 label: None,
237 layout: Some(&pipeline_layout),
238 module: &shader,
239 entry_point: Some("main"),
240 compilation_options: Default::default(),
241 cache: None,
242 });
243
244 WgpuContext {
245 device,
246 queue,
247 pipeline,
248 bind_group,
249 storage_buffer,
250 output_staging_buffer,
251 }
252 }
253}