Staging thread-safety + new buffer per flush

This commit is contained in:
mars 2022-04-18 20:10:24 -06:00
parent b81dbd1569
commit 8ef1d47947
1 changed file with 26 additions and 25 deletions

View File

@ -1,54 +1,54 @@
//! Intermediate CPU-mappable, GPU-visible storage for transferral to a GPU buffer.
//!
//! TODO: persistent mapping to bypass spillover
//! TODO: double-buffered staging
//! TODO: use wgpu::util::StagingBelt?
//! TODO: pass access to a wgpu::Queue for write_buffer, staging belt recall, or command encoding
use std::collections::VecDeque;
use std::sync::Arc;
use std::sync::{Arc, Mutex};
pub struct StagingPool<T> {
device: Arc<wgpu::Device>,
buffer: wgpu::Buffer,
spillover: VecDeque<CopyBuffer<T>>,
stage_size: usize,
spillover: Mutex<VecDeque<CopyBuffer<T>>>,
}
impl<T: Clone> StagingPool<T> {
pub fn new(device: Arc<wgpu::Device>, stage_size: usize) -> Self {
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("staging buffer"),
size: stage_size as wgpu::BufferAddress,
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
Self {
device,
buffer,
stage_size,
spillover: Default::default(),
}
}
pub fn flush<'a>(
&mut self,
cmd: &mut wgpu::CommandEncoder,
&self,
encoder: &mut wgpu::CommandEncoder,
get_dst: impl Fn(&T) -> CopyDest<'a>,
on_complete: impl Fn(T),
) {
if self.spillover.is_empty() {
let mut spillover = self.spillover.lock().unwrap();
if spillover.is_empty() {
return;
}
let src = &self.buffer;
let src = self.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("staging buffer"),
size: self.stage_size as wgpu::BufferAddress,
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: true,
});
let mut src_view = src.slice(..).get_mapped_range_mut();
let mut src_offset = 0;
while let Some(copy) = self.spillover.pop_back() {
while let Some(copy) = spillover.pop_back() {
let (copy, next) = copy.eat(&mut src_view[src_offset..]);
let dst = get_dst(&copy.target);
let dst_offset = dst.offset + copy.offset;
cmd.copy_buffer_to_buffer(
src,
encoder.copy_buffer_to_buffer(
&src,
src_offset as wgpu::BufferAddress,
dst.buffer,
dst_offset as wgpu::BufferAddress,
@ -58,7 +58,7 @@ impl<T: Clone> StagingPool<T> {
src_offset += copy.size;
if let Some(next) = next {
self.spillover.push_back(next);
spillover.push_back(next);
break;
} else {
on_complete(copy.target);
@ -66,9 +66,10 @@ impl<T: Clone> StagingPool<T> {
}
}
pub fn queue_copies(&mut self, copies: Vec<CopyBuffer<T>>) {
self.spillover.reserve(copies.len());
self.spillover.extend(copies.into_iter());
pub fn queue_copies(&self, copies: Vec<CopyBuffer<T>>) {
let mut spillover = self.spillover.lock().unwrap();
spillover.reserve(copies.len());
spillover.extend(copies.into_iter());
}
}