Staging thread-safety + new buffer per flush

This commit is contained in:
mars 2022-04-18 20:10:24 -06:00
parent b81dbd1569
commit 8ef1d47947
1 changed file with 26 additions and 25 deletions

View File

@@ -1,54 +1,54 @@
//! Intermediate CPU-mappable, GPU-visible storage for transferral to a GPU buffer. //! Intermediate CPU-mappable, GPU-visible storage for transferral to a GPU buffer.
//! //!
//! TODO: persistent mapping to bypass spillover //! TODO: persistent mapping to bypass spillover
//! TODO: double-buffered staging //! TODO: use wgpu::util::StagingBelt?
//! TODO: pass access to a wgpu::Queue for write_buffer, staging belt recall, or command encoding
use std::collections::VecDeque; use std::collections::VecDeque;
use std::sync::Arc; use std::sync::{Arc, Mutex};
pub struct StagingPool<T> { pub struct StagingPool<T> {
device: Arc<wgpu::Device>, device: Arc<wgpu::Device>,
buffer: wgpu::Buffer, stage_size: usize,
spillover: VecDeque<CopyBuffer<T>>, spillover: Mutex<VecDeque<CopyBuffer<T>>>,
} }
impl<T: Clone> StagingPool<T> { impl<T: Clone> StagingPool<T> {
pub fn new(device: Arc<wgpu::Device>, stage_size: usize) -> Self { pub fn new(device: Arc<wgpu::Device>, stage_size: usize) -> Self {
let buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("staging buffer"),
size: stage_size as wgpu::BufferAddress,
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: false,
});
Self { Self {
device, device,
buffer, stage_size,
spillover: Default::default(), spillover: Default::default(),
} }
} }
pub fn flush<'a>( pub fn flush<'a>(
&mut self, &self,
cmd: &mut wgpu::CommandEncoder, encoder: &mut wgpu::CommandEncoder,
get_dst: impl Fn(&T) -> CopyDest<'a>, get_dst: impl Fn(&T) -> CopyDest<'a>,
on_complete: impl Fn(T), on_complete: impl Fn(T),
) { ) {
if self.spillover.is_empty() { let mut spillover = self.spillover.lock().unwrap();
if spillover.is_empty() {
return; return;
} }
let src = &self.buffer; let src = self.device.create_buffer(&wgpu::BufferDescriptor {
label: Some("staging buffer"),
size: self.stage_size as wgpu::BufferAddress,
usage: wgpu::BufferUsages::MAP_READ | wgpu::BufferUsages::COPY_SRC,
mapped_at_creation: true,
});
let mut src_view = src.slice(..).get_mapped_range_mut(); let mut src_view = src.slice(..).get_mapped_range_mut();
let mut src_offset = 0; let mut src_offset = 0;
while let Some(copy) = spillover.pop_back() {
while let Some(copy) = self.spillover.pop_back() {
let (copy, next) = copy.eat(&mut src_view[src_offset..]); let (copy, next) = copy.eat(&mut src_view[src_offset..]);
let dst = get_dst(&copy.target); let dst = get_dst(&copy.target);
let dst_offset = dst.offset + copy.offset; let dst_offset = dst.offset + copy.offset;
cmd.copy_buffer_to_buffer(
src, encoder.copy_buffer_to_buffer(
&src,
src_offset as wgpu::BufferAddress, src_offset as wgpu::BufferAddress,
dst.buffer, dst.buffer,
dst_offset as wgpu::BufferAddress, dst_offset as wgpu::BufferAddress,
@@ -58,7 +58,7 @@ impl<T: Clone> StagingPool<T> {
src_offset += copy.size; src_offset += copy.size;
if let Some(next) = next { if let Some(next) = next {
self.spillover.push_back(next); spillover.push_back(next);
break; break;
} else { } else {
on_complete(copy.target); on_complete(copy.target);
@@ -66,9 +66,10 @@ impl<T: Clone> StagingPool<T> {
} }
} }
pub fn queue_copies(&mut self, copies: Vec<CopyBuffer<T>>) { pub fn queue_copies(&self, copies: Vec<CopyBuffer<T>>) {
self.spillover.reserve(copies.len()); let mut spillover = self.spillover.lock().unwrap();
self.spillover.extend(copies.into_iter()); spillover.reserve(copies.len());
spillover.extend(copies.into_iter());
} }
} }