From fb525ec3d1921ed6dc2abcc27d222840c7d4dcb8 Mon Sep 17 00:00:00 2001
From: mars
Date: Sun, 17 Apr 2022 21:56:16 -0600
Subject: [PATCH] Dedicated staging module

---
 src/lib.rs     |   1 +
 src/staging.rs | 137 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 138 insertions(+)
 create mode 100644 src/staging.rs

diff --git a/src/lib.rs b/src/lib.rs
index 67babb0..dfaa93c 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -8,6 +8,7 @@ use strum::IntoEnumIterator;
 pub mod mesh;
 pub mod pass;
 pub mod phase;
+pub mod staging;
 
 use pass::*;
 use phase::*;
diff --git a/src/staging.rs b/src/staging.rs
new file mode 100644
index 0000000..32ac2cd
--- /dev/null
+++ b/src/staging.rs
@@ -0,0 +1,137 @@
+//! Intermediate CPU-mappable, GPU-visible storage for transfer to a GPU buffer.
+//!
+//! TODO: persistent mapping to bypass spillover
+//! TODO: double-buffered staging
+
+use std::collections::VecDeque;
+use std::sync::Arc;
+
+pub struct StagingPool<T> {
+    device: Arc<wgpu::Device>,
+    buffer: wgpu::Buffer,
+    spillover: VecDeque<CopyBuffer<T>>,
+}
+
+impl<T: Clone> StagingPool<T> {
+    pub fn new(device: Arc<wgpu::Device>, stage_size: usize) -> Self {
+        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
+            label: Some("staging buffer"),
+            size: stage_size as wgpu::BufferAddress,
+            usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC,
+            mapped_at_creation: true,
+        });
+
+        Self {
+            device,
+            buffer,
+            spillover: Default::default(),
+        }
+    }
+
+    pub fn flush(
+        &mut self,
+        cmd: &mut wgpu::CommandEncoder,
+        get_dst: impl Fn(&T) -> CopyDest<'_>,
+        on_complete: impl Fn(T),
+    ) {
+        if self.spillover.is_empty() {
+            return;
+        }
+
+        let src = &self.buffer;
+        let mut src_view = src.slice(..).get_mapped_range_mut();
+        let mut src_offset = 0;
+
+        while let Some(copy) = self.spillover.pop_back() {
+            let (copy, next) = copy.eat(&mut src_view[src_offset..]);
+
+            let dst = get_dst(&copy.target);
+            let dst_offset = dst.offset + copy.offset;
+            cmd.copy_buffer_to_buffer(
+                src,
+                src_offset as wgpu::BufferAddress,
+                dst.buffer,
+                dst_offset as wgpu::BufferAddress,
+                copy.size as wgpu::BufferAddress,
+            );
+
+            src_offset += copy.size;
+
+            if let Some(next) = next {
+                self.spillover.push_back(next);
+                break;
+            } else {
+                on_complete(copy.target);
+            }
+        }
+    }
+
+    pub fn queue_copies(&mut self, copies: Vec<CopyBuffer<T>>) {
+        self.spillover.reserve(copies.len());
+        self.spillover.extend(copies.into_iter());
+    }
+}
+
+pub struct CopyDest<'a> {
+    /// The destination buffer.
+    pub buffer: &'a wgpu::Buffer,
+
+    /// The destination offset *in bytes.*
+    pub offset: usize,
+}
+
+pub struct CopyInfo<T> {
+    /// The target of the copy.
+    pub target: T,
+
+    /// The offset for the destination, including the [CopyDest] offset.
+    pub offset: usize,
+
+    /// The copy size *in bytes.*
+    pub size: usize,
+}
+
+pub struct CopyBuffer<T> {
+    /// The target of the copy.
+    pub target: T,
+
+    /// The offset for the destination, including the [CopyDest] offset.
+    pub offset: usize,
+
+    /// The CPU memory for the copy.
+    pub data: Vec<u8>,
+}
+
+impl<T: Clone> CopyBuffer<T> {
+    pub fn eat(self, dst: &mut [u8]) -> (CopyInfo<T>, Option<Self>) {
+        let Self {
+            target,
+            offset,
+            mut data,
+        } = self;
+
+        let dst_size = dst.len();
+        let size = data.len();
+
+        if dst_size >= size {
+            dst[0..size].copy_from_slice(&data);
+            let info = CopyInfo { target, offset, size };
+            (info, None)
+        } else {
+            let remainder = data.split_off(dst_size);
+            dst.copy_from_slice(&data);
+            let info = CopyInfo {
+                target: target.clone(),
+                offset,
+                size: dst_size,
+            };
+            let offset = offset + dst_size;
+            let next = Self {
+                target,
+                offset,
+                data: remainder,
+            };
+            (info, Some(next))
+        }
+    }
+}
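
For review context, a minimal sketch of how a renderer might drive the new module. Everything here that is not in the patch (the MeshHandle type, the mesh_buffers slice, and the upload_meshes entry point) is made up for illustration:

use std::sync::Arc;

use crate::staging::{CopyBuffer, CopyDest, StagingPool};

/// Hypothetical copy target: an index into an externally-owned buffer pool.
#[derive(Clone)]
struct MeshHandle(usize);

fn upload_meshes(
    device: Arc<wgpu::Device>,
    queue: &wgpu::Queue,
    mesh_buffers: &[wgpu::Buffer],
    staging: &mut StagingPool<MeshHandle>,
    vertex_data: Vec<u8>,
) {
    // Queue the raw vertex bytes for upload into mesh buffer 0.
    staging.queue_copies(vec![CopyBuffer {
        target: MeshHandle(0),
        offset: 0,
        data: vertex_data,
    }]);

    // Record the stage-to-destination copies. get_dst resolves a target to
    // its destination buffer only at flush time, so the pool never needs to
    // know where its targets live.
    let mut encoder =
        device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
    staging.flush(
        &mut encoder,
        |handle| CopyDest {
            buffer: &mesh_buffers[handle.0],
            offset: 0,
        },
        |handle| println!("mesh {} fully uploaded", handle.0),
    );

    // Note: the pool's stage is created with mapped_at_creation and flush
    // never unmaps it, yet wgpu requires a buffer to be unmapped before a
    // submission may copy from it. The module's TODOs suggest mapping
    // management is still open, so this submit assumes that has been handled.
    queue.submit([encoder.finish()]);
}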
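The spillover math in CopyBuffer::eat can also be checked in isolation. A self-contained sketch with made-up sizes: when the remaining stage space is smaller than the pending data, eat emits a CopyInfo for the part that fits and hands back the remainder with its destination offset advanced.

use crate::staging::CopyBuffer;

fn spillover_demo() {
    let copy = CopyBuffer {
        target: "vertices", // any T: Clone will do; a &str stands in here
        offset: 16,         // byte offset within the destination
        data: vec![0u8; 100],
    };

    // Pretend only 64 bytes of stage space are left this flush.
    let mut stage = [0u8; 64];
    let (info, rest) = copy.eat(&mut stage[..]);

    assert_eq!(info.size, 64);        // the chunk that fit is recorded...
    assert_eq!(info.offset, 16);      // ...at the original destination offset
    let rest = rest.unwrap();
    assert_eq!(rest.offset, 16 + 64); // the remainder resumes where it left off
    assert_eq!(rest.data.len(), 36);  // and waits in spillover for the next flush
}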