// cyborg/src/staging.rs

//! Intermediate CPU-mappable, GPU-visible storage for transferral to a GPU buffer.
//!
//! TODO: persistent mapping to bypass spillover
//! TODO: use wgpu::util::StagingBelt?
//! TODO: pass access to a wgpu::Queue for write_buffer, staging belt recall, or command encoding
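//!
//! A minimal usage sketch (assumptions, not part of this file: `device`,
//! `encoder`, `mesh_id`, `vertex_bytes`, and `vertex_buffer` come from the
//! surrounding renderer, and the `u32` target is just an illustrative ID):
//!
//! ```ignore
//! let pool = StagingPool::<u32>::new(device.clone(), 64 * 1024);
//!
//! // Queue CPU-side data; anything that doesn't fit in one staging
//! // buffer spills over into later flushes.
//! pool.queue_copies(vec![CopyBuffer {
//!     target: mesh_id,
//!     offset: 0,
//!     data: vertex_bytes,
//! }]);
//!
//! // Encode up to `stage_size` bytes of pending copies.
//! pool.flush(
//!     &mut encoder,
//!     |_mesh| CopyDest { buffer: &vertex_buffer, offset: 0 },
//!     |mesh| println!("mesh {} uploaded", mesh),
//! );
//! ```
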
use parking_lot::Mutex;
use std::collections::VecDeque;
use std::sync::Arc;

pub struct StagingPool<T> {
    device: Arc<wgpu::Device>,
    stage_size: usize,
    spillover: Mutex<VecDeque<CopyBuffer<T>>>,
}

impl<T: Clone> StagingPool<T> {
    pub fn new(device: Arc<wgpu::Device>, stage_size: usize) -> Self {
        Self {
            device,
            stage_size,
            spillover: Default::default(),
        }
    }

    /// Encodes all queued copies into `encoder`, writing at most
    /// `stage_size` bytes through a single staging buffer. Copies that
    /// don't fit stay queued for the next flush.
    pub fn flush<'a>(
        &self,
        encoder: &mut wgpu::CommandEncoder,
        get_dst: impl Fn(&T) -> CopyDest<'a>,
        on_complete: impl Fn(T),
    ) {
        let mut spillover = self.spillover.lock();
        if spillover.is_empty() {
            return;
        }
        let src = self.device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("staging buffer"),
            size: self.stage_size as wgpu::BufferAddress,
            // MAP_WRITE is the mappable usage that may be combined with
            // COPY_SRC; MAP_READ | COPY_SRC fails wgpu validation.
            usage: wgpu::BufferUsages::MAP_WRITE | wgpu::BufferUsages::COPY_SRC,
            mapped_at_creation: true,
        });

        let mut src_view = src.slice(..).get_mapped_range_mut();
        let mut src_offset = 0;

        while let Some(copy) = spillover.pop_back() {
            // Write as much of this copy as fits into the staging buffer.
            let (copy, next) = copy.eat(&mut src_view[src_offset..]);
            let dst = get_dst(&copy.target);
            let dst_offset = dst.offset + copy.offset;

            encoder.copy_buffer_to_buffer(
                &src,
                src_offset as wgpu::BufferAddress,
                dst.buffer,
                dst_offset as wgpu::BufferAddress,
                copy.size as wgpu::BufferAddress,
            );

            src_offset += copy.size;

            if let Some(next) = next {
                // The staging buffer is full; requeue the remainder for
                // the next flush and stop.
                spillover.push_back(next);
                break;
            } else {
                on_complete(copy.target);
            }
        }

        drop(src_view);
        src.unmap();
    }

    /// Queues a batch of buffer copies for the next [Self::flush].
    pub fn queue_copies(&self, copies: Vec<CopyBuffer<T>>) {
        let mut spillover = self.spillover.lock();
        spillover.reserve(copies.len());
        spillover.extend(copies);
    }
}

pub struct CopyDest<'a> {
    /// The destination buffer.
    pub buffer: &'a wgpu::Buffer,
    /// The destination offset *in bytes.*
    pub offset: usize,
}

pub struct CopyInfo<T> {
    /// The target of the copy.
    pub target: T,
    /// The destination offset *in bytes,* added on top of the [CopyDest] offset.
    pub offset: usize,
    /// The copy size *in bytes.*
    pub size: usize,
}

pub struct CopyBuffer<T> {
    /// The target of the copy.
    pub target: T,
    /// The destination offset *in bytes,* added on top of the [CopyDest] offset.
    pub offset: usize,
    /// The CPU memory for the copy.
    pub data: Vec<u8>,
}

impl<T: Clone> CopyBuffer<T> {
    /// Writes as much of this copy as fits into `dst`, returning the
    /// [CopyInfo] for the written portion and, if `dst` was too small,
    /// a remainder [CopyBuffer] holding the unwritten bytes.
    pub fn eat(self, dst: &mut [u8]) -> (CopyInfo<T>, Option<Self>) {
        let Self {
            target,
            offset,
            mut data,
        } = self;

        let dst_size = dst.len();
        let size = data.len();

        if dst_size >= size {
            // Everything fits; copy it all and finish.
            dst[0..size].copy_from_slice(&data);
            let info = CopyInfo {
                target,
                offset,
                size,
            };

            (info, None)
        } else {
            // Fill `dst`, then carry the rest forward at an advanced offset.
            let remainder = data.split_off(dst_size);
            dst.copy_from_slice(&data);
            let info = CopyInfo {
                target: target.clone(),
                offset,
                size: dst_size,
            };

            let offset = offset + dst_size;
            let next = Self {
                target,
                offset,
                data: remainder,
            };

            (info, Some(next))
        }
    }
}
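
// A small sanity check of the split logic in `CopyBuffer::eat` (a sketch
// added for illustration, not part of the original file): filling a 4-byte
// destination from 6 bytes of data should emit one 4-byte copy and return
// a remainder whose offset has advanced past the bytes already written.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn eat_splits_oversized_copies() {
        let copy = CopyBuffer {
            target: (),
            offset: 8,
            data: vec![1, 2, 3, 4, 5, 6],
        };

        let mut dst = [0u8; 4];
        let (info, next) = copy.eat(&mut dst);

        // The first chunk fills the destination exactly, at the original offset.
        assert_eq!((info.offset, info.size), (8, 4));
        assert_eq!(dst, [1, 2, 3, 4]);

        // The remainder resumes 4 bytes further into the target.
        let next = next.unwrap();
        assert_eq!(next.offset, 12);
        assert_eq!(next.data, vec![5, 6]);
    }
}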