// cyborg/src/renderer.rs

use super::camera::Camera;
use super::commands::{Command, CommandSet};
use super::mesh::{MeshData, Vertex};
use super::pool::*;
use super::scene::{PointLight, Scene};
use super::shader::{parse_wgsl, generate_wgsl, add_includes};
use crate::handle::*;
use crate::model::OnLoad;
use wgpu::util::DeviceExt;
use std::fs::read_to_string;

pub struct Renderer {
    pub device: wgpu::Device,
    pub queue: wgpu::Queue,
    pub mesh_pool: MeshPool,
    pub texture_pool: TexturePool,
    pub material_pool: MaterialPool,
    pub size: winit::dpi::PhysicalSize<u32>,
    surface: wgpu::Surface,
    config: wgpu::SurfaceConfiguration,
    depth_texture: wgpu::Texture,
    depth_texture_view: wgpu::TextureView,
    camera_uniform: CameraUniform,
    camera_buffer: wgpu::Buffer,
    point_lights_buffer: wgpu::Buffer,
    camera_bind_group: wgpu::BindGroup,
    meshes_buffer: wgpu::Buffer,
    meshes_bind_group: wgpu::BindGroup,
    render_pipeline: wgpu::RenderPipeline,
}

impl Renderer {
    pub fn new(
        size: winit::dpi::PhysicalSize<u32>,
        surface: wgpu::Surface,
        device: wgpu::Device,
        queue: wgpu::Queue,
        config: wgpu::SurfaceConfiguration,
    ) -> Self {
        let mesh_pool = MeshPool::default();
        let texture_pool = TexturePool::new(&device);
        let material_pool = MaterialPool::new(&device);
        let (depth_texture, depth_texture_view) = Self::make_depth_texture(&device, &config);

        let camera_uniform = CameraUniform::new();
        let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Camera Buffer"),
            contents: bytemuck::cast_slice(&[camera_uniform]),
            usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
        });
        let point_lights_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Point Lights Buffer"),
            size: 65536, // TODO buffer resizing
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });

        let camera_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[
                    wgpu::BindGroupLayoutEntry {
                        binding: 0,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Uniform,
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                    wgpu::BindGroupLayoutEntry {
                        binding: 1,
                        visibility: wgpu::ShaderStages::VERTEX_FRAGMENT,
                        ty: wgpu::BindingType::Buffer {
                            ty: wgpu::BufferBindingType::Storage { read_only: true },
                            has_dynamic_offset: false,
                            min_binding_size: None,
                        },
                        count: None,
                    },
                ],
                label: Some("Camera Bind Group Layout"),
            });
        let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &camera_bind_group_layout,
            entries: &[
                wgpu::BindGroupEntry {
                    binding: 0,
                    resource: camera_buffer.as_entire_binding(),
                },
                wgpu::BindGroupEntry {
                    binding: 1,
                    resource: point_lights_buffer.as_entire_binding(),
                },
            ],
            label: Some("Camera Bind Group"),
        });

        let meshes_buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some("Meshes Buffer"),
            size: 65536, // TODO resizable meshes buffer/gpu vectors
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        });
        let meshes_bind_group_layout =
            device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
                entries: &[wgpu::BindGroupLayoutEntry {
                    binding: 0,
                    visibility: wgpu::ShaderStages::VERTEX,
                    ty: wgpu::BindingType::Buffer {
                        ty: wgpu::BufferBindingType::Storage { read_only: true },
                        has_dynamic_offset: false,
                        min_binding_size: None,
                    },
                    count: None,
                }],
                label: Some("Meshes Bind Group Layout"),
            });
        let meshes_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
            layout: &meshes_bind_group_layout,
            entries: &[wgpu::BindGroupEntry {
                binding: 0,
                resource: meshes_buffer.as_entire_binding(),
            }],
            label: Some("Meshes Bind Group"),
        });

        let render_pipeline_layout =
            device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
                label: Some("Render Pipeline Layout"),
                bind_group_layouts: &[
                    &camera_bind_group_layout,
                    &meshes_bind_group_layout,
                    &material_pool.bind_group_layout,
                ],
                push_constant_ranges: &[],
            });

        // Load the shader source from disk and preprocess its includes
        let mut source = read_to_string("src/shader.wgsl").unwrap();
        source = add_includes(&source);
        // Parse the WGSL into a usable module
        let module = parse_wgsl(&source);
        // Generate a valid WGSL string from the module
        let gen_wgsl = generate_wgsl(&module);
        let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
            label: Some("shader.wgsl"),
            source: wgpu::ShaderSource::Wgsl(std::borrow::Cow::Owned(gen_wgsl)),
        });

        let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
            label: Some("Render Pipeline"),
            layout: Some(&render_pipeline_layout),
            vertex: wgpu::VertexState {
                module: &shader,
                entry_point: "vs_main",
                buffers: &[Vertex::desc()],
            },
            fragment: Some(wgpu::FragmentState {
                module: &shader,
                entry_point: "fs_main",
                targets: &[wgpu::ColorTargetState {
                    format: config.format,
                    blend: Some(wgpu::BlendState::REPLACE),
                    write_mask: wgpu::ColorWrites::ALL,
                }],
            }),
            primitive: wgpu::PrimitiveState {
                topology: wgpu::PrimitiveTopology::TriangleList,
                strip_index_format: None,
                front_face: wgpu::FrontFace::Ccw,
                cull_mode: Some(wgpu::Face::Back),
                polygon_mode: wgpu::PolygonMode::Fill,
                unclipped_depth: false,
                conservative: false,
            },
            depth_stencil: Some(wgpu::DepthStencilState {
                format: Self::DEPTH_FORMAT,
                depth_write_enabled: true,
                depth_compare: wgpu::CompareFunction::Less,
                stencil: wgpu::StencilState::default(),
                bias: wgpu::DepthBiasState::default(),
            }),
            multisample: wgpu::MultisampleState {
                count: 1,
                mask: !0,
                alpha_to_coverage_enabled: false,
            },
            multiview: None,
        });

        Self {
            size,
            surface,
            device,
            queue,
            config,
            mesh_pool,
            texture_pool,
            material_pool,
            depth_texture,
            depth_texture_view,
            camera_uniform,
            camera_buffer,
            point_lights_buffer,
            camera_bind_group,
            meshes_buffer,
            meshes_bind_group,
            render_pipeline,
        }
    }
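
    // Sketch (assumption, not called above): both fixed-size storage buffers in
    // `new` use identical descriptors apart from their labels, so a small helper
    // like this could deduplicate those allocations.
    #[allow(dead_code)]
    fn create_storage_buffer(device: &wgpu::Device, label: &str, size: u64) -> wgpu::Buffer {
        device.create_buffer(&wgpu::BufferDescriptor {
            label: Some(label),
            size,
            usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
            mapped_at_creation: false,
        })
    }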

    pub const DEPTH_FORMAT: wgpu::TextureFormat = wgpu::TextureFormat::Depth32Float;

    fn make_depth_texture(
        device: &wgpu::Device,
        config: &wgpu::SurfaceConfiguration,
    ) -> (wgpu::Texture, wgpu::TextureView) {
        let size = wgpu::Extent3d {
            width: config.width,
            height: config.height,
            depth_or_array_layers: 1,
        };
        let desc = wgpu::TextureDescriptor {
            label: Some("Depth Texture"),
            size,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: Self::DEPTH_FORMAT,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
        };
        let texture = device.create_texture(&desc);
        let view = texture.create_view(&wgpu::TextureViewDescriptor::default());
        (texture, view)
    }

    pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
        if new_size.width > 0 && new_size.height > 0 {
            self.size = new_size;
            self.config.width = new_size.width;
            self.config.height = new_size.height;
            self.surface.configure(&self.device, &self.config);
            // The depth texture must match the new surface size, so recreate it
            let (depth_texture, depth_texture_view) =
                Self::make_depth_texture(&self.device, &self.config);
            self.depth_texture = depth_texture;
            self.depth_texture_view = depth_texture_view;
        }
    }

    pub fn render(
        &mut self,
        camera: &impl Camera,
        scene: &Scene,
    ) -> Result<(), wgpu::SurfaceError> {
        self.camera_uniform.update(camera);
        self.queue.write_buffer(
            &self.camera_buffer,
            0,
            bytemuck::cast_slice(&[self.camera_uniform]),
        );

        let Scene {
            meshes,
            point_lights,
        } = scene;

        let mesh_commands = CommandSet::build(meshes);
        // TODO persistent staging buffer (write_buffer creates a new one per call)
        self.queue
            .write_buffer(&self.meshes_buffer, 0, mesh_commands.get_storage());

        let point_lights: Vec<PointLightUniform> = point_lights.iter().map(|p| p.into()).collect();
        // TODO make a function to ease arranging header + array data (this is really ugly);
        // researching proper structure alignment will be necessary
        // (see the pack_point_lights sketch after this method)
        self.queue.write_buffer(
            &self.point_lights_buffer,
            0,
            bytemuck::cast_slice(&[point_lights.len() as u32]),
        );
        self.queue.write_buffer(
            &self.point_lights_buffer,
            16,
            bytemuck::cast_slice(&point_lights),
        );

        let output = self.surface.get_current_texture()?;
        let view = output
            .texture
            .create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = self
            .device
            .create_command_encoder(&wgpu::CommandEncoderDescriptor {
                label: Some("Render Encoder"),
            });

        {
            let mut rp = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
                label: Some("Render Pass"),
                color_attachments: &[wgpu::RenderPassColorAttachment {
                    view: &view,
                    resolve_target: None,
                    ops: wgpu::Operations {
                        load: wgpu::LoadOp::Clear(wgpu::Color {
                            r: 0.1,
                            g: 0.2,
                            b: 0.3,
                            a: 1.0,
                        }),
                        store: true,
                    },
                }],
                depth_stencil_attachment: Some(wgpu::RenderPassDepthStencilAttachment {
                    view: &self.depth_texture_view,
                    depth_ops: Some(wgpu::Operations {
                        load: wgpu::LoadOp::Clear(1.0),
                        store: true,
                    }),
                    stencil_ops: None,
                }),
            });
            rp.set_pipeline(&self.render_pipeline);
            rp.set_bind_group(0, &self.camera_bind_group, &[]);
            rp.set_bind_group(1, &self.meshes_bind_group, &[]);

            // The most recently bound mesh group; Draw commands index into its buffers
            let mut group: Option<&MeshGroup> = None;
            for cmd in mesh_commands.iter() {
                match cmd {
                    Command::BindMeshGroup { group_id } => {
                        group = self.mesh_pool.groups.get(group_id);
                        let group = group.unwrap();
                        rp.set_vertex_buffer(0, group.vertices.slice(..));
                        rp.set_index_buffer(group.indices.slice(..), wgpu::IndexFormat::Uint32);
                    }
                    Command::BindMaterial { material_id } => {
                        let material = self.material_pool.materials.get(material_id).unwrap();
                        rp.set_bind_group(2, &material.bind_group, &[]);
                    }
                    Command::Draw {
                        sub_id: _,
                        instance_range,
                    } => {
                        // TODO use sub_id in mesh draw
                        let indices = 0..(group.unwrap().index_capacity as u32);
                        rp.draw_indexed(indices, 0, instance_range);
                    }
                }
            }
        }

        self.queue.submit(std::iter::once(encoder.finish()));
        output.present();
        Ok(())
    }
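
    // Sketch for the header + array TODO above (assumption, not called yet): pack
    // the light count and the light array into one contiguous byte vector so a
    // single write_buffer call can upload both. The 16-byte header mirrors the two
    // writes in `render` (a u32 count at offset 0, light data starting at offset 16).
    #[allow(dead_code)]
    fn pack_point_lights(lights: &[PointLightUniform]) -> Vec<u8> {
        let mut bytes =
            Vec::with_capacity(16 + lights.len() * std::mem::size_of::<PointLightUniform>());
        // u32 count followed by 12 bytes of padding up to 16-byte alignment
        bytes.extend_from_slice(bytemuck::bytes_of(&(lights.len() as u32)));
        bytes.extend_from_slice(&[0u8; 12]);
        bytes.extend_from_slice(bytemuck::cast_slice(lights));
        bytes
    }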
}
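
// Sketch for the "TODO buffer resizing" notes in `new` (assumption, not wired in):
// a storage buffer that is recreated with a larger capacity whenever a write would
// overflow it. Any bind group referencing the buffer must be rebuilt after a
// reallocation, which is why `write` reports whether the buffer was replaced.
#[allow(dead_code)]
struct GrowableBuffer {
    buffer: wgpu::Buffer,
    capacity: u64,
    usage: wgpu::BufferUsages,
    label: &'static str,
}

#[allow(dead_code)]
impl GrowableBuffer {
    fn new(
        device: &wgpu::Device,
        label: &'static str,
        capacity: u64,
        usage: wgpu::BufferUsages,
    ) -> Self {
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: Some(label),
            size: capacity,
            usage,
            mapped_at_creation: false,
        });
        Self {
            buffer,
            capacity,
            usage,
            label,
        }
    }

    /// Writes `data` at offset 0, reallocating first if it does not fit.
    /// Returns true when the underlying buffer was replaced.
    fn write(&mut self, device: &wgpu::Device, queue: &wgpu::Queue, data: &[u8]) -> bool {
        let needed = data.len() as u64;
        let replaced = needed > self.capacity;
        if replaced {
            self.capacity = needed.next_power_of_two();
            self.buffer = device.create_buffer(&wgpu::BufferDescriptor {
                label: Some(self.label),
                size: self.capacity,
                usage: self.usage,
                mapped_at_creation: false,
            });
        }
        queue.write_buffer(&self.buffer, 0, data);
        replaced
    }
}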

impl OnLoad for &mut Renderer {
    fn load_mesh(&mut self, mesh_data: &MeshData) -> MeshHandle {
        self.mesh_pool.allocate(&self.device, mesh_data)
    }

    fn load_texture(&mut self, texture_data: &TextureData) -> TextureHandle {
        self.texture_pool
            .allocate(&self.device, &self.queue, texture_data)
    }

    fn load_material(&mut self, material_data: &MaterialData) -> MaterialHandle {
        self.material_pool
            .allocate(&self.device, &self.texture_pool, material_data)
    }
}
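
// Usage sketch (hypothetical helper, not part of the module): because OnLoad is
// implemented for `&mut Renderer`, loading code can accept any `impl OnLoad` and
// be handed a temporary mutable borrow of the renderer instead of owning it,
// e.g. `upload_mesh(&mut renderer, &mesh_data)`.
#[allow(dead_code)]
fn upload_mesh(mut loader: impl OnLoad, data: &MeshData) -> MeshHandle {
    loader.load_mesh(data)
}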

#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
    eye: [f32; 4],
    vp: [[f32; 4]; 4],
}

impl CameraUniform {
    pub fn new() -> Self {
        Self {
            eye: [0.0; 4],
            vp: glam::Mat4::IDENTITY.to_cols_array_2d(),
        }
    }

    pub fn update(&mut self, camera: &impl Camera) {
        self.eye = camera.get_eye();
        self.vp = camera.get_vp();
    }
}
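
// Layout note (assumption): `eye` is four floats rather than three because a
// WGSL vec3 in a uniform struct is 16-byte aligned, so the extra float keeps
// the Rust and shader layouts in agreement; the struct is 16 + 64 = 80 bytes.
const _: () = assert!(std::mem::size_of::<CameraUniform>() == 80);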

#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct PointLightUniform {
    center: [f32; 4],
    intensity: [f32; 4],
}

impl From<&PointLight> for PointLightUniform {
    fn from(p: &PointLight) -> Self {
        Self {
            center: p.center.extend(0.0).to_array(),
            intensity: p.intensity.extend(0.0).to_array(),
        }
    }
}
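
// Same layout note as CameraUniform (assumption): `center` and `intensity` are
// vec3 values padded out to vec4, so each light occupies 32 bytes in the
// point-lights storage buffer.
const _: () = assert!(std::mem::size_of::<PointLightUniform>() == 32);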