// cyborg/src/main.rs

use wgpu::util::DeviceExt;
use winit::{
event::*,
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
mod camera;
mod commands;
mod mesh;
use camera::*;
use commands::*;
use mesh::*;
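/// Central GPU state: device, queue, surface configuration, resource pools,
/// and the single render pipeline used to draw textured mesh instances.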
struct Renderer {
pub device: wgpu::Device,
pub mesh_pool: MeshPool,
pub texture_pool: TexturePool,
pub size: winit::dpi::PhysicalSize<u32>,
surface: wgpu::Surface,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
camera_uniform: CameraUniform,
camera_buffer: wgpu::Buffer,
camera_bind_group: wgpu::BindGroup,
meshes_buffer: wgpu::Buffer,
meshes_bind_group: wgpu::BindGroup,
render_pipeline: wgpu::RenderPipeline,
}
impl Renderer {
pub async fn new(window: &winit::window::Window) -> Self {
let size = window.inner_size();
let instance = wgpu::Instance::new(wgpu::Backends::all());
let surface = unsafe { instance.create_surface(window) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::HighPerformance,
compatible_surface: Some(&surface),
force_fallback_adapter: false,
})
.await
.unwrap();
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
features: wgpu::Features::empty(),
limits: wgpu::Limits::default(),
label: None,
},
None,
)
.await
.unwrap();
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface.get_preferred_format(&adapter).unwrap(),
width: size.width,
height: size.height,
present_mode: wgpu::PresentMode::Fifo,
};
surface.configure(&device, &config);
let mesh_pool = MeshPool::default();
let texture_pool = TexturePool::new(&device);
let camera_uniform = CameraUniform::new();
let camera_buffer = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Camera Buffer"),
contents: bytemuck::cast_slice(&[camera_uniform]),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("Camera Bind Group Layout"),
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}],
label: Some("Camera Bind Group"),
});
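// Fixed-size storage buffer holding the per-instance data that `MeshCommands`
// writes each frame and the vertex shader reads.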
let meshes_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Meshes Buffer"),
size: 65536, // TODO resizable meshes buffer/gpu vectors
usage: wgpu::BufferUsages::STORAGE | wgpu::BufferUsages::COPY_DST,
mapped_at_creation: false,
});
let meshes_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Storage { read_only: true },
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("Meshes Bind Group Layout"),
});
let meshes_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &meshes_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: meshes_buffer.as_entire_binding(),
}],
label: Some("Meshes Bind Group"),
});
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[
&camera_bind_group_layout,
&meshes_bind_group_layout,
&texture_pool.bind_group_layout,
],
push_constant_ranges: &[],
});
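// A single pipeline draws everything: camera uniform at group 0, mesh storage
// at group 1, and the per-texture bind group at group 2.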
let shader = device.create_shader_module(&wgpu::include_wgsl!("shader.wgsl"));
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[Vertex::desc()],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
}],
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleList,
strip_index_format: None,
front_face: wgpu::FrontFace::Ccw,
cull_mode: None,
polygon_mode: wgpu::PolygonMode::Fill,
unclipped_depth: false,
conservative: false,
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
});
Self {
size,
surface,
device,
queue,
config,
mesh_pool,
texture_pool,
camera_uniform,
camera_buffer,
camera_bind_group,
meshes_buffer,
meshes_bind_group,
render_pipeline,
}
}
pub fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
self.size = new_size;
self.config.width = new_size.width;
self.config.height = new_size.height;
self.surface.configure(&self.device, &self.config);
}
}
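/// Draws one frame: uploads the camera matrix and per-instance mesh data, then
/// records a single render pass that dispatches every requested mesh instance.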
pub fn render(
&mut self,
camera: &impl Camera,
meshes: &[MeshInstance],
) -> Result<(), wgpu::SurfaceError> {
self.camera_uniform.update(camera);
self.queue.write_buffer(
&self.camera_buffer,
0,
bytemuck::cast_slice(&[self.camera_uniform]),
);
let mesh_commands = commands::MeshCommands::build(meshes);
// TODO persistent staging buffer (write_buffer creates a new one per call)
self.queue
.write_buffer(&self.meshes_buffer, 0, mesh_commands.get_storage());
let output = self.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut rp = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.1,
g: 0.2,
b: 0.3,
a: 1.0,
}),
store: true,
},
}],
depth_stencil_attachment: None,
});
rp.set_pipeline(&self.render_pipeline);
rp.set_bind_group(0, &self.camera_bind_group, &[]);
rp.set_bind_group(1, &self.meshes_bind_group, &[]);
mesh_commands.dispatch(&mut rp, &self.mesh_pool, &self.texture_pool);
}
self.queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
}
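/// GPU vertex and index buffers for one uploaded mesh, along with their element counts.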
pub struct MeshGroup {
vertices: wgpu::Buffer,
vertex_capacity: usize,
indices: wgpu::Buffer,
index_capacity: usize,
}
impl MeshGroup {
fn new(device: &wgpu::Device, data: &MeshData) -> Self {
let vertex_capacity = data.vertices.len();
let vertices = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(&data.vertices),
usage: wgpu::BufferUsages::VERTEX,
});
let index_capacity = data.indices.len();
let indices = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Index Buffer"),
contents: bytemuck::cast_slice(&data.indices),
usage: wgpu::BufferUsages::INDEX,
});
Self {
vertex_capacity,
vertices,
index_capacity,
indices,
}
}
}
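/// Pool of uploaded meshes, addressed by `MeshHandle`.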
#[derive(Default)]
pub struct MeshPool {
groups: slab::Slab<MeshGroup>,
}
impl MeshPool {
pub fn allocate(&mut self, device: &wgpu::Device, data: &MeshData) -> MeshHandle {
let group = MeshGroup::new(device, data);
let group_id = self.groups.insert(group);
let sub_id = 0;
MeshHandle { group_id, sub_id }
}
pub fn get_group(&self, handle: &MeshHandle) -> Option<&MeshGroup> {
self.groups.get(handle.group_id)
}
}
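/// CPU-side image data (tightly packed RGBA8) together with its dimensions.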
pub struct TextureData {
width: u32,
height: u32,
data: Vec<u8>,
}
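/// A GPU texture plus the bind group that exposes it and the shared sampler to
/// the fragment shader.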
pub struct Texture {
texture: wgpu::Texture,
bind_group: wgpu::BindGroup,
}
impl Texture {
pub fn new(
device: &wgpu::Device,
queue: &wgpu::Queue,
sampler: &wgpu::Sampler,
bind_group_layout: &wgpu::BindGroupLayout,
data: &TextureData,
) -> Self {
let size = wgpu::Extent3d {
width: data.width,
height: data.height,
depth_or_array_layers: 1,
};
let texture = device.create_texture(&wgpu::TextureDescriptor {
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
label: None,
});
let texture_view = texture.create_view(&wgpu::TextureViewDescriptor::default());
queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
&data.data,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: std::num::NonZeroU32::new(4 * size.width),
rows_per_image: std::num::NonZeroU32::new(size.height),
},
size,
);
let bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&texture_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(sampler),
},
],
label: None,
});
Texture {
texture,
bind_group,
}
}
}
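/// Pool of uploaded textures sharing one sampler and one bind group layout,
/// addressed by `TextureHandle`.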
pub struct TexturePool {
textures: slab::Slab<Texture>,
sampler: wgpu::Sampler,
bind_group_layout: wgpu::BindGroupLayout,
}
impl TexturePool {
pub fn new(device: &wgpu::Device) -> Self {
let textures = Default::default();
let sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Linear,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
let bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("Texture Bind Group Layout"),
});
Self {
textures,
sampler,
bind_group_layout,
}
}
pub fn allocate(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
data: &TextureData,
) -> TextureHandle {
let texture = Texture::new(device, queue, &self.sampler, &self.bind_group_layout, data);
let id = self.textures.insert(texture);
TextureHandle { id }
}
}
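/// View-projection matrix uploaded to the camera uniform buffer each frame.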
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
vp: [[f32; 4]; 4],
}
impl CameraUniform {
pub fn new() -> Self {
Self {
vp: glam::Mat4::IDENTITY.to_cols_array_2d(),
}
}
pub fn update(&mut self, camera: &impl Camera) {
self.vp = camera.get_vp();
}
}
#[repr(C)]
#[derive(Copy, Clone, Eq, Hash, PartialEq)]
pub struct MeshHandle {
group_id: usize,
// unused for now, since each group contains only one mesh
sub_id: usize,
}
#[repr(C)]
#[derive(Copy, Clone, Eq, Hash, PartialEq)]
pub struct TextureHandle {
// only flat texture ID is used... for now
id: usize,
}
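/// Loads the bundled `viking_room` OBJ model and its albedo PNG from the
/// binary. Only the first model's positions, texture coordinates, and indices
/// are used; the material loader callback is left unimplemented.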
fn load_model() -> (MeshData, TextureData) {
use tobj::*;
let model_data = include_bytes!("viking_room.obj").to_vec();
let model_data = &mut model_data.as_slice();
let load_options = LoadOptions {
triangulate: true,
single_index: true,
..Default::default()
};
let (models, _mats) = load_obj_buf(model_data, &load_options, |_| unimplemented!()).unwrap();
let mut vertices = Vec::new();
let mut indices = Vec::new();
let m = models.first().unwrap();
let index_base = vertices.len() as u32;
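// Swizzle from the OBJ's Z-up convention into Y-up and flip the V texture
// coordinate so the image's top-left origin lines up with the UVs.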
for i in 0..m.mesh.positions.len() / 3 {
let t = i * 3;
vertices.push(Vertex {
position: [
m.mesh.positions[t],
m.mesh.positions[t + 2],
-m.mesh.positions[t + 1],
],
tex_coords: [m.mesh.texcoords[i * 2], 1.0 - m.mesh.texcoords[i * 2 + 1]],
});
}
indices.extend(m.mesh.indices.iter().map(|i| i + index_base));
let albedo_data = include_bytes!("viking_room.png");
let albedo = image::load_from_memory(albedo_data).unwrap();
use image::GenericImageView;
let dimensions = albedo.dimensions();
// Expand to RGBA8 regardless of the source format; the texture is uploaded as Rgba8UnormSrgb.
let albedo_rgba = albedo.to_rgba8().into_raw();
(
MeshData { vertices, indices },
TextureData {
width: dimensions.0,
height: dimensions.1,
data: albedo_rgba,
},
)
}
trait WorldState {
fn update(&mut self);
fn render(&self) -> Vec<MeshInstance>;
}
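/// Static scene: a 10x10 grid of the loaded model.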
struct Grid {
meshes: Vec<MeshInstance>,
}
impl Grid {
fn new(ren: &mut Renderer) -> Self {
let (mesh_data, albedo_data) = load_model();
let mesh = ren.mesh_pool.allocate(&ren.device, &mesh_data);
let albedo = ren
.texture_pool
.allocate(&ren.device, &ren.queue, &albedo_data);
let mut meshes = Vec::new();
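// Lay the instances out on a 10x10 grid in the XZ plane, spaced 3 units apart.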
for x in -5..5 {
for y in -5..5 {
let translation = glam::Vec3::new(x as f32, 0.0, y as f32) * 3.0;
let transform = glam::Mat4::from_translation(translation);
meshes.push(MeshInstance {
mesh,
albedo,
transform,
});
}
}
Self { meshes }
}
}
impl WorldState for Grid {
fn update(&mut self) {}
fn render(&self) -> Vec<MeshInstance> {
self.meshes.clone()
}
}
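/// Orbit parameters for a single instance in the `Planets` scene.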
struct Planet {
speed: f32,
offset: f32,
radius: f32,
size: f32,
}
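/// Animated scene: copies of the loaded model orbiting the origin at different
/// radii and speeds.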
struct Planets {
start: std::time::Instant,
planets: Vec<Planet>,
mesh: MeshHandle,
albedo: TextureHandle,
}
impl Planets {
fn new(ren: &mut Renderer) -> Self {
let start = std::time::Instant::now();
let (mesh_data, albedo_data) = load_model();
let mesh = ren.mesh_pool.allocate(&ren.device, &mesh_data);
let albedo = ren
.texture_pool
.allocate(&ren.device, &ren.queue, &albedo_data);
let mut planets = Vec::new();
for i in 0..10 {
let i = i as f32;
planets.push(Planet {
// Offset the divisor by one to avoid a division by zero (and a NaN transform) for the first planet.
speed: 1.618 * 1.5 / (i + 1.0),
offset: 0.0,
radius: i * 2.0,
size: 0.5,
});
}
Self {
start,
planets,
mesh,
albedo,
}
}
}
impl WorldState for Planets {
fn update(&mut self) {}
fn render(&self) -> Vec<MeshInstance> {
let elapsed = self.start.elapsed().as_secs_f32();
let mut meshes = Vec::new();
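// Place each planet `radius` units along +Z, then rotate that offset around
// the Y axis by an angle that grows with time to produce a circular orbit.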
for planet in self.planets.iter() {
let translation = glam::Vec3::new(0.0, 0.0, planet.radius);
let translation = glam::Mat4::from_translation(translation);
let theta = planet.speed * elapsed + planet.offset;
let rotation = glam::Mat4::from_rotation_y(theta);
meshes.push(MeshInstance {
mesh: self.mesh,
albedo: self.albedo,
transform: rotation * translation,
});
}
meshes
}
}
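/// Entry point: creates the window, renderer, and world state, then drives the
/// winit event loop. Left-click grabs the cursor for mouselook; Escape releases it.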
fn main() {
let event_loop = EventLoop::new();
let window = WindowBuilder::new().build(&event_loop).unwrap();
let mut camera = Flycam::new(10.0, 0.002);
let mut is_grabbed = false;
let mut ren = pollster::block_on(Renderer::new(&window));
// let mut state: Box<dyn WorldState> = Box::new(Planets::new(&mut ren));
let mut state: Box<dyn WorldState> = Box::new(Grid::new(&mut ren));
event_loop.run(move |event, _, control_flow| match event {
Event::RedrawRequested(_) => match ren.render(&camera, &state.render()) {
Ok(_) => {}
Err(wgpu::SurfaceError::Lost) => ren.resize(ren.size),
Err(wgpu::SurfaceError::OutOfMemory) => *control_flow = ControlFlow::Exit,
Err(e) => eprintln!("error: {:?}", e),
},
Event::MainEventsCleared => {
camera.update();
state.update();
window.request_redraw();
}
Event::DeviceEvent { ref event, .. } => match event {
DeviceEvent::MouseMotion { delta } => {
if is_grabbed {
camera.process_mouse(delta.0, delta.1);
}
}
_ => {}
},
Event::WindowEvent {
ref event,
window_id,
} if window_id == window.id() => match event {
WindowEvent::KeyboardInput {
input:
KeyboardInput {
virtual_keycode: Some(key),
state,
..
},
..
} => {
if *state == ElementState::Pressed && *key == VirtualKeyCode::Escape {
if is_grabbed {
window.set_cursor_grab(false).unwrap();
window.set_cursor_visible(true);
is_grabbed = false;
}
} else {
camera.process_keyboard(*key, *state);
}
}
WindowEvent::MouseInput {
button: MouseButton::Left,
state: ElementState::Pressed,
..
} => {
if !is_grabbed {
window.set_cursor_grab(true).unwrap();
window.set_cursor_visible(false);
is_grabbed = true;
}
}
WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit,
WindowEvent::Resized(physical_size) => {
ren.resize(*physical_size);
camera.resize(physical_size.width, physical_size.height);
}
WindowEvent::ScaleFactorChanged { new_inner_size, .. } => {
ren.resize(**new_inner_size);
camera.resize(new_inner_size.width, new_inner_size.height);
}
_ => {}
},
_ => {}
});
}