diff --git a/pumpkin-world/src/block/block_registry.rs b/pumpkin-world/src/block/block_registry.rs
index 0bc2d31a..eea711bd 100644
--- a/pumpkin-world/src/block/block_registry.rs
+++ b/pumpkin-world/src/block/block_registry.rs
@@ -1,4 +1,4 @@
-use std::sync::LazyLock;
+use std::{collections::HashMap, sync::LazyLock};
 
 use serde::Deserialize;
 
@@ -9,6 +9,14 @@ pub static BLOCKS: LazyLock = LazyLock::new(|| {
         .expect("Could not parse blocks.json registry.")
 });
 
+pub static BLOCK_IDS_TO_BLOCK_STRING: LazyLock<HashMap<u16, String>> = LazyLock::new(|| {
+    let mut map = HashMap::new();
+    for block in &*BLOCKS.blocks {
+        map.insert(block.default_state_id, block.name.clone());
+    }
+    map
+});
+
 pub fn get_block(registry_id: &str) -> Option<&Block> {
     BLOCKS
         .blocks
diff --git a/pumpkin-world/src/chunk/anvil.rs b/pumpkin-world/src/chunk/anvil.rs
index 7f9ae006..093ac2b4 100644
--- a/pumpkin-world/src/chunk/anvil.rs
+++ b/pumpkin-world/src/chunk/anvil.rs
@@ -1,25 +1,39 @@
+const WORLD_DATA_VERSION: usize = 4082;
+
 use std::{
+    collections::HashMap,
     fs::OpenOptions,
-    io::{Read, Seek},
+    io::{Read, Seek, SeekFrom, Write},
 };
 
-use flate2::bufread::{GzDecoder, ZlibDecoder};
+use fastnbt::LongArray;
+use flate2::{
+    bufread::{GzDecoder, GzEncoder, ZlibDecoder, ZlibEncoder},
+    Compression as CompressionLevel,
+};
 use itertools::Itertools;
 
-use crate::level::SaveFile;
+use crate::{
+    block::{block_registry::BLOCK_IDS_TO_BLOCK_STRING, BlockId},
+    chunk::{ChunkSection, ChunkSectionBlockStates, PaletteEntry},
+    level::LevelFolder,
+    WORLD_LOWEST_Y,
+};
 
-use super::{ChunkData, ChunkReader, ChunkReadingError, CompressionError};
+use super::{
+    ChunkData, ChunkNbt, ChunkReader, ChunkReadingError, ChunkSerializingError, ChunkWriter,
+    ChunkWritingError, CompressionError,
+};
 
-#[derive(Clone)]
-pub struct AnvilChunkReader {}
+pub struct AnvilChunkFormat;
 
-impl Default for AnvilChunkReader {
+impl Default for AnvilChunkFormat {
     fn default() -> Self {
         Self::new()
     }
 }
 
-impl AnvilChunkReader {
+impl AnvilChunkFormat {
     pub fn new() -> Self {
         Self {}
     }
@@ -83,20 +97,68 @@ impl Compression {
             Compression::Custom => todo!(),
         }
     }
+    fn compress_data(
+        &self,
+        uncompressed_data: Vec<u8>,
+        compression_level: Option<CompressionLevel>,
+    ) -> Result<Vec<u8>, CompressionError> {
+        let compression_level = compression_level.unwrap_or(CompressionLevel::best());
+        match self {
+            Compression::GZip => {
+                let mut encoder = GzEncoder::new(&uncompressed_data[..], compression_level);
+                let mut chunk_data = Vec::new();
+                encoder
+                    .read_to_end(&mut chunk_data)
+                    .map_err(CompressionError::GZipError)?;
+                Ok(chunk_data)
+            }
+            Compression::ZLib => {
+                let mut encoder = ZlibEncoder::new(&uncompressed_data[..], compression_level);
+                let mut chunk_data = Vec::new();
+                encoder
+                    .read_to_end(&mut chunk_data)
+                    .map_err(CompressionError::ZlibError)?;
+                Ok(chunk_data)
+            }
+            Compression::None => Ok(uncompressed_data),
+            Compression::LZ4 => {
+                let mut compressed_data = Vec::new();
+                let mut encoder = lz4::EncoderBuilder::new()
+                    .level(compression_level.level())
+                    .build(&mut compressed_data)
+                    .map_err(CompressionError::LZ4Error)?;
+                if let Err(err) = encoder.write_all(&uncompressed_data) {
+                    return Err(CompressionError::LZ4Error(err));
+                }
+                if let (_output, Err(err)) = encoder.finish() {
+                    return Err(CompressionError::LZ4Error(err));
+                }
+                Ok(compressed_data)
+            }
+            Compression::Custom => todo!(),
+        }
+    }
+}
+
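+// Euclidean remainder: unlike `%`, the result is never negative, e.g.
+// `modulus(-1, 32) == 31`. Used to locate a chunk within its 32x32 region.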
+fn modulus(a: i32, b: i32) -> i32 {
+    ((a % b) + b) % b
 }
 
-impl ChunkReader for AnvilChunkReader {
+impl ChunkReader for AnvilChunkFormat {
     fn read_chunk(
         &self,
-        save_file: &SaveFile,
+        level_folder: &LevelFolder,
         at: &pumpkin_core::math::vector2::Vector2<i32>,
     ) -> Result<ChunkData, ChunkReadingError> {
-        let region = (at.x >> 5, at.z >> 5);
+        let region = (
+            ((at.x as f32) / 32.0).floor() as i32,
+            ((at.z as f32) / 32.0).floor() as i32,
+        );
 
         let mut region_file = OpenOptions::new()
             .read(true)
             .open(
-                save_file
+                level_folder
                     .region_folder
                     .join(format!("r.{}.{}.mca", region.0, region.1)),
             )
@@ -116,7 +178,6 @@ impl ChunkReader for AnvilChunkReader {
             .read_exact(&mut timestamp_table)
             .map_err(|err| ChunkReadingError::IoError(err.kind()))?;
 
-        let modulus = |a: i32, b: i32| ((a % b) + b) % b;
         let chunk_x = modulus(at.x, 32) as u32;
         let chunk_z = modulus(at.z, 32) as u32;
         let table_entry = (chunk_x + chunk_z * 32) * 4;
@@ -160,27 +221,242 @@ impl ChunkReader for AnvilChunkReader {
     }
 }
 
-#[cfg(test)]
-mod tests {
-    use std::path::PathBuf;
-
-    use pumpkin_core::math::vector2::Vector2;
-
-    use crate::{
-        chunk::{anvil::AnvilChunkReader, ChunkReader, ChunkReadingError},
-        level::SaveFile,
-    };
-
-    #[test]
-    fn not_existing() {
-        let region_path = PathBuf::from("not_existing");
-        let result = AnvilChunkReader::new().read_chunk(
-            &SaveFile {
-                root_folder: PathBuf::from(""),
-                region_folder: region_path,
-            },
-            &Vector2::new(0, 0),
-        );
-        assert!(matches!(result, Err(ChunkReadingError::ChunkNotExist)));
-    }
-}
+impl ChunkWriter for AnvilChunkFormat {
+    fn write_chunk(
+        &self,
+        chunk_data: &ChunkData,
+        level_folder: &LevelFolder,
+        at: &pumpkin_core::math::vector2::Vector2<i32>,
+    ) -> Result<(), super::ChunkWritingError> {
+        // TODO: update timestamp
+
+        let bytes = self
+            .chunk_to_bytes(chunk_data)
+            .map_err(|err| ChunkWritingError::ChunkSerializingError(err.to_string()))?;
+        let bytes = Compression::ZLib
+            .compress_data(bytes, Some(CompressionLevel::best()))
+            .map_err(ChunkWritingError::Compression)?;
+
+        let region = (
+            ((at.x as f32) / 32.0).floor() as i32,
+            ((at.z as f32) / 32.0).floor() as i32,
+        );
+
+        let mut region_file = OpenOptions::new()
+            .read(true)
+            .write(true)
+            .create(true)
+            .truncate(false)
+            .open(
+                level_folder
+                    .region_folder
+                    .join(format!("./r.{}.{}.mca", region.0, region.1)),
+            )
+            .map_err(|err| ChunkWritingError::IoError(err.kind()))?;
+
+        let mut location_table: [u8; 4096] = [0; 4096];
+        let mut timestamp_table: [u8; 4096] = [0; 4096];
+
+        let file_meta = region_file
+            .metadata()
+            .map_err(|err| ChunkWritingError::IoError(err.kind()))?;
+
+        // fill the location and timestamp tables if they exist
+        if file_meta.len() >= 4096 * 2 {
+            region_file
+                .read_exact(&mut location_table)
+                .map_err(|err| ChunkWritingError::IoError(err.kind()))?;
+            region_file
+                .read_exact(&mut timestamp_table)
+                .map_err(|err| ChunkWritingError::IoError(err.kind()))?;
+        }
+
+        let chunk_x = modulus(at.x, 32) as u32;
+        let chunk_z = modulus(at.z, 32) as u32;
+
+        let table_entry = (chunk_x + chunk_z * 32) * 4;
+
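+        // Each 4-byte location entry packs a 3-byte big-endian sector offset and a
+        // 1-byte sector count, both measured in 4096-byte sectors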
+        let mut offset = vec![0u8];
+        offset.extend_from_slice(&location_table[table_entry as usize..table_entry as usize + 3]);
+        let at_offset = u32::from_be_bytes(offset.try_into().unwrap()) as u64 * 4096;
+        let at_size = location_table[table_entry as usize + 3] as usize * 4096;
+
+        let mut end_index = 4096 * 2;
+        if at_offset != 0 || at_size != 0 {
+            // move other chunks earlier, if there is a hole
+            for (other_offset, other_size, other_table_entry) in location_table
+                .chunks(4)
+                .enumerate()
+                .filter_map(|(index, v)| {
+                    if table_entry / 4 == index as u32 {
+                        return None;
+                    }
+                    let mut offset = vec![0u8];
+                    offset.extend_from_slice(&v[0..3]);
+                    let offset = u32::from_be_bytes(offset.try_into().unwrap()) as u64 * 4096;
+                    let size = v[3] as usize * 4096;
+                    if offset == 0 && size == 0 {
+                        return None;
+                    }
+                    Some((offset, size, index * 4))
+                })
+                .sorted_by(|a, b| a.0.cmp(&b.0))
+            {
+                if at_offset > other_offset {
+                    continue;
+                }
+
+                fn read_at_most(file: &mut std::fs::File, size: usize) -> std::io::Result<Vec<u8>> {
+                    let mut buf = vec![0u8; size];
+
+                    let mut cursor = 0;
+                    loop {
+                        match file.read(&mut buf[cursor..])? {
+                            0 => break,
+                            bytes_read => {
+                                cursor += bytes_read;
+                            }
+                        }
+                    }
+
+                    Ok(buf)
+                }
+
+                region_file.seek(SeekFrom::Start(other_offset)).unwrap(); // TODO
+                let buf = match read_at_most(&mut region_file, other_size) {
+                    Ok(v) => v,
+                    Err(_) => panic!(
+                        "Region file r.{}.{}.mca got corrupted, sorry",
+                        region.0, region.1
+                    ),
+                };
+
+                region_file
+                    .seek(SeekFrom::Start(other_offset - at_size as u64))
+                    .unwrap(); // TODO
+                region_file.write_all(&buf).unwrap_or_else(|_| {
+                    panic!(
+                        "Region file r.{}.{}.mca got corrupted, sorry",
+                        region.0, region.1
+                    )
+                });
+
+                let location_bytes =
+                    &(((other_offset - at_size as u64) / 4096) as u32).to_be_bytes()[1..4];
+                let size_bytes = [(other_size.div_ceil(4096)) as u8];
+                let location_entry = [location_bytes, &size_bytes].concat();
+                location_table[other_table_entry..other_table_entry + 4]
+                    .as_mut()
+                    .copy_from_slice(&location_entry);
+
+                end_index = (other_offset as isize + other_size as isize - at_size as isize) as u64;
+            }
+        } else {
+            for (offset, size) in location_table.chunks(4).filter_map(|v| {
+                let mut offset = vec![0u8];
+                offset.extend_from_slice(&v[0..3]);
+                let offset = u32::from_be_bytes(offset.try_into().unwrap()) as u64 * 4096;
+                let size = v[3] as usize * 4096;
+                if offset == 0 && size == 0 {
+                    return None;
+                }
+                Some((offset, size))
+            }) {
+                end_index = u64::max(offset + size as u64, end_index);
+            }
+        }
+
+        let location_bytes = &(end_index as u32 / 4096).to_be_bytes()[1..4];
+        let size_bytes = [(bytes.len().div_ceil(4096)) as u8];
+        location_table[table_entry as usize..table_entry as usize + 4]
+            .as_mut()
+            .copy_from_slice(&[location_bytes, &size_bytes].concat());
+
+        // write new location and timestamp table
+        region_file.seek(SeekFrom::Start(0)).unwrap(); // TODO
+        if let Err(err) = region_file.write_all(&[location_table, timestamp_table].concat()) {
+            return Err(ChunkWritingError::IoError(err.kind()));
+        }
+
+        region_file.seek(SeekFrom::Start(end_index)).unwrap(); // TODO
+        region_file.write_all(&bytes).unwrap_or_else(|_| {
+            panic!(
+                "Region file r.{}.{}.mca got corrupted, sorry",
+                region.0, region.1
+            )
+        });
+
+        Ok(())
+    }
+}
+
+impl AnvilChunkFormat {
+    pub fn chunk_to_bytes(&self, chunk_data: &ChunkData) -> Result<Vec<u8>, ChunkSerializingError> {
+        let mut sections = Vec::new();
+
+        for (i, blocks) in chunk_data.blocks.blocks.chunks(16 * 16 * 16).enumerate() {
+            // get the distinct blocks in this section; `unique` (rather than `dedup`)
+            // keeps a single entry per block id even when repeats are non-consecutive
+            let unique_blocks = blocks.iter().unique().collect_vec();
+            let palette = HashMap::<BlockId, (&String, usize)>::from_iter(
+                unique_blocks.iter().enumerate().map(|(i, v)| {
+                    (
+                        **v,
+                        (
+                            BLOCK_IDS_TO_BLOCK_STRING
+                                .get(&v.0)
+                                .expect("Tried saving a block which does not exist."),
+                            i,
+                        ),
+                    )
+                }),
+            );
+
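+            // Bits per palette index: the bit length of (palette.len() - 1),
+            // clamped to the Anvil minimum of 4 bits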
+            let block_bit_size = {
+                let size = 64 - (palette.len() as i64 - 1).leading_zeros();
+                std::cmp::max(4, size)
+            } as usize;
+            let blocks_in_pack = 64 / block_bit_size;
+            let mut section_longs = Vec::new();
+
+            let mut current_pack_long = 0i64;
+
+            for block_pack in blocks.chunks(blocks_in_pack) {
+                for block in block_pack {
+                    let index = palette.get(block).expect("Just added all unique").1;
+                    current_pack_long = current_pack_long << block_bit_size | index as i64;
+                }
+                section_longs.push(current_pack_long);
+                current_pack_long = 0;
+            }
+
+            sections.push(ChunkSection {
+                y: i as i32 * 16 - WORLD_LOWEST_Y as i32,
+                block_states: Some(ChunkSectionBlockStates {
+                    data: Some(LongArray::new(section_longs)),
+                    palette: palette
+                        .into_iter()
+                        .sorted_by_key(|entry| entry.1 .1)
+                        .map(|entry| PaletteEntry {
+                            name: entry.1 .0.clone(),
+                        })
+                        .collect(),
+                }),
+            });
+        }
+
+        let nbt = ChunkNbt {
+            data_version: WORLD_DATA_VERSION,
+            heightmaps: chunk_data.blocks.heightmap.clone(),
+            sections,
+        };
+
+        let bytes = fastnbt::to_bytes(&nbt);
+
+        bytes.map_err(ChunkSerializingError::ErrorSerializingChunk)
+    }
+}
diff --git a/pumpkin-world/src/chunk/mod.rs b/pumpkin-world/src/chunk/mod.rs
index fdcf55a9..786cf125 100644
--- a/pumpkin-world/src/chunk/mod.rs
+++ b/pumpkin-world/src/chunk/mod.rs
@@ -1,5 +1,4 @@
 use std::cmp::max;
-use std::collections::HashMap;
 use std::ops::Index;
 
 use fastnbt::LongArray;
@@ -10,7 +9,7 @@ use thiserror::Error;
 use crate::{
     block::{BlockId, BlockState},
     coordinates::{ChunkRelativeBlockCoordinates, Height},
-    level::SaveFile,
+    level::LevelFolder,
     WORLD_HEIGHT,
 };
 
@@ -23,11 +22,20 @@ const CHUNK_VOLUME: usize = CHUNK_AREA * WORLD_HEIGHT;
 pub trait ChunkReader: Sync + Send {
     fn read_chunk(
         &self,
-        save_file: &SaveFile,
+        level_folder: &LevelFolder,
        at: &Vector2<i32>,
     ) -> Result<ChunkData, ChunkReadingError>;
 }
 
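+/// Counterpart to `ChunkReader`: persists a chunk at the given position into
+/// the level folder.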
+pub trait ChunkWriter: Send + Sync {
+    fn write_chunk(
+        &self,
+        chunk: &ChunkData,
+        level_folder: &LevelFolder,
+        at: &Vector2<i32>,
+    ) -> Result<(), ChunkWritingError>;
+}
+
 #[derive(Error, Debug)]
 pub enum ChunkReadingError {
     #[error("Io error: {0}")]
@@ -42,6 +50,16 @@ pub enum ChunkReadingError {
     ParsingError(ChunkParsingError),
 }
 
+#[derive(Error, Debug)]
+pub enum ChunkWritingError {
+    #[error("Io error: {0}")]
+    IoError(std::io::ErrorKind),
+    #[error("Compression error: {0}")]
+    Compression(CompressionError),
+    #[error("Chunk serializing error: {0}")]
+    ChunkSerializingError(String),
+}
+
 #[derive(Error, Debug)]
 pub enum CompressionError {
     #[error("Compression scheme not recognised")]
@@ -69,14 +87,13 @@ pub struct ChunkBlocks {
     pub heightmap: ChunkHeightmaps,
 }
 
-#[derive(Deserialize, Debug, Clone)]
+#[derive(Deserialize, Serialize, Debug, Clone)]
 #[serde(rename_all = "PascalCase")]
 struct PaletteEntry {
     name: String,
-    _properties: Option<HashMap<String, String>>,
 }
 
-#[derive(Deserialize, Debug, Clone)]
+#[derive(Deserialize, Serialize, Debug, Clone)]
 struct ChunkSectionBlockStates {
     data: Option<LongArray>,
     palette: Vec<PaletteEntry>,
@@ -89,18 +106,16 @@ pub struct ChunkHeightmaps {
     world_surface: LongArray,
 }
 
-#[derive(Deserialize, Debug)]
-#[expect(dead_code)]
+#[derive(Deserialize, Serialize, Debug)]
 struct ChunkSection {
     #[serde(rename = "Y")]
     y: i32,
     block_states: Option<ChunkSectionBlockStates>,
 }
 
-#[derive(Deserialize, Debug)]
+#[derive(Deserialize, Serialize, Debug)]
 #[serde(rename_all = "PascalCase")]
 struct ChunkNbt {
-    #[expect(dead_code)]
     data_version: usize,
 
     #[serde(rename = "sections")]
@@ -329,3 +344,9 @@ pub enum ChunkParsingError {
     #[error("Error deserializing chunk: {0}")]
     ErrorDeserializingChunk(String),
 }
+
+#[derive(Error, Debug)]
+pub enum ChunkSerializingError {
+    #[error("Error serializing chunk: {0}")]
+    ErrorSerializingChunk(fastnbt::error::Error),
+}
diff --git a/pumpkin-world/src/level.rs b/pumpkin-world/src/level.rs
index c65e4cb6..40413c26 100644
--- a/pumpkin-world/src/level.rs
+++ b/pumpkin-world/src/level.rs
@@ -11,7 +11,8 @@ use tokio::{
 
 use crate::{
     chunk::{
-        anvil::AnvilChunkReader, ChunkData, ChunkParsingError, ChunkReader, ChunkReadingError,
+        anvil::AnvilChunkFormat, ChunkData, ChunkParsingError, ChunkReader, ChunkReadingError,
+        ChunkWriter,
     },
     world_gen::{get_world_gen, Seed, WorldGenerator},
 };
@@ -28,47 +29,39 @@ pub type ConcurrentChunkResult = Vec<(Vector2<i32>, JoinHandle<()>)>;
 ///
 /// For more details on world generation, refer to the `WorldGenerator` module.
 pub struct Level {
-    save_file: Option<SaveFile>,
+    level_folder: LevelFolder,
     loaded_chunks: Arc<DashMap<Vector2<i32>, Arc<RwLock<ChunkData>>>>,
     chunk_watchers: Arc<DashMap<Vector2<i32>, usize>>,
     chunk_reader: Arc<dyn ChunkReader>,
+    chunk_writer: Arc<dyn ChunkWriter>,
     world_gen: Arc<dyn WorldGenerator>,
 }
 
 #[derive(Clone)]
-pub struct SaveFile {
+pub struct LevelFolder {
     pub root_folder: PathBuf,
     pub region_folder: PathBuf,
 }
 
 impl Level {
     pub fn from_root_folder(root_folder: PathBuf) -> Self {
-        let world_gen = get_world_gen(Seed(0)).into(); // TODO Read Seed from config.
-        if root_folder.exists() {
-            let region_folder = root_folder.join("region");
-            assert!(
-                region_folder.exists(),
-                "World region folder does not exist, despite there being a root folder."
-            );
-
-            Self {
-                world_gen,
-                save_file: Some(SaveFile {
-                    root_folder,
-                    region_folder,
-                }),
-                chunk_reader: Arc::new(AnvilChunkReader::new()),
-                loaded_chunks: Arc::new(DashMap::new()),
-                chunk_watchers: Arc::new(DashMap::new()),
-            }
-        } else {
-            Self {
-                world_gen,
-                save_file: None,
-                chunk_reader: Arc::new(AnvilChunkReader::new()),
-                loaded_chunks: Arc::new(DashMap::new()),
-                chunk_watchers: Arc::new(DashMap::new()),
-            }
+        // TODO Read Seed from config.
+        let world_gen = get_world_gen(Seed(0)).into();
+        // Create the region folder if it does not exist yet
+        let region_folder = root_folder.join("region");
+        if !region_folder.exists() {
+            std::fs::create_dir_all(&region_folder).expect("Failed to create Region folder");
+        }
+        Self {
+            world_gen,
+            level_folder: LevelFolder {
+                root_folder,
+                region_folder,
+            },
+            chunk_reader: Arc::new(AnvilChunkFormat::new()),
+            chunk_writer: Arc::new(AnvilChunkFormat::new()),
+            loaded_chunks: Arc::new(DashMap::new()),
+            chunk_watchers: Arc::new(DashMap::new()),
         }
     }
@@ -160,8 +153,15 @@ impl Level {
     pub fn clean_chunks(&self, chunks: &[Vector2<i32>]) {
         chunks.par_iter().for_each(|chunk_pos| {
             //log::debug!("Unloading {:?}", chunk_pos);
+            // Save the chunk to disk before it is unloaded
             if let Some(data) = self.loaded_chunks.remove(chunk_pos) {
-                self.write_chunk(data);
+                if let Err(error) = self.chunk_writer.write_chunk(
+                    &data.1.blocking_read(),
+                    &self.level_folder,
+                    &data.0,
+                ) {
+                    log::error!("Failed writing chunk to disk: {}", error);
+                }
             };
         });
     }
@@ -182,16 +182,12 @@ impl Level {
         self.chunk_watchers.shrink_to_fit();
     }
 
-    pub fn write_chunk(&self, _chunk_to_write: (Vector2<i32>, Arc<RwLock<ChunkData>>)) {
-        //TODO
-    }
-
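+    /// Reads a chunk from disk, returning `Ok(None)` when it is not present so
+    /// the caller can fall back to the world generator.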
     fn load_chunk_from_save(
         chunk_reader: Arc<dyn ChunkReader>,
-        save_file: SaveFile,
+        level_folder: &LevelFolder,
         chunk_pos: Vector2<i32>,
     ) -> Result<Option<Arc<RwLock<ChunkData>>>, ChunkReadingError> {
-        match chunk_reader.read_chunk(&save_file, &chunk_pos) {
+        match chunk_reader.read_chunk(level_folder, &chunk_pos) {
             Ok(data) => Ok(Some(Arc::new(RwLock::new(data)))),
             Err(
                 ChunkReadingError::ChunkNotExist
@@ -219,7 +215,8 @@ impl Level {
             let channel = channel.clone();
             let loaded_chunks = self.loaded_chunks.clone();
             let chunk_reader = self.chunk_reader.clone();
-            let save_file = self.save_file.clone();
+            let chunk_writer = self.chunk_writer.clone();
+            let level_folder = self.level_folder.clone();
             let world_gen = self.world_gen.clone();
             let chunk_pos = *at;
 
@@ -228,27 +225,40 @@ impl Level {
                 .get(&chunk_pos)
                 .map(|entry| entry.value().clone())
                 .unwrap_or_else(|| {
-                    let loaded_chunk = save_file
-                        .and_then(|save_file| {
-                            match Self::load_chunk_from_save(
-                                chunk_reader,
-                                save_file,
-                                chunk_pos,
-                            ) {
-                                Ok(chunk) => chunk,
-                                Err(err) => {
-                                    log::error!(
-                                        "Failed to read chunk (regenerating) {:?}: {:?}",
-                                        chunk_pos,
-                                        err
-                                    );
-                                    None
-                                }
-                            }
-                        })
-                        .unwrap_or_else(|| {
-                            Arc::new(RwLock::new(world_gen.generate_chunk(chunk_pos)))
-                        });
+                    let loaded_chunk = match Self::load_chunk_from_save(
+                        chunk_reader,
+                        &level_folder,
+                        chunk_pos,
+                    ) {
+                        Ok(chunk) => {
+                            // Write the freshly read chunk back to disk
+                            if let Some(chunk) = &chunk {
+                                if let Err(error) = chunk_writer.write_chunk(
+                                    &chunk.blocking_read(),
+                                    &level_folder,
+                                    &chunk_pos,
+                                ) {
+                                    log::error!("Failed writing chunk to disk: {}", error);
+                                };
+                            }
+                            chunk
+                        }
+                        Err(err) => {
+                            log::error!(
+                                "Failed to read chunk (regenerating) {:?}: {:?}",
+                                chunk_pos,
+                                err
+                            );
+                            None
+                        }
+                    }
+                    .unwrap_or_else(|| {
+                        Arc::new(RwLock::new(world_gen.generate_chunk(chunk_pos)))
+                    });
 
                     if let Some(data) = loaded_chunks.get(&chunk_pos) {
                         // Another thread populated in between the previous check and now
diff --git a/pumpkin/src/main.rs b/pumpkin/src/main.rs
index 038ceb59..83ef8735 100644
--- a/pumpkin/src/main.rs
+++ b/pumpkin/src/main.rs
@@ -91,22 +91,30 @@ const fn convert_logger_filter(level: pumpkin_config::logging::LevelFilter) -> L
     }
 }
 
-#[tokio::main]
-async fn main() -> io::Result<()> {
+fn main() -> io::Result<()> {
     init_logger();
 
-    // let rt = tokio::runtime::Builder::new_multi_thread()
-    //     .enable_all()
-    //     .build()
-    //     .unwrap();
+    let runtime = tokio::runtime::Builder::new_multi_thread()
+        .enable_all()
+        .build()
+        .expect("Failed to construct tokio runtime");
+
+    // ensure rayon is built outside of tokio scope
+    // DO NOT MOVE IT INTO TOKIO
+    // DO NOT CHANGE MAIN INTO #[tokio::main] THIS HAS HAPPENED 3 TIMES ALREADY AND IT CAUSES ISSUES EVERY SINGLE TIME
+    // if it happens again, make sure to update the number here ^
+    rayon::ThreadPoolBuilder::new().build_global().unwrap();
+
+    runtime.block_on(async_main())
+}
+
+async fn async_main() -> io::Result<()> {
     tokio::spawn(async {
         setup_sighandler()
             .await
             .expect("Unable to setup signal handlers");
     });
 
-    // ensure rayon is built outside of tokio scope
-    rayon::ThreadPoolBuilder::new().build_global().unwrap();
     let default_panic = std::panic::take_hook();
     std::panic::set_hook(Box::new(move |info| {
         default_panic(info);