Mirror of https://github.com/mat-1/azalea.git (synced 2025-08-02 14:26:04 +00:00).
Commit: add benchmarks to azalea-world.
This commit is contained in:
parent
4f6ab28325
commit
994bac2c13
5 changed files with 60 additions and 21 deletions
1
Cargo.lock
generated
1
Cargo.lock
generated
|
@ -517,6 +517,7 @@ dependencies = [
|
|||
"azalea-nbt",
|
||||
"azalea-registry",
|
||||
"bevy_ecs",
|
||||
"criterion",
|
||||
"derive_more",
|
||||
"enum-as-inner",
|
||||
"log",
|
||||
|
|
|
@ -29,3 +29,8 @@ uuid = "1.4.1"
|
|||
|
||||
[dev-dependencies]
|
||||
azalea-client = { path = "../azalea-client" }
|
||||
criterion = "0.5.1"
|
||||
|
||||
[[bench]]
|
||||
name = "chunks"
|
||||
harness = false
|
||||
|
|
38
azalea-world/benches/chunks.rs
Normal file
38
azalea-world/benches/chunks.rs
Normal file
|
@ -0,0 +1,38 @@
|
|||
use std::hint::black_box;
|
||||
|
||||
use azalea_core::position::ChunkBlockPos;
|
||||
use azalea_world::{BitStorage, Chunk};
|
||||
use criterion::{criterion_group, criterion_main, Criterion};
|
||||
|
||||
fn bench_chunks(c: &mut Criterion) {
|
||||
c.bench_function("Chunk::set", |b| {
|
||||
b.iter(|| {
|
||||
let mut chunk = Chunk::default();
|
||||
|
||||
for x in 0..16 {
|
||||
for z in 0..16 {
|
||||
chunk.set(
|
||||
&ChunkBlockPos::new(x, 1, z),
|
||||
azalea_registry::Block::Bedrock.into(),
|
||||
0,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
black_box(chunk);
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
fn bench_bitstorage(c: &mut Criterion) {
|
||||
c.bench_function("BitStorage::set", |b| {
|
||||
let mut storage = BitStorage::new(1, 4096, None).unwrap();
|
||||
b.iter(|| {
|
||||
storage.set(136, 1);
|
||||
});
|
||||
black_box(storage);
|
||||
});
|
||||
}
|
||||
|
||||
criterion_group!(benches, bench_chunks, bench_bitstorage);
|
||||
criterion_main!(benches);
|
|
@ -76,7 +76,7 @@ pub struct BitStorage {
|
|||
bits: usize,
|
||||
mask: u64,
|
||||
size: usize,
|
||||
values_per_long: u8,
|
||||
values_per_long: usize,
|
||||
divide_mul: u64,
|
||||
divide_add: u64,
|
||||
divide_shift: i32,
|
||||
|
@ -141,7 +141,7 @@ impl BitStorage {
|
|||
bits,
|
||||
mask,
|
||||
size,
|
||||
values_per_long: values_per_long as u8,
|
||||
values_per_long,
|
||||
divide_mul: divide_mul as u32 as u64,
|
||||
divide_add: divide_add as u32 as u64,
|
||||
divide_shift,
|
||||
|
@ -153,9 +153,7 @@ impl BitStorage {
|
|||
let first = self.divide_mul;
|
||||
let second = self.divide_add;
|
||||
|
||||
(((index * first) + second) >> 32 >> self.divide_shift)
|
||||
.try_into()
|
||||
.unwrap()
|
||||
(((index * first) + second) >> 32 >> self.divide_shift) as usize
|
||||
}
|
||||
|
||||
/// Get the data at the given index.
|
||||
|
@ -167,8 +165,7 @@ impl BitStorage {
|
|||
pub fn get(&self, index: usize) -> u64 {
|
||||
assert!(
|
||||
index < self.size,
|
||||
"Index {} out of bounds (must be less than {})",
|
||||
index,
|
||||
"Index {index} out of bounds (must be less than {})",
|
||||
self.size
|
||||
);
|
||||
|
||||
|
@ -179,7 +176,7 @@ impl BitStorage {
|
|||
|
||||
let cell_index = self.cell_index(index as u64);
|
||||
let cell = &self.data[cell_index];
|
||||
let bit_index = (index - cell_index * self.values_per_long as usize) * self.bits;
|
||||
let bit_index = (index - cell_index * self.values_per_long) * self.bits;
|
||||
cell >> bit_index & self.mask
|
||||
}
|
||||
|
||||
|
@ -189,11 +186,11 @@ impl BitStorage {
|
|||
return 0;
|
||||
}
|
||||
|
||||
assert!(index < self.size);
|
||||
assert!(value <= self.mask);
|
||||
debug_assert!(index < self.size);
|
||||
debug_assert!(value <= self.mask);
|
||||
let cell_index = self.cell_index(index as u64);
|
||||
let cell = &mut self.data[cell_index];
|
||||
let bit_index = (index - cell_index * self.values_per_long as usize) * self.bits;
|
||||
let bit_index = (index - cell_index * self.values_per_long) * self.bits;
|
||||
let old_value = *cell >> (bit_index as u64) & self.mask;
|
||||
*cell = *cell & !(self.mask << bit_index) | (value & self.mask) << bit_index;
|
||||
old_value
|
||||
|
@ -205,11 +202,11 @@ impl BitStorage {
|
|||
return;
|
||||
}
|
||||
|
||||
assert!(index < self.size);
|
||||
assert!(value <= self.mask);
|
||||
debug_assert!(index < self.size);
|
||||
debug_assert!(value <= self.mask);
|
||||
let cell_index = self.cell_index(index as u64);
|
||||
let cell = &mut self.data[cell_index];
|
||||
let bit_index = (index - cell_index * self.values_per_long as usize) * self.bits;
|
||||
let bit_index = (index - cell_index * self.values_per_long) * self.bits;
|
||||
*cell = *cell & !(self.mask << bit_index) | (value & self.mask) << bit_index;
|
||||
}
|
||||
|
||||
|
|
|
@ -121,13 +121,11 @@ impl PalettedContainer {
|
|||
fn create_or_reuse_data(&self, bits_per_entry: u8) -> PalettedContainer {
|
||||
let new_palette_type =
|
||||
PaletteKind::from_bits_and_type(bits_per_entry, &self.container_type);
|
||||
// note for whoever is trying to optimize this: vanilla has this
|
||||
// but it causes a stack overflow since it's not changing the bits per entry
|
||||
// i don't know how to fix this properly so glhf
|
||||
// let old_palette_type: PaletteType = (&self.palette).into();
|
||||
// if new_palette_type == old_palette_type {
|
||||
// return self.clone();
|
||||
// }
|
||||
|
||||
let old_palette_type = (&self.palette).into();
|
||||
if bits_per_entry == self.bits_per_entry && new_palette_type == old_palette_type {
|
||||
return self.clone();
|
||||
}
|
||||
let storage =
|
||||
BitStorage::new(bits_per_entry as usize, self.container_type.size(), None).unwrap();
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue