1
2
Fork 0
mirror of https://github.com/mat-1/azalea.git synced 2025-08-02 06:16:04 +00:00

despawn entities when switching worlds and some testbot fixes

This commit is contained in:
mat 2025-02-21 22:50:19 +00:00
parent f5f15362f2
commit 27945c8870
7 changed files with 43 additions and 15 deletions

View file

@@ -555,6 +555,11 @@ impl Client {
self.query::<Option<&T>>(&mut self.ecs.lock()).cloned()
}
/// Return a clone of the ECS resource of type `T`.
///
/// The ECS lock is held only long enough to look the resource up; the
/// caller gets an independent copy.
///
/// # Panics
/// Panics if no resource of type `T` exists in the ECS (this follows the
/// semantics of `bevy_ecs`'s `World::resource`).
pub fn resource<T: Resource + Clone>(&self) -> T {
    let ecs = self.ecs.lock();
    ecs.resource::<T>().clone()
}
/// Get a required component for this client and call the given function.
///
/// Similar to [`Self::component`], but doesn't clone the component since

View file

@@ -47,6 +47,7 @@ pub struct DisconnectEvent {
pub fn remove_components_from_disconnected_players(
mut commands: Commands,
mut events: EventReader<DisconnectEvent>,
mut loaded_by_query: Query<&mut azalea_entity::LoadedBy>,
) {
for DisconnectEvent { entity, .. } in events.read() {
trace!("Got DisconnectEvent for {entity:?}");
@@ -62,6 +63,13 @@ pub fn remove_components_from_disconnected_players(
.remove::<LocalPlayerEvents>();
// note that we don't remove the client from the ECS, so if they decide
// to reconnect they'll keep their state
// now we have to remove ourselves from the LoadedBy for every entity.
// in theory this could be inefficient if we have massive swarms... but in
// practice this is fine.
for mut loaded_by in &mut loaded_by_query.iter_mut() {
loaded_by.remove(entity);
}
}
}

View file

@@ -155,8 +155,8 @@ pub fn remove_despawned_entities_from_indexes(
Changed<LoadedBy>,
>,
) {
for (entity, uuid, minecraft_id, position, world_name, loaded_by) in &query {
let Some(instance_lock) = instance_container.get(world_name) else {
for (entity, uuid, minecraft_id, position, instance_name, loaded_by) in &query {
let Some(instance_lock) = instance_container.get(instance_name) else {
// the instance isn't even loaded by us, so we can safely delete the entity
debug!(
"Despawned entity {entity:?} because it's in an instance that isn't loaded anymore"

View file

@@ -117,6 +117,13 @@ fn parse_frame(buffer: &mut Cursor<Vec<u8>>) -> Result<Box<[u8]>, FrameSplitterE
// reset the inner vec once we've reached the end of the buffer so we don't keep
// leaking memory
buffer.get_mut().clear();
// we just cap the capacity to 64KB instead of resetting it to save some
// allocations.
// and the reason we bother capping it at all is to avoid wasting memory if we
// get a big packet once and then never again.
buffer.get_mut().shrink_to(1024 * 64);
buffer.set_position(0);
}

View file

@@ -197,6 +197,8 @@ pub fn register(commands: &mut CommandDispatcher<Mutex<CommandSource>>) {
.unwrap();
}
writeln!(report).unwrap();
for (info, _) in ecs.iter_resources() {
writeln!(report, "Resource: {}", info.name()).unwrap();
writeln!(report, "- Size: {} bytes", info.layout().size()).unwrap();

View file

@@ -57,14 +57,18 @@ async fn main() {
Account::offline(username_or_email)
};
let mut commands = CommandDispatcher::new();
register_commands(&mut commands);
builder = builder.add_account_with_state(account, State::new(args.clone(), commands));
builder = builder.add_account_with_state(account, State::new());
}
let mut commands = CommandDispatcher::new();
register_commands(&mut commands);
builder
.join_delay(Duration::from_millis(100))
.set_swarm_state(SwarmState {
args,
commands: Arc::new(commands),
})
.start(join_address)
.await
.unwrap();
@@ -102,17 +106,13 @@ pub enum BotTask {
#[derive(Component, Clone, Default)]
pub struct State {
pub args: Args,
pub commands: Arc<CommandDispatcher<Mutex<CommandSource>>>,
pub killaura: bool,
pub task: Arc<Mutex<BotTask>>,
}
impl State {
fn new(args: Args, commands: CommandDispatcher<Mutex<CommandSource>>) -> Self {
fn new() -> Self {
Self {
args,
commands: Arc::new(commands),
killaura: true,
task: Arc::new(Mutex::new(BotTask::None)),
}
@@ -120,9 +120,14 @@ impl State {
}
#[derive(Resource, Default, Clone)]
struct SwarmState;
struct SwarmState {
pub args: Args,
pub commands: Arc<CommandDispatcher<Mutex<CommandSource>>>,
}
async fn handle(bot: Client, event: azalea::Event, state: State) -> anyhow::Result<()> {
let swarm = bot.resource::<SwarmState>();
match event {
azalea::Event::Init => {
bot.set_client_information(ClientInformation {
@@ -130,7 +135,7 @@ async fn handle(bot: Client, event: azalea::Event, state: State) -> anyhow::Resu
..Default::default()
})
.await?;
if state.args.pathfinder_debug_particles {
if swarm.args.pathfinder_debug_particles {
bot.ecs
.lock()
.entity_mut(bot.entity)
@@ -141,7 +146,7 @@ async fn handle(bot: Client, event: azalea::Event, state: State) -> anyhow::Resu
let (Some(username), content) = chat.split_sender_and_content() else {
return Ok(());
};
if username != state.args.owner_username {
if username != swarm.args.owner_username {
return Ok(());
}
@@ -153,7 +158,7 @@ async fn handle(bot: Client, event: azalea::Event, state: State) -> anyhow::Resu
content.strip_prefix('!').map(|s| s.to_owned())
};
if let Some(command) = command {
match state.commands.execute(
match swarm.commands.execute(
command,
Mutex::new(CommandSource {
bot: bot.clone(),

View file

@@ -409,6 +409,7 @@ where
{
let mut ecs = ecs_lock.lock();
ecs.insert_resource(swarm.clone());
ecs.insert_resource(self.swarm_state.clone());
ecs.run_schedule(main_schedule_label);
ecs.clear_trackers();
}