diff --git a/azalea-client/src/client.rs b/azalea-client/src/client.rs
index c81f6f28..131aef16 100644
--- a/azalea-client/src/client.rs
+++ b/azalea-client/src/client.rs
@@ -555,6 +555,11 @@ impl Client {
         self.query::<Option<&T>>(&mut self.ecs.lock()).cloned()
     }
 
+    /// Get a resource from the ECS. This will clone the resource and return it.
+    pub fn resource<T: Resource + Clone>(&self) -> T {
+        self.ecs.lock().resource::<T>().clone()
+    }
+
     /// Get a required component for this client and call the given function.
     ///
     /// Similar to [`Self::component`], but doesn't clone the component since
diff --git a/azalea-client/src/disconnect.rs b/azalea-client/src/disconnect.rs
index a92423d7..c653e195 100644
--- a/azalea-client/src/disconnect.rs
+++ b/azalea-client/src/disconnect.rs
@@ -47,6 +47,7 @@ pub struct DisconnectEvent {
 pub fn remove_components_from_disconnected_players(
     mut commands: Commands,
     mut events: EventReader<DisconnectEvent>,
+    mut loaded_by_query: Query<&mut azalea_entity::LoadedBy>,
 ) {
     for DisconnectEvent { entity, .. } in events.read() {
         trace!("Got DisconnectEvent for {entity:?}");
@@ -62,6 +63,13 @@ pub fn remove_components_from_disconnected_players(
             .remove::();
         // note that we don't remove the client from the ECS, so if they decide
         // to reconnect they'll keep their state
+
+        // now we have to remove ourselves from the LoadedBy for every entity.
+        // in theory this could be inefficient if we have massive swarms... but in
+        // practice this is fine.
+        for mut loaded_by in &mut loaded_by_query.iter_mut() {
+            loaded_by.remove(entity);
+        }
     }
 }
 
diff --git a/azalea-entity/src/plugin/indexing.rs b/azalea-entity/src/plugin/indexing.rs
index f1105d89..78d5fb7e 100644
--- a/azalea-entity/src/plugin/indexing.rs
+++ b/azalea-entity/src/plugin/indexing.rs
@@ -155,8 +155,8 @@ pub fn remove_despawned_entities_from_indexes(
         Changed<LoadedBy>,
     >,
 ) {
-    for (entity, uuid, minecraft_id, position, world_name, loaded_by) in &query {
-        let Some(instance_lock) = instance_container.get(world_name) else {
+    for (entity, uuid, minecraft_id, position, instance_name, loaded_by) in &query {
+        let Some(instance_lock) = instance_container.get(instance_name) else {
             // the instance isn't even loaded by us, so we can safely delete the entity
             debug!(
                 "Despawned entity {entity:?} because it's in an instance that isn't loaded anymore"
diff --git a/azalea-protocol/src/read.rs b/azalea-protocol/src/read.rs
index 01744169..84c307d7 100755
--- a/azalea-protocol/src/read.rs
+++ b/azalea-protocol/src/read.rs
@@ -117,6 +117,13 @@ fn parse_frame(buffer: &mut Cursor<Vec<u8>>) -> Result<Vec<u8>, FrameSplitterE
         // reset the inner vec once we've reached the end of the buffer so we don't keep
         // leaking memory
         buffer.get_mut().clear();
+
+        // we just cap the capacity to 64KB instead of resetting it to save some
+        // allocations.
+        // and the reason we bother capping it at all is to avoid wasting memory if we
+        // get a big packet once and then never again.
+        buffer.get_mut().shrink_to(1024 * 64);
+
         buffer.set_position(0);
     }
 
diff --git a/azalea/examples/testbot/commands/debug.rs b/azalea/examples/testbot/commands/debug.rs
index a7f15d2b..10b9711d 100644
--- a/azalea/examples/testbot/commands/debug.rs
+++ b/azalea/examples/testbot/commands/debug.rs
@@ -197,6 +197,8 @@ pub fn register(commands: &mut CommandDispatcher<Mutex<CommandSource>>) {
                     .unwrap();
             }
 
+            writeln!(report).unwrap();
+
             for (info, _) in ecs.iter_resources() {
                 writeln!(report, "Resource: {}", info.name()).unwrap();
                 writeln!(report, "- Size: {} bytes", info.layout().size()).unwrap();
diff --git a/azalea/examples/testbot/main.rs b/azalea/examples/testbot/main.rs
index 97340c55..958c17d0 100644
--- a/azalea/examples/testbot/main.rs
+++ b/azalea/examples/testbot/main.rs
@@ -57,14 +57,18 @@ async fn main() {
             Account::offline(username_or_email)
         };
 
-        let mut commands = CommandDispatcher::new();
-        register_commands(&mut commands);
-
-        builder = builder.add_account_with_state(account, State::new(args.clone(), commands));
+        builder = builder.add_account_with_state(account, State::new());
     }
 
+    let mut commands = CommandDispatcher::new();
+    register_commands(&mut commands);
+
     builder
         .join_delay(Duration::from_millis(100))
+        .set_swarm_state(SwarmState {
+            args,
+            commands: Arc::new(commands),
+        })
         .start(join_address)
         .await
         .unwrap();
@@ -102,17 +106,13 @@ pub enum BotTask {
 
 #[derive(Component, Clone, Default)]
 pub struct State {
-    pub args: Args,
-    pub commands: Arc<CommandDispatcher<Mutex<CommandSource>>>,
     pub killaura: bool,
     pub task: Arc<Mutex<BotTask>>,
 }
 
 impl State {
-    fn new(args: Args, commands: CommandDispatcher<Mutex<CommandSource>>) -> Self {
+    fn new() -> Self {
         Self {
-            args,
-            commands: Arc::new(commands),
             killaura: true,
             task: Arc::new(Mutex::new(BotTask::None)),
         }
@@ -120,9 +120,14 @@
 }
 
 #[derive(Resource, Default, Clone)]
-struct SwarmState;
+struct SwarmState {
+    pub args: Args,
+    pub commands: Arc<CommandDispatcher<Mutex<CommandSource>>>,
+}
 
 async fn handle(bot: Client, event: azalea::Event, state: State) -> anyhow::Result<()> {
+    let swarm = bot.resource::<SwarmState>();
+
     match event {
         azalea::Event::Init => {
             bot.set_client_information(ClientInformation {
@@ -130,7 +135,7 @@
                 ..Default::default()
             })
             .await?;
-            if state.args.pathfinder_debug_particles {
+            if swarm.args.pathfinder_debug_particles {
                 bot.ecs
                     .lock()
                     .entity_mut(bot.entity)
@@ -141,7 +146,7 @@
             let (Some(username), content) = chat.split_sender_and_content() else {
                 return Ok(());
            };
-            if username != state.args.owner_username {
+            if username != swarm.args.owner_username {
                 return Ok(());
             }
 
@@ -153,7 +158,7 @@
                 content.strip_prefix('!').map(|s| s.to_owned())
             };
             if let Some(command) = command {
-                match state.commands.execute(
+                match swarm.commands.execute(
                     command,
                     Mutex::new(CommandSource {
                         bot: bot.clone(),
diff --git a/azalea/src/swarm/mod.rs b/azalea/src/swarm/mod.rs
index d0219406..2856a1cc 100644
--- a/azalea/src/swarm/mod.rs
+++ b/azalea/src/swarm/mod.rs
@@ -409,6 +409,7 @@ where
     {
         let mut ecs = ecs_lock.lock();
         ecs.insert_resource(swarm.clone());
+        ecs.insert_resource(self.swarm_state.clone());
         ecs.run_schedule(main_schedule_label);
         ecs.clear_trackers();
     }
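
Usage sketch (not part of the patch): the snippet below shows how the new `Client::resource` accessor and a swarm-wide resource could be combined in an event handler, mirroring what the testbot changes above do with `SwarmState`. The `BotSettings` type, its field, and the `azalea::ecs::prelude::Resource` import path are illustrative assumptions, not code from this change.

```rust
use azalea::ecs::prelude::Resource; // assumed re-export path for bevy_ecs's `Resource` derive
use azalea::prelude::*;

/// Hypothetical swarm-wide settings, analogous to the testbot's `SwarmState`.
#[derive(Resource, Default, Clone)]
struct BotSettings {
    owner_username: String,
}

#[derive(Component, Clone, Default)]
pub struct State;

async fn handle(bot: Client, event: azalea::Event, _state: State) -> anyhow::Result<()> {
    // `Client::resource` locks the ECS, clones the resource, and returns the
    // clone, so the ECS lock is not held across any later `.await` points.
    let settings = bot.resource::<BotSettings>();

    if let azalea::Event::Chat(chat) = event {
        let (Some(sender), _content) = chat.split_sender_and_content() else {
            return Ok(());
        };
        if sender == settings.owner_username {
            bot.chat("hello, owner!");
        }
    }
    Ok(())
}
```

Under the split introduced by this patch, such a resource would be registered once for the whole swarm (for example as the value passed to a `set_swarm_state`-style builder call, as in the testbot diff above) rather than being cloned into every bot's per-client `State`.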