mirror of https://github.com/mat-1/azalea.git

several pathfinder fixes

mat 2025-06-02 03:44:24 -01:00
parent 99659bd9a3
commit 3d121722d7
6 changed files with 53 additions and 44 deletions

View file

@@ -70,11 +70,17 @@ fn get_delay(
auto_reconnect_delay_query: Query<&AutoReconnectDelay>,
entity: Entity,
) -> Option<Duration> {
if let Ok(c) = auto_reconnect_delay_query.get(entity) {
let delay = if let Ok(c) = auto_reconnect_delay_query.get(entity) {
Some(c.delay)
} else {
auto_reconnect_delay_res.as_ref().map(|r| r.delay)
};
if delay == Some(Duration::MAX) {
// if the duration is set to max, treat that as autoreconnect being disabled
return None;
}
delay
}
pub fn rejoin_after_delay(
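The hunk above folds the per-bot AutoReconnectDelay and the global default into a single Option, then maps Duration::MAX to None so that a maximum delay means auto-reconnect is disabled. A standalone sketch of that fallback logic (resolve_reconnect_delay is a hypothetical helper, not part of azalea):

use std::time::Duration;

// Prefer the per-bot delay, fall back to the global default, and treat
// Duration::MAX as "auto-reconnect disabled".
fn resolve_reconnect_delay(
    per_bot: Option<Duration>,
    global_default: Option<Duration>,
) -> Option<Duration> {
    let delay = per_bot.or(global_default);
    if delay == Some(Duration::MAX) {
        return None;
    }
    delay
}

fn main() {
    assert_eq!(
        resolve_reconnect_delay(None, Some(Duration::from_secs(5))),
        Some(Duration::from_secs(5))
    );
    assert_eq!(resolve_reconnect_delay(Some(Duration::MAX), None), None);
}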

View file

@@ -169,9 +169,9 @@ pub struct RawConnection {
///
/// To check if we haven't disconnected from the server, use
/// [`Self::is_alive`].
network: Option<NetworkConnection>,
pub(crate) network: Option<NetworkConnection>,
pub state: ConnectionProtocol,
is_alive: bool,
pub(crate) is_alive: bool,
/// This exists for internal testing purposes and probably shouldn't be used
/// for normal bots. It's basically a way to make our client think it

View file

@@ -8,6 +8,7 @@ use azalea_world::{InstanceContainer, InstanceName};
use bevy_app::{App, Plugin, Update};
use bevy_ecs::prelude::*;
use derive_more::{Deref, DerefMut};
use tracing::trace;
use crate::{
Client, InstanceHolder,
@@ -148,7 +149,7 @@ fn handle_auto_mine(
/// Information about the block we're currently mining. This is only present if
/// we're currently mining a block.
#[derive(Component)]
#[derive(Component, Debug, Clone)]
pub struct Mining {
pub pos: BlockPos,
pub dir: Direction,
@@ -308,10 +309,12 @@ fn handle_mining_queued(
position: mining_queued.position,
});
} else {
commands.entity(entity).insert(Mining {
let mining = Mining {
pos: mining_queued.position,
dir: mining_queued.direction,
});
};
trace!("inserting mining component {mining:?} for entity {entity:?}");
commands.entity(entity).insert(mining);
**current_mining_pos = Some(mining_queued.position);
**current_mining_item = held_item;
**mine_progress = 0.;
@@ -332,6 +335,7 @@ fn handle_mining_queued(
sequence: sequence_number.get_and_increment(),
},
));
// vanilla really does send two swing arm packets
commands.trigger(SwingArmEvent { entity });
commands.trigger(SwingArmEvent { entity });
}
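With the new Debug and Clone derives on Mining, the component can be formatted with {:?} (as the trace! call above now does) or cloned out of the ECS for inspection. A hypothetical debugging system along those lines, assuming it lives next to the Mining component so no extra imports are needed beyond bevy_ecs and tracing:

use bevy_ecs::prelude::*;
use tracing::trace;

// Logs every entity that currently has a Mining component attached; the {mining:?}
// formatting only compiles because of the new Debug derive.
fn log_active_mining(query: Query<(Entity, &Mining)>) {
    for (entity, mining) in &query {
        trace!("{entity:?} is currently mining: {mining:?}");
    }
}

Registering it with app.add_systems(Update, log_active_mining) would be enough to see the output once trace-level logging is enabled.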
@@ -578,12 +582,12 @@ pub fn continue_mining_block(
current_mining_pos,
current_mining_item,
) {
println!("continue mining block at {:?}", mining.pos);
trace!("continue mining block at {:?}", mining.pos);
let instance_lock = instances.get(instance_name).unwrap();
let instance = instance_lock.read();
let target_block_state = instance.get_block_state(&mining.pos).unwrap_or_default();
println!("target_block_state: {target_block_state:?}");
trace!("target_block_state: {target_block_state:?}");
if target_block_state.is_air() {
commands.entity(entity).remove::<Mining>();
@@ -604,8 +608,10 @@ pub fn continue_mining_block(
**mine_ticks += 1.;
if **mine_progress >= 1. {
commands.entity(entity).remove::<Mining>();
println!("finished mining block at {:?}", mining.pos);
// MiningQueued is removed in case we were doing an infinite loop that
// repeatedly inserts MiningQueued
commands.entity(entity).remove::<(Mining, MiningQueued)>();
trace!("finished mining block at {:?}", mining.pos);
finish_mining_events.write(FinishMiningBlockEvent {
entity,
position: mining.pos,
@@ -631,7 +637,7 @@ pub fn continue_mining_block(
});
commands.trigger(SwingArmEvent { entity });
} else {
println!("switching mining target to {:?}", mining.pos);
trace!("switching mining target to {:?}", mining.pos);
commands.entity(entity).insert(MiningQueued {
position: mining.pos,
direction: mining.dir,
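The println! calls in continue_mining_block are now trace! events, so they only appear when a tracing subscriber with a trace-level filter is installed. A minimal setup that would surface them; the azalea_client::mining target is an assumption about the module path, and tracing-subscriber needs its env-filter feature:

use tracing_subscriber::EnvFilter;

fn main() {
    // show normal logs at info level, but everything from the mining module at trace level
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::new("info,azalea_client::mining=trace"))
        .init();

    // ... start the client or swarm as usual; the trace! lines above will now be printed
}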

View file

@@ -285,26 +285,11 @@ pub struct WeightedNode
impl Ord for WeightedNode {
#[inline]
fn cmp(&self, other: &Self) -> cmp::Ordering {
// we compare bits instead of floats because it's faster. this is the same as
// f32::total_cmp as long as the numbers aren't negative
debug_assert!(self.f_score >= 0.0);
debug_assert!(other.f_score >= 0.0);
debug_assert!(self.g_score >= 0.0);
debug_assert!(other.g_score >= 0.0);
let self_f_score = self.f_score.to_bits() as i32;
let other_f_score = other.f_score.to_bits() as i32;
if self_f_score == other_f_score {
let self_g_score = self.g_score.to_bits() as i32;
let other_g_score = other.g_score.to_bits() as i32;
return self_g_score.cmp(&other_g_score);
}
// intentionally inverted to make the BinaryHeap a min-heap
other_f_score.cmp(&self_f_score)
match other.f_score.total_cmp(&self.f_score) {
cmp::Ordering::Equal => self.g_score.total_cmp(&other.g_score),
s => s,
}
}
}
impl Eq for WeightedNode {}
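The rewritten Ord impl replaces the bit-pattern comparison (and its non-negative-score assumption) with f32::total_cmp, keeping the inverted f_score comparison so the max-heap BinaryHeap pops the lowest f_score first and breaks ties in favor of the higher g_score. A self-contained sketch of that ordering with a trimmed-down WeightedNode:

use std::{cmp, collections::BinaryHeap};

#[derive(PartialEq)]
struct WeightedNode {
    g_score: f32,
    f_score: f32,
}

impl Eq for WeightedNode {}

impl Ord for WeightedNode {
    fn cmp(&self, other: &Self) -> cmp::Ordering {
        // inverted f_score comparison -> BinaryHeap behaves as a min-heap on f_score
        match other.f_score.total_cmp(&self.f_score) {
            cmp::Ordering::Equal => self.g_score.total_cmp(&other.g_score),
            s => s,
        }
    }
}

impl PartialOrd for WeightedNode {
    fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.push(WeightedNode { g_score: 1.0, f_score: 3.0 });
    heap.push(WeightedNode { g_score: 2.0, f_score: -1.0 });
    heap.push(WeightedNode { g_score: 4.0, f_score: 3.0 });
    // lowest f_score pops first; negative scores now compare correctly too
    assert_eq!(heap.pop().unwrap().f_score, -1.0);
    // tie on f_score: the higher g_score wins
    assert_eq!(heap.pop().unwrap().g_score, 4.0);
}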

View file

@@ -861,9 +861,10 @@ pub fn check_for_path_obstruction(
drop(custom_state_ref);
warn!(
"path obstructed at index {obstructed_index} (starting at {:?}, path: {:?})",
executing_path.last_reached_node, executing_path.path
"path obstructed at index {obstructed_index} (starting at {:?})",
executing_path.last_reached_node,
);
debug!("obstructed path: {:?}", executing_path.path);
// if it's near the end, don't bother recalculating a path, just truncate and
// mark it as partial
if obstructed_index + 5 > executing_path.path.len() {
@@ -1223,11 +1224,19 @@
}
}
if let Some(found_edge) = found_edge
&& found_edge.cost <= edge.cost
current_position = movement_target;
// if found_edge is None or the cost increased, then return the index
if found_edge
.map(|found_edge| found_edge.cost > edge.cost)
.unwrap_or(true)
{
current_position = found_edge.movement.target;
} else {
// if the node that we're currently executing was obstructed then it's often too
// late to change the path, so it's usually better to just ignore this case :/
if i == 0 {
warn!("path obstructed at index 0, ignoring");
continue;
}
return Some(i);
}
}
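The reworked check treats a path index as obstructed when recomputing the movement finds no edge at all, or finds one whose cost went up compared to what was originally planned; index 0 is deliberately ignored because the node currently being executed is usually too late to replace. A standalone sketch of just that predicate (is_obstructed and the f32 cost type are assumptions for illustration):

// Returns true when the recomputed edge is missing or became more expensive
// than the edge that was originally planned at this index.
fn is_obstructed(found_edge_cost: Option<f32>, planned_cost: f32) -> bool {
    found_edge_cost
        .map(|found_cost| found_cost > planned_cost)
        .unwrap_or(true)
}

fn main() {
    assert!(is_obstructed(None, 1.0)); // the movement is no longer possible
    assert!(is_obstructed(Some(2.5), 1.0)); // still possible, but more expensive than planned
    assert!(!is_obstructed(Some(1.0), 1.0)); // unchanged cost: not obstructed
}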

View file

@@ -32,7 +32,7 @@ use bevy_app::{App, PluginGroup, PluginGroupBuilder, Plugins, SubApp};
use bevy_ecs::prelude::*;
use futures::future::{BoxFuture, join_all};
use parking_lot::{Mutex, RwLock};
use tokio::sync::mpsc;
use tokio::{sync::mpsc, time::sleep};
use tracing::{debug, error, warn};
use crate::{BoxHandleFn, DefaultBotPlugins, HandleFn, JoinOpts, NoState, StartError};
@@ -495,9 +495,7 @@
for ((account, bot_join_opts), state) in accounts.iter().zip(states) {
let mut join_opts = default_join_opts.clone();
join_opts.update(bot_join_opts);
swarm_clone
.add_and_retry_forever_with_opts(account, state, &join_opts)
.await;
let _ = swarm_clone.add_with_opts(account, state, &join_opts).await;
tokio::time::sleep(join_delay).await;
}
} else {
@@ -507,9 +505,9 @@
|((account, bot_join_opts), state)| async {
let mut join_opts = default_join_opts.clone();
join_opts.update(bot_join_opts);
swarm_borrow
let _ = swarm_borrow
.clone()
.add_and_retry_forever_with_opts(account, state, &join_opts)
.add_with_opts(account, state, &join_opts)
.await;
},
))
@@ -830,24 +828,29 @@ impl Swarm {
///
/// This does exponential backoff (though very limited), starting at 5
/// seconds and doubling up to 15 seconds.
#[deprecated(note = "azalea has auto-reconnect functionality built-in now, use `add` instead")]
pub async fn add_and_retry_forever<S: Component + Clone>(
&self,
account: &Account,
state: S,
) -> Client {
#[allow(deprecated)]
self.add_and_retry_forever_with_opts(account, state, &JoinOpts::default())
.await
}
/// Same as [`Self::add_and_retry_forever`], but allow passing custom join
/// options.
#[deprecated(
note = "azalea has auto-reconnect functionality built-in now, use `add_with_opts` instead"
)]
pub async fn add_and_retry_forever_with_opts<S: Component + Clone>(
&self,
account: &Account,
state: S,
opts: &JoinOpts,
) -> Client {
let mut disconnects = 0;
let mut disconnects: u32 = 0;
loop {
match self.add_with_opts(account, state.clone(), opts).await {
Ok(bot) => return bot,
@@ -870,7 +873,7 @@
}
}
tokio::time::sleep(delay).await;
sleep(delay).await;
}
}
}
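Both add_and_retry_forever variants are now deprecated in favor of add / add_with_opts together with the built-in auto-reconnect, but while they exist the doc comment's backoff still applies: start at 5 seconds, double, and cap at 15. A rough sketch of a delay schedule consistent with that description; the exact formula inside the deprecated function may differ:

use std::time::Duration;

// Capped exponential backoff: 5s, 10s, then 15s for every later attempt.
fn reconnect_delay(disconnects: u32) -> Duration {
    let secs = (5u64 << disconnects.min(4)).min(15);
    Duration::from_secs(secs)
}

fn main() {
    assert_eq!(reconnect_delay(0), Duration::from_secs(5));
    assert_eq!(reconnect_delay(1), Duration::from_secs(10));
    assert_eq!(reconnect_delay(2), Duration::from_secs(15));
}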