fix: was DDoSing the poor database
All checks were successful
/ check_lfs (push) Successful in 24s
On_Push / lint_clippy (push) Successful in 15s
On_Push / lint_fmt (push) Successful in 21s
On_Push / build (push) Successful in 1m51s
On_Push / deploy (push) Successful in 23s

guild_members_chunk is triggered for each chunk of each server the bot is in, and the bot is currently in 10 servers, so it was running the same things 10 times, clogging up connections.
This commit is contained in:
silver 2025-07-21 04:29:03 +01:00
parent 095ff6f2ce
commit a225c14b4f
Signed by: silver
GPG key ID: 36F93D61BAD3FD7D
3 changed files with 46 additions and 17 deletions

View file

@ -52,15 +52,23 @@ async fn main() {
struct Handler; struct Handler;
#[async_trait] #[async_trait]
impl EventHandler for Handler { impl EventHandler for Handler {
async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) { async fn cache_ready(&self, ctx: Context, _guilds: Vec<GuildId>) {
for guild in guilds { let config_lock = {
ctx.shard.chunk_guild(guild, Some(2000), false, ChunkGuildFilter::None, None); let data_read = ctx.data.read().await;
} data_read.get::<Config>().expect("Expected Config in TypeMap.").clone()
println!("Cache built successfully!"); };
let config_global = config_lock.read().await;
let server = config_global.committee_server;
ctx.shard.chunk_guild(server, Some(2000), false, ChunkGuildFilter::None, None);
println!("Cache loaded");
} }
async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) { async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) {
if (chunk.chunk_index + 1) == chunk.chunk_count { if (chunk.chunk_index + 1) == chunk.chunk_count {
println!("Cache built successfully!");
let db_lock = { let db_lock = {
let data_read = ctx.data.read().await; let data_read = ctx.data.read().await;
data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone() data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone()

View file

@ -47,15 +47,23 @@ async fn main() {
struct Handler; struct Handler;
#[async_trait] #[async_trait]
impl EventHandler for Handler { impl EventHandler for Handler {
async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) { async fn cache_ready(&self, ctx: Context, _guilds: Vec<GuildId>) {
for guild in guilds { let config_lock = {
ctx.shard.chunk_guild(guild, Some(2000), false, ChunkGuildFilter::None, None); let data_read = ctx.data.read().await;
} data_read.get::<Config>().expect("Expected Config in TypeMap.").clone()
println!("Cache built successfully!"); };
let config_global = config_lock.read().await;
let server = config_global.committee_server;
ctx.shard.chunk_guild(server, Some(2000), false, ChunkGuildFilter::None, None);
println!("Cache loaded");
} }
async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) { async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) {
if (chunk.chunk_index + 1) == chunk.chunk_count { if (chunk.chunk_index + 1) == chunk.chunk_count {
println!("Cache built successfully!");
// u[date committee server // u[date committee server
committee::check_committee(&ctx).await; committee::check_committee(&ctx).await;

View file

@ -12,6 +12,7 @@ use skynet_discord_bot::{
}, },
get_config, Config, get_config, Config,
}; };
use std::sync::atomic::{AtomicUsize, Ordering};
use std::{process, sync::Arc}; use std::{process, sync::Arc};
use tokio::sync::RwLock; use tokio::sync::RwLock;
@ -27,7 +28,10 @@ async fn main() {
let intents = GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::MESSAGE_CONTENT | GatewayIntents::GUILD_MEMBERS; let intents = GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::MESSAGE_CONTENT | GatewayIntents::GUILD_MEMBERS;
// Build our client. // Build our client.
let mut client = Client::builder(&config.discord_token, intents) let mut client = Client::builder(&config.discord_token, intents)
.event_handler(Handler {}) .event_handler(Handler {
server_count: Default::default(),
server_cached: Default::default(),
})
.cache_settings(serenity::cache::Settings::default()) .cache_settings(serenity::cache::Settings::default())
.await .await
.expect("Error creating client"); .expect("Error creating client");
@ -44,23 +48,32 @@ async fn main() {
} }
} }
struct Handler; struct Handler {
server_count: AtomicUsize,
server_cached: AtomicUsize,
}
#[async_trait] #[async_trait]
impl EventHandler for Handler { impl EventHandler for Handler {
async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) { async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) {
self.server_count.swap(guilds.len(), Ordering::SeqCst);
for guild in guilds { for guild in guilds {
ctx.shard.chunk_guild(guild, Some(2000), false, ChunkGuildFilter::None, None); ctx.shard.chunk_guild(guild, Some(2000), false, ChunkGuildFilter::None, None);
} }
println!("Cache built successfully!"); println!("Cache loaded {}", &self.server_count.load(Ordering::SeqCst));
} }
async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) { async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) {
if (chunk.chunk_index + 1) == chunk.chunk_count { if (chunk.chunk_index + 1) == chunk.chunk_count {
// this goes into each server and sets roles for each wolves member self.server_cached.fetch_add(1, Ordering::SeqCst);
check_bulk(&ctx).await; if (self.server_cached.load(Ordering::SeqCst) + 1) == self.server_count.load(Ordering::SeqCst) {
println!("Cache built successfully!");
// finish up // this goes into each server and sets roles for each wolves member
process::exit(0); check_bulk(&ctx).await;
// finish up
process::exit(0);
}
} }
} }
} }