guild_members_chunk is triggered once per chunk for every server the bot is in, and the bot is currently in 10 servers, so it was running the same work 10 times, clogging up connections
92 lines
2.6 KiB
Rust
92 lines
2.6 KiB
Rust
use serenity::{
|
|
all::{ChunkGuildFilter, GuildId, GuildMembersChunkEvent},
|
|
async_trait,
|
|
client::{Context, EventHandler},
|
|
model::gateway::GatewayIntents,
|
|
Client,
|
|
};
|
|
use skynet_discord_bot::{
|
|
common::{
|
|
database::{db_init, get_server_config_bulk, DataBase},
|
|
set_roles::normal,
|
|
},
|
|
get_config, Config,
|
|
};
|
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
use std::{process, sync::Arc};
|
|
use tokio::sync::RwLock;
|
|
|
|
#[tokio::main]
|
|
async fn main() {
|
|
let config = get_config();
|
|
let db = match db_init(&config).await {
|
|
Ok(x) => x,
|
|
Err(_) => return,
|
|
};
|
|
|
|
// Intents are a bitflag, bitwise operations can be used to dictate which intents to use
|
|
let intents = GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::MESSAGE_CONTENT | GatewayIntents::GUILD_MEMBERS;
|
|
// Build our client.
|
|
let mut client = Client::builder(&config.discord_token, intents)
|
|
.event_handler(Handler {
|
|
server_count: Default::default(),
|
|
server_cached: Default::default(),
|
|
})
|
|
.cache_settings(serenity::cache::Settings::default())
|
|
.await
|
|
.expect("Error creating client");
|
|
|
|
{
|
|
let mut data = client.data.write().await;
|
|
|
|
data.insert::<Config>(Arc::new(RwLock::new(config)));
|
|
data.insert::<DataBase>(Arc::new(RwLock::new(db)));
|
|
}
|
|
|
|
if let Err(why) = client.start().await {
|
|
println!("Client error: {why:?}");
|
|
}
|
|
}
|
|
|
|
/// Event handler state: tracks member-chunking progress across every guild
/// the bot is in, so the bulk role sync runs only after all guilds are cached.
struct Handler {
    // Total number of guilds, recorded once in cache_ready.
    server_count: AtomicUsize,
    // Number of guilds whose final member chunk has arrived.
    server_cached: AtomicUsize,
}
|
|
#[async_trait]
|
|
impl EventHandler for Handler {
|
|
async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) {
|
|
self.server_count.swap(guilds.len(), Ordering::SeqCst);
|
|
for guild in guilds {
|
|
ctx.shard.chunk_guild(guild, Some(2000), false, ChunkGuildFilter::None, None);
|
|
}
|
|
println!("Cache loaded {}", &self.server_count.load(Ordering::SeqCst));
|
|
}
|
|
|
|
async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) {
|
|
if (chunk.chunk_index + 1) == chunk.chunk_count {
|
|
self.server_cached.fetch_add(1, Ordering::SeqCst);
|
|
if (self.server_cached.load(Ordering::SeqCst) + 1) == self.server_count.load(Ordering::SeqCst) {
|
|
println!("Cache built successfully!");
|
|
|
|
// this goes into each server and sets roles for each wolves member
|
|
check_bulk(&ctx).await;
|
|
|
|
// finish up
|
|
process::exit(0);
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
async fn check_bulk(ctx: &Context) {
|
|
let db_lock = {
|
|
let data_read = ctx.data.read().await;
|
|
data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone()
|
|
};
|
|
|
|
let db = db_lock.read().await;
|
|
|
|
for server_config in get_server_config_bulk(&db).await {
|
|
normal::update_server(ctx, &server_config, &[], &[]).await;
|
|
}
|
|
}
|