feat: restore the original services; giving up the logging and control was too much
parent feff293043
commit 1af7f28a45
6 changed files with 304 additions and 0 deletions
76  src/bin/update_users.rs  Normal file
@@ -0,0 +1,76 @@
use serenity::all::{ChunkGuildFilter, GuildId};
use serenity::{
    async_trait,
    client::{Context, EventHandler},
    model::gateway::{GatewayIntents, Ready},
    Client,
};
use skynet_discord_bot::common::database::{db_init, get_server_config_bulk, DataBase};
use skynet_discord_bot::common::set_roles::normal;
use skynet_discord_bot::{get_config, Config};
use std::{process, sync::Arc};
use tokio::sync::RwLock;

#[tokio::main]
async fn main() {
    let config = get_config();
    let db = match db_init(&config).await {
        Ok(x) => x,
        Err(_) => return,
    };

    // Intents are a bitflag, bitwise operations can be used to dictate which intents to use
    let intents = GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::MESSAGE_CONTENT | GatewayIntents::GUILD_MEMBERS;
    // Build our client.
    let mut client = Client::builder(&config.discord_token, intents)
        .event_handler(Handler {})
        .cache_settings(serenity::cache::Settings::default())
        .await
        .expect("Error creating client");

    {
        let mut data = client.data.write().await;

        data.insert::<Config>(Arc::new(RwLock::new(config)));
        data.insert::<DataBase>(Arc::new(RwLock::new(db)));
    }

    if let Err(why) = client.start().await {
        println!("Client error: {:?}", why);
    }
}

struct Handler;
#[async_trait]
impl EventHandler for Handler {
    async fn cache_ready(&self, ctx: Context, guilds: Vec<GuildId>) {
        for guild in guilds {
            ctx.shard.chunk_guild(guild, Some(2000), false, ChunkGuildFilter::None, None);
        }
        println!("Cache built successfully!");
    }

    async fn ready(&self, ctx: Context, ready: Ready) {
        let ctx = Arc::new(ctx);
        println!("{} is connected!", ready.user.name);

        // this goes into each server and sets roles for each wolves member
        check_bulk(Arc::clone(&ctx)).await;

        // finish up
        process::exit(0);
    }
}

async fn check_bulk(ctx: Arc<Context>) {
    let db_lock = {
        let data_read = ctx.data.read().await;
        data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone()
    };

    let db = db_lock.read().await;

    for server_config in get_server_config_bulk(&db).await {
        normal::update_server(&ctx, &server_config, &[], &[]).await;
    }
}