use std::sync::Arc;
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
|
use std::time::Duration;
|
|
|
|
use serenity::async_trait;
|
|
use serenity::model::gateway::{GatewayIntents, Ready};
|
|
use serenity::model::guild::Member;
|
|
use serenity::model::id::GuildId;
|
|
use serenity::prelude::*;
|
|
use serenity::model::prelude::RoleId;
|
|
|
|
/// Serenity event handler for the bot.
struct Handler {
    // Set to true once the background loops spawned in `ready` are running,
    // so a gateway reconnect (which fires `ready` again) does not spawn
    // duplicate loops.
    is_loop_running: AtomicBool,
}
|
|
|
|
#[async_trait]
|
|
impl EventHandler for Handler {
|
|
async fn guild_member_addition(&self, ctx: Context, mut new_member: Member) {
|
|
let config = Config {
|
|
server: GuildId(957961810147938334),
|
|
member_role_current: RoleId::from(1144760670995370094),
|
|
member_role_past: RoleId::from(1144760548072886353),
|
|
};
|
|
|
|
let members_lock = {
|
|
let data_read = ctx.data.read().await;
|
|
data_read.get::<Members>().expect("Expected Members in TypeMap.").clone()
|
|
};
|
|
|
|
let members = members_lock.read().await;
|
|
if members.contains(&new_member.user.name) {
|
|
let mut roles = vec![];
|
|
|
|
if !new_member.roles.contains(&config.member_role_past){
|
|
roles.push(config.member_role_past);
|
|
}
|
|
if !new_member.roles.contains(&config.member_role_current){
|
|
roles.push(config.member_role_current);
|
|
}
|
|
|
|
if let Err(e) = new_member.add_roles(&ctx, &roles).await {
|
|
println!("{:?}", e);
|
|
}
|
|
}
|
|
}
|
|
|
|
async fn ready(&self, ctx: Context, ready: Ready) {
|
|
let ctx = Arc::new(ctx);
|
|
println!("{} is connected!", ready.user.name);
|
|
|
|
let config = Config {
|
|
server: GuildId(957961810147938334),
|
|
member_role_current: RoleId::from(1144760670995370094),
|
|
member_role_past: RoleId::from(1144760548072886353),
|
|
};
|
|
|
|
if !self.is_loop_running.load(Ordering::Relaxed) {
|
|
// We have to clone the Arc, as it gets moved into the new thread.
|
|
let ctx1 = Arc::clone(&ctx);
|
|
// tokio::spawn creates a new green thread that can run in parallel with the rest of
|
|
// the application.
|
|
tokio::spawn(async move {
|
|
loop {
|
|
// We clone Context again here, because Arc is owned, so it moves to the
|
|
// new function.
|
|
bulk_check(Arc::clone(&ctx1), &config).await;
|
|
tokio::time::sleep(Duration::from_secs(30)).await;
|
|
}
|
|
});
|
|
|
|
// And of course, we can run more than one thread at different timings.
|
|
let ctx2 = Arc::clone(&ctx);
|
|
tokio::spawn(async move {
|
|
loop {
|
|
bulk_check2(Arc::clone(&ctx2)).await;
|
|
tokio::time::sleep(Duration::from_secs(15)).await;
|
|
}
|
|
});
|
|
|
|
let ctx3 = Arc::clone(&ctx);
|
|
tokio::spawn(async move {
|
|
loop {
|
|
fetch_accounts(Arc::clone(&ctx3)).await;
|
|
tokio::time::sleep(Duration::from_secs(50)).await;
|
|
}
|
|
});
|
|
// fetch_accounts
|
|
|
|
// Now that the loop is running, we set the bool to true
|
|
self.is_loop_running.swap(true, Ordering::Relaxed);
|
|
}
|
|
}
|
|
}
|
|
|
|
/// Hard-coded guild and role ids the bot operates on.
struct Config {
    // The Discord server (guild) whose members are managed.
    server: GuildId,
    // Role held by users currently present in the fetched `Members` list.
    member_role_current: RoleId,
    // Role granted to anyone who has ever appeared in the list; never
    // removed by `bulk_check`.
    member_role_past: RoleId,
}
|
|
|
|
/// Snapshot of role counts produced by `bulk_check`.
#[derive(Default, Debug)]
struct MembersCount {
    // Number of guild members holding the "past member" role.
    members: i32,
    // Number of guild members holding the "current member" role.
    members_current: i32,
}
|
|
/// TypeMap key under which the shared role-count snapshot is stored in
/// `client.data` (written by `bulk_check`, read by `bulk_check2`).
struct MemberCounter;

impl TypeMapKey for MemberCounter {
    type Value = Arc<RwLock<MembersCount>>;
}
|
|
|
|
/// TypeMap key for the shared list of member usernames, refreshed from the
/// LDAP bridge by `fetch_accounts` and consumed by the role-sync code.
struct Members;

impl TypeMapKey for Members {
    type Value = Arc<RwLock<Vec<String>>>;
}
|
|
async fn bulk_check(ctx: Arc<Context>, config: &Config){
|
|
let members_lock = {
|
|
let data_read = ctx.data.read().await;
|
|
data_read.get::<Members>().expect("Expected Members in TypeMap.").clone()
|
|
};
|
|
let mut roles_set = [0,0,0];
|
|
let mut res = MembersCount {
|
|
members: 0,
|
|
members_current: 0,
|
|
};
|
|
if let Ok(x) = config.server.members(&ctx, None, None).await {
|
|
for mut member in x {
|
|
if members_lock.read().await.contains(&member.user.name) {
|
|
let mut roles = vec![];
|
|
|
|
if !member.roles.contains(&config.member_role_past){
|
|
roles_set[0] += 1;
|
|
roles.push(config.member_role_past);
|
|
}
|
|
if !member.roles.contains(&config.member_role_current){
|
|
roles_set[1] += 1;
|
|
roles.push(config.member_role_current);
|
|
}
|
|
|
|
if let Err(e) = member.add_roles(&ctx, &roles).await {
|
|
println!("{:?}", e);
|
|
}
|
|
} else if member.roles.contains(&config.member_role_current) {
|
|
roles_set[2] += 1;
|
|
// if theya re not a current member and have the role then remove it
|
|
if let Err(e) = member.remove_role(&ctx, &config.member_role_current).await {
|
|
println!("{:?}", e);
|
|
}
|
|
}
|
|
|
|
if member.roles.contains(&config.member_role_past){
|
|
res.members += 1;
|
|
}
|
|
if member.roles.contains(&config.member_role_current){
|
|
res.members_current += 1;
|
|
}
|
|
}
|
|
}
|
|
// small bit of logging to note changes over time
|
|
println!("Changes: New: +{}, Current: +{}/-{}", roles_set[0], roles_set[1], roles_set[2]);
|
|
|
|
let counter_lock = {
|
|
let data_read = ctx.data.read().await;
|
|
data_read.get::<MemberCounter>().expect("Expected MemberCounter in TypeMap.").clone()
|
|
};
|
|
{
|
|
// The HashMap of CommandCounter is wrapped in an RwLock; since we want to write to it, we will
|
|
// open the lock in write mode.
|
|
let mut counter = counter_lock.write().await;
|
|
|
|
// And we write the amount of times the command has been called to it.
|
|
counter.members_current = res.members_current;
|
|
counter.members = res.members;
|
|
}
|
|
}
|
|
|
|
async fn fetch_accounts(ctx: Arc<Context>){
|
|
let auth = "abcdef";
|
|
let url = format!("http://127.0.0.1:8087/ldap/discord?auth={}", auth);
|
|
if let Ok(result) = surf::get(url).recv_json::<Vec<String>>().await {
|
|
let members_lock = {
|
|
let data_read = ctx.data.read().await;
|
|
data_read.get::<Members>().expect("Expected Members in TypeMap.").clone()
|
|
};
|
|
let mut accounts = members_lock.write().await;
|
|
*accounts = result;
|
|
}
|
|
}
|
|
|
|
async fn bulk_check2(ctx: Arc<Context>){
|
|
let counter_lock = {
|
|
let data_read = ctx.data.read().await;
|
|
data_read.get::<MemberCounter>().expect("Expected MemberCounter in TypeMap.").clone()
|
|
};
|
|
{
|
|
// The HashMap of CommandCounter is wrapped in an RwLock; since we want to write to it, we will
|
|
// open the lock in write mode.
|
|
let counter = counter_lock.read().await;
|
|
println!("Members: {:?}", counter);
|
|
|
|
}
|
|
|
|
let members_lock = {
|
|
let data_read = ctx.data.read().await;
|
|
data_read.get::<Members>().expect("Expected Members in TypeMap.").clone()
|
|
};
|
|
{
|
|
// The HashMap of CommandCounter is wrapped in an RwLock; since we want to write to it, we will
|
|
// open the lock in write mode.
|
|
let counter = members_lock.read().await;
|
|
println!("Members: {:?}", counter);
|
|
|
|
}
|
|
}
|
|
|
|
#[tokio::main]
async fn main() {
    // Configure the client with your Discord bot token in the environment.
    //let token = env::var("DISCORD_TOKEN").expect("Expected a token in the environment");
    // NOTE(review): token is an empty placeholder — the bot cannot log in
    // until a real token is supplied (secret presumably stripped from source).
    let token = String::from("");

    // Intents are a bitflag; bitwise OR combines the event categories the bot
    // subscribes to. GUILD_MEMBERS is privileged and required for
    // guild_member_addition and the member sweeps.
    let intents = GatewayIntents::GUILDS | GatewayIntents::GUILD_MESSAGES | GatewayIntents::MESSAGE_CONTENT | GatewayIntents::GUILD_MEMBERS;
    // Build our client.
    let mut client = Client::builder(token, intents)
        .event_handler(Handler { is_loop_running: AtomicBool::new(false)})
        .await
        .expect("Error creating client");

    // Seed the shared TypeMap before starting, so every handler can rely on
    // both keys being present (they `expect(...)` on lookup).
    {
        let mut data = client.data.write().await;

        // will keep track of how many past and current members we have
        data.insert::<MemberCounter>(Arc::new(RwLock::new(MembersCount::default())));

        // a list of all current members
        data.insert::<Members>(Arc::new(RwLock::new(vec![])));
    }

    // Finally, start a single shard, and start listening to events.
    //
    // Shards will automatically attempt to reconnect, and will perform
    // exponential backoff until it reconnects.
    if let Err(why) = client.start().await {
        println!("Client error: {:?}", why);
    }
}