fix: these were not using the cache to access the member/role data
*was executing before the cache was built (``cache_ready`` fires only when the cache has been initialized, not when it is populated)
This commit is contained in:
parent
bd9d0cd43f
commit
6d08312f48
3 changed files with 34 additions and 37 deletions
|
@ -1,4 +1,4 @@
|
||||||
use serenity::all::{ChunkGuildFilter, GuildId};
|
use serenity::all::{ChunkGuildFilter, GuildId, GuildMembersChunkEvent};
|
||||||
use serenity::{
|
use serenity::{
|
||||||
async_trait,
|
async_trait,
|
||||||
client::{Context, EventHandler},
|
client::{Context, EventHandler},
|
||||||
|
@ -56,26 +56,25 @@ impl EventHandler for Handler {
|
||||||
println!("Cache built successfully!");
|
println!("Cache built successfully!");
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn ready(&self, ctx: Context, ready: Ready) {
|
async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) {
|
||||||
let ctx = Arc::new(ctx);
|
if (chunk.chunk_index + 1) == chunk.chunk_count {
|
||||||
println!("{} is connected!", ready.user.name);
|
let db_lock = {
|
||||||
|
let data_read = ctx.data.read().await;
|
||||||
|
data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone()
|
||||||
|
};
|
||||||
|
|
||||||
let db_lock = {
|
let db = db_lock.read().await;
|
||||||
let data_read = ctx.data.read().await;
|
|
||||||
data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone()
|
|
||||||
};
|
|
||||||
|
|
||||||
let db = db_lock.read().await;
|
let config_lock = {
|
||||||
|
let data_read = ctx.data.read().await;
|
||||||
|
data_read.get::<Config>().expect("Expected Config in TypeMap.").clone()
|
||||||
|
};
|
||||||
|
let config = config_lock.read().await;
|
||||||
|
|
||||||
let config_lock = {
|
cleanup(&db, &ctx, &config).await;
|
||||||
let data_read = ctx.data.read().await;
|
// finish up
|
||||||
data_read.get::<Config>().expect("Expected Config in TypeMap.").clone()
|
process::exit(0);
|
||||||
};
|
}
|
||||||
let config = config_lock.read().await;
|
|
||||||
|
|
||||||
cleanup(&db, &ctx, &config).await;
|
|
||||||
// finish up
|
|
||||||
process::exit(0);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use serenity::all::{ChunkGuildFilter, GuildId};
|
use serenity::all::{ChunkGuildFilter, GuildId, GuildMembersChunkEvent};
|
||||||
use serenity::{
|
use serenity::{
|
||||||
async_trait,
|
async_trait,
|
||||||
client::{Context, EventHandler},
|
client::{Context, EventHandler},
|
||||||
|
@ -50,14 +50,13 @@ impl EventHandler for Handler {
|
||||||
println!("Cache built successfully!");
|
println!("Cache built successfully!");
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn ready(&self, ctx: Context, ready: Ready) {
|
async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) {
|
||||||
let ctx = Arc::new(ctx);
|
if (chunk.chunk_index + 1) == chunk.chunk_count {
|
||||||
println!("{} is connected!", ready.user.name);
|
// u[date committee server
|
||||||
|
committee::check_committee(&ctx).await;
|
||||||
|
|
||||||
// u[date committee server
|
// finish up
|
||||||
committee::check_committee(&ctx).await;
|
process::exit(0);
|
||||||
|
}
|
||||||
// finish up
|
|
||||||
process::exit(0);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
use serenity::all::{ChunkGuildFilter, GuildId};
|
use serenity::all::{ChunkGuildFilter, GuildId, GuildMembersChunkEvent};
|
||||||
use serenity::{
|
use serenity::{
|
||||||
async_trait,
|
async_trait,
|
||||||
client::{Context, EventHandler},
|
client::{Context, EventHandler},
|
||||||
|
@ -50,19 +50,18 @@ impl EventHandler for Handler {
|
||||||
println!("Cache built successfully!");
|
println!("Cache built successfully!");
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn ready(&self, ctx: Context, ready: Ready) {
|
async fn guild_members_chunk(&self, ctx: Context, chunk: GuildMembersChunkEvent) {
|
||||||
let ctx = Arc::new(ctx);
|
if (chunk.chunk_index + 1) == chunk.chunk_count {
|
||||||
println!("{} is connected!", ready.user.name);
|
// this goes into each server and sets roles for each wolves member
|
||||||
|
check_bulk(&ctx).await;
|
||||||
|
|
||||||
// this goes into each server and sets roles for each wolves member
|
// finish up
|
||||||
check_bulk(Arc::clone(&ctx)).await;
|
process::exit(0);
|
||||||
|
}
|
||||||
// finish up
|
|
||||||
process::exit(0);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn check_bulk(ctx: Arc<Context>) {
|
async fn check_bulk(ctx: &Context) {
|
||||||
let db_lock = {
|
let db_lock = {
|
||||||
let data_read = ctx.data.read().await;
|
let data_read = ctx.data.read().await;
|
||||||
data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone()
|
data_read.get::<DataBase>().expect("Expected Config in TypeMap.").clone()
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue