diff --git a/.github/labeler.yml b/.github/labeler.yml index dd944c8..ecc1116 100644 --- a/.github/labeler.yml +++ b/.github/labeler.yml @@ -59,4 +59,4 @@ 'cog: unicode': - unicode/* 'cog: werewolf': - - werewolf \ No newline at end of file + - werewolf/* \ No newline at end of file diff --git a/.github/workflows/labeler.yml b/.github/workflows/labeler.yml index 65e6640..82a4441 100644 --- a/.github/workflows/labeler.yml +++ b/.github/workflows/labeler.yml @@ -6,7 +6,7 @@ # https://github.com/actions/labeler name: Labeler -on: [pull_request] +on: [pull_request_target] jobs: label: diff --git a/README.md b/README.md index ec76ead..b1c3b73 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ Check out *Deprecated* my V2 cogs at [Fox-Cogs v2](https://github.com/bobloy/Fox # Contact Get support on the [Third Party Cog Server](https://discord.gg/GET4DVk) -Feel free to @ me in the #support_othercogs channel +Feel free to @ me in the #support_fox-v3 channel Discord: Bobloy#6513 diff --git a/ccrole/ccrole.py b/ccrole/ccrole.py index 59efc55..5248766 100644 --- a/ccrole/ccrole.py +++ b/ccrole/ccrole.py @@ -292,13 +292,13 @@ class CCRole(commands.Cog): # Thank you Cog-Creators cmd = ctx.invoked_with - cmd = cmd.lower() # Continues the proud case_insentivity tradition of ccrole + cmd = cmd.lower() # Continues the proud case-insensitivity tradition of ccrole guild = ctx.guild # message = ctx.message # Unneeded since switch to `on_message_without_command` from `on_command_error` - cmdlist = self.config.guild(guild).cmdlist + cmd_list = self.config.guild(guild).cmdlist # cmd = message.content[len(prefix) :].split()[0].lower() - cmd = await cmdlist.get_raw(cmd, default=None) + cmd = await cmd_list.get_raw(cmd, default=None) if cmd is not None: await self.eval_cc(cmd, message, ctx) diff --git a/chatter/README.md b/chatter/README.md index c831bb8..06331b2 100644 --- a/chatter/README.md +++ b/chatter/README.md @@ -124,7 +124,8 @@ pip install --no-deps "chatterbot>=1.1" #### Step 1: Built-in Downloader ``` -[p]cog install Chatter +[p]repo add Fox https://github.com/bobloy/Fox-V3 +[p]cog install Fox chatter ``` #### Step 2: Install Requirements diff --git a/chatter/chat.py b/chatter/chat.py index 098ba73..e29c317 100644 --- a/chatter/chat.py +++ b/chatter/chat.py @@ -12,7 +12,7 @@ from chatterbot import ChatBot from chatterbot.comparisons import JaccardSimilarity, LevenshteinDistance, SpacySimilarity from chatterbot.response_selection import get_random_response from chatterbot.trainers import ChatterBotCorpusTrainer, ListTrainer, UbuntuCorpusTrainer -from redbot.core import Config, commands +from redbot.core import Config, checks, commands from redbot.core.commands import Cog from redbot.core.data_manager import cog_data_path from redbot.core.utils.predicates import MessagePredicate @@ -57,7 +57,13 @@ class Chatter(Cog): self.bot = bot self.config = Config.get_conf(self, identifier=6710497116116101114) default_global = {} - default_guild = {"whitelist": None, "days": 1, "convo_delta": 15, "chatchannel": None} + default_guild = { + "whitelist": None, + "days": 1, + "convo_delta": 15, + "chatchannel": None, + "reply": True, + } path: pathlib.Path = cog_data_path(self) self.data_path = path / "database.sqlite3" @@ -164,7 +170,9 @@ class Chatter(Cog): return True def _train_ubuntu(self): - trainer = UbuntuCorpusTrainer(self.chatbot) + trainer = UbuntuCorpusTrainer( + self.chatbot, ubuntu_corpus_data_directory=cog_data_path(self) / "ubuntu_data" + ) trainer.train() return True @@ -220,6 +228,25 @@ class 
Chatter(Cog): await self.config.guild(ctx.guild).chatchannel.set(channel.id) await ctx.maybe_send_embed(f"Chat channel is now {channel.mention}") + @commands.admin() + @chatter.command(name="reply") + async def chatter_reply(self, ctx: commands.Context, toggle: Optional[bool] = None): + """ + Toggle bot reply to messages if conversation continuity is not present + + """ + reply = await self.config.guild(ctx.guild).reply() + if toggle is None: + toggle = not reply + await self.config.guild(ctx.guild).reply.set(toggle) + + if toggle: + await ctx.send("I will now respond to you if conversation continuity is not present") + else: + await ctx.send( + "I will not reply to your message if conversation continuity is not present, anymore" + ) + @commands.is_owner() @chatter.command(name="cleardata") async def chatter_cleardata(self, ctx: commands.Context, confirm: bool = False): @@ -279,7 +306,7 @@ class Chatter(Cog): ) return else: - self.similarity_algo = threshold + self.similarity_threshold = threshold self.similarity_algo = algos[algo_number] async with ctx.typing(): @@ -537,7 +564,15 @@ class Chatter(Cog): # Thank you Cog-Creators channel: discord.TextChannel = message.channel - if guild is not None and channel.id == await self.config.guild(guild).chatchannel(): + # is_reply = False # this is only useful with in_response_to + if ( + message.reference is not None + and isinstance(message.reference.resolved, discord.Message) + and message.reference.resolved.author.id == self.bot.user.id + ): + # is_reply = True # this is only useful with in_response_to + pass # this is a reply to the bot, good to go + elif guild is not None and channel.id == await self.config.guild(guild).chatchannel(): pass # good to go else: when_mentionables = commands.when_mentioned(self.bot, message) @@ -579,8 +614,13 @@ class Chatter(Cog): None, partial(self.chatbot.get_response, text, in_response_to=in_response_to) ) + replying = None + if await self.config.guild(guild).reply(): + if message != ctx.channel.last_message: + replying = message + if future and str(future): - self._last_message_per_channel[ctx.channel.id] = await ctx.send(str(future)) + self._last_message_per_channel[ctx.channel.id] = await channel.send(str(future), reference=replying) else: await ctx.send(":thinking:") diff --git a/chatter/info.json b/chatter/info.json index a048c23..fc31e7c 100644 --- a/chatter/info.json +++ b/chatter/info.json @@ -2,8 +2,8 @@ "author": [ "Bobloy" ], - "min_bot_version": "3.4.0", - "description": "Create an offline chatbot that talks like your average member using Machine Learning", + "min_bot_version": "3.4.6", + "description": "Create an offline chatbot that talks like your average member using Machine Learning. See setup instructions at https://github.com/bobloy/Fox-V3/tree/master/chatter", "hidden": false, "install_msg": "Thank you for installing Chatter! Please make sure you check the install instructions at https://github.com/bobloy/Fox-V3/blob/master/chatter/README.md\nAfter that, get started ith `[p]load chatter` and `[p]help Chatter`", "requirements": [ diff --git a/fifo/__init__.py b/fifo/__init__.py index 34cfd7b..2d5e103 100644 --- a/fifo/__init__.py +++ b/fifo/__init__.py @@ -1,5 +1,15 @@ +import sys + from .fifo import FIFO +# Applying fix from: https://github.com/Azure/azure-functions-python-worker/issues/640 +# [Fix] Create a wrapper for importing imgres +from .date_trigger import * +from . 
import CustomDateTrigger + +# [Fix] Register imgres into system modules +sys.modules["CustomDateTrigger"] = CustomDateTrigger + async def setup(bot): cog = FIFO(bot) diff --git a/fifo/date_trigger.py b/fifo/date_trigger.py new file mode 100644 index 0000000..b024750 --- /dev/null +++ b/fifo/date_trigger.py @@ -0,0 +1,10 @@ +from apscheduler.triggers.date import DateTrigger + + +class CustomDateTrigger(DateTrigger): + def get_next_fire_time(self, previous_fire_time, now): + next_run = super().get_next_fire_time(previous_fire_time, now) + return next_run if next_run is not None and next_run >= now else None + + def __getstate__(self): + return {"version": 1, "run_date": self.run_date} diff --git a/fifo/fifo.py b/fifo/fifo.py index acd01ac..d152609 100644 --- a/fifo/fifo.py +++ b/fifo/fifo.py @@ -1,8 +1,10 @@ +import itertools import logging -from datetime import datetime, timedelta, tzinfo +from datetime import MAXYEAR, datetime, timedelta, tzinfo from typing import Optional, Union import discord +import pytz from apscheduler.job import Job from apscheduler.jobstores.base import JobLookupError from apscheduler.schedulers.asyncio import AsyncIOScheduler @@ -10,7 +12,7 @@ from apscheduler.schedulers.base import STATE_PAUSED, STATE_RUNNING from redbot.core import Config, checks, commands from redbot.core.bot import Red from redbot.core.commands import TimedeltaConverter -from redbot.core.utils.chat_formatting import pagify +from redbot.core.utils.chat_formatting import humanize_timedelta, pagify from .datetime_cron_converters import CronConverter, DatetimeConverter, TimezoneConverter from .task import Task @@ -21,11 +23,12 @@ schedule_log.setLevel(logging.DEBUG) log = logging.getLogger("red.fox_v3.fifo") -async def _execute_task(task_state): - log.info(f"Executing {task_state=}") +async def _execute_task(**task_state): + log.info(f"Executing {task_state.get('name')}") task = Task(**task_state) if await task.load_from_config(): return await task.execute() + log.warning(f"Failed to load data on {task_state=}") return False @@ -37,6 +40,40 @@ def _disassemble_job_id(job_id: str): return job_id.split("_") +def _get_run_times(job: Job, now: datetime = None): + """ + Computes the scheduled run times between ``next_run_time`` and ``now`` (inclusive). 
+
+    Modified to be asynchronous and yielding instead of all-or-nothing
+
+    """
+    if not job.next_run_time:
+        return
+
+    if now is None:
+        now = datetime(MAXYEAR, 12, 31, 23, 59, 59, 999999, tzinfo=job.next_run_time.tzinfo)
+        yield from _get_run_times(job, now)  # Recursion
+        return
+
+    next_run_time = job.next_run_time
+    while next_run_time and next_run_time <= now:
+        yield next_run_time
+        next_run_time = job.trigger.get_next_fire_time(next_run_time, now)
+
+
+class CapturePrint:
+    """Silly little class to get `print` output"""
+
+    def __init__(self):
+        self.string = None
+
+    def write(self, string):
+        if self.string is None:
+            self.string = string
+        else:
+            self.string = self.string + "\n" + string
+
+
 class FIFO(commands.Cog):
     """
     Simple Scheduling Cog
@@ -55,7 +92,7 @@ class FIFO(commands.Cog):
         self.config.register_global(**default_global)
         self.config.register_guild(**default_guild)
 
-        self.scheduler = None
+        self.scheduler: Optional[AsyncIOScheduler] = None
         self.jobstore = None
 
         self.tz_cog = None
@@ -71,17 +108,22 @@ class FIFO(commands.Cog):
 
     async def initialize(self):
 
-        job_defaults = {"coalesce": False, "max_instances": 1}
+        job_defaults = {
+            "coalesce": True,  # Multiple missed triggers within the grace time will only fire once
+            "max_instances": 5,  # This is probably way too high, should likely only be one
+            "misfire_grace_time": 15,  # 15 seconds ain't much, but it's honest work
+            "replace_existing": True,  # Very important for persistent data
+        }
 
         # executors = {"default": AsyncIOExecutor()}
         # Default executor is already AsyncIOExecutor
         self.scheduler = AsyncIOScheduler(job_defaults=job_defaults, logger=schedule_log)
 
-        from .redconfigjobstore import RedConfigJobStore
+        from .redconfigjobstore import RedConfigJobStore  # Wait to import to prevent cyclic import
 
         self.jobstore = RedConfigJobStore(self.config, self.bot)
 
-        await self.jobstore.load_from_config(self.scheduler, "default")
+        await self.jobstore.load_from_config()
         self.scheduler.add_jobstore(self.jobstore, "default")
 
         self.scheduler.start()
@@ -104,35 +146,53 @@ class FIFO(commands.Cog):
         await task.delete_self()
 
     async def _process_task(self, task: Task):
-        job: Union[Job, None] = await self._get_job(task)
-        if job is not None:
-            job.reschedule(await task.get_combined_trigger())
-            return job
+        # None of this is necessary, we have `replace_existing` already
+        # job: Union[Job, None] = await self._get_job(task)
+        # if job is not None:
+        #     combined_trigger_ = await task.get_combined_trigger()
+        #     if combined_trigger_ is None:
+        #         job.remove()
+        #     else:
+        #         job.reschedule(combined_trigger_)
+        #     return job
         return await self._add_job(task)
 
     async def _get_job(self, task: Task) -> Job:
         return self.scheduler.get_job(_assemble_job_id(task.name, task.guild_id))
 
    async def _add_job(self, task: Task):
+        combined_trigger_ = await task.get_combined_trigger()
+        if combined_trigger_ is None:
+            return None
+
         return self.scheduler.add_job(
             _execute_task,
-            args=[task.__getstate__()],
+            kwargs=task.__getstate__(),
             id=_assemble_job_id(task.name, task.guild_id),
-            trigger=await task.get_combined_trigger(),
+            trigger=combined_trigger_,
+            name=task.name,
+            replace_existing=True,
         )
 
     async def _resume_job(self, task: Task):
-        try:
-            job = self.scheduler.resume_job(job_id=_assemble_job_id(task.name, task.guild_id))
-        except JobLookupError:
+        job: Union[Job, None] = await self._get_job(task)
+        if job is not None:
+            job.resume()
+        else:
+            job = await self._process_task(task)
         return job
 
     async def _pause_job(self, task: Task):
-        return
self.scheduler.pause_job(job_id=_assemble_job_id(task.name, task.guild_id)) + try: + return self.scheduler.pause_job(job_id=_assemble_job_id(task.name, task.guild_id)) + except JobLookupError: + return False async def _remove_job(self, task: Task): - return self.scheduler.remove_job(job_id=_assemble_job_id(task.name, task.guild_id)) + try: + self.scheduler.remove_job(job_id=_assemble_job_id(task.name, task.guild_id)) + except JobLookupError: + pass async def _get_tz(self, user: Union[discord.User, discord.Member]) -> Union[None, tzinfo]: if self.tz_cog is None: @@ -173,6 +233,41 @@ class FIFO(commands.Cog): if ctx.invoked_subcommand is None: pass + @fifo.command(name="wakeup") + async def fifo_wakeup(self, ctx: commands.Context): + """Debug command to fix missed executions. + + If you see a negative "Next run time" when adding a trigger, this may help resolve it. + Check the logs when using this command. + """ + + self.scheduler.wakeup() + await ctx.tick() + + @fifo.command(name="checktask", aliases=["checkjob", "check"]) + async def fifo_checktask(self, ctx: commands.Context, task_name: str): + """Returns the next 10 scheduled executions of the task""" + task = Task(task_name, ctx.guild.id, self.config, bot=self.bot) + await task.load_from_config() + + if task.data is None: + await ctx.maybe_send_embed( + f"Task by the name of {task_name} is not found in this guild" + ) + return + + job = await self._get_job(task) + if job is None: + await ctx.maybe_send_embed("No job scheduled for this task") + return + now = datetime.now(job.next_run_time.tzinfo) + + times = [ + humanize_timedelta(timedelta=x - now) + for x in itertools.islice(_get_run_times(job), 10) + ] + await ctx.maybe_send_embed("\n\n".join(times)) + @fifo.command(name="set") async def fifo_set( self, @@ -300,10 +395,14 @@ class FIFO(commands.Cog): else: embed.add_field(name="Server", value="Server not found", inline=False) + triggers, expired_triggers = await task.get_triggers() - trigger_str = "\n".join(str(t) for t in await task.get_triggers()) + trigger_str = "\n".join(str(t) for t in triggers) + expired_str = "\n".join(str(t) for t in expired_triggers) if trigger_str: embed.add_field(name="Triggers", value=trigger_str, inline=False) + if expired_str: + embed.add_field(name="Expired Triggers", value=expired_str, inline=False) job = await self._get_job(task) if job and job.next_run_time: @@ -319,12 +418,12 @@ class FIFO(commands.Cog): Do `[p]fifo list True` to see tasks from all guilds """ if all_guilds: - pass + pass # TODO: All guilds else: out = "" all_tasks = await self.config.guild(ctx.guild).tasks() for task_name, task_data in all_tasks.items(): - out += f"{task_name}: {task_data}\n" + out += f"{task_name}: {task_data}\n\n" if out: if len(out) > 2000: @@ -335,6 +434,27 @@ class FIFO(commands.Cog): else: await ctx.maybe_send_embed("No tasks to list") + @fifo.command(name="printschedule") + async def fifo_printschedule(self, ctx: commands.Context): + """ + Print the current schedule of execution. + + Useful for debugging. 
+ """ + cp = CapturePrint() + self.scheduler.print_jobs(out=cp) + + out = cp.string + + if out: + if len(out) > 2000: + for page in pagify(out): + await ctx.maybe_send_embed(page) + else: + await ctx.maybe_send_embed(out) + else: + await ctx.maybe_send_embed("Failed to get schedule from scheduler") + @fifo.command(name="add") async def fifo_add(self, ctx: commands.Context, task_name: str, *, command_to_execute: str): """ @@ -394,6 +514,7 @@ class FIFO(commands.Cog): return await task.clear_triggers() + await self._remove_job(task) await ctx.tick() @fifo.group(name="addtrigger", aliases=["trigger"]) @@ -413,7 +534,7 @@ class FIFO(commands.Cog): """ task = Task(task_name, ctx.guild.id, self.config, bot=self.bot) - await task.load_from_config() + await task.load_from_config() # Will set the channel and author if task.data is None: await ctx.maybe_send_embed( @@ -435,6 +556,40 @@ class FIFO(commands.Cog): f"Next run time: {job.next_run_time} ({delta_from_now.total_seconds()} seconds)" ) + @fifo_trigger.command(name="relative") + async def fifo_trigger_relative( + self, ctx: commands.Context, task_name: str, *, time_from_now: TimedeltaConverter + ): + """ + Add a "run once" trigger at a time relative from now to the specified task + """ + + task = Task(task_name, ctx.guild.id, self.config, bot=self.bot) + await task.load_from_config() + + if task.data is None: + await ctx.maybe_send_embed( + f"Task by the name of {task_name} is not found in this guild" + ) + return + + time_to_run = datetime.now(pytz.utc) + time_from_now + + result = await task.add_trigger("date", time_to_run, time_to_run.tzinfo) + if not result: + await ctx.maybe_send_embed( + "Failed to add a date trigger to this task, see console for logs" + ) + return + + await task.save_data() + job: Job = await self._process_task(task) + delta_from_now: timedelta = job.next_run_time - datetime.now(job.next_run_time.tzinfo) + await ctx.maybe_send_embed( + f"Task `{task_name}` added {time_to_run} to its scheduled runtimes\n" + f"Next run time: {job.next_run_time} ({delta_from_now.total_seconds()} seconds)" + ) + @fifo_trigger.command(name="date") async def fifo_trigger_date( self, ctx: commands.Context, task_name: str, *, datetime_str: DatetimeConverter @@ -443,7 +598,7 @@ class FIFO(commands.Cog): Add a "run once" datetime trigger to the specified task """ - task = Task(task_name, ctx.guild.id, self.config) + task = Task(task_name, ctx.guild.id, self.config, bot=self.bot) await task.load_from_config() if task.data is None: @@ -483,7 +638,7 @@ class FIFO(commands.Cog): See https://crontab.guru/ for help generating the cron_str """ - task = Task(task_name, ctx.guild.id, self.config) + task = Task(task_name, ctx.guild.id, self.config, bot=self.bot) await task.load_from_config() if task.data is None: diff --git a/fifo/info.json b/fifo/info.json index eb2a576..a690a92 100644 --- a/fifo/info.json +++ b/fifo/info.json @@ -10,7 +10,8 @@ "end_user_data_statement": "This cog does not store any End User Data", "requirements": [ "apscheduler", - "pytz" + "pytz", + "python-dateutil" ], "tags": [ "bobloy", diff --git a/fifo/redconfigjobstore.py b/fifo/redconfigjobstore.py index 7e68697..a494353 100644 --- a/fifo/redconfigjobstore.py +++ b/fifo/redconfigjobstore.py @@ -2,17 +2,14 @@ import asyncio import base64 import logging import pickle -from datetime import datetime -from typing import Tuple, Union from apscheduler.job import Job -from apscheduler.jobstores.base import ConflictingIdError, JobLookupError from apscheduler.jobstores.memory import 
MemoryJobStore from apscheduler.schedulers.asyncio import run_in_event_loop from apscheduler.util import datetime_to_utc_timestamp from redbot.core import Config -# TODO: use get_lock on config +# TODO: use get_lock on config maybe from redbot.core.bot import Red from redbot.core.utils import AsyncIter @@ -28,44 +25,55 @@ class RedConfigJobStore(MemoryJobStore): self.config = config self.bot = bot self.pickle_protocol = pickle.HIGHEST_PROTOCOL - self._eventloop = self.bot.loop - # TODO: self.config.jobs_index is never used, - # fine but maybe a sign of inefficient use of config - - # task = asyncio.create_task(self.load_from_config()) - # while not task.done(): - # sleep(0.1) - # future = asyncio.ensure_future(self.load_from_config(), loop=self.bot.loop) + self._eventloop = self.bot.loop # Used for @run_in_event_loop @run_in_event_loop def start(self, scheduler, alias): super().start(scheduler, alias) + for job, timestamp in self._jobs: + job._scheduler = self._scheduler + job._jobstore_alias = self._alias - async def load_from_config(self, scheduler, alias): - super().start(scheduler, alias) + async def load_from_config(self): _jobs = await self.config.jobs() - self._jobs = [ - (await self._decode_job(job), timestamp) async for (job, timestamp) in AsyncIter(_jobs) - ] + # self._jobs = [ + # (await self._decode_job(job), timestamp) async for (job, timestamp) in AsyncIter(_jobs) + # ] + async for job, timestamp in AsyncIter(_jobs, steps=5): + job = await self._decode_job(job) + index = self._get_job_index(timestamp, job.id) + self._jobs.insert(index, (job, timestamp)) + self._jobs_index[job.id] = (job, timestamp) + + async def save_to_config(self): + """Yea that's basically it""" + await self.config.jobs.set( + [(self._encode_job(job), timestamp) for job, timestamp in self._jobs] + ) + # self._jobs_index = await self.config.jobs_index.all() # Overwritten by next - self._jobs_index = {job.id: (job, timestamp) for job, timestamp in self._jobs} + # self._jobs_index = {job.id: (job, timestamp) for job, timestamp in self._jobs} def _encode_job(self, job: Job): job_state = job.__getstate__() - new_args = list(job_state["args"]) - new_args[0]["config"] = None - new_args[0]["bot"] = None - job_state["args"] = tuple(new_args) + job_state["kwargs"]["config"] = None + job_state["kwargs"]["bot"] = None + # new_kwargs = job_state["kwargs"] + # new_kwargs["config"] = None + # new_kwargs["bot"] = None + # job_state["kwargs"] = new_kwargs encoded = base64.b64encode(pickle.dumps(job_state, self.pickle_protocol)) out = { "_id": job.id, "next_run_time": datetime_to_utc_timestamp(job.next_run_time), "job_state": encoded.decode("ascii"), } - new_args = list(job_state["args"]) - new_args[0]["config"] = self.config - new_args[0]["bot"] = self.bot - job_state["args"] = tuple(new_args) + job_state["kwargs"]["config"] = self.config + job_state["kwargs"]["bot"] = self.bot + # new_kwargs = job_state["kwargs"] + # new_kwargs["config"] = self.config + # new_kwargs["bot"] = self.bot + # job_state["kwargs"] = new_kwargs # log.debug(f"Encoding job id: {job.id}\n" # f"Encoded as: {out}") @@ -76,10 +84,15 @@ class RedConfigJobStore(MemoryJobStore): return None job_state = in_job["job_state"] job_state = pickle.loads(base64.b64decode(job_state)) - new_args = list(job_state["args"]) - new_args[0]["config"] = self.config - new_args[0]["bot"] = self.bot - job_state["args"] = tuple(new_args) + if job_state["args"]: # Backwards compatibility on args to kwargs + job_state["kwargs"] = {**job_state["args"][0]} + job_state["args"] = 
[] + job_state["kwargs"]["config"] = self.config + job_state["kwargs"]["bot"] = self.bot + # new_kwargs = job_state["kwargs"] + # new_kwargs["config"] = self.config + # new_kwargs["bot"] = self.bot + # job_state["kwargs"] = new_kwargs job = Job.__new__(Job) job.__setstate__(job_state) job._scheduler = self._scheduler @@ -96,79 +109,6 @@ class RedConfigJobStore(MemoryJobStore): return job - @run_in_event_loop - def add_job(self, job: Job): - if job.id in self._jobs_index: - raise ConflictingIdError(job.id) - # log.debug(f"Check job args: {job.args=}") - timestamp = datetime_to_utc_timestamp(job.next_run_time) - index = self._get_job_index(timestamp, job.id) # This is fine - self._jobs.insert(index, (job, timestamp)) - self._jobs_index[job.id] = (job, timestamp) - asyncio.create_task(self._async_add_job(job, index, timestamp)) - # log.debug(f"Added job: {self._jobs[index][0].args}") - - async def _async_add_job(self, job, index, timestamp): - encoded_job = self._encode_job(job) - job_tuple = tuple([encoded_job, timestamp]) - async with self.config.jobs() as jobs: - jobs.insert(index, job_tuple) - # await self.config.jobs_index.set_raw(job.id, value=job_tuple) - return True - - @run_in_event_loop - def update_job(self, job): - old_tuple: Tuple[Union[Job, None], Union[datetime, None]] = self._jobs_index.get( - job.id, (None, None) - ) - old_job = old_tuple[0] - old_timestamp = old_tuple[1] - if old_job is None: - raise JobLookupError(job.id) - - # If the next run time has not changed, simply replace the job in its present index. - # Otherwise, reinsert the job to the list to preserve the ordering. - old_index = self._get_job_index(old_timestamp, old_job.id) - new_timestamp = datetime_to_utc_timestamp(job.next_run_time) - asyncio.create_task( - self._async_update_job(job, new_timestamp, old_index, old_job, old_timestamp) - ) - - async def _async_update_job(self, job, new_timestamp, old_index, old_job, old_timestamp): - encoded_job = self._encode_job(job) - if old_timestamp == new_timestamp: - self._jobs[old_index] = (job, new_timestamp) - async with self.config.jobs() as jobs: - jobs[old_index] = (encoded_job, new_timestamp) - else: - del self._jobs[old_index] - new_index = self._get_job_index(new_timestamp, job.id) # This is fine - self._jobs.insert(new_index, (job, new_timestamp)) - async with self.config.jobs() as jobs: - del jobs[old_index] - jobs.insert(new_index, (encoded_job, new_timestamp)) - self._jobs_index[old_job.id] = (job, new_timestamp) - # await self.config.jobs_index.set_raw(old_job.id, value=(encoded_job, new_timestamp)) - - log.debug(f"Async Updated {job.id=}") - log.debug(f"Check job args: {job.args=}") - - @run_in_event_loop - def remove_job(self, job_id): - job, timestamp = self._jobs_index.get(job_id, (None, None)) - if job is None: - raise JobLookupError(job_id) - - index = self._get_job_index(timestamp, job_id) - del self._jobs[index] - del self._jobs_index[job.id] - asyncio.create_task(self._async_remove_job(index, job)) - - async def _async_remove_job(self, index, job): - async with self.config.jobs() as jobs: - del jobs[index] - # await self.config.jobs_index.clear_raw(job.id) - @run_in_event_loop def remove_all_jobs(self): super().remove_all_jobs() @@ -180,4 +120,9 @@ class RedConfigJobStore(MemoryJobStore): def shutdown(self): """Removes all jobs without clearing config""" - super().remove_all_jobs() + asyncio.create_task(self.async_shutdown()) + + async def async_shutdown(self): + await self.save_to_config() + self._jobs = [] + self._jobs_index = {} diff --git 
a/fifo/task.py b/fifo/task.py index f7dc45a..e1b7207 100644 --- a/fifo/task.py +++ b/fifo/task.py @@ -1,18 +1,19 @@ import logging from datetime import datetime, timedelta -from typing import Dict, List, Union +from typing import Dict, List, Optional, Tuple, Union import discord +import pytz from apscheduler.triggers.base import BaseTrigger from apscheduler.triggers.combining import OrTrigger from apscheduler.triggers.cron import CronTrigger -from apscheduler.triggers.date import DateTrigger from apscheduler.triggers.interval import IntervalTrigger from discord.utils import time_snowflake -from pytz import timezone from redbot.core import Config, commands from redbot.core.bot import Red +from fifo.date_trigger import CustomDateTrigger + log = logging.getLogger("red.fox_v3.fifo.task") @@ -26,7 +27,7 @@ def get_trigger(data): return IntervalTrigger(days=parsed_time.days, seconds=parsed_time.seconds) if data["type"] == "date": - return DateTrigger(data["time_data"], timezone=data["tzinfo"]) + return CustomDateTrigger(data["time_data"], timezone=data["tzinfo"]) if data["type"] == "cron": return CronTrigger.from_crontab(data["time_data"], timezone=data["tzinfo"]) @@ -34,20 +35,126 @@ def get_trigger(data): return False +def check_expired_trigger(trigger: BaseTrigger): + return trigger.get_next_fire_time(None, datetime.now(pytz.utc)) is None + + def parse_triggers(data: Union[Dict, None]): if data is None or not data.get("triggers", False): # No triggers return None if len(data["triggers"]) > 1: # Multiple triggers - return OrTrigger(get_trigger(t_data) for t_data in data["triggers"]) + triggers_list = [get_trigger(t_data) for t_data in data["triggers"]] + triggers_list = [t for t in triggers_list if not check_expired_trigger(t)] + if not triggers_list: + return None + return OrTrigger(triggers_list) + else: + trigger = get_trigger(data["triggers"][0]) + if check_expired_trigger(trigger): + return None + return trigger + + +# class FakeMessage: +# def __init__(self, message: discord.Message): +# d = {k: getattr(message, k, None) for k in dir(message)} +# self.__dict__.update(**d) + + +# Potential FakeMessage subclass of Message +# class DeleteSlots(type): +# @classmethod +# def __prepare__(metacls, name, bases): +# """Borrowed a bit from https://stackoverflow.com/q/56579348""" +# super_prepared = super().__prepare__(name, bases) +# print(super_prepared) +# return super_prepared + +things_for_fakemessage_to_steal = [ + "_state", + "id", + "webhook_id", + # "reactions", + # "attachments", + "embeds", + "application", + "activity", + "channel", + "_edited_time", + "type", + "pinned", + "flags", + "mention_everyone", + "tts", + "content", + "nonce", + "reference", +] + +things_fakemessage_sets_by_default = { + "attachments": [], + "reactions": [], +} + + +class FakeMessage(discord.Message): + def __init__(self, *args, message: discord.Message, **kwargs): + d = {k: getattr(message, k, None) for k in things_for_fakemessage_to_steal} + d.update(things_fakemessage_sets_by_default) + for k, v in d.items(): + try: + # log.debug(f"{k=} {v=}") + setattr(self, k, v) + except TypeError: + # log.exception("This is fine") + pass + except AttributeError: + # log.exception("This is fine") + pass + + self.id = time_snowflake(datetime.utcnow(), high=False) # Pretend to be now + self.type = discord.MessageType.default + + def process_the_rest( + self, + author: discord.Member, + channel: discord.TextChannel, + content, + ): + # self.content = content + # log.debug(self.content) + + # for handler in ('author', 
'member', 'mentions', 'mention_roles', 'call', 'flags'): + # try: + # getattr(self, '_handle_%s' % handler)(data[handler]) + # except KeyError: + # continue + self.author = author + # self._handle_author(author._user._to_minimal_user_json()) + # self._handle_member(author) + self._rebind_channel_reference(channel) + self._update( + { + "content": content, + } + ) + self._update( + { + "mention_roles": self.raw_role_mentions, + "mentions": self.raw_mentions, + } + ) - return get_trigger(data["triggers"][0]) + # self._handle_content(content) + # log.debug(self.content) + self.mention_everyone = "@everyone" in self.content or "@here" in self.content -class FakeMessage: - def __init__(self, message: discord.Message): - d = {k: getattr(message, k, None) for k in dir(message)} - self.__dict__.update(**d) + # self._handle_mention_roles(self.raw_role_mentions) + # self._handle_mentions(self.raw_mentions) + + # self.__dict__.update(**d) def neuter_message(message: FakeMessage): @@ -66,11 +173,11 @@ def neuter_message(message: FakeMessage): class Task: - default_task_data = {"triggers": [], "command_str": ""} + default_task_data = {"triggers": [], "command_str": "", "expired_triggers": []} default_trigger = { "type": "", - "time_data": None, # Used for Interval and Date Triggers + "time_data": None, "tzinfo": None, } @@ -87,9 +194,10 @@ class Task: async def _encode_time_triggers(self): if not self.data or not self.data.get("triggers", None): - return [] + return [], [] triggers = [] + expired_triggers = [] for t in self.data["triggers"]: if t["type"] == "interval": # Convert into timedelta td: timedelta = t["time_data"] @@ -101,27 +209,15 @@ class Task: if t["type"] == "date": # Convert into datetime dt: datetime = t["time_data"] - triggers.append( - { - "type": t["type"], - "time_data": dt.isoformat(), - "tzinfo": getattr(t["tzinfo"], "zone", None), - } - ) - # triggers.append( - # { - # "type": t["type"], - # "time_data": { - # "year": dt.year, - # "month": dt.month, - # "day": dt.day, - # "hour": dt.hour, - # "minute": dt.minute, - # "second": dt.second, - # "tzinfo": dt.tzinfo, - # }, - # } - # ) + data_to_append = { + "type": t["type"], + "time_data": dt.isoformat(), + "tzinfo": getattr(t["tzinfo"], "zone", None), + } + if dt < datetime.now(pytz.utc): + expired_triggers.append(data_to_append) + else: + triggers.append(data_to_append) continue if t["type"] == "cron": @@ -139,7 +235,7 @@ class Task: raise NotImplemented - return triggers + return triggers, expired_triggers async def _decode_time_triggers(self): if not self.data or not self.data.get("triggers", None): @@ -152,7 +248,7 @@ class Task: # First decode timezone if there is one if t["tzinfo"] is not None: - t["tzinfo"] = timezone(t["tzinfo"]) + t["tzinfo"] = pytz.timezone(t["tzinfo"]) if t["type"] == "interval": # Convert into timedelta t["time_data"] = timedelta(**t["time_data"]) @@ -180,7 +276,7 @@ class Task: return self.author_id = data["author_id"] - self.guild_id = data["guild_id"] + self.guild_id = data["guild_id"] # Weird I'm doing this, since self.guild_id was just used self.channel_id = data["channel_id"] self.data = data["data"] @@ -188,14 +284,23 @@ class Task: await self._decode_time_triggers() return self.data - async def get_triggers(self) -> List[Union[IntervalTrigger, DateTrigger]]: + async def get_triggers(self) -> Tuple[List[BaseTrigger], List[BaseTrigger]]: if not self.data: await self.load_from_config() if self.data is None or "triggers" not in self.data: # No triggers - return [] + return [], [] + + trigs = [] + 
expired_trigs = [] + for t in self.data["triggers"]: + trig = get_trigger(t) + if check_expired_trigger(trig): + expired_trigs.append(t) + else: + trigs.append(t) - return [get_trigger(t) for t in self.data["triggers"]] + return trigs, expired_trigs async def get_combined_trigger(self) -> Union[BaseTrigger, None]: if not self.data: @@ -215,7 +320,10 @@ class Task: data_to_save = self.default_task_data.copy() if self.data: data_to_save["command_str"] = self.get_command_str() - data_to_save["triggers"] = await self._encode_time_triggers() + ( + data_to_save["triggers"], + data_to_save["expired_triggers"], + ) = await self._encode_time_triggers() to_save = { "guild_id": self.guild_id, @@ -231,7 +339,10 @@ class Task: return data_to_save = self.data.copy() - data_to_save["triggers"] = await self._encode_time_triggers() + ( + data_to_save["triggers"], + data_to_save["expired_triggers"], + ) = await self._encode_time_triggers() await self.config.guild_from_id(self.guild_id).tasks.set_raw( self.name, "data", value=data_to_save @@ -239,63 +350,87 @@ class Task: async def execute(self): if not self.data or not self.get_command_str(): - log.warning(f"Could not execute task due to data problem: {self.data=}") + log.warning(f"Could not execute Task[{self.name}] due to data problem: {self.data=}") return False guild: discord.Guild = self.bot.get_guild(self.guild_id) # used for get_prefix if guild is None: - log.warning(f"Could not execute task due to missing guild: {self.guild_id}") + log.warning( + f"Could not execute Task[{self.name}] due to missing guild: {self.guild_id}" + ) return False channel: discord.TextChannel = guild.get_channel(self.channel_id) if channel is None: - log.warning(f"Could not execute task due to missing channel: {self.channel_id}") + log.warning( + f"Could not execute Task[{self.name}] due to missing channel: {self.channel_id}" + ) return False - author: discord.User = guild.get_member(self.author_id) + author: discord.Member = guild.get_member(self.author_id) if author is None: - log.warning(f"Could not execute task due to missing author: {self.author_id}") + log.warning( + f"Could not execute Task[{self.name}] due to missing author: {self.author_id}" + ) return False - actual_message: discord.Message = channel.last_message + actual_message: Optional[discord.Message] = channel.last_message # I'd like to present you my chain of increasingly desperate message fetching attempts if actual_message is None: # log.warning("No message found in channel cache yet, skipping execution") # return - actual_message = await channel.fetch_message(channel.last_message_id) + if channel.last_message_id is not None: + try: + actual_message = await channel.fetch_message(channel.last_message_id) + except discord.NotFound: + actual_message = None if actual_message is None: # last_message_id was an invalid message I guess actual_message = await channel.history(limit=1).flatten() if not actual_message: # Basically only happens if the channel has no messages actual_message = await author.history(limit=1).flatten() if not actual_message: # Okay, the *author* has never sent a message? 
log.warning("No message found in channel cache yet, skipping execution") - return + return False actual_message = actual_message[0] - message = FakeMessage(actual_message) - # message = FakeMessage2 - message.author = author - message.guild = guild # Just in case we got desperate - message.channel = channel - message.id = time_snowflake(datetime.now()) # Pretend to be now - message = neuter_message(message) + # message._handle_author(author) # Option when message is subclass + # message._state = self.bot._get_state() + # Time to set the relevant attributes + # message.author = author + # Don't need guild with subclass, guild is just channel.guild + # message.guild = guild # Just in case we got desperate, see above + # message.channel = channel # absolutely weird that this takes a message object instead of guild - prefixes = await self.bot.get_prefix(message) + prefixes = await self.bot.get_prefix(actual_message) if isinstance(prefixes, str): prefix = prefixes else: prefix = prefixes[0] - message.content = f"{prefix}{self.get_command_str()}" + new_content = f"{prefix}{self.get_command_str()}" + # log.debug(f"{new_content=}") - if not message.guild or not message.author or not message.content: - log.warning(f"Could not execute task due to message problem: {message}") + message = FakeMessage(message=actual_message) + message = neuter_message(message) + message.process_the_rest(author=author, channel=channel, content=new_content) + + if ( + not message.guild + or not message.author + or not message.content + or message.content == prefix + ): + log.warning( + f"Could not execute Task[{self.name}] due to message problem: " + f"{message.guild=}, {message.author=}, {message.content=}" + ) return False new_ctx: commands.Context = await self.bot.get_context(message) new_ctx.assume_yes = True if not new_ctx.valid: log.warning( - f"Could not execute Task[{self.name}] due invalid context: {new_ctx.invoked_with}" + f"Could not execute Task[{self.name}] due invalid context: " + f"{new_ctx.invoked_with=} {new_ctx.prefix=} {new_ctx.command=}" ) return False diff --git a/fifo/timezones.py b/fifo/timezones.py index 54d7c3e..bd1c239 100644 --- a/fifo/timezones.py +++ b/fifo/timezones.py @@ -5,6 +5,8 @@ All credit to https://github.com/prefrontal/dateutil-parser-timezones """ # from dateutil.tz import gettz +from datetime import datetime + from pytz import timezone @@ -227,4 +229,6 @@ def assemble_timezones(): timezones["YAKT"] = timezone("Asia/Yakutsk") # Yakutsk Time (UTC+09) timezones["YEKT"] = timezone("Asia/Yekaterinburg") # Yekaterinburg Time (UTC+05) + dt = datetime(2020, 1, 1) + timezones.update((x, y.localize(dt).tzinfo) for x, y in timezones.items()) return timezones diff --git a/flag/flag.py b/flag/flag.py index 6216f65..10f0334 100644 --- a/flag/flag.py +++ b/flag/flag.py @@ -53,9 +53,7 @@ class Flag(Cog): @commands.group() async def flagset(self, ctx: commands.Context): """ - My custom cog - - Extra information goes here + Commands for managing Flag settings """ if ctx.invoked_subcommand is None: pass diff --git a/infochannel/__init__.py b/infochannel/__init__.py index 514cd5f..bbff901 100644 --- a/infochannel/__init__.py +++ b/infochannel/__init__.py @@ -1,5 +1,7 @@ from .infochannel import InfoChannel -def setup(bot): - bot.add_cog(InfoChannel(bot)) +async def setup(bot): + ic_cog = InfoChannel(bot) + bot.add_cog(ic_cog) + await ic_cog.initialize() diff --git a/infochannel/infochannel.py b/infochannel/infochannel.py index b8d36a3..33e2b10 100644 --- a/infochannel/infochannel.py +++ 
b/infochannel/infochannel.py
@@ -1,25 +1,50 @@
 import asyncio
-from typing import Union
+import logging
+from collections import defaultdict
+from typing import Dict, Optional, Union
 
 import discord
 from redbot.core import Config, checks, commands
 from redbot.core.bot import Red
 from redbot.core.commands import Cog
 
-# Cog: Any = getattr(commands, "Cog", object)
-# listener = getattr(commands.Cog, "listener", None)  # Trusty + Sinbad
-# if listener is None:
-#     def listener(name=None):
-#         return lambda x: x
-
-RATE_LIMIT_DELAY = 60 * 10  # If you're willing to risk rate limiting, you can decrease the delay
+# Rate limit is 2 edits per 10 minutes, so 1 edit per 6 minutes is safe.
+RATE_LIMIT_DELAY = 60 * 6  # If you're willing to risk rate limiting, you can decrease the delay
+
+log = logging.getLogger("red.fox_v3.infochannel")
+
+
+async def get_channel_counts(category, guild):
+    # Gets count of bots
+    bot_num = len([m for m in guild.members if m.bot])
+    # Gets count of roles in the server
+    roles_num = len(guild.roles) - 1
+    # Gets count of channels in the server
+    # <total channels> - <channels in the stats category> - <categories>
+    channels_num = len(guild.channels) - len(category.voice_channels) - len(guild.categories)
+    # Gets all counts of members
+    members = guild.member_count
+    offline_num = len(list(filter(lambda m: m.status is discord.Status.offline, guild.members)))
+    online_num = members - offline_num
+    # Gets count of actual users
+    human_num = members - bot_num
+    return {
+        "members": members,
+        "humans": human_num,
+        "bots": bot_num,
+        "roles": roles_num,
+        "channels": channels_num,
+        "online": online_num,
+        "offline": offline_num,
+    }
 
 
 class InfoChannel(Cog):
     """
     Create a channel with updating server info
 
-    Less important information about the cog
+    This relies on editing channels, which is a strictly rate-limited activity.
+    As such, updates will not be frequent. Currently capped at 1 per 6 minutes per server.
     """
 
     def __init__(self, bot: Red):
@@ -29,23 +54,55 @@
             self, identifier=731101021116710497110110101108, force_registration=True
         )
 
+        # self.
so I can get the keys from this later + self.default_channel_names = { + "members": "Members: {count}", + "humans": "Humans: {count}", + "bots": "Bots: {count}", + "roles": "Roles: {count}", + "channels": "Channels: {count}", + "online": "Online: {count}", + "offline": "Offline: {count}", + } + + default_channel_ids = {k: None for k in self.default_channel_names.keys()} + # Only members is enabled by default + default_enabled_counts = {k: k == "members" for k in self.default_channel_names.keys()} + default_guild = { - "channel_id": None, - "botchannel_id": None, - "onlinechannel_id": None, - "member_count": True, - "bot_count": False, - "online_count": False, + "category_id": None, + "channel_ids": default_channel_ids, + "enabled_channels": default_enabled_counts, + "channel_names": self.default_channel_names, } self.config.register_guild(**default_guild) + self.default_role = {"enabled": False, "channel_id": None, "name": "{role}: {count}"} + + self.config.register_role(**self.default_role) + self._critical_section_wooah_ = 0 + self.channel_data = defaultdict(dict) + + self.edit_queue = defaultdict(lambda: defaultdict(lambda: asyncio.Queue(maxsize=2))) + + self._rate_limited_edits: Dict[int, Dict[str, Optional[asyncio.Task]]] = defaultdict( + lambda: defaultdict(lambda: None) + ) + async def red_delete_data_for_user(self, **kwargs): """Nothing to delete""" return + async def initialize(self): + for guild in self.bot.guilds: + await self.update_infochannel(guild) + + def cog_unload(self): + self.stop_all_queues() + @commands.command() @checks.admin() async def infochannel(self, ctx: commands.Context): @@ -61,37 +118,42 @@ class InfoChannel(Cog): ) guild: discord.Guild = ctx.guild - channel_id = await self.config.guild(guild).channel_id() - channel = None - if channel_id is not None: - channel: Union[discord.VoiceChannel, None] = guild.get_channel(channel_id) + category_id = await self.config.guild(guild).category_id() + category = None + + if category_id is not None: + category: Union[discord.CategoryChannel, None] = guild.get_channel(category_id) - if channel_id is not None and channel is None: - await ctx.send("Info channel has been deleted, recreate it?") - elif channel_id is None: - await ctx.send("Enable info channel on this server?") + if category_id is not None and category is None: + await ctx.maybe_send_embed("Info category has been deleted, recreate it?") + elif category_id is None: + await ctx.maybe_send_embed("Enable info channels on this server?") else: - await ctx.send("Do you wish to delete current info channels?") + await ctx.maybe_send_embed("Do you wish to delete current info channels?") msg = await self.bot.wait_for("message", check=check) if msg.content.upper() in ["N", "NO"]: - await ctx.send("Cancelled") + await ctx.maybe_send_embed("Cancelled") return - if channel is None: + if category is None: try: await self.make_infochannel(guild) except discord.Forbidden: - await ctx.send("Failure: Missing permission to create voice channel") + await ctx.maybe_send_embed( + "Failure: Missing permission to create necessary channels" + ) return else: await self.delete_all_infochannels(guild) + ctx.message = msg + if not await ctx.tick(): - await ctx.send("Done!") + await ctx.maybe_send_embed("Done!") - @commands.group() + @commands.group(aliases=["icset"]) @checks.admin() async def infochannelset(self, ctx: commands.Context): """ @@ -100,194 +162,418 @@ class InfoChannel(Cog): if not ctx.invoked_subcommand: pass - @infochannelset.command(name="botcount") - async def 
_infochannelset_botcount(self, ctx: commands.Context, enabled: bool = None): - """ - Toggle an infochannel that shows the amount of bots in the server + @infochannelset.command(name="togglechannel") + async def _infochannelset_togglechannel( + self, ctx: commands.Context, channel_type: str, enabled: Optional[bool] = None + ): + """Toggles the infochannel for the specified channel type. + + Valid Types are: + - `members`: Total members on the server + - `humans`: Total members that aren't bots + - `bots`: Total bots + - `roles`: Total number of roles + - `channels`: Total number of channels excluding infochannels, + - `online`: Total online members, + - `offline`: Total offline members, """ guild = ctx.guild + if channel_type not in self.default_channel_names.keys(): + await ctx.maybe_send_embed("Invalid channel type provided.") + return + if enabled is None: - enabled = not await self.config.guild(guild).bot_count() + enabled = not await self.config.guild(guild).enabled_channels.get_raw(channel_type) - await self.config.guild(guild).bot_count.set(enabled) - await self.make_infochannel(ctx.guild) + await self.config.guild(guild).enabled_channels.set_raw(channel_type, value=enabled) + await self.make_infochannel(ctx.guild, channel_type=channel_type) if enabled: - await ctx.send("InfoChannel for bot count has been enabled.") + await ctx.maybe_send_embed(f"InfoChannel `{channel_type}` has been enabled.") else: - await ctx.send("InfoChannel for bot count has been disabled.") + await ctx.maybe_send_embed(f"InfoChannel `{channel_type}` has been disabled.") - @infochannelset.command(name="onlinecount") - async def _infochannelset_onlinecount(self, ctx: commands.Context, enabled: bool = None): - """ - Toggle an infochannel that shows the amount of online users in the server - """ - guild = ctx.guild + @infochannelset.command(name="togglerole") + async def _infochannelset_rolecount( + self, ctx: commands.Context, role: discord.Role, enabled: bool = None + ): + """Toggle an infochannel that shows the count of users with the specified role""" if enabled is None: - enabled = not await self.config.guild(guild).online_count() + enabled = not await self.config.role(role).enabled() + + await self.config.role(role).enabled.set(enabled) - await self.config.guild(guild).online_count.set(enabled) - await self.make_infochannel(ctx.guild) + await self.make_infochannel(ctx.guild, channel_role=role) if enabled: - await ctx.send("InfoChannel for online user count has been enabled.") + await ctx.maybe_send_embed(f"InfoChannel for {role.name} count has been enabled.") else: - await ctx.send("InfoChannel for online user count has been disabled.") + await ctx.maybe_send_embed(f"InfoChannel for {role.name} count has been disabled.") - async def make_infochannel(self, guild: discord.Guild): - botcount = await self.config.guild(guild).bot_count() - onlinecount = await self.config.guild(guild).online_count() - overwrites = { - guild.default_role: discord.PermissionOverwrite(connect=False), - guild.me: discord.PermissionOverwrite(manage_channels=True, connect=True), - } + @infochannelset.command(name="name") + async def _infochannelset_name(self, ctx: commands.Context, channel_type: str, *, text=None): + """ + Change the name of the infochannel for the specified channel type. + + {count} must be used to display number of total members in the server. + Leave blank to set back to default. 
+
+        Examples:
+        - `[p]infochannelset name members Cool Cats: {count}`
+        - `[p]infochannelset name bots {count} Robot Overlords`
-        # Remove the old info channel first
-        channel_id = await self.config.guild(guild).channel_id()
+        Valid Types are:
+        - `members`: Total members on the server
+        - `humans`: Total members that aren't bots
+        - `bots`: Total bots
+        - `roles`: Total number of roles
+        - `channels`: Total number of channels excluding infochannels
+        - `online`: Total online members
+        - `offline`: Total offline members
+
+        Warning: This command counts against the channel update rate limit and may be queued.
+        """
+        guild = ctx.guild
+        if channel_type not in self.default_channel_names.keys():
+            await ctx.maybe_send_embed("Invalid channel type provided.")
+            return
+
+        if text is None:
+            text = self.default_channel_names.get(channel_type)
+        elif "{count}" not in text:
+            await ctx.maybe_send_embed(
+                "Improperly formatted. Make sure to use `{count}` in your channel name"
+            )
+            return
+        elif len(text) > 93:
+            await ctx.maybe_send_embed("Name is too long, max length is 93.")
+            return
+
+        await self.config.guild(guild).channel_names.set_raw(channel_type, value=text)
+        await self.update_infochannel(guild, channel_type=channel_type)
+        if not await ctx.tick():
+            await ctx.maybe_send_embed("Done!")
+
+    @infochannelset.command(name="rolename")
+    async def _infochannelset_rolename(
+        self, ctx: commands.Context, role: discord.Role, *, text=None
+    ):
+        """
+        Change the name of the infochannel for specific roles.
+
+        {count} must be used to display the number of members with the given role.
+        {role} can be used for the role's name.
+        Leave blank to set back to default.
+
+        Default is set to: `{role}: {count}`
+
+        Examples:
+        - `[p]infochannelset rolename @Patrons {role}: {count}`
+        - `[p]infochannelset rolename Elite {count} members with {role} role`
+        - `[p]infochannelset rolename "Space Role" Total boosters: {count}`
+
+        Warning: This command counts against the channel update rate limit and may be queued.
+        """
+        guild = ctx.message.guild
+        if text is None:
+            text = self.default_role["name"]
+        elif "{count}" not in text:
+            await ctx.maybe_send_embed(
+                "Improperly formatted.
Make sure to use `{count}` in your channel name" + ) + return + + await self.config.role(role).name.set(text) + await self.update_infochannel(guild, channel_role=role) + if not await ctx.tick(): + await ctx.maybe_send_embed("Done!") + + async def create_individual_channel( + self, guild, category: discord.CategoryChannel, overwrites, channel_type, count + ): + # Delete the channel if it exists + channel_id = await self.config.guild(guild).channel_ids.get_raw(channel_type) if channel_id is not None: channel: discord.VoiceChannel = guild.get_channel(channel_id) if channel: + self.stop_queue(guild.id, channel_type) await channel.delete(reason="InfoChannel delete") - # Then create the new one - channel = await guild.create_voice_channel( - "Total Humans:", reason="InfoChannel make", overwrites=overwrites - ) - await self.config.guild(guild).channel_id.set(channel.id) + # Only make the channel if it's enabled + if await self.config.guild(guild).enabled_channels.get_raw(channel_type): + name = await self.config.guild(guild).channel_names.get_raw(channel_type) + name = name.format(count=count) + channel = await category.create_voice_channel( + name, reason="InfoChannel make", overwrites=overwrites + ) + await self.config.guild(guild).channel_ids.set_raw(channel_type, value=channel.id) + return channel + return None + + async def create_role_channel( + self, guild, category: discord.CategoryChannel, overwrites, role: discord.Role + ): + # Delete the channel if it exists + channel_id = await self.config.role(role).channel_id() + if channel_id is not None: + channel: discord.VoiceChannel = guild.get_channel(channel_id) + if channel: + self.stop_queue(guild.id, role.id) + await channel.delete(reason="InfoChannel delete") - if botcount: - # Remove the old bot channel first - botchannel_id = await self.config.guild(guild).botchannel_id() - if channel_id is not None: - botchannel: discord.VoiceChannel = guild.get_channel(botchannel_id) - if botchannel: - await botchannel.delete(reason="InfoChannel delete") + # Only make the channel if it's enabled + if await self.config.role(role).enabled(): + count = len(role.members) + name = await self.config.role(role).name() + name = name.format(role=role.name, count=count) + channel = await category.create_voice_channel( + name, reason="InfoChannel make", overwrites=overwrites + ) + await self.config.role(role).channel_id.set(channel.id) + return channel + return None + + async def make_infochannel(self, guild: discord.Guild, channel_type=None, channel_role=None): + overwrites = { + guild.default_role: discord.PermissionOverwrite(connect=False), + guild.me: discord.PermissionOverwrite(manage_channels=True, connect=True), + } - # Then create the new one - botchannel = await guild.create_voice_channel( - "Bots:", reason="InfoChannel botcount", overwrites=overwrites + # Check for and create the Infochannel category + category_id = await self.config.guild(guild).category_id() + if category_id is not None: + category: discord.CategoryChannel = guild.get_channel(category_id) + if category is None: # Category id is invalid, probably deleted. 
+ category_id = None + if category_id is None: + category: discord.CategoryChannel = await guild.create_category( + "Server Stats", reason="InfoChannel Category make" ) - await self.config.guild(guild).botchannel_id.set(botchannel.id) - if onlinecount: - # Remove the old online channel first - onlinechannel_id = await self.config.guild(guild).onlinechannel_id() - if channel_id is not None: - onlinechannel: discord.VoiceChannel = guild.get_channel(onlinechannel_id) - if onlinechannel: - await onlinechannel.delete(reason="InfoChannel delete") + await self.config.guild(guild).category_id.set(category.id) + await category.edit(position=0) + category_id = category.id + + category: discord.CategoryChannel = guild.get_channel(category_id) - # Then create the new one - onlinechannel = await guild.create_voice_channel( - "Online:", reason="InfoChannel onlinecount", overwrites=overwrites + channel_data = await get_channel_counts(category, guild) + + # Only update a single channel + if channel_type is not None: + await self.create_individual_channel( + guild, category, overwrites, channel_type, channel_data[channel_type] ) - await self.config.guild(guild).onlinechannel_id.set(onlinechannel.id) + return + if channel_role is not None: + await self.create_role_channel(guild, category, overwrites, channel_role) + return - await self.update_infochannel(guild) + # Update all channels + for channel_type in self.default_channel_names.keys(): + await self.create_individual_channel( + guild, category, overwrites, channel_type, channel_data[channel_type] + ) + + for role in guild.roles: + await self.create_role_channel(guild, category, overwrites, role) + + # await self.update_infochannel(guild) async def delete_all_infochannels(self, guild: discord.Guild): + self.stop_guild_queues(guild.id) # Stop processing edits + + # Delete regular channels + for channel_type in self.default_channel_names.keys(): + channel_id = await self.config.guild(guild).channel_ids.get_raw(channel_type) + if channel_id is not None: + channel = guild.get_channel(channel_id) + if channel is not None: + await channel.delete(reason="InfoChannel delete") + await self.config.guild(guild).channel_ids.clear_raw(channel_type) + + # Delete role channels + for role in guild.roles: + channel_id = await self.config.role(role).channel_id() + if channel_id is not None: + channel = guild.get_channel(channel_id) + if channel is not None: + await channel.delete(reason="InfoChannel delete") + await self.config.role(role).channel_id.clear() + + # Delete the category last + category_id = await self.config.guild(guild).category_id() + if category_id is not None: + category = guild.get_channel(category_id) + if category is not None: + await category.delete(reason="InfoChannel delete") + + async def add_to_queue(self, guild, channel, identifier, count, formatted_name): + self.channel_data[guild.id][identifier] = (count, formatted_name, channel.id) + if not self.edit_queue[guild.id][identifier].full(): + try: + self.edit_queue[guild.id][identifier].put_nowait(identifier) + except asyncio.QueueFull: + pass # If queue is full, disregard + + if self._rate_limited_edits[guild.id][identifier] is None: + await self.start_queue(guild.id, identifier) + + async def update_individual_channel(self, guild, channel_type, count, guild_data): + name = guild_data["channel_names"][channel_type] + name = name.format(count=count) + channel = guild.get_channel(guild_data["channel_ids"][channel_type]) + if channel is None: + return # abort + await self.add_to_queue(guild, channel, 
channel_type, count, name) + + async def update_role_channel(self, guild, role: discord.Role, role_data): + if not role_data["enabled"]: + return # Not enabled + count = len(role.members) + name = role_data["name"] + name = name.format(role=role.name, count=count) + channel = guild.get_channel(role_data["channel_id"]) + if channel is None: + return # abort + await self.add_to_queue(guild, channel, role.id, count, name) + + async def update_infochannel(self, guild: discord.Guild, channel_type=None, channel_role=None): + if channel_type is None and channel_role is None: + return await self.trigger_updates_for( + guild, + members=True, + humans=True, + bots=True, + roles=True, + channels=True, + online=True, + offline=True, + extra_roles=set(guild.roles), + ) + + if channel_type is not None: + return await self.trigger_updates_for(guild, **{channel_type: True}) + + return await self.trigger_updates_for(guild, extra_roles={channel_role}) + + async def start_queue(self, guild_id, identifier): + self._rate_limited_edits[guild_id][identifier] = asyncio.create_task( + self._process_queue(guild_id, identifier) + ) + + def stop_queue(self, guild_id, identifier): + if self._rate_limited_edits[guild_id][identifier] is not None: + self._rate_limited_edits[guild_id][identifier].cancel() + + def stop_guild_queues(self, guild_id): + for identifier in self._rate_limited_edits[guild_id].keys(): + self.stop_queue(guild_id, identifier) + + def stop_all_queues(self): + for guild_id in self._rate_limited_edits.keys(): + self.stop_guild_queues(guild_id) + + async def _process_queue(self, guild_id, identifier): + while True: + identifier = await self.edit_queue[guild_id][identifier].get() # Waits forever + + count, formatted_name, channel_id = self.channel_data[guild_id][identifier] + channel: discord.VoiceChannel = self.bot.get_channel(channel_id) + + if channel.name == formatted_name: + continue # Nothing to process + + log.debug(f"Processing guild_id: {guild_id} - identifier: {identifier}") + + try: + await channel.edit(reason="InfoChannel update", name=formatted_name) + except (discord.Forbidden, discord.HTTPException): + pass # Don't bother figuring it out + except discord.InvalidArgument: + log.exception(f"Invalid formatted infochannel: {formatted_name}") + else: + await asyncio.sleep(RATE_LIMIT_DELAY) # Wait a reasonable amount of time + + async def trigger_updates_for(self, guild, **kwargs): + extra_roles: Optional[set] = kwargs.pop("extra_roles", False) guild_data = await self.config.guild(guild).all() - botchannel_id = guild_data["botchannel_id"] - onlinechannel_id = guild_data["onlinechannel_id"] - botchannel: discord.VoiceChannel = guild.get_channel(botchannel_id) - onlinechannel: discord.VoiceChannel = guild.get_channel(onlinechannel_id) - channel_id = guild_data["channel_id"] - channel: discord.VoiceChannel = guild.get_channel(channel_id) - await channel.delete(reason="InfoChannel delete") - if botchannel_id is not None: - await botchannel.delete(reason="InfoChannel delete") - if onlinechannel_id is not None: - await onlinechannel.delete(reason="InfoChannel delete") - - await self.config.guild(guild).clear() - - async def update_infochannel(self, guild: discord.Guild): - guild_data = await self.config.guild(guild).all() - botcount = guild_data["bot_count"] - onlinecount = guild_data["online_count"] - - # Gets count of bots - # bots = lambda x: x.bot - # def bots(x): return x.bot - - bot_num = len([m for m in guild.members if m.bot]) - # bot_msg = f"Bots: {num}" - - # Gets count of online users - 
members = guild.member_count - offline = len(list(filter(lambda m: m.status is discord.Status.offline, guild.members))) - online_num = members - offline - # online_msg = f"Online: {num}" - - # Gets count of actual users - total = lambda x: not x.bot - human_num = len([m for m in guild.members if total(m)]) - # human_msg = f"Total Humans: {num}" - - channel_id = guild_data["channel_id"] - if channel_id is None: - return False - - botchannel_id = guild_data["botchannel_id"] - onlinechannel_id = guild_data["onlinechannel_id"] - channel_id = guild_data["channel_id"] - channel: discord.VoiceChannel = guild.get_channel(channel_id) - botchannel: discord.VoiceChannel = guild.get_channel(botchannel_id) - onlinechannel: discord.VoiceChannel = guild.get_channel(onlinechannel_id) - - if guild_data["member_count"]: - name = f"{channel.name.split(':')[0]}: {human_num}" - - await channel.edit(reason="InfoChannel update", name=name) - - if botcount: - name = f"{botchannel.name.split(':')[0]}: {bot_num}" - await botchannel.edit(reason="InfoChannel update", name=name) - - if onlinecount: - name = f"{onlinechannel.name.split(':')[0]}: {online_num}" - await onlinechannel.edit(reason="InfoChannel update", name=name) - - async def update_infochannel_with_cooldown(self, guild): - """My attempt at preventing rate limits, lets see how it goes""" - if self._critical_section_wooah_: - if self._critical_section_wooah_ == 2: - # print("Already pending, skipping") - return # Another one is already pending, don't queue more than one - # print("Queuing another update") - self._critical_section_wooah_ = 2 - - while self._critical_section_wooah_: - await asyncio.sleep( - RATE_LIMIT_DELAY // 4 - ) # Max delay ends up as 1.25 * RATE_LIMIT_DELAY - - # print("Issuing queued update") - return await self.update_infochannel_with_cooldown(guild) - - # print("Entering critical") - self._critical_section_wooah_ = 1 - await self.update_infochannel(guild) - await asyncio.sleep(RATE_LIMIT_DELAY) - self._critical_section_wooah_ = 0 - # print("Exiting critical") - @Cog.listener() - async def on_member_join(self, member: discord.Member): + to_update = ( + kwargs.keys() & guild_data["enabled_channels"].keys() + ) # Value in kwargs doesn't matter + + log.debug(f"{to_update=}") + + if to_update or extra_roles: + category = guild.get_channel(guild_data["category_id"]) + if category is None: + return # Nothing to update, must be off + + channel_data = await get_channel_counts(category, guild) + if to_update: + for channel_type in to_update: + await self.update_individual_channel( + guild, channel_type, channel_data[channel_type], guild_data + ) + if extra_roles: + role_data = await self.config.all_roles() + for channel_role in extra_roles: + if channel_role.id in role_data: + await self.update_role_channel( + guild, channel_role, role_data[channel_role.id] + ) + + @Cog.listener(name="on_member_join") + @Cog.listener(name="on_member_remove") + async def on_member_join_remove(self, member: discord.Member): if await self.bot.cog_disabled_in_guild(self, member.guild): return - await self.update_infochannel_with_cooldown(member.guild) - @Cog.listener() - async def on_member_remove(self, member: discord.Member): - if await self.bot.cog_disabled_in_guild(self, member.guild): - return - await self.update_infochannel_with_cooldown(member.guild) + if member.bot: + await self.trigger_updates_for( + member.guild, members=True, bots=True, online=True, offline=True + ) + else: + await self.trigger_updates_for( + member.guild, members=True, humans=True, 
online=True, offline=True + ) @Cog.listener() async def on_member_update(self, before: discord.Member, after: discord.Member): if await self.bot.cog_disabled_in_guild(self, after.guild): return - onlinecount = await self.config.guild(after.guild).online_count() - if onlinecount: - if before.status != after.status: - await self.update_infochannel_with_cooldown(after.guild) + + if before.status != after.status: + return await self.trigger_updates_for(after.guild, online=True, offline=True) + + # XOR + c = set(after.roles) ^ set(before.roles) + + if c: + await self.trigger_updates_for(after.guild, extra_roles=c) + + @Cog.listener("on_guild_channel_create") + @Cog.listener("on_guild_channel_delete") + async def on_guild_channel_create_delete(self, channel: discord.TextChannel): + if await self.bot.cog_disabled_in_guild(self, channel.guild): + return + await self.trigger_updates_for(channel.guild, channels=True) + + @Cog.listener() + async def on_guild_role_create(self, role): + if await self.bot.cog_disabled_in_guild(self, role.guild): + return + await self.trigger_updates_for(role.guild, roles=True) + + @Cog.listener() + async def on_guild_role_delete(self, role): + if await self.bot.cog_disabled_in_guild(self, role.guild): + return + await self.trigger_updates_for(role.guild, roles=True) + + role_channel_id = await self.config.role(role).channel_id() + if role_channel_id is not None: + rolechannel: discord.VoiceChannel = role.guild.get_channel(role_channel_id) + if rolechannel: + await rolechannel.delete(reason="InfoChannel delete") + + await self.config.role(role).clear() diff --git a/isitdown/isitdown.py b/isitdown/isitdown.py index f786928..b72549a 100644 --- a/isitdown/isitdown.py +++ b/isitdown/isitdown.py @@ -10,9 +10,9 @@ log = logging.getLogger("red.fox_v3.isitdown") class IsItDown(commands.Cog): """ - Cog Description + Cog for checking whether a website is down or not. - Less important information about the cog + Uses the `isitdown.site` API """ def __init__(self, bot: Red): @@ -36,23 +36,25 @@ class IsItDown(commands.Cog): Alias: iid """ try: - resp = await self._check_if_down(url_to_check) + resp, url = await self._check_if_down(url_to_check) except AssertionError: await ctx.maybe_send_embed("Invalid URL provided. Make sure not to include `http://`") return + # log.debug(resp) if resp["isitdown"]: - await ctx.maybe_send_embed(f"{url_to_check} is DOWN!") + await ctx.maybe_send_embed(f"{url} is DOWN!") else: - await ctx.maybe_send_embed(f"{url_to_check} is UP!") + await ctx.maybe_send_embed(f"{url} is UP!") async def _check_if_down(self, url_to_check): - url = re.compile(r"https?://(www\.)?") - url.sub("", url_to_check).strip().strip("/") + re_compiled = re.compile(r"https?://(www\.)?") + url = re_compiled.sub("", url_to_check).strip().strip("/") url = f"https://isitdown.site/api/v3/{url}" + # log.debug(url) async with aiohttp.ClientSession() as session: async with session.get(url) as response: assert response.status == 200 resp = await response.json() - return resp + return resp, url diff --git a/launchlib/info.json b/launchlib/info.json index c1c7ad7..f9b7f11 100644 --- a/launchlib/info.json +++ b/launchlib/info.json @@ -8,7 +8,7 @@ "install_msg": "Thank you for installing LaunchLib. 
Get started with `[p]load launchlib`, then `[p]help LaunchLib`",
     "short": "Access launch data for space flights",
     "end_user_data_statement": "This cog does not store any End User Data",
-    "requirements": ["python-launch-library>=1.0.6"],
+    "requirements": ["python-launch-library>=2.0.3"],
     "tags": [
         "bobloy",
         "utils",
diff --git a/launchlib/launchlib.py b/launchlib/launchlib.py
index ae870fd..3d3eb0e 100644
--- a/launchlib/launchlib.py
+++ b/launchlib/launchlib.py
@@ -1,7 +1,7 @@
 import asyncio
 import functools
 import logging
-
+import re
 import discord
 import launchlibrary as ll
 from redbot.core import Config, commands
@@ -14,9 +14,7 @@ log = logging.getLogger("red.fox_v3.launchlib")

 class LaunchLib(commands.Cog):
     """
-    Cog Description
-
-    Less important information about the cog
+    Cog using `thespacedevs` API to get details about rocket launches
     """

     def __init__(self, bot: Red):
@@ -37,27 +35,86 @@ class LaunchLib(commands.Cog):
         return

     async def _embed_launch_data(self, launch: ll.AsyncLaunch):
-        status: ll.AsyncLaunchStatus = await launch.get_status()
+
+        if False:
+            example_launch = ll.AsyncLaunch(
+                id="9279744e-46b2-4eca-adea-f1379672ec81",
+                name="Atlas LV-3A | Samos 2",
+                tbddate=False,
+                tbdtime=False,
+                status={"id": 3, "name": "Success"},
+                inhold=False,
+                windowstart="1961-01-31 20:21:19+00:00",
+                windowend="1961-01-31 20:21:19+00:00",
+                net="1961-01-31 20:21:19+00:00",
+                info_urls=[],
+                vid_urls=[],
+                holdreason=None,
+                failreason=None,
+                probability=0,
+                hashtag=None,
+                agency=None,
+                changed=None,
+                pad=ll.Pad(
+                    id=93,
+                    name="Space Launch Complex 3W",
+                    latitude=34.644,
+                    longitude=-120.593,
+                    map_url="http://maps.google.com/maps?q=34.644+N,+120.593+W",
+                    retired=None,
+                    total_launch_count=3,
+                    agency_id=161,
+                    wiki_url=None,
+                    info_url=None,
+                    location=ll.Location(
+                        id=11,
+                        name="Vandenberg AFB, CA, USA",
+                        country_code="USA",
+                        total_launch_count=83,
+                        total_landing_count=3,
+                        pads=None,
+                    ),
+                    map_image="https://spacelaunchnow-prod-east.nyc3.digitaloceanspaces.com/media/launch_images/pad_93_20200803143225.jpg",
+                ),
+                rocket=ll.Rocket(
+                    id=2362,
+                    name=None,
+                    default_pads=None,
+                    family=None,
+                    wiki_url=None,
+                    info_url=None,
+                    image_url=None,
+                ),
+                missions=None,
+            )
+
+        # status: ll.AsyncLaunchStatus = await launch.get_status()
+        status = launch.status
         rocket: ll.AsyncRocket = launch.rocket

         title = launch.name
-        description = status.description
+        description = status["name"]

         urls = launch.vid_urls + launch.info_urls
-        if not urls and rocket:
-            urls = rocket.info_urls + [rocket.wiki_url]
+        if rocket:
+            urls += [rocket.info_url, rocket.wiki_url]
+        if launch.pad:
+            urls += [launch.pad.info_url, launch.pad.wiki_url]
+
         if urls:
-            url = urls[0]
+            url = next((url for url in urls if url is not None), None)
         else:
             url = None

-        color = discord.Color.green() if status.id in [1, 3] else discord.Color.red()
+        color = discord.Color.green() if status["id"] in [1, 3] else discord.Color.red()

         em = discord.Embed(title=title, description=description, url=url, color=color)

         if rocket and rocket.image_url and rocket.image_url != "Array":
             em.set_image(url=rocket.image_url)
+        elif launch.pad and launch.pad.map_image:
+            em.set_image(url=launch.pad.map_image)

         agency = getattr(launch, "agency", None)
         if agency is not None:
@@ -89,6 +146,18 @@ class LaunchLib(commands.Cog):
                 data = mission.get(f[0], None)
                 if data is not None and data:
                     em.add_field(name=f[1], value=data)
+        if launch.pad:
+            location_url = getattr(launch.pad, "map_url", None)
+            pad_name = getattr(launch.pad, "name", None)
+
+            if 
pad_name is not None:
+                if location_url is not None:
+                    location_url = re.sub(
+                        "[^a-zA-Z0-9/:.'+\"°?=,-]", "", location_url
+                    )  # Fix bad URLs
+                    em.add_field(name="Launch Pad Name", value=f"[{pad_name}]({location_url})")
+                else:
+                    em.add_field(name="Launch Pad Name", value=pad_name)

         if rocket and rocket.family:
             em.add_field(name="Rocket Family", value=rocket.family)
@@ -101,11 +170,17 @@ class LaunchLib(commands.Cog):

     @commands.group()
     async def launchlib(self, ctx: commands.Context):
+        """Base command for getting launches"""
         if ctx.invoked_subcommand is None:
             pass

     @launchlib.command()
     async def next(self, ctx: commands.Context, num_launches: int = 1):
+        """
+        Show the next launches
+
+        Use `num_launches` to get more than one.
+        """
         # launches = await api.async_next_launches(num_launches)
         # loop = asyncio.get_running_loop()
         #
@@ -115,6 +190,8 @@ class LaunchLib(commands.Cog):

         # launches = await self.api.async_fetch_launch(num=num_launches)

+        # log.debug(str(launches))
+
         async with ctx.typing():
             for x, launch in enumerate(launches):
                 if x >= num_launches:
diff --git a/nudity/nudity.py b/nudity/nudity.py
index 4233460..64ec02a 100644
--- a/nudity/nudity.py
+++ b/nudity/nudity.py
@@ -8,9 +8,7 @@ from redbot.core.data_manager import cog_data_path


 class Nudity(commands.Cog):
-    """
-    V3 Cog Template
-    """
+    """Monitors images for NSFW content and moves them to an NSFW channel if possible"""

     def __init__(self, bot: Red):
         super().__init__()
diff --git a/planttycoon/planttycoon.py b/planttycoon/planttycoon.py
index 665fc9a..4209b53 100644
--- a/planttycoon/planttycoon.py
+++ b/planttycoon/planttycoon.py
@@ -793,7 +793,7 @@ class PlantTycoon(commands.Cog):
                 pass
             await asyncio.sleep(self.defaults["timers"]["notification"] * 60)

-    def __unload(self):
+    def cog_unload(self):
         self.completion_task.cancel()
         # self.degradation_task.cancel()
         self.notification_task.cancel()
diff --git a/qrinvite/qrinvite.py b/qrinvite/qrinvite.py
index ab5f5dc..684b69d 100644
--- a/qrinvite/qrinvite.py
+++ b/qrinvite/qrinvite.py
@@ -67,8 +67,10 @@ class QRInvite(Cog):

         extension = pathlib.Path(image_url).parts[-1].replace(".", "?").split("?")[1]

+        save_as_name = f"{ctx.guild.id}-{ctx.author.id}"
+
         path: pathlib.Path = cog_data_path(self)
-        image_path = path / (ctx.guild.icon + "." 
+ extension) + image_path = path / f"{save_as_name}.{extension}" async with aiohttp.ClientSession() as session: async with session.get(image_url) as response: image = await response.read() @@ -77,27 +79,29 @@ class QRInvite(Cog): file.write(image) if extension == "webp": - new_path = convert_webp_to_png(str(image_path)) + new_image_path = convert_webp_to_png(str(image_path)) elif extension == "gif": await ctx.maybe_send_embed("gif is not supported yet, stay tuned") return elif extension == "png": - new_path = str(image_path) + new_image_path = str(image_path) + elif extension == "jpg": + new_image_path = convert_jpg_to_png(str(image_path)) else: await ctx.maybe_send_embed(f"{extension} is not supported yet, stay tuned") return myqr.run( invite, - picture=new_path, - save_name=ctx.guild.icon + "_qrcode.png", + picture=new_image_path, + save_name=f"{save_as_name}_qrcode.png", save_dir=str(cog_data_path(self)), colorized=colorized, ) - png_path: pathlib.Path = path / (ctx.guild.icon + "_qrcode.png") - with png_path.open("rb") as png_fp: - await ctx.send(file=discord.File(png_fp.read(), "qrcode.png")) + png_path: pathlib.Path = path / f"{save_as_name}_qrcode.png" + # with png_path.open("rb") as png_fp: + await ctx.send(file=discord.File(png_path, "qrcode.png")) def convert_webp_to_png(path): @@ -110,3 +114,10 @@ def convert_webp_to_png(path): new_path = path.replace(".webp", ".png") im.save(new_path, transparency=255) return new_path + + +def convert_jpg_to_png(path): + im = Image.open(path) + new_path = path.replace(".jpg", ".png") + im.save(new_path) + return new_path diff --git a/stealemoji/stealemoji.py b/stealemoji/stealemoji.py index a492527..8f32d74 100644 --- a/stealemoji/stealemoji.py +++ b/stealemoji/stealemoji.py @@ -16,16 +16,16 @@ log = logging.getLogger("red.fox_v3.stealemoji") async def check_guild(guild, emoji): - if len(guild.emojis) >= 100: + if len(guild.emojis) >= 2 * guild.emoji_limit: return False - if len(guild.emojis) < 50: + if len(guild.emojis) < guild.emoji_limit: return True if emoji.animated: - return sum(e.animated for e in guild.emojis) < 50 + return sum(e.animated for e in guild.emojis) < guild.emoji_limit else: - return sum(not e.animated for e in guild.emojis) < 50 + return sum(not e.animated for e in guild.emojis) < guild.emoji_limit class StealEmoji(Cog): diff --git a/werewolf/builder.py b/werewolf/builder.py index f57a669..da85b40 100644 --- a/werewolf/builder.py +++ b/werewolf/builder.py @@ -71,6 +71,7 @@ W1, W2, W5, W6 = Random Werewolf N1 = Benign Neutral 0001-1112T11W112N2 +which translates to 0,0,0,1,11,12,E1,R1,R1,R1,R2,P2 pre-letter = exact role position diff --git a/werewolf/role.py b/werewolf/role.py index e267283..90e5f5f 100644 --- a/werewolf/role.py +++ b/werewolf/role.py @@ -72,6 +72,9 @@ class Role(WolfListener): self.blocked = False self.properties = {} # Extra data for other roles (i.e. 
arsonist)

+    def __str__(self):
+        return self.__repr__()
+
     def __repr__(self):
         return f"{self.__class__.__name__}({self.player.__repr__()})"
@@ -86,7 +89,7 @@ class Role(WolfListener):

         log.debug(f"Assigned {self} to {player}")

-    async def get_alignment(self, source=None):
+    async def get_alignment(self, source=None):  # TODO: Rework to be "strength" tiers
         """
         Interaction for powerful access of alignment
         (Village, Werewolf, Other)
diff --git a/werewolf/werewolf.py b/werewolf/werewolf.py
index bd68a6f..a4083a9 100644
--- a/werewolf/werewolf.py
+++ b/werewolf/werewolf.py
@@ -1,11 +1,10 @@
 import logging
-from typing import List, Union
+from typing import Optional

 import discord
 from redbot.core import Config, checks, commands
 from redbot.core.bot import Red
 from redbot.core.commands import Cog
-from redbot.core.utils import AsyncIter
 from redbot.core.utils.menus import DEFAULT_CONTROLS, menu

 from werewolf.builder import (
@@ -15,19 +14,11 @@ from werewolf.builder import (
     role_from_id,
     role_from_name,
 )
-from werewolf.game import Game
+from werewolf.game import Game, anyone_has_role

 log = logging.getLogger("red.fox_v3.werewolf")


-async def anyone_has_role(
-    member_list: List[discord.Member], role: discord.Role
-) -> Union[None, discord.Member]:
-    return await AsyncIter(member_list).find(
-        lambda m: AsyncIter(m.roles).find(lambda r: r.id == role.id)
-    )
-
-
 class Werewolf(Cog):
     """
     Base to host werewolf on a guild
@@ -56,17 +47,19 @@ class Werewolf(Cog):
         """Nothing to delete"""
         return

-    def __unload(self):
+    def cog_unload(self):
         log.debug("Unload called")
-        for game in self.games.values():
-            del game
+        for key in list(self.games.keys()):
+            del self.games[key]

     @commands.command()
     async def buildgame(self, ctx: commands.Context):
         """
         Create game codes to run custom games.

-        Pick the roles or randomized roles you want to include in a game
+        Pick the roles or randomized roles you want to include in a game.
+
+        Note: The same role can be picked more than once.
         """
         gb = GameBuilder()
         code = await gb.build_game(ctx)
@@ -92,9 +85,6 @@ class Werewolf(Cog):
         Lists current guild settings
         """
         valid, role, category, channel, log_channel = await self._get_settings(ctx)
-        # if not valid:
-        #     await ctx.send("Failed to get settings")
-        #     return None

         embed = discord.Embed(
             title="Current Guild Settings",
@@ -263,6 +253,7 @@ class Werewolf(Cog):
         game = await self._get_game(ctx)
         if not game:
             await ctx.maybe_send_embed("No game running, cannot start")
+            return

         if not await game.setup(ctx):
             pass  # ToDo something?
@@ -285,7 +276,8 @@ class Werewolf(Cog):
         game = await self._get_game(ctx)

         game.game_over = True
-        game.current_action.cancel()
+        if game.current_action:
+            game.current_action.cancel()
         await ctx.maybe_send_embed("Game has been stopped")

     @commands.guild_only()
@@ -399,7 +391,7 @@ class Werewolf(Cog):
         else:
             await ctx.maybe_send_embed("Role ID not found")

-    async def _get_game(self, ctx: commands.Context, game_code=None) -> Union[Game, None]:
+    async def _get_game(self, ctx: commands.Context, game_code=None) -> Optional[Game]:
         guild: discord.Guild = getattr(ctx, "guild", None)

         if guild is None:
@@ -426,7 +418,7 @@ class Werewolf(Cog):

         return self.games[guild.id]

-    async def _game_start(self, game):
+    async def _game_start(self, game: Game):
         await game.start()

     async def _get_settings(self, ctx):