diff --git a/cogs/commands/summarise.py b/cogs/commands/summarise.py
index c80edf1..ca060a7 100644
--- a/cogs/commands/summarise.py
+++ b/cogs/commands/summarise.py
@@ -1,5 +1,7 @@
 import contextlib
 import logging
+from datetime import datetime, timedelta, timezone
+from math import ceil, exp
 from typing import Optional
 
 import openai
@@ -20,6 +22,10 @@
 mentions = AllowedMentions(everyone=False, users=False, roles=False, replied_user=True)
 model = "gpt-4o-mini"
 
+# weights/coefficients for sigmoid function
+a = 750
+b = 7
+c = 400
 
 
 def clean(msg, *prefixes):
@@ -31,14 +37,11 @@ def clean(msg, *prefixes):
 class Summarise(commands.Cog):
     def __init__(self, bot: Bot):
         self.bot = bot
+        self.cooldowns = {}
         openai.api_key = CONFIG.OPENAI_API_KEY
 
-    def build_prompt(self, bullet_points, channel_name):
-        bullet_points = "Put it in bullet points for readability." if bullet_points else ""
-        prompt = f"""People yap too much, I don't want to read all of it. The topic is something related to {channel_name}. In 2 sentences or less give me the gist of what is being said. 
-        {bullet_points} Note that the messages are in reverse chronological order: 
-        """
-        return prompt
+
 
     def optional_context_manager(self, use: bool, cm: callable):
         if use:
@@ -46,11 +49,14 @@ def optional_context_manager(self, use: bool, cm: callable):
         return contextlib.nullcontext()
 
-    @commands.cooldown(CONFIG.SUMMARISE_LIMIT, CONFIG.SUMMARISE_COOLDOWN * 60, commands.BucketType.channel)
+    @commands.hybrid_command(help=LONG_HELP_TEXT, brief=SHORT_HELP_TEXT)
     async def tldr(
        self, ctx: Context, number_of_messages: int = 100, bullet_point_output: bool = False, private_view: bool = False):
-        number_of_messages = 400 if number_of_messages > 400 else number_of_messages
+        if await self.in_cooldown(ctx):
+            return
+
+        number_of_messages = CONFIG.SUMMARISE_MESSAGE_LIMIT if number_of_messages > CONFIG.SUMMARISE_MESSAGE_LIMIT else number_of_messages
 
         # avoid banned users
         if not await is_author_banned_openai(ctx):
@@ -58,9 +64,10 @@ async def tldr(
             return
 
         # get the last "number_of_messages" messages from the current channel and build the prompt
-        prompt = self.build_prompt(bullet_point_output, ctx.channel)
+        prompt = self.build_prompt(bullet_point_output, ctx.channel, self.sigmoid(number_of_messages))
+
         messages = ctx.channel.history(limit=number_of_messages)
-        messages = await self.create_message(messages, prompt)
+        messages = await self.create_message(messages, prompt, ctx)
 
         # send the prompt to the ai overlords to process
         async with self.optional_context_manager(not private_view, ctx.typing):
@@ -71,6 +78,28 @@ async def tldr(
             prev = await prev.reply(content, allowed_mentions=mentions, ephemeral=private_view)
 
+
+    async def in_cooldown(self, ctx):
+        now = datetime.now(timezone.utc)
+        # channel based cooldown
+        if self.cooldowns.get(ctx.channel.id):
+            # check that message limit hasn't been reached
+            if CONFIG.SUMMARISE_LIMIT <= self.cooldowns[ctx.channel.id][1]:
+
+                message_time = self.cooldowns[ctx.channel.id][0]
+                cutoff = message_time + timedelta(minutes=CONFIG.SUMMARISE_COOLDOWN)
+                # check that message time + cooldown time period is still in the future
+                if now < cutoff:
+                    await ctx.reply("STFU!! Wait " + str(int((cutoff - now).total_seconds())) + " Seconds. You are on Cool Down." )
+                    return True
+                else:
+                    self.cooldowns[ctx.channel.id] = [now, 1] # reset the cooldown
+            else:
+                self.cooldowns[ctx.channel.id][1]+=1
+        else:
+            self.cooldowns[ctx.channel.id] = [now, 1]
+        return False
+
     async def dispatch_api(self, messages) -> Optional[str]:
         logging.info(f"Making OpenAI request: {messages}")
@@ -85,19 +114,30 @@ async def dispatch_api(self, messages) -> Optional[str]:
         reply = clean(reply, "Apollo: ", "apollo: ", name)
         return reply
 
-    async def create_message(self, message_chain, prompt):
+    async def create_message(self, message_chain, prompt, ctx):
         # get initial prompt
         initial = prompt + "\n"
 
         # for each message, append it to the prompt as follows --- author : message \n
+        message_length = 0
         async for msg in message_chain:
             if CONFIG.AI_INCLUDE_NAMES and msg.author != self.bot.user:
-                initial += msg.author.name + ":" + msg.content + "\n"
-
+                message_length += len(msg.content.split())
+                initial += msg.author.name + ": " + msg.content + "\n"
         messages = [dict(role="system", content=initial)]
         return messages
 
+    def build_prompt(self, bullet_points, channel_name, response_size):
+
+        bullet_points = "Put it in bullet points for readability." if bullet_points else ""
+        prompt = f"""People yap too much, I don't want to read all of it. The topic is related to {channel_name}. In {response_size} words or less give me the gist of what is being said. 
+        {bullet_points} Note that the messages are in reverse chronological order: 
+        """
+        return prompt
+
+    def sigmoid(self, x):
+        return int(ceil(c / (1 + b * exp((-x)/ a))))
+
 
 async def setup(bot: Bot):
     await bot.add_cog(Summarise(bot))
diff --git a/config.example.yaml b/config.example.yaml
index 9b2e906..3dc4c5e 100644
--- a/config.example.yaml
+++ b/config.example.yaml
@@ -35,10 +35,12 @@ config:
   portainer_api_key: portainer
   # Liege Chancellor User ID
   liege_chancellor_id: 1234
-  # Summarise Use Limit
+  # Summarise/TLDR Command Usage Limit
   summarise_limit: 3
-  # Summarise Cooldown Period (minutes)
+  # Summarise/TLDR Command Cooldown Period (minutes)
   summarise_cooldown: 10
+  # Summarise/TLDR Command Message Limit
+  summarise_message_limit : 400
 
   # whether to load general.txt and markov chains
   markov_enabled: False
diff --git a/config/config.py b/config/config.py
index 64cc34a..6ccd225 100644
--- a/config/config.py
+++ b/config/config.py
@@ -29,6 +29,7 @@ def __init__(self, filepath: str):
         self.LIEGE_CHANCELLOR_ID: int = parsed.get("liege_chancellor_id")
         self.SUMMARISE_LIMIT: int = parsed.get("summarise_limit")
         self.SUMMARISE_COOLDOWN: int = parsed.get("summarise_cooldown")
+        self.SUMMARISE_MESSAGE_LIMIT: int = parsed.get("summarise_message_limit")
         self.MARKOV_ENABLED: bool = parsed.get("markov_enabled")
 
         # Configuration