diff --git a/app/bot.py b/app/bot.py
index 413f9070..7acd1435 100755
--- a/app/bot.py
+++ b/app/bot.py
@@ -119,73 +119,4 @@ async def fix_social_media_links(ctx):
return
-def remove_between(text, string1, string2):
- start_index = text.find(string1)
- end_index = text.find(string2, start_index + len(string1))
-
- if start_index != -1 and end_index != -1:
- return text[:start_index] + text[end_index + len(string2) :]
- else:
- return text # Return original text if delimiters not found
-
-
-@bot.event
-async def on_message(ctx):
- if str(bot.user.id) in ctx.content:
- # if ctx.author.id == core_utils.my_id:
- # pass
- # elif ctx.guild.id not in core_utils.my_guilds:
- # return
-
- llm_rules = requests.get(core_utils.json_endpoint + "rules.json").json()
-
- if ctx.author.id in llm_rules["disallowed_users"]:
- responses = [
- "You cant do that right now",
- "You cant use this feature right now",
- "You're on time out from this",
- ]
- await ctx.respond(random.choice(responses))
- return
-
- url = f"http://{core_utils.ein_ip}:7869/api/chat"
- instructions = llm_rules["prompt"]
-
- payload = {
- "messages": [
- {
- "role": "system",
- "content": instructions,
- },
- {
- "content": ctx.content.replace(str(bot.user.id), "").replace(
- "<@> ", ""
- ),
- "role": "user",
- },
- ],
- "options": {"num_ctx": 1999},
- "model": llm_rules["model"],
- "stream": False,
- "stop": llm_rules["stop_tokens"],
- # "max_tokens": 4096,
- # "frequency_penalty": 0,
- # "presence_penalty": 0,
- # "temperature": 0.7,
- # "top_p": 0.95,
- }
- headers = {"Content-Type": "application/json"}
-
- try:
- await ctx.channel.trigger_typing()
- client = httpx.AsyncClient()
- response = await client.post(url, json=payload, headers=headers)
- answer = response.json()["message"]["content"]
-
- await ctx.reply(remove_between(answer, "", ""))
- except Exception as e:
- print(e)
- await ctx.reply("Somethings wrong, maybe the LLM crashed")
-
-
bot.run(os.getenv("discord_token"))
diff --git a/app/cogs/actual_utils.py b/app/cogs/actual_utils.py
index 18dcaf0d..3aa20260 100755
--- a/app/cogs/actual_utils.py
+++ b/app/cogs/actual_utils.py
@@ -322,6 +322,30 @@ class ActualUtils(commands.Cog):
for res in results:
await ctx.respond(embed=res)
+ @commands.slash_command(
+        guild_ids=None,
+ name="llm",
+ description="Send a question to the LLM",
+ contexts={
+ discord.InteractionContextType.guild,
+ discord.InteractionContextType.bot_dm,
+ discord.InteractionContextType.private_channel,
+ },
+ integration_types={
+ discord.IntegrationType.guild_install,
+ discord.IntegrationType.user_install,
+ },
+ )
+ @option(
+        name="question", description="The question to send to the LLM", required=True
+ )
+ async def send_to_llm(self, ctx, question: str):
+ import core_utils
+
+ await ctx.channel.trigger_typing()
+ await ctx.defer()
+ await ctx.send_followup(await core_utils.send_to_llm(ctx, question))
+
def setup(bot):
bot.add_cog(ActualUtils(bot))
diff --git a/app/core b/app/core
deleted file mode 100755
index e69de29b..00000000
diff --git a/app/core_utils.py b/app/core_utils.py
index 1e7032d5..6abd5097 100755
--- a/app/core_utils.py
+++ b/app/core_utils.py
@@ -2,6 +2,8 @@ import requests
 import discord
 import os
 import json
+import httpx
+import random
 
 my_guilds = [826547484632678450, 152921472304676865]
 my_id = 144986109804412928
@@ -12,6 +14,16 @@ if os.getenv("DRAGON_ENV") == "prod":
     json_endpoint = "http://dragon-bot-json.dragon-bot.svc.cluster.local/"
+def remove_between(text, string1, string2):
+ start_index = text.find(string1)
+ end_index = text.find(string2, start_index + len(string1))
+
+ if start_index != -1 and end_index != -1:
+ return text[:start_index] + text[end_index + len(string2) :]
+ else:
+ return text # Return original text if delimiters not found
+
+
def download_image(url, path=None):
request = requests.get(url)
suffix_list = [
@@ -120,3 +131,52 @@ def write_incident_file(file_path, url, details):
}
with open(file_path, "w") as out_file:
out_file.write(json.dumps(info_dict))
+
+
+async def send_to_llm(ctx, message):
+ llm_rules = requests.get(json_endpoint + "rules.json").json()
+
+ if ctx.author.id in llm_rules["disallowed_users"]:
+ responses = [
+ "You cant do that right now",
+ "You cant use this feature right now",
+ "You're on time out from this",
+ ]
+ await ctx.respond(random.choice(responses))
+ return
+
+ url = f"http://{ein_ip}:7869/api/chat"
+ instructions = llm_rules["prompt"]
+
+ payload = {
+ "messages": [
+ {
+ "role": "system",
+ "content": instructions,
+ },
+ {
+ "content": message,
+ "role": "user",
+ },
+ ],
+ "options": {"num_ctx": 1999},
+ "model": llm_rules["model"],
+ "stream": False,
+ "stop": llm_rules["stop_tokens"],
+ # "max_tokens": 4096,
+ # "frequency_penalty": 0,
+ # "presence_penalty": 0,
+ # "temperature": 0.7,
+ # "top_p": 0.95,
+ }
+ headers = {"Content-Type": "application/json"}
+
+    try:
+        # async with ensures the client (and its connection pool) is closed
+        async with httpx.AsyncClient() as client:
+            response = await client.post(url, json=payload, headers=headers)
+
+ return remove_between(answer, "", "")
+ except Exception as e:
+ print(e)
+ return "Somethings wrong, maybe the LLM crashed"