Move the LLM to a slash command
All checks were successful
Build and push / changes (push) Successful in 3s
Build and push / Lint-Python (push) Successful in 1s
Build and push / Build-and-Push-Docker (push) Successful in 2m20s
Build and push / post-status-to-discord (push) Successful in 1s
Build and push / sync-argocd-app (push) Successful in 2s
parent 6caff3c076
commit ea268b3c23
app/bot.py (69 changes)
@@ -119,73 +119,4 @@ async def fix_social_media_links(ctx):
         return
 
 
-def remove_between(text, string1, string2):
-    start_index = text.find(string1)
-    end_index = text.find(string2, start_index + len(string1))
-
-    if start_index != -1 and end_index != -1:
-        return text[:start_index] + text[end_index + len(string2) :]
-    else:
-        return text  # Return original text if delimiters not found
-
-
-@bot.event
-async def on_message(ctx):
-    if str(bot.user.id) in ctx.content:
-        # if ctx.author.id == core_utils.my_id:
-        #     pass
-        # elif ctx.guild.id not in core_utils.my_guilds:
-        #     return
-
-        llm_rules = requests.get(core_utils.json_endpoint + "rules.json").json()
-
-        if ctx.author.id in llm_rules["disallowed_users"]:
-            responses = [
-                "You cant do that right now",
-                "You cant use this feature right now",
-                "You're on time out from this",
-            ]
-            await ctx.respond(random.choice(responses))
-            return
-
-        url = f"http://{core_utils.ein_ip}:7869/api/chat"
-        instructions = llm_rules["prompt"]
-
-        payload = {
-            "messages": [
-                {
-                    "role": "system",
-                    "content": instructions,
-                },
-                {
-                    "content": ctx.content.replace(str(bot.user.id), "").replace(
-                        "<@> ", ""
-                    ),
-                    "role": "user",
-                },
-            ],
-            "options": {"num_ctx": 1999},
-            "model": llm_rules["model"],
-            "stream": False,
-            "stop": llm_rules["stop_tokens"],
-            # "max_tokens": 4096,
-            # "frequency_penalty": 0,
-            # "presence_penalty": 0,
-            # "temperature": 0.7,
-            # "top_p": 0.95,
-        }
-        headers = {"Content-Type": "application/json"}
-
-        try:
-            await ctx.channel.trigger_typing()
-            client = httpx.AsyncClient()
-            response = await client.post(url, json=payload, headers=headers)
-            answer = response.json()["message"]["content"]
-
-            await ctx.reply(remove_between(answer, "<think>", "</think>"))
-        except Exception as e:
-            print(e)
-            await ctx.reply("Somethings wrong, maybe the LLM crashed")
-
-
 bot.run(os.getenv("discord_token"))
@@ -322,6 +322,30 @@ class ActualUtils(commands.Cog):
         for res in results:
             await ctx.respond(embed=res)
 
+    @commands.slash_command(
+        guild_ids=None,
+        name="llm",
+        description="Send a question to the LLM",
+        contexts={
+            discord.InteractionContextType.guild,
+            discord.InteractionContextType.bot_dm,
+            discord.InteractionContextType.private_channel,
+        },
+        integration_types={
+            discord.IntegrationType.guild_install,
+            discord.IntegrationType.user_install,
+        },
+    )
+    @option(
+        name="question", description="The question to send to the LLM", required=True
+    )
+    async def send_to_llm(self, ctx, question: str):
+        import core_utils
+
+        await ctx.channel.trigger_typing()
+        await ctx.defer()
+        await ctx.send_followup(await core_utils.send_to_llm(ctx, question))
+
 
 def setup(bot):
     bot.add_cog(ActualUtils(bot))
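Note: a minimal py-cord sketch of the defer/followup pattern the new slash command relies on; bot, the command name, and slow_backend_call here are illustrative stand-ins, not part of this commit.

import asyncio

import discord

bot = discord.Bot()

async def slow_backend_call(text: str) -> str:
    await asyncio.sleep(5)  # stand-in for a slow LLM request
    return f"echo: {text}"

@bot.slash_command(name="echo", description="Defer, then follow up")
async def echo(ctx: discord.ApplicationContext, text: str):
    # defer() acknowledges the interaction within Discord's 3-second window,
    # so the slow backend call can finish before the visible reply goes out
    await ctx.defer()
    await ctx.send_followup(await slow_backend_call(text))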
@@ -2,6 +2,7 @@ import requests
 import discord
 import os
 import json
+import httpx
 
 my_guilds = [826547484632678450, 152921472304676865]
 my_id = 144986109804412928
@@ -12,6 +13,16 @@ if os.getenv("DRAGON_ENV") == "prod":
     json_endpoint = "http://dragon-bot-json.dragon-bot.svc.cluster.local/"
+
+
+def remove_between(text, string1, string2):
+    start_index = text.find(string1)
+    end_index = text.find(string2, start_index + len(string1))
+
+    if start_index != -1 and end_index != -1:
+        return text[:start_index] + text[end_index + len(string2) :]
+    else:
+        return text  # Return original text if delimiters not found
 
 
 def download_image(url, path=None):
     request = requests.get(url)
     suffix_list = [
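Note: a quick sanity check for the new helper (illustrative, not part of the commit). remove_between drops the first string1…string2 span, which the bot uses to strip a leading <think>…</think> block from LLM output, and returns the text unchanged when either delimiter is missing:

assert remove_between("a<think>hidden</think>b", "<think>", "</think>") == "ab"
assert remove_between("no tags here", "<think>", "</think>") == "no tags here"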
@@ -120,3 +131,52 @@ def write_incident_file(file_path, url, details):
     }
     with open(file_path, "w") as out_file:
         out_file.write(json.dumps(info_dict))
+
+
+async def send_to_llm(ctx, message):
+    llm_rules = requests.get(json_endpoint + "rules.json").json()
+
+    if ctx.author.id in llm_rules["disallowed_users"]:
+        responses = [
+            "You can't do that right now",
+            "You can't use this feature right now",
+            "You're on time out from this",
+        ]
+        await ctx.respond(random.choice(responses))
+        return
+
+    url = f"http://{ein_ip}:7869/api/chat"
+    instructions = llm_rules["prompt"]
+
+    payload = {
+        "messages": [
+            {
+                "role": "system",
+                "content": instructions,
+            },
+            {
+                "content": message,
+                "role": "user",
+            },
+        ],
+        "options": {"num_ctx": 1999},
+        "model": llm_rules["model"],
+        "stream": False,
+        "stop": llm_rules["stop_tokens"],
+        # "max_tokens": 4096,
+        # "frequency_penalty": 0,
+        # "presence_penalty": 0,
+        # "temperature": 0.7,
+        # "top_p": 0.95,
+    }
+    headers = {"Content-Type": "application/json"}
+
+    try:
+        client = httpx.AsyncClient()
+        response = await client.post(url, json=payload, headers=headers)
+        answer = response.json()["message"]["content"]
+
+        return remove_between(answer, "<think>", "</think>")
+    except Exception as e:
+        print(e)
+        return "Something's wrong, maybe the LLM crashed"
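Note: send_to_llm assumes an Ollama-style /api/chat endpoint: with "stream": False the reply arrives as a single JSON object with the text under message["content"]. Below is a standalone sketch of the same call, with placeholder host and model names, using a context manager so the httpx client is closed (the bare AsyncClient above is never closed):

import asyncio

import httpx

async def demo():
    # placeholder host and model; port 7869 matches the commit
    async with httpx.AsyncClient() as client:
        response = await client.post(
            "http://localhost:7869/api/chat",
            json={
                "model": "llama3",
                "stream": False,
                "messages": [{"role": "user", "content": "hello"}],
            },
        )
    # non-streaming replies carry the text at message -> content
    print(response.json()["message"]["content"])

asyncio.run(demo())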