Turn on gpt again

Luke Robles 2025-01-28 11:26:25 -08:00
parent b6bb9691dc
commit e0505e8815

@@ -118,64 +118,66 @@ async def fix_social_media_links(ctx):
         return
 
 
-# @bot.event
-# async def on_message(ctx):
-#     if str(bot.user.id) in ctx.content:
-#         # if ctx.author.id == core_utils.my_id:
-#         #     pass
-#         # elif ctx.guild.id not in core_utils.my_guilds:
-#         #     return
+@bot.event
+async def on_message(ctx):
+    if str(bot.user.id) in ctx.content:
+        # if ctx.author.id == core_utils.my_id:
+        #     pass
+        # elif ctx.guild.id not in core_utils.my_guilds:
+        #     return
 
-#         llm_rules = requests.get(core_utils.json_endpoint + "rules.json").json()
+        llm_rules = requests.get(core_utils.json_endpoint + "rules.json").json()
 
-#         if ctx.author.id in llm_rules["disallowed_users"]:
-#             responses = [
-#                 "You cant do that right now",
-#                 "You cant use this feature right now",
-#                 "You're on time out from this",
-#             ]
-#             await ctx.respond(random.choice(responses))
-#             return
+        if ctx.author.id in llm_rules["disallowed_users"]:
+            responses = [
+                "You cant do that right now",
+                "You cant use this feature right now",
+                "You're on time out from this",
+            ]
+            await ctx.respond(random.choice(responses))
+            return
 
-#         url = f"http://{core_utils.ein_ip}:1337/v1/chat/completions"
-#         instructions = llm_rules["prompt"]
+        url = f"http://{core_utils.ein_ip}:7869/api/chat"
+        instructions = llm_rules["prompt"]
 
-#         payload = {
-#             "messages": [
-#                 {
-#                     "content": instructions,
-#                     "role": "system",
-#                 },
-#                 {
-#                     "content": ctx.content.replace(str(bot.user.id), "").replace(
-#                         "<@> ", ""
-#                     ),
-#                     "role": "user",
-#                 },
-#             ],
-#             "model": llm_rules["model"],
-#             "stream": False,
-#             "max_tokens": 4096,
-#             "stop": llm_rules["stop_tokens"],
-#             "frequency_penalty": 0,
-#             "presence_penalty": 0,
-#             "temperature": 0.7,
-#             "top_p": 0.95,
-#         }
-#         headers = {"Content-Type": "application/json"}
+        payload = {
+            "messages": [
+                {
+                    "role": "system",
+                    "content": instructions,
+                },
+                {
+                    "content": ctx.content.replace(str(bot.user.id), "").replace(
+                        "<@> ", ""
+                    ),
+                    "role": "user",
+                },
+            ],
+            "options": {"num_ctx": 1999},
+            "model": llm_rules["model"],
+            "stream": False,
+            "stop": llm_rules["stop_tokens"],
+            # "max_tokens": 4096,
+            # "frequency_penalty": 0,
+            # "presence_penalty": 0,
+            # "temperature": 0.7,
+            # "top_p": 0.95,
+        }
+        headers = {"Content-Type": "application/json"}
 
-#         try:
-#             await ctx.channel.trigger_typing()
-#             response = requests.post(url, json=payload, headers=headers)
-#             answer = response.json()["choices"][0]["message"]["content"]
+        try:
+            await ctx.channel.trigger_typing()
+            response = requests.post(url, json=payload, headers=headers)
+            answer = response.json()["message"]["content"]
 
-#             if len(answer) > 2000:
-#                 await ctx.reply(answer[:2000].replace("<|end_of_turn|>", ""))
-#                 await ctx.reply(answer[2000:].replace("<|end_of_turn|>", ""))
-#             else:
-#                 await ctx.reply(answer.replace("<|end_of_turn|>", ""))
-#         except KeyError:
-#             await ctx.reply("Somethings wrong, maybe the LLM crashed")
+            if len(answer) > 2000:
+                await ctx.reply(answer[:2000].replace("<end▁of▁sentence>", ""))
+                await ctx.reply(answer[2000:].replace("<end▁of▁sentence>", ""))
+            else:
+                await ctx.reply(answer.replace("<end▁of▁sentence>", ""))
+        except Exception as e:
+            print(e)
+            await ctx.reply("Somethings wrong, maybe the LLM crashed")
 
 
 bot.run(os.getenv("discord_token"))
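
A note for readers of the handler: all of its behavior is driven by the remote rules.json it fetches on every mention. Judging only from the keys the diff references, that file presumably parses into something like the Python dict below; every value is a placeholder, not the real configuration.

llm_rules = {
    "prompt": "system prompt handed to the model",
    "model": "model-name-served-by-the-backend",
    "stop_tokens": ["<end▁of▁sentence>"],
    "disallowed_users": [123456789012345678],  # Discord user IDs to time out
}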
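The substance of the commit is the backend swap: the commented-out code posted to an OpenAI-compatible /v1/chat/completions route on port 1337, while the live code posts to /api/chat on port 7869, which matches Ollama's chat endpoint (port 7869 would then be a remapping of Ollama's default 11434; that is an inference, not something the diff states). The response JSON differs accordingly, which is why the parsing changed. A minimal sketch of the two parse paths, assuming a reachable server and a placeholder host:

import requests

host = "localhost"  # placeholder; the bot builds the URL from core_utils.ein_ip

payload = {
    "model": "llama3",  # placeholder; the bot takes this from rules.json
    "messages": [{"role": "user", "content": "hello"}],
    "stream": False,
    "options": {"num_ctx": 1999},  # Ollama reads model parameters from "options"
}

# New path: Ollama returns one top-level message object.
resp = requests.post(f"http://{host}:7869/api/chat", json=payload, timeout=60)
answer = resp.json()["message"]["content"]

# Old path: an OpenAI-compatible server nests the reply under "choices".
# answer = resp.json()["choices"][0]["message"]["content"]

One thing worth double-checking in the new payload: Ollama reads stop sequences from "options" as well, so the top-level "stop" key carried over from the OpenAI-style payload is likely ignored by the server.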
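A smaller caveat in the reply handling: Discord caps a message at 2000 characters, which is what the answer[:2000] / answer[2000:] split works around, and the <end▁of▁sentence> string being stripped resembles a model-specific end-of-sequence token (DeepSeek-style, with U+2581 separators). The two-part split still breaks for replies over 4000 characters, since the second slice can itself exceed the cap. A generic chunker would cover any length; chunk_message is a hypothetical helper, not part of this commit:

def chunk_message(text: str, limit: int = 2000):
    # Yield successive slices, each within Discord's 2000-character limit.
    for start in range(0, len(text), limit):
        yield text[start : start + limit]

# Inside the handler, this would replace the if/else, roughly:
#     for part in chunk_message(answer.replace("<end▁of▁sentence>", "")):
#         await ctx.reply(part)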