Turn on gpt again
All checks were successful
Build and push / changes (push) Successful in 51s
Build and push / Lint-Python (push) Successful in 4s
Build and push / Build-and-Push-Docker (push) Successful in 2m47s
Build and push / sync-argocd-app (push) Successful in 3s
Build and push / post-status-to-discord (push) Successful in 5s

This commit is contained in:
Luke R 2025-01-28 11:26:25 -08:00
parent f0ad6c2c4f
commit 06b5b1fc06

View File

@ -118,64 +118,66 @@ async def fix_social_media_links(ctx):
return return
@bot.event
async def on_message(ctx):
    """Answer with the local LLM whenever the bot is mentioned in a message.

    ctx is presumably a discord Message object (reads ctx.content,
    ctx.author, ctx.guild; replies into the originating channel) —
    TODO confirm against the discord library in use.
    """
    # Only react when the bot's own user id appears in the message body.
    if str(bot.user.id) in ctx.content:
        # if ctx.author.id == core_utils.my_id:
        #     pass
        # elif ctx.guild.id not in core_utils.my_guilds:
        #     return
        # Remotely-managed rules: system prompt, model name, stop tokens,
        # and the list of users banned from this feature.
        llm_rules = requests.get(
            core_utils.json_endpoint + "rules.json", timeout=30
        ).json()
        if ctx.author.id in llm_rules["disallowed_users"]:
            responses = [
                "You cant do that right now",
                "You cant use this feature right now",
                "You're on time out from this",
            ]
            await ctx.respond(random.choice(responses))
            return
        # Ollama-style chat endpoint on the "ein" host.
        url = f"http://{core_utils.ein_ip}:7869/api/chat"
        instructions = llm_rules["prompt"]
        payload = {
            "messages": [
                {
                    "role": "system",
                    "content": instructions,
                },
                {
                    # Strip the bot mention out of the user's prompt text.
                    "content": ctx.content.replace(str(bot.user.id), "").replace(
                        "<@> ", ""
                    ),
                    "role": "user",
                },
            ],
            "options": {"num_ctx": 1999},
            "model": llm_rules["model"],
            "stream": False,
            "stop": llm_rules["stop_tokens"],
            # "max_tokens": 4096,
            # "frequency_penalty": 0,
            # "presence_penalty": 0,
            # "temperature": 0.7,
            # "top_p": 0.95,
        }
        headers = {"Content-Type": "application/json"}
        try:
            await ctx.channel.trigger_typing()
            # timeout so a hung LLM host cannot block the handler forever.
            response = requests.post(url, json=payload, headers=headers, timeout=300)
            answer = response.json()["message"]["content"]
            # Discord caps a message at 2000 characters; split long answers
            # and scrub the model's end-of-sequence token from the text.
            if len(answer) > 2000:
                await ctx.reply(answer[:2000].replace("<end▁of▁sentence>", ""))
                await ctx.reply(answer[2000:].replace("<end▁of▁sentence>", ""))
            else:
                await ctx.reply(answer.replace("<end▁of▁sentence>", ""))
        except Exception as e:
            # Best-effort fallback: log the error and tell the user, rather
            # than letting the event handler die silently.
            print(e)
            await ctx.reply("Somethings wrong, maybe the LLM crashed")
# Start the Discord client; the token comes from the environment so it is
# never committed to the repository.
bot.run(os.getenv("discord_token"))