I'm dumb, don't make 3 requests for the same JSON file
parent b91e6a7ca5
commit bd0d576c7e
@@ -157,8 +157,9 @@ async def on_message(ctx):
     llm_rule_endpoint = (
         "http://dragon-bot-json.dragon-bot.svc.cluster.local/rules.json"
     )
+    llm_rules = requests.get(llm_rule_endpoint).json()
 
-    if ctx.author.id in requests.get(llm_rule_endpoint).json()["disallowed_users"]:
+    if ctx.author.id in llm_rules["disallowed_users"]:
         responses = [
             "You cant do that right now",
             "You cant use this feature right now",
@@ -168,7 +169,7 @@ async def on_message(ctx):
             return
 
     url = "http://192.168.1.137:1337/v1/chat/completions"
-    instructions = requests.get(llm_rule_endpoint).json()["prompt"]
+    instructions = llm_rules["prompt"]
 
     payload = {
         "messages": [
@@ -183,7 +184,7 @@ async def on_message(ctx):
                 "role": "user",
             },
         ],
-        "model": requests.get(llm_rule_endpoint).json()["model"],
+        "model": llm_rules["model"],
         "stream": False,
         "max_tokens": 4096,
         "stop": ["goon"],
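
For context, the fix boils down to fetching rules.json once and reusing the parsed dict for every field the handler needs. Below is a minimal sketch of that pattern; the fetch_llm_rules helper, the timeout, and the raise_for_status() call are illustrative additions and not part of the bot's actual code.

import requests

LLM_RULE_ENDPOINT = (
    "http://dragon-bot-json.dragon-bot.svc.cluster.local/rules.json"
)

def fetch_llm_rules(endpoint: str = LLM_RULE_ENDPOINT) -> dict:
    # One GET, parsed once; callers reuse the returned dict instead of
    # re-requesting the same JSON for each field they need.
    response = requests.get(endpoint, timeout=5)
    response.raise_for_status()
    return response.json()

llm_rules = fetch_llm_rules()
disallowed_users = llm_rules["disallowed_users"]  # permission check
instructions = llm_rules["prompt"]                # system prompt for the LLM call
model = llm_rules["model"]                        # model name sent in the payload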