diff --git a/Makefile b/Makefile
index ed60282c..33cc3df3 100644
--- a/Makefile
+++ b/Makefile
@@ -6,6 +6,9 @@ format:
 clean:
 	docker rm -f dragon-bot-test
 
+build-test:
+	docker build -f ./Dockerfile-test-env -t dragon-bot-test .
+
 DRAGON_ENV := "test"
-test: clean format
-	docker run -ti -v ${PWD}/app:/app --rm --name dragon-bot-test -e DRAGON_ENV=${DRAGON_ENV} -e discord_token=${discord_token} dragon-bot-test bash
\ No newline at end of file
+test: clean format build-test
+	docker run -ti -v ${PWD}/app:/app --rm --name dragon-bot-test -e DRAGON_ENV=${DRAGON_ENV} -e discord_token=${discord_token} dragon-bot-test bash
diff --git a/app/apex_legends.py b/app/apex_legends.py
index 1ae90814..8ea20e59 100755
--- a/app/apex_legends.py
+++ b/app/apex_legends.py
@@ -11,7 +11,7 @@ def get_player(player):
         "TRN-Api-Key": os.getenv("tracker_network_token"),
     }
 
-    response = requests.get(url).json()["data"]
+    response = requests.get(url, headers=headers).json()["data"]
 
     # Build the embed
     embed = discord.Embed(description="-------", color=discord.Color.red(), type="rich")
diff --git a/app/cogs/gpt.py b/app/cogs/gpt.py
index eebd9266..e957c88c 100755
--- a/app/cogs/gpt.py
+++ b/app/cogs/gpt.py
@@ -1,6 +1,9 @@
-import discord
+import openai
+import os
 from discord import option
 from discord.ext import commands
+import discord
+import requests
 
 
 class Gpt(commands.Cog):
@@ -13,52 +16,33 @@ class Gpt(commands.Cog):
         description="Talk to an LLM",
     )
     @option(name="question", description="The question to ask", required=True)
-    @option(
-        name="temperature", description="I dunno", min_value=0, max_value=1, default=0.1
-    )
-    @option(name="top_p", description="I dunno", min_value=0, max_value=1, default=0.75)
-    @option(name="top_k", description="I dunno", min_value=0, max_value=100, default=40)
-    @option(name="beams", description="I dunno", min_value=0, max_value=4, default=4)
-    @option(
-        name="tokens", description="I dunno", min_value=1, max_value=512, default=128
-    )
     async def gpt(
         self,
         ctx,
         question: str,
-        temperature: int,
-        top_p: int,
-        top_k: int,
-        beams: int,
-        tokens: int,
     ):
-        import requests
+        openai.api_key = os.getenv("OPENAI_API_KEY")
 
         await ctx.defer()
-        response = requests.post(
-            "https://tloen-alpaca-lora.hf.space/run/predict",
-            json={
-                "data": [
-                    question,
-                    "",
-                    float(temperature),
-                    float(top_p),
-                    float(top_k),
-                    int(beams),
-                    int(tokens),
-                ]
-            },
-        ).json()
-
-        data = response["data"][0]
+        completion = openai.ChatCompletion.create(
+            model="gpt-3.5-turbo",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": question},
+            ],
+        )
 
         embed = discord.Embed(
-            description=data,
+            description=completion.choices[0].message.content,
             color=discord.Color.green(),
             type="rich",
         )
-        embed.set_author(name="You asked me: %s" % question)
+        embed.set_author(
+            name="You asked me: %s" % question,
+            icon_url="https://upload.wikimedia.org/wikipedia/commons/thumb/0/04/ChatGPT_logo.svg/2048px-ChatGPT_logo.svg.png",
+        )
 
         await ctx.followup.send(embed=embed)
diff --git a/app/trackdays.py b/app/trackdays.py
index 5bf716c1..7562df07 100755
--- a/app/trackdays.py
+++ b/app/trackdays.py
@@ -47,16 +47,17 @@ async def get_msreg(track):
                 if track not in events:
                     events[track] = []
                 events[track].append(event_object)
+        for track, track_events in events.items():
+            events[track] = sorted(
+                track_events,
+                key=lambda event: datetime.fromisoformat(
+                    event["event_date"]
+                ),
+            )
+
     except TypeError:
         pass
 
-    # sort track events by date
-    def date_to_datetime(input):
-        return datetime.strptime(input["event_date"], "%Y-%m-%d")
-
-    for races in events.values():
-        sorted(races, key=date_to_datetime)
-
     return events
diff --git a/helm/values.yaml b/helm/values.yaml
index bd3db931..e27b57f4 100755
--- a/helm/values.yaml
+++ b/helm/values.yaml
@@ -83,7 +83,6 @@ env:
   PYTHONUNBUFFERED: 1
 
 secrets:
-  apex_api_key: apex_api_key
   discord_token: discord_token
   ffxiv_token: ffxiv_token
   OPENAI_API_KEY: OPENAI_API_KEY