diff --git a/app/cogs/actual_utils.py b/app/cogs/actual_utils.py
index e8effd64..60c76467 100644
--- a/app/cogs/actual_utils.py
+++ b/app/cogs/actual_utils.py
@@ -83,12 +83,15 @@ class ActualUtils(commands.Cog):
         description="The query you want to pass to wolfram alpha",
     )
     async def wolfram(self, ctx, query):
-        import questions
+        import wolframalpha
+        client = wolframalpha.Client(os.getenv("wolfram_token"))
 
         await ctx.defer()
-        await ctx.send_followup(
-            questions.answer_question(query),
-        )
+        try:
+            res = client.query(query)
+            return await ctx.send_followup(next(res.results).text)
+        except Exception:
+            return await ctx.send_followup("Sorry, I'm unable to answer that")
 
     @commands.has_role("Track day gamers")
     @commands.slash_command(
diff --git a/app/questions.py b/app/questions.py
deleted file mode 100755
index 9abf10d9..00000000
--- a/app/questions.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import os
-import wolframalpha
-
-import help_methods
-
-
-def answer_question(message):
-    """
-    answer_question(message)
-
-    Submits a request to the wolfram API and returns the response
-    If no answer is found, tries wikipedia. If that fails, apologizes
-    """
-
-    client = wolframalpha.Client(os.getenv("wolfram_token"))
-    try:
-        res = client.query(message)
-        return next(res.results).text
-    except Exception:
-        return "Sorry, I'm unable to answer that"
-
-    return help_methods.get_help_message("message")
-
-
-def open_ai(prompt):
-    import os
-    import openai
-
-    openai.api_key = os.getenv("OPENAI_API_KEY")
-
-    response = openai.Completion.create(
-        model="text-ada-001",
-        prompt=prompt,
-        temperature=0.7,
-        max_tokens=256,
-        top_p=1,
-        frequency_penalty=0,
-        presence_penalty=0,
-    )["choices"][0]["text"]
-
-    return response