adding openai. might as well use it till we lose it
parent 0c49cc2522
commit dee0adc42f

@@ -47,6 +47,22 @@ class ActualUtils(commands.Cog):
         await ctx.message.delete()
         os.remove(file_path)
 
+    @commands.command(name="ask", aliases=["wolfram"])
+    async def ask(self, ctx: commands.Context):
+        import questions
+
+        await ctx.reply(
+            questions.answer_question(ctx.message.content),
+        )
+
+    @commands.command(name="openai")
+    async def openai(self, ctx: commands.Context):
+        import questions
+
+        await ctx.reply(
+            questions.open_ai(ctx.message.content),
+        )
+
     @commands.command(name="trackdays")
     async def trackdays(self, ctx: commands.Context):
         role = discord.utils.find(

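As an orientation note (not part of the diff): both new handlers pass ctx.message.content to the questions helpers, which in pycord is the full raw message, prefix and command name included. A minimal way to smoke-test the helpers outside Discord is to call them directly — a sketch, assuming OPENAI_API_KEY (and whatever token answer_question needs, presumably Wolfram given the alias) are set in the environment:

# Hypothetical smoke test for the helpers the new cog commands delegate to.
import questions

# What the "ask" command would reply with.
print(questions.answer_question("!ask integrate x^2"))

# What the new "openai" command would reply with (text-davinci-002 completion).
print(questions.open_ai("!openai write a haiku about trackdays"))
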
@@ -114,14 +114,6 @@ class Cheeky(commands.Cog):
 
         await ctx.reply(excuse.get_excuse())
 
-    @commands.command(name="ask", aliases=["wolfram"])
-    async def ask(self, ctx: commands.Context):
-        import questions
-
-        await ctx.reply(
-            questions.answer_question(ctx.message.content),
-        )
-
     @commands.command(name="meme")
     async def meme(self, ctx: commands.Context):
 

@@ -1,37 +0,0 @@
-from discord.ext import commands
-import discord
-import markovify
-import random
-
-
-class Markov(commands.Cog):
-    def __init__(self, bot):
-        self.bot: commands.Bot = bot
-
-    @commands.command(name="markov")
-    async def markov(self, ctx: commands.Context, user: discord.Member):
-
-        temp_message = await ctx.send("Just a moment, generating a markov chain")
-
-        # Get messages from passed in user
-        authors_mesages = []
-        for message in await ctx.history(limit=15000).flatten():
-            if message.author.id == user.id and "https" not in message.content:
-                authors_mesages.append(message.content)
-
-        # Make the model
-        user_model = markovify.Text(". ".join(authors_mesages))
-        model_json = user_model.to_json()
-
-        dummy = []
-        for i in range(random.randint(3, 9)):
-            dummy.append(str(user_model.make_sentence(max_words=25, tries=500)))
-        await temp_message.edit(
-            "Heres a markov chain based on %s's shitposts" % user.mention
-            + "\n\n"
-            + " ".join(dummy)
-        )
-
-
-def setup(bot):
-    bot.add_cog(Markov(bot))

@@ -26,3 +26,22 @@ def answer_question(message):
         return "Sorry, I'm unable to answer that"
 
     return help_methods.get_help_message("message")
+
+
+def open_ai(prompt):
+    import os
+    import openai
+
+    openai.api_key = os.getenv("OPENAI_API_KEY")
+
+    response = openai.Completion.create(
+        model="text-davinci-002",
+        prompt=prompt,
+        temperature=0.7,
+        max_tokens=256,
+        top_p=1,
+        frequency_penalty=0,
+        presence_penalty=0,
+    )["choices"][0]["text"]
+
+    return response

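For reference, this is the same Completion call the new open_ai() helper makes, runnable on its own against an openai SDK version that still ships openai.Completion (pre-1.0); the prompt string here is only an illustration:

import os
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")

# Single text-davinci-002 completion; open_ai() returns the first choice's text.
completion = openai.Completion.create(
    model="text-davinci-002",
    prompt="Say something nice about this Discord server.",
    temperature=0.7,
    max_tokens=256,
    top_p=1,
    frequency_penalty=0,
    presence_penalty=0,
)
print(completion["choices"][0]["text"].strip())
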
@@ -9,4 +9,4 @@ git+https://github.com/pycord-development/pycord@8df222d86319dd16a5e559585246343
 requests
 wikipedia
 wolframalpha
-markovify
+openai

@@ -87,3 +87,4 @@ secrets:
   ffxiv_token: ffxiv_token
   gitlab_token: gitlab_token
   wolfram_token: wolfram_token
+  OPENAI_API_KEY: OPENAI_API_KEY

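Since questions.open_ai() reads the key with os.getenv, this secret only helps if it actually surfaces as an environment variable in the bot's runtime. A one-line check, assuming the deployment maps secrets to env vars:

import os
assert os.getenv("OPENAI_API_KEY"), "OPENAI_API_KEY is not visible to the bot process"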