68 lines
1.8 KiB
Python
Executable File
68 lines
1.8 KiB
Python
Executable File
import discord
|
|
from discord import option
|
|
from discord.ext import commands
|
|
|
|
|
|
class Gpt(commands.Cog):
    """Cog exposing a ``/gpt`` slash command that forwards a question to a
    hosted Alpaca-LoRA model (Hugging Face Space) and replies with an embed."""

    def __init__(self, bot):
        self.bot: commands.Bot = bot

    @commands.slash_command(
        # Fixed typo: was "guld_ids", which py-cord would never recognize.
        guild_ids=None,
        name="gpt",
        description="Talk to an LLM",
    )
    @option(name="question", description="The question to ask", required=True)
    @option(
        name="temperature", description="I dunno", min_value=0, max_value=1, default=0.1
    )
    @option(name="top_p", description="I dunno", min_value=0, max_value=1, default=0.75)
    @option(name="top_k", description="I dunno", min_value=0, max_value=100, default=40)
    @option(name="beams", description="I dunno", min_value=0, max_value=4, default=4)
    @option(
        name="tokens", description="I dunno", min_value=1, max_value=512, default=128
    )
    async def gpt(
        self,
        ctx,
        question: str,
        # Was annotated `int`: a 0..1 integer option can only be 0 or 1 and
        # clashes with the float defaults (0.1 / 0.75) declared above. The
        # body already coerces via float(), confirming float intent.
        temperature: float,
        top_p: float,
        top_k: int,
        beams: int,
        tokens: int,
    ):
        """Query the remote model with *question* and post the answer.

        Defers the interaction first because the HTTP round-trip easily
        exceeds Discord's 3-second initial-response window.
        """
        # Local import keeps the cog importable even if requests is absent
        # until the command is actually invoked.
        import requests

        await ctx.defer()
        response = requests.post(
            "https://tloen-alpaca-lora.hf.space/run/predict",
            json={
                "data": [
                    question,
                    "",  # the Alpaca "input" field; unused by this command
                    float(temperature),
                    float(top_p),
                    float(top_k),
                    int(beams),
                    int(tokens),
                ]
            },
            # Without a timeout a stuck endpoint would hang the command forever.
            timeout=120,
        ).json()

        data = response["data"][0]

        embed = discord.Embed(
            description=data,
            color=discord.Color.green(),
            type="rich",
        )

        embed.set_author(name="You asked me: %s" % question)

        await ctx.followup.send(embed=embed)
|
|
|
|
|
|
def setup(bot):
    """Extension entry point called by py-cord's ``load_extension``."""
    cog = Gpt(bot)
    bot.add_cog(cog)
|