Add support for fuckinghomepage.com. Fixes #23

Zoid 2017-09-19 17:08:51 -06:00
parent 204b14cc1e
commit 9a7d24bcbb
5 changed files with 28 additions and 6 deletions

View File

@@ -4,7 +4,7 @@ services:
 before_script:
   - apk add --no-cache python3
-  - pip3 install pylint requests discord.py docker pylint wolframalpha
+  - pip3 install pylint requests discord.py docker pylint wolframalpha beautifulsoup4
 stages:
   - test
@@ -28,4 +28,4 @@ build_and_push_container:
   only:
     - master
   tags:
-    - docker
+    - docker

View File

@@ -2,7 +2,7 @@ FROM python:3.6.2-alpine3.6
 LABEL name="Dragon Bot"
 RUN apk update && apk add --no-cache docker
-RUN pip install requests discord.py docker wolframalpha
+RUN pip install requests discord.py docker wolframalpha beautifulsoup4
 ADD app /app
-CMD python app/dragon-bot.py
+CMD python app/dragon-bot.py

View File

@@ -2,7 +2,7 @@ FROM python:3.6.2-alpine3.6
 LABEL name="Dragon Bot Test environment"
 RUN apk update && apk add --no-cache vim docker
-RUN pip install requests discord.py docker pylint wolframalpha
+RUN pip install requests discord.py docker pylint wolframalpha beautifulsoup4
 ADD app /app
 RUN printf "\n\nTesting your python code for errors\n\n" && \

View File

@@ -147,7 +147,12 @@ async def on_message(message):
             message.channel,
             wallpaper.get_wall(message.content)
         )
+    if message.content.startswith('!homepage'):
+        await client.send_message(
+            message.channel,
+            wallpaper.get_picture(message.content)
+        )
     if message.content.startswith('!docker'):
         # Check permissions
         if not role_check.docker_permissions(message.author.roles):

View File

@@ -1,5 +1,8 @@
 import requests
+from bs4 import BeautifulSoup
+from urllib.parse import unquote

 def get_wall(message):
     unsplash_url = "https://source.unsplash.com/3840x2160/?"
@@ -26,3 +29,17 @@ def get_wall(message):
         return "Could not find an image for those tags."
     else:
         return response
+
+def get_picture(find):
+    # 'find' is the raw "!homepage" message content; it is not used for the lookup.
+    # Grab the front page and find the paragraph announcing the daily picture.
+    page = requests.get("http://fuckinghomepage.com")
+    soup = BeautifulSoup(page.content, 'html.parser')
+    for parse in soup.find_all("p"):
+        if 'SWEET-ASS PICTURE' in ''.join(parse.find_all(text=True)):
+            # The image link lives in the next paragraph, inside <small><a href=...>.
+            link = parse.find_next_sibling('p').find('small').find_next('a', href=True)['href']
+            if "http://" in link or "https://" in link:
+                # The href is a t.umblr.com redirect; unquote the real URL from its z= parameter.
+                return unquote(link.split('http://t.umblr.com/redirect?z=')[1].split('&')[0])
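
For anyone who wants to try the scraper outside the bot, a minimal smoke test is sketched below. It is not part of this commit: the file name smoke_test.py is hypothetical, and it assumes you run it from the app/ directory so wallpaper.py is importable. It simply prints whatever URL get_picture extracts, or None if the page layout has changed.

# smoke_test.py - hypothetical helper, not part of the commit.
# Run from the app/ directory next to wallpaper.py.
import wallpaper

if __name__ == "__main__":
    # get_picture ignores its argument, so any command string works here.
    print(wallpaper.get_picture("!homepage"))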