Upload files to "cogs"

This commit is contained in:
lexio 2024-03-18 22:45:44 +00:00
parent a956a10f44
commit de4c277cb2
1 changed file with 293 additions and 0 deletions

cogs/cmds.py Normal file

@@ -0,0 +1,293 @@
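"""Command cog: Sparklecare page grabber, Tumblr ask-blog pulls, story quotes, and the Countrountrish text converter."""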
import asyncio
import os
import random
import re
from urllib.parse import urljoin

import discord
import pytumblr
import requests
from bs4 import BeautifulSoup
from discord.ext import commands
# Tumblr API credentials, read from the environment
CONSUMER_KEY = os.getenv("CONSUMER_KEY")
CONSUMER_SECRET = os.getenv("CONSUMER_SECRET")
TOKEN_KEY = os.getenv("TOKEN_KEY")
TOKEN_SECRET = os.getenv("TOKEN_SECRET")
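# The four variables above must be set in the bot's environment before
# startup; the exact mechanism (shell export, .env loader, service unit) is
# up to the deployment and isn't part of this file.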
class Cmds(commands.Cog):
def __init__(self, client):
self.client = client
@commands.command(brief='Grabs a random Sparklecare page', help='Highly experimental command that generates a random page number and shows it')
async def sparklecare(self, ctx):
async with ctx.typing():
base_url = "https://sparklecarehospital.com"
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Language": "en-US,en;q=0.9",
}
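            # Pick a random archive tab (one per volume), then a random page
            # link inside it, skipping summary and no-warning entries.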
            def fetch_random_link():
                response = requests.get(base_url + "/archive/", headers=headers)
                tab_ids = [
                    'tabcontent',
                    'tabcontent3',
                    'tabcontent4'
                ]
                tab_id = random.choice(tab_ids)
                if response.status_code == 200:
                    soup = BeautifulSoup(response.content, "html.parser")
                    tab_element = soup.find("div", {"id": tab_id})
                    if tab_element:
                        links = tab_element.find_all("a", string=lambda text: text and "volume summary" not in text.lower() and "no warning" not in text.lower())
                        if links:
                            return random.choice(links).get("href")
                return None
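            # Fetch one page, work out its label and theme color, and reply
            # with an embed (or a trigger-warning notice instead of the image).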
            async def process_page(url):
                full_url = urljoin(base_url, url)
                response = requests.get(full_url, headers=headers)
                # Derive a readable label: numbered pages first, then the cover
                # variants (most specific first, since 'cover' is a prefix of
                # the others)
                match = re.search(r'page(\d+)', url, re.IGNORECASE)
                if match:
                    page_number = f"Page {int(match.group(1))}"
                    print(f"The page number in the URL is: {page_number}")
                elif 'cover4' in url:
                    page_number = 'Content Warning'
                elif 'cover3' in url:
                    page_number = 'Inside Cover'
                elif 'cover2' in url:
                    page_number = 'Back Cover'
                elif 'cover' in url:
                    page_number = 'Cover'
                else:
                    await ctx.reply('grievous error (no page number found)')
                    return
                # Map the URL to its volume title ('comic3' and 'comic2'
                # contain 'comic', so they must be checked first)
                if 'fules' in url:
                    title = f"April Fools 2019 {page_number}"
                elif 'aprilfools' in url:
                    title = f"April Fools 2022 {page_number}"
                elif 'comic3' in url:
                    title = f"ESV3 {page_number}"
                elif 'comic2' in url:
                    title = f"ESV2 {page_number}"
                elif 'comic' in url:
                    title = f"ESV1 {page_number}"
                else:
                    title = "grievous error"
                if response.status_code == 200:
                    soup = BeautifulSoup(response.content, 'html.parser')
                    content_warning = soup.find("div", {"id": "content-warning"})
                    main_content = soup.find("div", {"id": "main"})
                    css_files = [link.get('href') for link in soup.find_all('link', {'rel': 'stylesheet'})]
                    # The per-volume 'theme' stylesheet carries the accent color
                    # on the .topnav rule; fall back to pink if it can't be read
                    theme_css_files = [css for css in css_files if 'theme' in css]
                    topnav_color = 0xff00aa
                    if not theme_css_files:
                        print("No CSS files containing 'theme' in their name found.")
                    for css_file in theme_css_files:
                        absolute_css_url = urljoin(base_url, css_file)
                        css_response = requests.get(absolute_css_url, headers=headers)
                        if css_response.status_code != 200:
                            print(f"Failed to fetch CSS file: {absolute_css_url}")
                            continue
                        topnav_color_match = re.search(r'\.topnav\s*{[^}]*color:\s*([^;}]*)', css_response.text)
                        if topnav_color_match:
                            topnav_color_hex = topnav_color_match.group(1).strip()
                            print(f"In {absolute_css_url}: The color of 'topnav' class is: {topnav_color_hex}")
                            topnav_color = int(topnav_color_hex.lstrip('#'), 16)
                            break
                        print(f"In {absolute_css_url}: 'topnav' class color not found.")
                    if content_warning and "display: none" not in content_warning.get("style", ""):
                        content_warning_text = content_warning.find("p").text.strip() if content_warning.find("p") else "No content warning text available."
                        embed = discord.Embed(title=title, url=f'{base_url}{url}', description=f'This page has a trigger warning.\n⚠️ {content_warning_text}', color=0xff0000)
                        await ctx.reply(embed=embed)
                    elif main_content:
                        main_image = main_content.find("img")
                        if main_image:
                            # Normalize relative ("../../..") and absolute image
                            # sources to a single site-rooted path
                            image_path = main_image.get("src").replace("../../..", "").replace("https://sparklecarehospital.com", "")
                            print(f'{base_url}{image_path}')
                            embed = discord.Embed(title=title, url=f'{base_url}{url}', description="This page doesn't have a trigger warning.", color=topnav_color)
                            embed.set_image(url=f'{base_url}{image_path}')
                            await ctx.reply(embed=embed)
                        else:
                            await ctx.reply("No image found in the main content.")
                    else:
                        await ctx.reply("Content sections not found.")
                else:
                    await ctx.reply(f"Failed to fetch the webpage. Status code: {response.status_code}")
            random_link = fetch_random_link()
            if random_link:
                print("Random Link:", random_link)
                await asyncio.sleep(1)  # non-blocking pause, unlike time.sleep
                await process_page(random_link)
else:
await ctx.reply("No suitable random link found.")
@commands.command(brief='Converts sentences to Countrountrish', help="Puts sentences backwards, but in the same word order")
async def countrountrish(self, ctx, *, message):
        # Reverse each word in place while keeping the word order
        reversed_message = ' '.join(word[::-1] for word in message.split())
        embed = discord.Embed(description=f"```{reversed_message}```", color=0xff00aa)
await ctx.reply(embed=embed)
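        # Example: "hello world" -> "olleh dlrow"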
@commands.command(brief='Gets a random Cometcare page', help='Gets a random Cometcare page')
async def cometcare(self, ctx):
        tumblr_client = pytumblr.TumblrRestClient(
            CONSUMER_KEY,
            CONSUMER_SECRET,
            TOKEN_KEY,
            TOKEN_SECRET
        )
        posts = tumblr_client.posts('askcometcare.tumblr.com', type='photo', tag="cometcare au")
        if 'posts' in posts and len(posts['posts']) > 0:
            random_post = random.choice(posts['posts'])
            # Parse the post body and pull the first image's src out of it
            soup = BeautifulSoup(random_post['body'], 'html.parser')
            img_tag = soup.find('img')
            embed = discord.Embed(color=0x8A37D5)
            embed.set_image(url=img_tag['src'])
            await ctx.reply(embed=embed)
        else:
            await ctx.reply("An error occurred. No posts were returned.")
@commands.command(brief='Gets a random Darker Matters page', help='Gets a random Darker Matters page')
async def darkermatters(self, ctx):
        tumblr_client = pytumblr.TumblrRestClient(
            CONSUMER_KEY,
            CONSUMER_SECRET,
            TOKEN_KEY,
            TOKEN_SECRET
        )
        posts = tumblr_client.posts('askdarkermatters.tumblr.com', type='photo', tag="ask blog")
        if 'posts' in posts and len(posts['posts']) > 0:
            random_post = random.choice(posts['posts'])
            # Parse the post body and pull the first image's src out of it
            soup = BeautifulSoup(random_post['body'], 'html.parser')
            img_tag = soup.find('img')
            embed = discord.Embed(color=0x8A37D5)
            embed.set_image(url=img_tag['src'])
            await ctx.reply(embed=embed)
        else:
            await ctx.reply("An error occurred. No posts were returned.")
@commands.command(brief='Gets a random Askblog page', help='Gets a random Askblog page')
async def askblog(self, ctx):
        tumblr_client = pytumblr.TumblrRestClient(
            CONSUMER_KEY,
            CONSUMER_SECRET,
            TOKEN_KEY,
            TOKEN_SECRET
        )
        posts = tumblr_client.posts('asksparklecare.tumblr.com', type='photo')
        # Collect every photo URL across the returned posts
        image_urls = [
            photo['original_size']['url']
            for post in posts.get('posts', [])
            if 'photos' in post
            for photo in post['photos']
        ]
        if image_urls:
            embed = discord.Embed(color=0xff47d1)
            embed.set_image(url=random.choice(image_urls))
            await ctx.reply(embed=embed)
        else:
            await ctx.reply("An error occurred. No posts were returned.")
@commands.command(brief='Gets a random story and a paragraph from that story', help='Grabs a random story from sparklecarehospital.com/stories')
async def story(self, ctx):
async with ctx.typing():
stories = [
'Scribbles',
'Fact or Fiction',
'Goldfish'
]
random_story = random.choice(stories)
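            # Map each story to its thumbnail and URL (Goldfish currently
            # reuses the Fact or Fiction thumbnail)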
if random_story == 'Scribbles':
tnail = 'https://sparklecarehospital.com/media/assets/rates.png'
story_url = "https://sparklecarehospital.com/stories/scribbles"
elif random_story == 'Fact or Fiction':
tnail = 'https://sparklecarehospital.com/media/assets/factfiction1.png'
story_url = "https://sparklecarehospital.com/stories/fact-or-fiction"
else:
tnail = 'https://sparklecarehospital.com/media/assets/factfiction1.png'
story_url = "https://sparklecarehospital.com/stories/goldfish"
# Define headers with a User-Agent
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
}
            response = requests.get(story_url, headers=headers)
            if response.status_code == 200:
                soup = BeautifulSoup(response.content, 'html.parser')
                body_div = soup.find('td', class_='body')
                # Pull a random paragraph out of the story body
                p_elements = body_div.find_all('p')
                paragraph = random.choice(p_elements).get_text()
                embed = discord.Embed(title=random_story, description=paragraph, url=story_url, color=0xff47d1)
                embed.set_thumbnail(url=tnail)
                await ctx.reply(embed=embed)
            else:
                await ctx.reply(f"Failed to fetch the story. Status code: {response.status_code}")
async def setup(client):
await client.add_cog(Cmds(client))
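# A minimal sketch of loading this cog from the bot's entry point; the main
# file isn't part of this commit, so the client setup and token name below
# are assumptions:
#
#   import asyncio, os
#   import discord
#   from discord.ext import commands
#
#   client = commands.Bot(command_prefix='!', intents=discord.Intents.all())
#
#   async def main():
#       async with client:
#           await client.load_extension('cogs.cmds')  # runs setup() above
#           await client.start(os.getenv('DISCORD_TOKEN'))
#
#   asyncio.run(main())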