v2.3.0: Disable Auto DL, New IG API, Dump.

v2.3.0
What's New:

• Instagram:
Added a Dump feature that logs Instagram posts and avoids duplicate API calls (the lookup is sketched just after this list).
New Instagram API (Thanks to RoseloverX).

• Reddit:
Switched to regex for detecting videos/GIFs.

• YTDL:
Prevented live streams from being downloaded.
Lowered the maximum video duration to 3 minutes (the guard is sketched just after this list).

• Added .disable/.enable to turn Auto DL off in a chat while still allowing the /dl, /down and /download commands for users there (the filter is sketched just after this list).
• Used Enums for media types and cleaned up the media-sending logic.
• New .update command to remotely update the bot without re-deploying.
• New .repo command.
• Cleaned up the add/delete sudo/chat logic.
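
How the Instagram dump works, as a rough sketch condensed from the instagram.py diff below (Config.DUMP_ID, MediaType.MESSAGE and the "#shortcode" caption all come from this commit): every post the bot sends is also copied to a dump chat with its shortcode as a hashtag caption, and the next request for the same post searches that chat first and re-copies the logged message instead of calling the API again.

# Condensed from the new Instagram.check_dump in this commit.
async def check_dump(self):
    if not Config.DUMP_ID:
        return
    # Posts are logged to the dump chat captioned "#<shortcode>", so a
    # caption search finds any earlier copy of the same post.
    async for message in bot.search_messages(Config.DUMP_ID, "#" + self.shortcode):
        self.media = message           # reuse the logged Telegram message
        self.type = MediaType.MESSAGE  # copied later instead of re-uploaded
        self.in_dump = True
        return True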
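
The new yt-dlp guard, condensed from the ytdl.py diff below: metadata is extracted first, and the download is skipped for live streams and for anything 180 seconds (3 minutes) or longer.

# Condensed from the new YT_DL.get_info in this commit.
async def get_info(self):
    if os.path.basename(self.url).startswith("@") or "/hashtag/" in self.url:
        return
    info = await asyncio.to_thread(
        self.yt_obj.extract_info, self.url, download=False
    )
    if (
        not info
        or info.get("live_status") == "is_live"  # never download live streams
        or info.get("duration", 0) >= 180        # cap videos at 3 minutes
    ):
        return
    return info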
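
Disabling Auto DL happens in the chat filter: a chat listed in Config.DISABLED_CHATS no longer triggers automatic downloads, while a second filter still lets /dl, /down and /download through. The sketch below is condensed from the filters.py diff further down; the final return is collapsed into one line for brevity.

# Condensed from the new filters: cmd=True relaxes the URL and
# disabled-chat checks so /dl, /down and /download keep working.
def dynamic_chat_filter(_, __, message, cmd=False):
    if (
        not message.text
        or (not message.text.startswith("https") and not cmd)
        or message.chat.id not in Config.CHATS
        or (message.chat.id in Config.DISABLED_CHATS and not cmd)
        or message.forward_from_chat
    ):
        return False
    user = message.from_user
    if user and (user.id in Config.BLOCKED_USERS or user.is_bot):
        return False
    return True if cmd else bool(check_for_urls(message.text.split()))

def dynamic_allowed_list(_, __, message):
    if not dynamic_chat_filter(_, __, message, cmd=True):
        return False
    cmd = message.text.split(maxsplit=1)[0].replace("/", "", 1)
    return cmd in {"download", "dl", "down"} and not message.reactions
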
anonymousx97 2023-09-09 14:35:58 +05:30
parent 0bacb96a29
commit 7b4c8a2446
20 changed files with 433 additions and 291 deletions

View File

@@ -2,7 +2,7 @@ FROM ghcr.io/anonymousx97/build_essentials:main
# adding email and username to the bot
RUN git config --global user.email "88324835+anonymousx97@users.noreply.github" \
-git config --global user.name "anonymousx97" &&
+git config --global user.name "anonymousx97"
# Exposing Ports for Web Server
EXPOSE 8080 22 8022

View File

@@ -1,6 +1,7 @@
import os
from dotenv import load_dotenv
load_dotenv("config.env")
from .config import Config
@@ -8,7 +9,6 @@ from .core.client import BOT
if not os.environ.get("TERMUX_APK_RELEASE"):
import uvloop
uvloop.install()
bot = BOT()

View File

@@ -4,7 +4,7 @@ import os
import time
from app.core import shell
-from app.core.scraper_config import ScraperConfig
+from app.core.scraper_config import MediaType, ScraperConfig
class Gallery_DL(ScraperConfig):
@@ -25,4 +25,5 @@ class Gallery_DL(ScraperConfig):
files = glob.glob(f"{self.path}/*")
if not files:
return self.cleanup()
-self.link = self.group = self.success = True
+self.media = self.success = True
+self.type = MediaType.GROUP

View File

@@ -1,9 +1,9 @@
import os
from urllib.parse import urlparse
-from app import Config
-from app.core import aiohttp_tools
-from app.core.scraper_config import ScraperConfig
+from app import Config, bot
+from app.core.aiohttp_tools import get_json, get_type
+from app.core.scraper_config import MediaType, ScraperConfig
API_KEYS = {"KEYS": Config.API_KEYS, "counter": 0}
@@ -12,16 +12,49 @@ class Instagram(ScraperConfig):
def __init__(self, url):
super().__init__()
shortcode = os.path.basename(urlparse(url).path.rstrip("/"))
-self.url = f"https://www.instagram.com/graphql/query?query_hash=2b0673e0dc4580674a88d426fe00ea90&variables=%7B%22shortcode%22%3A%22{shortcode}%22%7D"
+self.api_url = f"https://www.instagram.com/graphql/query?query_hash=2b0673e0dc4580674a88d426fe00ea90&variables=%7B%22shortcode%22%3A%22{shortcode}%22%7D"
+self.url = url
+self.dump = True
+async def check_dump(self):
+if not Config.DUMP_ID:
+return
+async for message in bot.search_messages(Config.DUMP_ID, "#" + self.shortcode):
+self.media = message
+self.type = MediaType.MESSAGE
+self.in_dump = True
+return True
async def download_or_extract(self):
-for func in [self.no_api_dl, self.api_dl]:
+for func in [self.check_dump, self.api_3, self.no_api_dl, self.api_dl]:
if await func():
self.success = True
break
+async def api_3(self):
+query_api = f"https://{bot.SECRET_API}?url={self.url}&v=1"
+response = await get_json(url=query_api, json_=False)
+if not response:
+return
+self.caption = "."
+data = (
+response.get("videos", [])
++ response.get("images", [])
++ response.get("stories", [])
+)
+if not data:
+return
+if len(data) > 1:
+self.type = MediaType.GROUP
+self.media = data
+return True
+else:
+self.media = data[0]
+self.type = get_type(self.media)
+return True
async def no_api_dl(self):
-response = await aiohttp_tools.get_json(url=self.url)
+response = await get_json(url=self.api_url)
if (
not response
or "data" not in response
@@ -35,7 +68,7 @@ class Instagram(ScraperConfig):
return
param = {
"api_key": await self.get_key(),
-"url": self.url,
+"url": self.api_url,
"proxy": "residential",
"js": False,
}
@@ -56,20 +89,16 @@ class Instagram(ScraperConfig):
if not type_check:
return
elif type_check == "GraphSidecar":
-self.link = [
+self.media = [
i["node"].get("video_url") or i["node"].get("display_url")
for i in json_["edge_sidecar_to_children"]["edges"]
]
-self.group = True
+self.type = MediaType.GROUP
else:
-if link := json_.get("video_url"):
-self.link = link
-self.thumb = json_.get("display_url")
-self.video = True
-else:
-self.link = json_.get("display_url")
-self.photo = True
-return self.link
+self.media = json_.get("video_url", json_.get("display_url"))
+self.thumb = json_.get("display_url")
+self.type = get_type(self.media)
+return self.media
# Rotating Key function to avoid hitting limit on single Key
async def get_key(self):

View File

@@ -1,9 +1,11 @@
import os
+import re
import time
from urllib.parse import urlparse
-from app.core import aiohttp_tools, shell
-from app.core.scraper_config import ScraperConfig
+from app.core import shell
+from app.core.aiohttp_tools import get_json, get_type
+from app.core.scraper_config import MediaType, ScraperConfig
class Reddit(ScraperConfig):
@@ -16,9 +18,7 @@ class Reddit(ScraperConfig):
headers = {
"user-agent": "Mozilla/5.0 (Macintosh; PPC Mac OS X 10_8_7 rv:5.0; en-US) AppleWebKit/533.31.5 (KHTML, like Gecko) Version/4.0 Safari/533.31.5"
}
-response = await aiohttp_tools.get_json(
-url=self.url, headers=headers, json_=True
-)
+response = await get_json(url=self.url, headers=headers, json_=True)
if not response:
return
@@ -31,36 +31,38 @@ class Reddit(ScraperConfig):
f"""__{json_["subreddit_name_prefixed"]}:__\n**{json_["title"]}**"""
)
-is_vid, is_gallery = json_.get("is_video"), json_.get("is_gallery")
-if is_vid:
-self.path = "downloads/" + str(time.time())
-os.makedirs(self.path)
-self.link = f"{self.path}/v.mp4"
-vid_url = json_["secure_media"]["reddit_video"]["hls_url"]
-await shell.run_shell_cmd(
-f'ffmpeg -hide_banner -loglevel error -i "{vid_url.strip()}" -c copy {self.link}'
-)
-self.thumb = await shell.take_ss(video=self.link, path=self.path)
-self.video = self.success = True
-elif is_gallery:
-self.link = [
+self.thumb = json_.get("thumbnail")
+if json_.get("is_gallery"):
+self.media = [
val["s"].get("u", val["s"].get("gif")).replace("preview", "i")
for val in json_["media_metadata"].values()
]
-self.group = self.success = True
-else:
-self.link = (
-json_.get("preview", {})
-.get("reddit_video_preview", {})
-.get("fallback_url", json_.get("url_overridden_by_dest", ""))
-.strip()
-)
-if not self.link:
-return
-if self.link.endswith(".gif"):
-self.gif = self.success = True
-else:
-self.photo = self.success = True
+self.success = True
+self.type = MediaType.GROUP
+return
+hls = re.findall(r"'hls_url'\s*:\s*'([^']*)'", str(json_))
+if hls:
+self.path = "downloads/" + str(time.time())
+os.makedirs(self.path)
+self.media = f"{self.path}/v.mp4"
+vid_url = hls[0]
+await shell.run_shell_cmd(
+f'ffmpeg -hide_banner -loglevel error -i "{vid_url.strip()}" -c copy {self.media}'
+)
+self.thumb = await shell.take_ss(video=self.media, path=self.path)
+self.success = True
+self.type = (
+MediaType.VIDEO
+if await shell.check_audio(self.media)
+else MediaType.GIF
+)
+return
+generic = json_.get("url_overridden_by_dest", "").strip()
+self.type = get_type(generic)
+if self.type:
+self.media = generic
+self.success = True

View File

@@ -4,7 +4,7 @@ from urllib.parse import urlparse
from bs4 import BeautifulSoup
from app.core import aiohttp_tools
-from app.core.scraper_config import ScraperConfig
+from app.core.scraper_config import MediaType, ScraperConfig
class Threads(ScraperConfig):
@@ -25,9 +25,11 @@ class Threads(ScraperConfig):
if div := soup.find("div", {"class": "SingleInnerMediaContainer"}):
if video := div.find("video"):
-self.link = video.find("source").get("src")
-self.video = self.success = True
+self.media = video.find("source").get("src")
+self.success = True
+self.type = MediaType.VIDEO
elif image := div.find("img", {"class": "img"}):
-self.link = image.get("src")
-self.photo = self.success = True
+self.media = image.get("src")
+self.success = True
+self.type = MediaType.PHOTO

View File

@@ -1,5 +1,5 @@
from app.api.tiktok_scraper import Scraper as Tiktok_Scraper
-from app.core.scraper_config import ScraperConfig
+from app.core.scraper_config import MediaType, ScraperConfig
tiktok_scraper = Tiktok_Scraper(quiet=True)
@@ -14,9 +14,11 @@ class Tiktok(ScraperConfig):
if not media or "status" not in media or media["status"] == "failed":
return
if "video_data" in media:
-self.link = media["video_data"]["nwm_video_url_HQ"]
+self.media = media["video_data"]["nwm_video_url_HQ"]
self.thumb = media["cover_data"]["dynamic_cover"]["url_list"][0]
-self.video = self.success = True
+self.success = True
+self.type = MediaType.VIDEO
if "image_data" in media:
-self.link = media["image_data"]["no_watermark_image_list"]
-self.group = self.success = True
+self.media = media["image_data"]["no_watermark_image_list"]
+self.success = True
+self.type = MediaType.GROUP

View File

@@ -4,7 +4,7 @@ import time
import yt_dlp
-from app.core.scraper_config import ScraperConfig
+from app.core.scraper_config import MediaType, ScraperConfig
from app.core.shell import take_ss
@@ -27,49 +27,53 @@ class YT_DL(ScraperConfig):
self.url = url
self.path = "downloads/" + str(time.time())
self.video_path = self.path + "/v.mp4"
-self._opts = {
+self.type = MediaType.VIDEO
+_opts = {
"outtmpl": self.video_path,
"ignoreerrors": True,
"ignore_no_formats_error": True,
"quiet": True,
"logger": FakeLogger(),
"noplaylist": True,
+"format": self.get_format(),
}
-self.set_format()
+self.yt_obj = yt_dlp.YoutubeDL(_opts)
async def download_or_extract(self):
-if self.check_url():
+info = await self.get_info()
+if not info:
return
-with yt_dlp.YoutubeDL(self._opts) as yt_obj:
-info = await asyncio.to_thread(
-yt_obj.extract_info, self.url, download=False
+await asyncio.to_thread(self.yt_obj.download, self.url)
+if "youtu" in self.url:
+self.caption = (
+f"""__{info.get("channel","")}__:\n**{info.get("title","")}**"""
)
-if not info or info.get("duration", 0) >= 300:
-return
-await asyncio.to_thread(yt_obj.download, self.url)
-if "youtu" in self.url:
-self.caption = (
-f"""__{info.get("channel","")}__:\n**{info.get("title","")}**"""
-)
if os.path.isfile(self.video_path):
-self.link = self.video_path
+self.media = self.video_path
self.thumb = await take_ss(self.video_path, path=self.path)
-self.video = self.success = True
+self.success = True
-def check_url(self):
-if "youtu" in self.url and (
-"/live" in self.url or os.path.basename(self.url).startswith("@")
+async def get_info(self):
+if os.path.basename(self.url).startswith("@") or "/hashtag/" in self.url:
+return
+info = await asyncio.to_thread(
+self.yt_obj.extract_info, self.url, download=False
+)
+if (
+not info
+or info.get("live_status") == "is_live"
+or info.get("duration", 0) >= 180
):
-return 1
+return
+return info
-def set_format(self):
+def get_format(self):
if "/shorts" in self.url:
-self._opts["format"] = "bv[ext=mp4][res=720]+ba[ext=m4a]/b[ext=mp4]"
+return "bv[ext=mp4][res=720]+ba[ext=m4a]/b[ext=mp4]"
elif "youtu" in self.url:
-self._opts["format"] = "bv[ext=mp4][res=480]+ba[ext=m4a]/b[ext=mp4]"
+return "bv[ext=mp4][res=480]+ba[ext=m4a]/b[ext=mp4]"
else:
-self._opts["format"] = "b[ext=mp4]"
+return "b[ext=mp4]"

View File

@@ -14,8 +14,16 @@ class Config:
CMD_DICT = {}
DEV_MODE = int(os.environ.get("DEV_MODE", 0))
+DISABLED_CHATS = []
+DISABLED_CHATS_MESSAGE_ID = int(os.environ.get("DISABLED_CHATS_MESSAGE_ID", 0))
+DUMP_ID = int(os.environ.get("DUMP_ID",0))
LOG_CHAT = int(os.environ.get("LOG_CHAT"))
TRIGGER = os.environ.get("TRIGGER", ".")
+UPSTREAM_REPO = os.environ.get("UPSTREAM_REPO","https://github.com/anonymousx97/social-dl").rstrip("/")
USERS = []
USERS_MESSAGE_ID = int(os.environ.get("USERS_MESSAGE_ID", 0))

View File

@@ -1,10 +1,12 @@
import json
-import os
from io import BytesIO
+from os.path import basename, splitext
from urllib.parse import urlparse
import aiohttp
+from app.core.scraper_config import MediaType
SESSION = None
@@ -39,13 +41,27 @@ async def in_memory_dl(url: str):
async with SESSION.get(url) as remote_file:
bytes_data = await remote_file.read()
file = BytesIO(bytes_data)
-name = os.path.basename(urlparse(url).path.rstrip("/")).lower()
+file.name = get_filename(url)
+return file
+def get_filename(url):
+name = basename(urlparse(url).path.rstrip("/")).lower()
if name.endswith((".webp", ".heic")):
name = name + ".jpg"
if name.endswith(".webm"):
name = name + ".mp4"
-file.name = name
-return file
+return name
+def get_type(url):
+name, ext = splitext(get_filename(url))
+if ext in {".png", ".jpg", ".jpeg"}:
+return MediaType.PHOTO
+if ext in {".mp4", ".mkv", ".webm"}:
+return MediaType.VIDEO
+if ext in {".gif"}:
+return MediaType.GIF
async def thumb_dl(thumb):

View File

@@ -1,3 +1,4 @@
+import base64
import glob
import importlib
import json
@@ -58,13 +59,13 @@ class BOT(Client):
await aiohttp_tools.session_switch()
async def edit_restart_msg(self):
-restart_msg = os.environ.get("RESTART_MSG")
-restart_chat = os.environ.get("RESTART_CHAT")
+restart_msg = int(os.environ.get("RESTART_MSG", 0))
+restart_chat = int(os.environ.get("RESTART_CHAT", 0))
if restart_msg and restart_chat:
-await super().get_chat(int(restart_chat))
+await super().get_chat(restart_chat)
await super().edit_message_text(
-chat_id=int(restart_chat),
-message_id=int(restart_msg),
+chat_id=restart_chat,
+message_id=restart_msg,
text="#Social-dl\n__Started__",
)
os.environ.pop("RESTART_MSG", "")
@@ -101,10 +102,13 @@ class BOT(Client):
await super().stop(block=False)
os.execl(sys.executable, sys.executable, "-m", "app")
+SECRET_API = base64.b64decode("YS56dG9yci5tZS9hcGkvaW5zdGE=").decode("utf-8")
async def set_filter_list(self):
chats_id = Config.AUTO_DL_MESSAGE_ID
blocked_id = Config.BLOCKED_USERS_MESSAGE_ID
users = Config.USERS_MESSAGE_ID
+disabled = Config.DISABLED_CHATS_MESSAGE_ID
if chats_id:
Config.CHATS = json.loads(
@@ -118,11 +122,15 @@ class BOT(Client):
Config.USERS = json.loads(
(await super().get_messages(Config.LOG_CHAT, users)).text
)
+if disabled:
+Config.DISABLED_CHATS = json.loads(
+(await super().get_messages(Config.LOG_CHAT, disabled)).text
+)
async def send_message(self, chat_id, text, name: str = "output.txt", **kwargs):
if len(str(text)) < 4096:
return Message.parse_message(
-(await super().send_message(chat_id=chat_id, text=text, **kwargs))
+(await super().send_message(chat_id=chat_id, text=str(text), **kwargs))
)
doc = BytesIO(bytes(text, encoding="utf-8"))
doc.name = name

View File

@@ -3,22 +3,7 @@ from urllib.parse import urlparse
from pyrogram import filters as _filters
from app import Config
-from app.core.MediaHandler import url_map
+from app.core.media_handler import url_map
-def Dynamic_Chat_Filter(_, __, message):
-if (
-not message.text
-or not message.text.startswith("https")
-or message.chat.id not in Config.CHATS
-or message.forward_from_chat
-):
-return False
-user = message.from_user
-if user and (user.id in Config.BLOCKED_USERS or user.is_bot):
-return False
-url_check = check_for_urls(message.text.split())
-return bool(url_check)
def check_for_urls(text_list):
@@ -31,7 +16,35 @@ def check_for_urls(text_list):
return True
-def Dynamic_Cmd_Filter(_, __, message):
+def dynamic_chat_filter(_, __, message, cmd=False):
+if (
+not message.text
+or (not message.text.startswith("https") and not cmd)
+or message.chat.id not in Config.CHATS
+or (message.chat.id in Config.DISABLED_CHATS and not cmd)
+or message.forward_from_chat
+):
+return False
+user = message.from_user
+if user and (user.id in Config.BLOCKED_USERS or user.is_bot):
+return False
+if cmd:
+return True
+url_check = check_for_urls(message.text.split())
+return bool(url_check)
+def dynamic_allowed_list(_, __, message):
+if not dynamic_chat_filter(_, __, message, cmd=True):
+return False
+start_str = message.text.split(maxsplit=1)[0]
+cmd = start_str.replace("/", "", 1)
+cmd_check = cmd in {"download", "dl", "down"}
+reaction_check = not message.reactions
+return bool(cmd_check and reaction_check)
+def dynamic_cmd_filter(_, __, message):
if (
not message.text
or not message.text.startswith(Config.TRIGGER)
@@ -47,5 +60,6 @@ def Dynamic_Cmd_Filter(_, __, message):
return bool(cmd_check and reaction_check)
-chat_filter = _filters.create(Dynamic_Chat_Filter)
-user_filter = _filters.create(Dynamic_Cmd_Filter)
+chat_filter = _filters.create(dynamic_chat_filter)
+user_filter = _filters.create(dynamic_cmd_filter)
+allowed_cmd_filter = _filters.create(dynamic_allowed_list)

View File

@@ -7,6 +7,7 @@ from urllib.parse import urlparse
from pyrogram.errors import MediaEmpty, PhotoSaveFileInvalid, WebpageCurlFailed
from pyrogram.types import InputMediaPhoto, InputMediaVideo
+from app import Config
from app.api.gallerydl import Gallery_DL
from app.api.instagram import Instagram
from app.api.reddit import Reddit
@@ -14,6 +15,7 @@ from app.api.threads import Threads
from app.api.tiktok import Tiktok
from app.api.ytdl import YT_DL
from app.core import aiohttp_tools, shell
+from app.core.scraper_config import MediaType
url_map = {
"tiktok.com": Tiktok,
@@ -28,7 +30,7 @@ url_map = {
}
-class ExtractAndSendMedia:
+class MediaHandler:
def __init__(self, message):
self.exceptions, self.media_objects, self.sender_dict = [], [], {}
self.__client = message._client
@@ -83,33 +85,38 @@ class ExtractAndSendMedia:
obj.caption + obj.caption_url + self.sender_dict[obj.query_url]
)
try:
-if self.doc:
-await self.send_document(obj.link, caption=caption, path=obj.path)
-elif obj.group:
-await self.send_group(obj, caption=caption)
-elif obj.photo:
-await self.send(
-media={"photo": obj.link},
-method=self.__client.send_photo,
-caption=caption,
-has_spoiler=self.spoiler,
-)
-elif obj.video:
-await self.send(
-media={"video": obj.link},
-method=self.__client.send_video,
-thumb=await aiohttp_tools.thumb_dl(obj.thumb),
-caption=caption,
-has_spoiler=self.spoiler,
-)
-elif obj.gif:
-await self.send(
-media={"animation": obj.link},
-method=self.__client.send_animation,
-caption=caption,
-has_spoiler=self.spoiler,
-unsave=True,
-)
+if self.doc and not obj.in_dump:
+await self.send_document(obj.media, caption=caption, path=obj.path)
+continue
+match obj.type:
+case MediaType.MESSAGE:
+await obj.media.copy(self.message.chat.id, caption=caption)
+continue
+case MediaType.GROUP:
+await self.send_group(obj, caption=caption)
+continue
+case MediaType.PHOTO:
+post = await self.send(
+media={"photo": obj.media},
+method=self.__client.send_photo,
+caption=caption,
+)
+case MediaType.VIDEO:
+post = await self.send(
+media={"video": obj.media},
+method=self.__client.send_video,
+thumb=await aiohttp_tools.thumb_dl(obj.thumb),
+caption=caption,
+)
+case MediaType.GIF:
+post = await self.send(
+media={"animation": obj.media},
+method=self.__client.send_animation,
+caption=caption,
+unsave=True,
+)
+if obj.dump and Config.DUMP_ID:
+await post.copy(Config.DUMP_ID, caption="#" + obj.shortcode)
except BaseException:
self.exceptions.append(
"\n".join([obj.caption_url.strip(), traceback.format_exc()])
@@ -118,15 +125,20 @@ class ExtractAndSendMedia:
async def send(self, media, method, **kwargs):
try:
try:
-await method(**media, **self.args_, **kwargs)
+post = await method(
+**media, **self.args_, **kwargs, has_spoiler=self.spoiler
+)
except (MediaEmpty, WebpageCurlFailed):
key, value = list(media.items())[0]
media[key] = await aiohttp_tools.in_memory_dl(value)
-await method(**media, **self.args_, **kwargs)
+post = await method(
+**media, **self.args_, **kwargs, has_spoiler=self.spoiler
+)
except PhotoSaveFileInvalid:
-await self.__client.send_document(
+post = await self.__client.send_document(
**self.args_, document=media, caption=caption, force_document=True
)
+return post
async def send_document(self, docs, caption, path=""):
if not path:
@@ -137,22 +149,18 @@ class ExtractAndSendMedia:
[os.rename(file_, file_ + ".png") for file_ in glob.glob(f"{path}/*.webp")]
docs = glob.glob(f"{path}/*")
for doc in docs:
-try:
-await self.__client.send_document(
-**self.args_, document=doc, caption=caption, force_document=True
-)
-except (MediaEmpty, WebpageCurlFailed):
-doc = await aiohttp_tools.in_memory_dl(doc)
-await self.__client.send_document(
-**self.args_, document=doc, caption=caption, force_document=True
-)
+await self.__client.send_document(
+**self.args_, document=doc, caption=caption, force_document=True
+)
await asyncio.sleep(0.5)
-async def send_group(self, media, caption):
-if media.path:
-sorted = await self.sort_media_path(media.path, caption)
-else:
-sorted = await self.sort_media_urls(media.link, caption)
+async def send_group(self, media_obj, caption):
+sorted = await sort_media(
+caption=caption,
+spoiler=self.spoiler,
+urls=media_obj.media,
+path=media_obj.path,
+)
for data in sorted:
if isinstance(data, list):
await self.__client.send_media_group(**self.args_, media=data)
@@ -161,53 +169,10 @@ class ExtractAndSendMedia:
media={"animation": data},
method=self.__client.send_animation,
caption=caption,
-has_spoiler=self.spoiler,
unsave=True,
)
await asyncio.sleep(1)
-async def sort_media_path(self, path, caption):
-[os.rename(file_, file_ + ".png") for file_ in glob.glob(f"{path}/*.webp")]
-images, videos, animations = [], [], []
-for file in glob.glob(f"{path}/*"):
-if file.lower().endswith((".png", ".jpg", ".jpeg")):
-images.append(
-InputMediaPhoto(file, caption=caption, has_spoiler=self.spoiler)
-)
-if file.lower().endswith((".mp4", ".mkv", ".webm")):
-has_audio = await shell.check_audio(file)
-if not has_audio:
-animations.append(file)
-else:
-videos.append(
-InputMediaVideo(file, caption=caption, has_spoiler=self.spoiler)
-)
-return await self.make_chunks(images, videos, animations)
-async def sort_media_urls(self, urls, caption):
-images, videos, animations = [], [], []
-downloads = await asyncio.gather(
-*[aiohttp_tools.in_memory_dl(url) for url in urls]
-)
-for file_obj in downloads:
-name = file_obj.name.lower()
-if name.endswith((".png", ".jpg", ".jpeg")):
-images.append(
-InputMediaPhoto(file_obj, caption=caption, has_spoiler=self.spoiler)
-)
-if name.endswith((".mp4", ".mkv", ".webm")):
-videos.append(
-InputMediaVideo(file_obj, caption=caption, has_spoiler=self.spoiler)
-)
-if name.endswith(".gif"):
-animations.append(file_obj)
-return await self.make_chunks(images, videos, animations)
-async def make_chunks(self, images=[], videos=[], animations=[]):
-chunk_imgs = [images[imgs : imgs + 5] for imgs in range(0, len(images), 5)]
-chunk_vids = [videos[vids : vids + 5] for vids in range(0, len(videos), 5)]
-return [*chunk_imgs, *chunk_vids, *animations]
@classmethod
async def process(cls, message):
obj = cls(message=message)
@@ -215,3 +180,35 @@ class ExtractAndSendMedia:
await obj.send_media()
[m_obj.cleanup() for m_obj in obj.media_objects]
return obj
+async def sort_media(caption="", spoiler=False, urls=None, path=None):
+images, videos, animations = [], [], []
+if path and os.path.exists(path):
+[os.rename(file_, file_ + ".png") for file_ in glob.glob(f"{path}/*.webp")]
+media = glob.glob(f"{path}/*")
+else:
+media = await asyncio.gather(*[aiohttp_tools.in_memory_dl(url) for url in urls])
+for file in media:
+if path:
+name = file.lower()
+else:
+name = file.name.lower()
+if name.endswith((".png", ".jpg", ".jpeg")):
+images.append(InputMediaPhoto(file, caption=caption, has_spoiler=spoiler))
+elif name.endswith((".mp4", ".mkv", ".webm")):
+if not urls and not await shell.check_audio(file):
+animations.append(file)
+else:
+videos.append(
+InputMediaVideo(file, caption=caption, has_spoiler=spoiler)
+)
+elif name.endswith(".gif"):
+animations.append(file)
+return await make_chunks(images, videos, animations)
+async def make_chunks(images=[], videos=[], animations=[]):
+chunk_imgs = [images[imgs : imgs + 5] for imgs in range(0, len(images), 5)]
+chunk_vids = [videos[vids : vids + 5] for vids in range(0, len(videos), 5)]
+return [*chunk_imgs, *chunk_vids, *animations]

View File

@@ -1,27 +1,35 @@
import shutil
+from enum import Enum, auto
+class MediaType(Enum):
+PHOTO = auto()
+VIDEO = auto()
+GROUP = auto()
+GIF = auto()
+MESSAGE = auto()
class ScraperConfig:
def __init__(self):
+self.dump = False
+self.in_dump = False
self.path = ""
-self.link = ""
+self.media = ""
self.caption = ""
self.caption_url = ""
self.thumb = None
+self.type = None
self.success = False
-self.photo = False
-self.video = False
-self.group = False
-self.gif = False
-def set_sauce(self):
-self.caption_url = f"\n\n<a href='{self.query_url}'>Sauce</a>"
+def set_sauce(self, url):
+self.caption_url = f"\n\n<a href='{url}'>Sauce</a>"
@classmethod
async def start(cls, url):
obj = cls(url=url)
obj.query_url = url
-obj.set_sauce()
+obj.set_sauce(url)
await obj.download_or_extract()
if obj.success:
return obj

View File

@@ -49,103 +49,110 @@ def extract_chat(message):
return chat, err
-@bot.add_cmd(cmd="addsudo")
-async def add_sudo(bot, message):
+@bot.add_cmd(cmd=["addsudo", "delsudo"])
+async def add_or_remove_sudo(bot, message):
user, err = extract_user(message)
if err:
return await message.reply(err)
+if message.cmd == "addsudo":
+mode = "add"
+task = Config.USERS.append
+action = "Added to"
+else:
+mode = "remove"
+task = Config.USERS.remove
+action = "Removed from"
if err := await add_or_remove(
-mode="add",
-task=Config.USERS.append,
+mode=mode,
+task=task,
item=user,
config_list=Config.USERS,
message_id=Config.USERS_MESSAGE_ID,
):
return await message.reply(err, del_in=5)
-await message.reply("User Added to Authorised List.", del_in=5)
-@bot.add_cmd(cmd="delsudo")
-async def add_sudo(bot, message):
-user, err = extract_user(message)
-if err:
-return await message.reply(err)
-if err := await add_or_remove(
-mode="remove",
-task=Config.USERS.remove,
-item=user,
-config_list=Config.USERS,
-message_id=Config.USERS_MESSAGE_ID,
-):
-return await message.reply(err, del_in=5)
-await message.reply("User Removed from Authorised List.", del_in=5)
-@bot.add_cmd(cmd="addchat")
-async def add_chat(bot, message):
+await message.reply(f"User {action} Authorised List.", del_in=5)
+@bot.add_cmd(cmd=["addchat", "delchat"])
+async def add_or_remove_chat(bot, message):
chat, err = extract_chat(message)
if err:
return await message.reply(err)
+if message.cmd == "addchat":
+mode = "add"
+task = Config.CHATS.append
+action = "Added to"
+else:
+mode = "remove"
+task = Config.CHATS.remove
+action = "Removed from"
if err := await add_or_remove(
-mode="add",
-task=Config.CHATS.append,
+mode=mode,
+task=task,
item=chat,
config_list=Config.CHATS,
message_id=Config.AUTO_DL_MESSAGE_ID,
):
return await message.reply(err, del_in=5)
await message.reply(
-f"<b>{message.chat.title or message.chat.first_name}</b> Added to Authorised List.",
+f"<b>{message.chat.title or message.chat.first_name}</b> {action} Authorised List.",
del_in=5,
)
-@bot.add_cmd(cmd="delchat")
-async def add_chat(bot, message):
-chat, err = extract_chat(message)
+@bot.add_cmd(cmd=["block", "unblock"])
+async def block_or_unblock(bot, message):
+user, err = extract_user(message)
if err:
return await message.reply(err)
+if message.cmd == "block":
+mode = "add"
+task = Config.BLOCKED_USERS.append
+action = "Added to"
+else:
+mode = "remove"
+task = Config.BLOCKED_USERS.remove
+action = "Removed from"
if err := await add_or_remove(
-mode="remove",
-task=Config.CHATS.remove,
-item=chat,
-config_list=Config.CHATS,
-message_id=Config.AUTO_DL_MESSAGE_ID,
+mode=mode,
+task=task,
+item=user,
+config_list=Config.BLOCKED_USERS,
+message_id=Config.BLOCKED_USERS_MESSAGE_ID,
+):
+return await message.reply(err, del_in=5)
+await message.reply(f"User {action} Ban List.", del_in=5)
+@bot.add_cmd(cmd=["enable", "disable"])
+async def auto_dl_trigger(bot, message):
+if not Config.DISABLED_CHATS_MESSAGE_ID:
+return await message.reply("You haven't added `DISABLED_CHATS_ID` Var, Add it.")
+if message.cmd == "disable":
+mode = "add"
+task = Config.DISABLED_CHATS.append
+action = "Added to"
+else:
+mode = "remove"
+task = Config.DISABLED_CHATS.remove
+action = "Removed from"
+if err := await add_or_remove(
+mode=mode,
+task=task,
+item=message.chat.id,
+config_list=Config.DISABLED_CHATS,
+message_id=Config.DISABLED_CHATS_MESSAGE_ID,
):
return await message.reply(err, del_in=5)
await message.reply(
-f"<b>{message.chat.title or message.chat.first_name}</b> Added Removed from Authorised List.",
+f"<b>{message.chat.title or message.chat.first_name}</b> {action} Disabled List.",
del_in=5,
)
-@bot.add_cmd(cmd="block")
-async def add_sudo(bot, message):
-user, err = extract_user(message)
-if err:
-return await message.reply(err)
-if err := await add_or_remove(
-mode="add",
-task=Config.BLOCKED_USERS.append,
-item=user,
-config_list=Config.BLOCKED_USERS,
-message_id=Config.BLOCKED_USERS_MESSAGE_ID,
-):
-return await message.reply(err, del_in=5)
-await message.reply("User Added to Ban List.", del_in=5)
-@bot.add_cmd(cmd="unblock")
-async def add_sudo(bot, message):
-user, err = extract_user(message)
-if err:
-return await message.reply(err)
-if err := await add_or_remove(
-mode="remove",
-task=Config.BLOCKED_USERS.remove,
-item=user,
-config_list=Config.BLOCKED_USERS,
-message_id=Config.BLOCKED_USERS_MESSAGE_ID,
-):
-return await message.reply(err, del_in=5)
-await message.reply("User Removed from Ban List.", del_in=5)

View File

@@ -1,5 +1,7 @@
+import asyncio
import os
+from git import Repo
from pyrogram.enums import ChatType
from app import Config, bot
@@ -27,15 +29,50 @@ async def help(bot, message):
@bot.add_cmd(cmd="restart")
-async def restart(bot, message):
-reply = await message.reply("restarting....")
-if message.chat.type in [ChatType.GROUP, ChatType.SUPERGROUP]:
+async def restart(bot, message, u_resp=None):
+reply = u_resp or await message.reply("restarting....")
+if reply.chat.type in (ChatType.GROUP, ChatType.SUPERGROUP):
os.environ["RESTART_MSG"] = str(reply.id)
os.environ["RESTART_CHAT"] = str(reply.chat.id)
await bot.restart()
-@bot.add_cmd(cmd="update")
+@bot.add_cmd(cmd="refresh")
async def chat_update(bot, message):
await bot.set_filter_list()
-await message.reply("Filters Refreshed", del_in=10)
+await message.reply("Filters Refreshed", del_in=8)
+@bot.add_cmd(cmd="repo")
+async def sauce(bot, message):
+await bot.send_message(
+chat_id=message.chat.id,
+text=f"<a href='{Config.UPSTREAM_REPO}'>Social-DL</a>",
+reply_to_message_id=message.reply_id or message.id,
+disable_web_page_preview=True,
+)
+@bot.add_cmd(cmd="update")
+async def updater(bot, message):
+reply = await message.reply("Checking for Updates....")
+repo = Repo()
+repo.git.fetch()
+commits = ""
+limit = 0
+for commit in repo.iter_commits("HEAD..origin/main"):
+commits += f"<b>#{commit.count()}</b> <a href='{Config.UPSTREAM_REPO}/commit/{commit}'>{commit.summary}</a> By <i>{commit.author}</i>\n\n"
+limit += 1
+if limit > 50:
+break
+if not commits:
+return await reply.edit("Already Up To Date.", del_in=5)
+if "-pull" not in message.flags:
+return await reply.edit(f"<b>Update Available:</b>\n\n{commits}")
+repo.git.reset("--hard")
+repo.git.pull(Config.UPSTREAM_REPO, "--rebase=true")
+await asyncio.gather(
+bot.log(text=f"#Updater\nPulled:\n\n{commits}"),
+reply.edit("<b>Update Found</b>\n<i>Pulling....</i>"),
+)
+await restart(bot, message, reply)

View File

@@ -3,7 +3,7 @@ import traceback
from app import Config, bot
from app.core import filters
-from app.core.MediaHandler import ExtractAndSendMedia
+from app.core.media_handler import MediaHandler
from app.core.message import Message
@@ -12,8 +12,8 @@ async def dl(bot, message):
reply = await bot.send_message(
chat_id=message.chat.id, text="`trying to download...`"
)
-coro = ExtractAndSendMedia.process(message)
-task = asyncio.Task(coro, name=message.task_id)
+coro = MediaHandler.process(message)
+task = asyncio.Task(coro, name=reply.task_id)
media = await task
if media.exceptions:
exceptions = "\n".join(media.exceptions)
@@ -49,6 +49,7 @@ async def cmd_dispatcher(bot, message):
)
+@bot.on_message(filters.allowed_cmd_filter)
@bot.on_message(filters.chat_filter)
async def dl_dispatcher(bot, message):
message = Message.parse_message(message)

View File

@@ -2,6 +2,7 @@ aiohttp>=3.8.4
beautifulsoup4>=4.12.2
Brotli>=1.0.9
gallery_dl>=1.25.7
+gitpython>=3.1.32
pyrogram>=2.0.106
python-dotenv==0.21.0
PyExecJS>=1.5.1

run
View File

@@ -11,4 +11,9 @@ web.run_app(app, host='0.0.0.0', port=$API_PORT, reuse_port=True, print=None)
fi
-python3 -m app
+if ! [ -d ".git" ] ; then
+git init
+fi
+python3 -m app