Compare commits


No commits in common. "master" and "webserver" have entirely different histories.

2 changed files with 18 additions and 54 deletions


@@ -1,5 +1,5 @@
 {
-"DownDir":"/home/tuxedo/Video",
-"email":"##########@gmail.com",
-"email_password":"#############"
+"DownDir":"/media/media/anime",
+"DownPriority":["streamtape", "saturn", "streamhide", "legacy"],
+"ServerLink":"www.animesaturn.tv"
 }
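The rewritten config drops the e-mail credentials and adds the two keys the new code relies on: DownPriority (the provider order tried by scarica below) and ServerLink. A minimal sketch of loading it, assuming a hypothetical config.json filename, since the diff does not show the file's real name or how get_config opens it:

from json import loads

def get_config(path='config.json'):  # 'config.json' is an assumption; the diff hides the real path
    with open(path) as f:
        return loads(f.read())

configs = get_config()
print(configs['DownPriority'])  # ['streamtape', 'saturn', 'streamhide', 'legacy']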


@@ -1,6 +1,5 @@
 from bs4 import BeautifulSoup as bs
 import csv, jsbeautifier, cloudscraper, os, re, smtplib
-#from selenium import webdriver
 from email.mime.text import MIMEText
 from json import loads
 from datetime import datetime
@@ -19,12 +18,8 @@ def get_config():
     return configs
 def get_soup(url): #from the url, outputs the page's soup
-    #driver.get(url)
-    #data = driver.page_source
-    r = cloudscraper.create_scraper(session)
-    headers = {"User-Agent":"Mozilla/5.0 (X11; Linux x86_64; rv:135.0) Gecko/20100101 Firefox/135.0"}
-    cookies = {"ASNew-q8":"2fa7ff5d81fa1a0db26b3f696a98dec9"}
-    page=r.get(url, headers=headers, cookies=cookies)
+    r = cloudscraper.create_scraper()
+    page=r.get(url)
     data=page.text
     return bs(data, features="html.parser")
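The simplification works because cloudscraper.create_scraper() already returns a requests-compatible session that solves Cloudflare challenges and presents a browser-like fingerprint, so the spoofed User-Agent, the hard-coded cookie, and the custom session all become unnecessary. A self-contained sketch of the new fetch path:

from bs4 import BeautifulSoup as bs
import cloudscraper

def get_soup(url):
    r = cloudscraper.create_scraper()  # browser-like, Cloudflare-aware session
    page = r.get(url)                  # plain requests API underneath
    return bs(page.text, features='html.parser')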
@@ -47,14 +42,11 @@ def link_ep_da_url(url): #takes the "Guarda lo Streaming" page and finds the
     cosetta=get_soup(url).find('div', {'class':"btn btn-light w-100 mt-3 mb-3"}).parent
     return cosetta['href']
-def links_ep_da_scaricare(url): #takes the link output by link_ep_da_url and searches inside it for all the links to feed into yt-dlp
+def links_ep_da_scaricare(url): #takes the link output by link_ep_da_url and searches inside it for the file links of every possible provider, returning a dict
     #print('links_ep_da_scaricare')
     soup=get_soup(url)
     cosetta=soup.find('div', {'class':'main-container'}).find('source')
-    links=[url,]+[link['href'] for link in soup.find('div', {'id':'wtf', 'class':'button'}).findAll('a')]
-    return links
-'''
     links={}
     if cosetta:
         links.update({'legacy':cosetta['src']}) #legacy video format
     else:
@@ -73,9 +65,7 @@ def links_ep_da_scaricare(url): #takes the link output by link_ep_da_url and searches
         except Exception as e:
             pass
     return links
-'''
-'''
 def link_down(named_urls): #takes the dict from links_ep_da_scaricare and, as far as it can, converts the sites into links downloadable via yt-dlp
     #print('link-down')
     #print(named_urls)
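Deleting the two ''' lines un-comments the dict-building version of links_ep_da_scaricare, so the function now returns a provider-name -> link mapping instead of a flat list. A rough sketch of the now-active shape, reconstructed only from the context lines visible above (the else branch and the provider loop fall outside the displayed hunks):

def links_ep_da_scaricare(url):
    soup = get_soup(url)
    links = {}
    cosetta = soup.find('div', {'class': 'main-container'}).find('source')
    if cosetta:
        links['legacy'] = cosetta['src']  # old-style direct <source> video
    # ...the remaining providers are collected in code outside these hunks...
    return links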
@@ -100,28 +90,29 @@ def link_down(named_urls): #takes the dict from links_ep_da_scaricare and, as far as
             pass
     #print(links)
     return links
-'''
 def scarica(lista,configs): #lista will be in the form filepath,url; tries providers in priority order from the conf
     orario=datetime.now().strftime("%d/%m/%Y %H:%M:%S")
-    #urls_file=link_down(links_ep_da_scaricare(link_ep_da_url(lista[1])))
-    urls_file=links_ep_da_scaricare(link_ep_da_url(lista[1]))
-    for i in range(len(urls_file)):
+    priority=configs['DownPriority']
+    urls_file=link_down(links_ep_da_scaricare(link_ep_da_url(lista[1])))
+    for provider in priority:
         try:
-            print('['+orario+']'+'Avvio download try '+str(i)+' per '+lista[0].split('/')[-1])
+            print('['+orario+']'+'Avvio download '+provider+' per '+lista[0].split('/')[-1])
             #open(lista[0], 'wb').write(r.get(urls_file['provider']).content)
             with YoutubeDL({'outtmpl':{'default':lista[0]},'quiet':True,'no_warnings':True,'ignoreerrors':False,'retries':10}) as ydl:
-                ydl.download(urls_file[i])
+                ydl.download(urls_file[provider])
             print('['+orario+']'+'Successo per '+lista[0].split('/')[-1])
             return
         except Exception as e:
             print(e)
-            print('['+orario+']'+'Errore try '+str(i)+' per '+lista[0].split('/')[-1])
+            print('['+orario+']'+'Errore '+provider+' per '+lista[0].split('/')[-1])
 def modulo_scarica():
     file = get_animu()
     configs = get_config()
+    #lines 0 is the ep url, lines 1 is the download folder, lines 2 is the anime name
     lista_email=[]
-    for lines in file[1:]: #lines 0 is the url, lines 1 is the download dir, lines 2 is episode naming, lines 3 is enabled?, lines 4 is jellynaming?
+    for lines in file[1:]:
         ora=datetime.now()
         orario=ora.strftime("%d/%m/%Y %H:%M:%S")
         if int(lines[3]):
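This is the heart of the change: scarica no longer tries scraped links by bare index but walks configs['DownPriority'] and stops at the first provider yt-dlp can download. A trimmed sketch of the pattern, using a hypothetical helper name and assuming urls_file is the provider -> url dict built earlier:

from yt_dlp import YoutubeDL

def download_first_working(filepath, urls_file, priority):
    # hypothetical helper mirroring the retry-by-priority loop in scarica()
    for provider in priority:
        if provider not in urls_file:
            continue  # no link scraped for this provider
        try:
            opts = {'outtmpl': {'default': filepath}, 'quiet': True, 'retries': 10}
            with YoutubeDL(opts) as ydl:
                ydl.download([urls_file[provider]])  # list form is accepted by every yt-dlp version
            return provider  # first success wins
        except Exception as e:
            print(e)  # fall through to the next provider in priority order
    return None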
@@ -155,7 +146,7 @@ def modulo_scarica():
                 results.append((filepath,episodi))
             for ep in results:
                 scarica(ep,configs)
-                lista_email.append((ep[0],0))
+                lista_email.append((os.path.basename(ep[0]),0))
         except Exception as e:
             print(e)
             lista_email.append((lines[1],1))
@@ -192,32 +183,5 @@ def modulo_scarica():
         new_email(subject,body,configs)
 if __name__ == "__main__":
-    ########################to correct the tls error
-    #import cloudscraper
-    from requests.adapters import HTTPAdapter
-    from requests.packages.urllib3.poolmanager import PoolManager
-    import ssl
-    class SSLAdapter(HTTPAdapter):
-        def init_poolmanager(self, connections, maxsize, block=False):
-            self.poolmanager = PoolManager(
-                num_pools=connections,
-                maxsize=maxsize,
-                block=block,
-                ssl_version=ssl.PROTOCOL_TLS,
-                ciphers='ECDHE-RSA-AES128-GCM-SHA256')
-    #firefox_options = webdriver.FirefoxOptions()
-    #firefox_options.add_argument("--profile=/opt/saturn_cli/selenium_profile")
-    #driver = webdriver.Chrome()
-    session = cloudscraper.Session()
-    session.mount('https://', SSLAdapter())
-    #response = session.get('https://example.com')
-    #print(response.text)
-    ########################
     modulo_scarica()
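The deleted block was a workaround for a TLS handshake error: it pinned a single cipher suite on every HTTPS connection of the session. With get_soup now calling cloudscraper.create_scraper(), which manages its own TLS setup, the adapter has nothing left to do. For reference, the essence of what was removed, rewritten against plain requests/urllib3 (a sketch, not code from this repo):

import ssl
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3.poolmanager import PoolManager

class SSLAdapter(HTTPAdapter):
    # force one cipher so servers with picky TLS stacks accept the handshake
    def init_poolmanager(self, connections, maxsize, block=False):
        ctx = ssl.create_default_context()
        ctx.set_ciphers('ECDHE-RSA-AES128-GCM-SHA256')
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, ssl_context=ctx)

session = Session()
session.mount('https://', SSLAdapter())  # every https:// request now uses the pinned cipher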