This commit is contained in:
Pavel Suha
2025-04-25 16:30:00 +02:00
parent 9d43f8f476
commit ef3ef9f859
34 changed files with 2550 additions and 0 deletions

Binary file not shown.

Binary file not shown.

156
resources/lib/bezvadata.py Normal file
View File

@@ -0,0 +1,156 @@
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2012 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import sys
import urllib
#Python 2
try:
import cookielib
import urllib2
#import sys
reload(sys) # Reload does the trick!
sys.setdefaultencoding('UTF8')
#Python 3
except:
import http.cookiejar
cookielib = http.cookiejar
urllib2 = urllib.request
import re,random,util,os,traceback,base64
from provider import ContentProvider
class BezvadataContentProvider(ContentProvider):
    """Content provider scraping the bezvadata.cz file-sharing site.

    Supports keyword search, listing the category sections scraped from the
    homepage, and resolving a file page into a direct download URL via the
    free-download captcha form.
    """

    def __init__(self,username=None,password=None,filter=None,tmp_dir='.'):
        # tmp_dir is where the downloaded captcha image will be stored
        ContentProvider.__init__(self,'bezvadata.cz','http://bezvadata.cz/',username,password,filter,tmp_dir)
        # install a global opener with an in-memory cookie jar so the
        # captcha session survives across subsequent requests
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
        urllib2.install_opener(opener)

    def capabilities(self):
        # features this provider implements (queried by the plugin frontend)
        return ['search','resolve','categories']

    def search(self,keyword):
        # NOTE(review): urllib.parse.quote is Python-3-only even though the
        # module header still carries a Python 2 fallback — confirm Py3-only use
        return self.list('vyhledavani/?s='+urllib.parse.quote(keyword))

    def categories(self):
        """Scrape the category sections from the homepage stats block."""
        page = util.request(self.base_url)
        page = util.substr(page,'<div class=\"stats','<footer>')
        result = []
        for m in re.finditer('<section class=\"(?P<type>[^\"]+)[^<]+<h3>(?P<title>[^<]+)',page, re.IGNORECASE|re.DOTALL):
            item = self.dir_item()
            item['title'] = m.group('title')
            # '#'-prefix marks a homepage section; consumed by list()
            item['url'] = '#'+m.group('type')
            result.append(item)
        return result

    def list_special(self,type):
        """List the links inside one named homepage <section> (see categories)."""
        page = util.request(self.base_url)
        page = util.substr(page,'<section class=\"'+type,'</section>')
        result = []
        for m in re.finditer('<a href=\"(?P<url>[^\"]+)[^>]+>(?P<title>[^<]+)',page, re.IGNORECASE|re.DOTALL):
            item = self.video_item()
            item['title'] = m.group('title')
            item['url'] = m.group('url')
            result.append(item)
        return result

    def list(self,url):
        """Scrape one listing/search-result page into provider items.

        Appends 'prev'/'next' directory items when pagination links exist.
        """
        # '#section' URLs come from categories() and map to homepage sections
        if url.find('#') == 0:
            return self.list_special(url[1:])
        page = util.request(self._url(url))
        # adult-content interstitial: follow the consent link when present
        ad = re.search('<a href=\"(?P<url>/vyhledavani/souhlas-zavadny-obsah[^\"]+)',page,re.IGNORECASE|re.DOTALL)
        if ad:
            page = util.request(self._url(ad.group('url')))
        data = util.substr(page,'<div class=\"content','<div class=\"stats')
        pattern = '<section class=\"img[^<]+<a href=\"(?P<url>[^\"]+)(.+?)<img src=\"(?P<img>[^\"]+)\" alt=\"(?P<name>[^\"]+)(.+?)<b>velikost:</b>(?P<size>[^<]+)'
        result = []
        for m in re.finditer(pattern,data,re.IGNORECASE | re.DOTALL ):
            item = self.video_item()
            item['title'] = m.group('name')
            item['size'] = m.group('size').strip()
            item['img'] = m.group('img')
            item['url'] = m.group('url')
            # mark 18+ content (we went through the consent interstitial)
            if ad:
                item['18+'] = True
            # honour the optional user-supplied item filter
            if self.filter:
                if self.filter(item):
                    result.append(item)
            else:
                result.append(item)
        # page navigation
        data = util.substr(page,'<div class=\"pagination','</div>')
        m = re.search('<li class=\"previous[^<]+<a href=\"(?P<url>[^\"]+)',data,re.DOTALL|re.IGNORECASE)
        if m:
            item = self.dir_item()
            item['type'] = 'prev'
            item['url'] = m.group('url')
            result.append(item)
        n = re.search('<li class=\"next[^<]+<a href=\"(?P<url>[^\"]+)',data,re.DOTALL|re.IGNORECASE)
        if n:
            item = self.dir_item()
            item['type'] = 'next'
            item['url'] = n.group('url')
            result.append(item)
        return result

    def resolve(self,item,captcha_cb=None,wait_cb=None):
        """Resolve *item* into a direct download URL.

        Follows the free-download link, shows the base64-embedded captcha via
        *captcha_cb*, submits the form, honours the server countdown through
        *wait_cb*, and finally follows the redirect to the real file URL.
        Returns the updated item, or None when captcha input is cancelled or
        parsing fails.
        """
        item = item.copy()
        url = self._url(item['url'])
        item['surl'] = url
        data = util.request(url)
        link = re.search('<a class="stahnoutSoubor.+?href=\"([^\"]+)',data)
        if link:
            url = self._url(link.group(1))
            data = util.request(url)
            # captcha image is embedded in the page as a base64 data URI
            m = re.search('<img src=\"(?P<img>[^\"]+)\" alt=\"Captcha\"',data)
            cap_id = re.search('<input type=\"hidden\" name=\"_uid_captcha.+?value=\"(?P<cid>[^\"]+)',data)
            if m and cap_id:
                cid = cap_id.group('cid')
                # strip the 'data:...;base64,' prefix to get the raw payload
                img_data = m.group('img')[m.group('img').find('base64,')+7:]
                if not os.path.exists(self.tmp_dir):
                    os.makedirs(self.tmp_dir)
                tmp_image = os.path.join(self.tmp_dir,'captcha.png')
                util.save_data_to_file(base64.b64decode(img_data),tmp_image)
                code = captcha_cb({'id':cid,'img': tmp_image})
                if not code:
                    # user cancelled the captcha dialog
                    return
                data = util.post(url+'?do=stahnoutFreeForm-submit',{'_uid_captcha':cid,'captcha':code,'stahnoutSoubor':'Stáhnout'})
                # server-enforced wait before the final download link works
                countdown = re.search('shortly\.getSeconds\(\) \+ (\d+)',data)
                last_url = re.search('<a class=\"stahnoutSoubor2.+?href=\"([^\"]+)',data)
                if countdown and last_url:
                    wait = int(countdown.group(1))
                    url = self._url(last_url.group(1))
                    wait_cb(wait)
                    req = urllib2.Request(url)
                    req.add_header('User-Agent',util.UA)
                    resp = urllib2.urlopen(req)
                    # the redirected URL is the direct file location
                    item['url'] = resp.geturl()
                    resp.close()
                    return item

BIN
resources/lib/bezvadata.pyo Normal file

Binary file not shown.

149
resources/lib/fastshare.py Normal file
View File

@@ -0,0 +1,149 @@
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2012 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import sys
import urllib
#Python 2
try:
import cookielib
import urllib2
import urlparse
#import sys
reload(sys) # Reload does the trick!
sys.setdefaultencoding('UTF8')
#Python 3
except:
import http.cookiejar
cookielib = http.cookiejar
urllib2 = urllib.request
urlparse = urllib.parse
import re,random,util,os,traceback
from provider import ContentProvider
from provider import ResolveException
class FastshareContentProvider(ContentProvider):
    """Content provider scraping the fastshare.cz file-sharing site.

    Supports keyword search and resolving a file page into a direct
    download URL via the free-download captcha form.
    """

    def __init__(self, username=None, password=None, filter=None, tmp_dir='.'):
        # tmp_dir is where the downloaded captcha image will be stored
        ContentProvider.__init__(self, 'fastshare.cz', 'https://www.fastshare.cz/', username, password, filter, tmp_dir)
        # global opener with an in-memory cookie jar so the captcha session
        # survives across subsequent requests
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
        urllib2.install_opener(opener)

    def capabilities(self):
        # features this provider implements (queried by the plugin frontend)
        return ['search', 'resolve']

    def search(self, keyword):
        return self.list('?term=' + urlparse.quote(keyword))

    def list(self, url):
        """Scrape one page of search results into provider items.

        Appends a trailing 'next' directory item when the page links to a
        further results page.
        """
        result = []
        page = util.request(self._url(url))
        data = util.substr(page, '<div class="search', '<footer')
        for m in re.finditer('<div class="search-result-box(.+?)</a>', data, re.IGNORECASE | re.DOTALL):
            it = m.group(1)
            link = re.search('<a href=([^ ]+)', it, re.IGNORECASE | re.DOTALL)
            name = re.search('title="([^"]+)', it, re.IGNORECASE | re.DOTALL)
            img = re.search('<img src="([^"]+)', it, re.IGNORECASE | re.DOTALL)
            size = re.search('<div class="fs">([^<]+)', it, re.IGNORECASE | re.DOTALL)
            time = re.search('<div class="vd">([^<]+)', it, re.IGNORECASE | re.DOTALL)
            if name and link:
                item = self.video_item()
                item['title'] = name.group(1)
                if size:
                    item['size'] = size.group(1).strip()
                if time:
                    item['length'] = time.group(1).strip()
                item['url'] = self._url(link.group(1))
                # fix: img can be missing from an entry; the original
                # dereferenced it unconditionally and crashed on such items
                if img:
                    item['img'] = self._url(img.group(1))
                self._filter(result, item)
        next = re.search('<a href="(?P<url>[^"]+)[^>]+>dal', data, re.IGNORECASE | re.DOTALL)
        if next:
            item = self.dir_item()
            item['type'] = 'next'
            item['url'] = next.group('url')
            result.append(item)
        return result

    def resolve(self, item, captcha_cb=None, select_cb=None):
        """Resolve *item* into a direct download URL.

        Downloads the captcha image, asks *captcha_cb* for the code, submits
        the free-download form and follows the result; retries recursively on
        a wrong captcha. Raises ResolveException when the server reports an
        error; returns None on cancelled captcha or HTTP failure.
        """
        item = item.copy()
        util.init_urllib()
        url = self._url(item['url'])
        page = ''
        try:
            # bare OpenerDirector: we need to observe redirects/errors ourselves
            opener = urllib2.OpenerDirector()
            opener.add_handler(urllib2.HTTPHandler())
            opener.add_handler(urllib2.UnknownHandler())
            urllib2.install_opener(opener)
            request = urllib2.Request(url)
            request.add_header('User-Agent', util.UA)
            response = urllib2.urlopen(request)
            page = response.read()
            response.close()
        except urllib2.HTTPError:
            # fix: original said 'urllib2.HTTPErro' — an AttributeError was
            # raised instead of catching the HTTP failure
            traceback.print_exc()
            return
        data = util.substr(page, '<form method=post target="iframe_dwn"', '</form>')
        action = re.search('action=(?P<url>[^>]+)', data, re.IGNORECASE | re.DOTALL)
        img = re.search('<img src="(?P<url>[^"]+)', data, re.IGNORECASE | re.DOTALL)
        if img and action:
            sessid = []
            # fix: Set-Cookie may be absent — guard against None
            for cookie in re.finditer('(PHPSESSID=[^\;]+)', response.headers.get('Set-Cookie') or '', re.IGNORECASE | re.DOTALL):
                sessid.append(cookie.group(1))
            if not sessid:
                self.error('no session cookie received, cannot fetch captcha')
                return
            # we have to download the captcha image ourselves (session-bound)
            image = util.request(self._url(img.group('url')), headers={'Referer': url, 'Cookie': sessid[-1]})
            img_file = os.path.join(self.tmp_dir, 'captcha.png')
            util.save_data_to_file(image, img_file)
            code = None
            if captcha_cb:
                code = captcha_cb({'id': '0', 'img': img_file})
            if not code:
                self.info('No captcha received, exit')
                return
            # fix: urllib.urlencode does not exist on Python 3; use the
            # urlparse alias (urllib.parse there) and send the body as bytes
            request = urlparse.urlencode({'code': code}).encode('utf-8')
            req = urllib2.Request(self._url(action.group('url')), request)
            req.add_header('User-Agent', util.UA)
            req.add_header('Referer', url)
            req.add_header('Cookie', sessid[-1])
            try:
                resp = urllib2.urlopen(req)
                if resp.code == 302:
                    file_url = resp.headers.get('location')
                else:
                    file_url = resp.geturl()
                if file_url.find(action.group('url')) > 0:
                    # bounced back to the form — the server rejected the submit
                    msg = resp.read()
                    resp.close()
                    js_msg = re.search('alert\(\'(?P<msg>[^\']+)', msg, re.IGNORECASE | re.DOTALL)
                    if js_msg:
                        raise ResolveException(js_msg.group('msg'))
                    self.error(msg)
                    raise ResolveException('Nelze ziskat soubor, zkuste to znovu')
                resp.close()
                if file_url.find('data') >= 0 or file_url.find('download_free') > 0:
                    item['url'] = file_url
                    return item
                self.error('wrong captcha, retrying')
                return self.resolve(item, captcha_cb, select_cb)
            except urllib2.HTTPError:
                traceback.print_exc()
                return

BIN
resources/lib/fastshare.pyo Normal file

Binary file not shown.

193
resources/lib/hellspy.py Normal file
View File

@@ -0,0 +1,193 @@
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2012 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import sys
import urllib
#Python 2
try:
import cookielib
import urllib2
#import sys
reload(sys) # Reload does the trick!
sys.setdefaultencoding('UTF8')
#Python 3
except:
import http.cookiejar
cookielib = http.cookiejar
urllib2 = urllib.request
import re,random,util,os,traceback
import json, xbmcaddon
from base64 import b64decode
from provider import ContentProvider
# Kodi addon handle used to read user settings (e.g. the hellspy_adult flag below)
__scriptid__ = 'plugin.video.online-files'
__addon__ = xbmcaddon.Addon(id=__scriptid__)
# shortcut: __settings__('key') -> setting value as string
__settings__ = __addon__.getSetting
class HellspyContentProvider(ContentProvider):
    """Content provider scraping the hellspy.cz file-sharing site.

    Supports login, keyword search, category listing, resolving a file page
    into a direct download URL and adding files to the account's downloads.
    """

    def __init__(self,username=None,password=None,filter=None,site_url='https://hellspy.cz/'):
        # site_url is parameterized because hellspy has several localized mirrors
        ContentProvider.__init__(self,'hellspy.cz',site_url,username,password,filter)
        # global opener with an in-memory cookie jar so the login session
        # survives across subsequent requests
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
        urllib2.install_opener(opener)

    def capabilities(self):
        # features this provider implements (queried by the plugin frontend)
        return ['login','search','resolve','categories']

    def search(self,keyword):
        # NOTE(review): urllib.parse.quote is Python-3-only even though the
        # module header still carries a Python 2 fallback — confirm Py3-only use
        return self.list('search/?usrq='+urllib.parse.quote(keyword))

    def login(self):
        """Log in via the site's popup login form.

        Returns True when no credentials are configured (free account),
        when already logged in, or on successful form login; False otherwise.
        """
        if not self.username and not self.password:
            return True # fall back to free account
        elif self.username and self.password and len(self.username)>0 and len(self.password)>0:
            page = util.request(self.base_url+'?do=loginBox-loginpopup')
            # presence of the logout link means the session cookie is valid
            if page.find('href="/?do=loginBox-logout') > 0:
                self.info('Already logged in')
                return True
            data = util.substr(page,'<td class=\"popup-lef','</form')
            m = re.search('<form action=\"(?P<url>[^\"]+)',data,re.IGNORECASE | re.DOTALL)
            if m:
                login_url = self._url(m.group('url')).replace('&amp;','&')
                data = util.post(login_url,{'username':self.username,'password':self.password,'pernament_login':'on','login':'1','redir_url':self.base_url+'?do=loginBox-login'})
                if data.find('href="/?do=loginBox-logout') > 0:
                    return True
        return False

    def list_favourites(self,url):
        """Scrape the account's favourites listing (vertical file list)."""
        url = self._url(url)
        page = util.request(url)
        data = util.substr(page,'<div class=\"file-list file-list-vertical','<div id=\"layout-push')
        result = []
        for m in re.finditer('<div class=\"file-entry.+?<div class="preview.+?<div class=\"data.+?</div>',data, re.IGNORECASE|re.DOTALL):
            entry = m.group(0)
            item = self.video_item()
            murl = re.search('<[hH]3><a href=\"(?P<url>[^\"]+)[^>]+>(?P<name>[^<]+)',entry)
            item['url'] = murl.group('url')
            item['title'] = murl.group('name')
            mimg = re.search('<img src=\"(?P<img>[^\"]+)',entry)
            if mimg:
                item['img'] = mimg.group('img')
            msize = re.search('<span class=\"file-size[^>]+>(?P<size>[^<]+)',entry)
            if msize:
                item['size'] = msize.group('size').strip()
            mtime = re.search('<span class=\"duration[^>]+>(?P<time>[^<]+)',entry)
            if mtime:
                item['length'] = mtime.group('time').strip()
            self._filter(result,item)
        return result

    def list(self,url,filter=None):
        """Scrape one listing/search page (horizontal file list).

        Handles the adult-content confirmation dialog according to the
        'hellspy_adult' addon setting and appends prev/next paging items.
        """
        if url.find('ucet/favourites') >= 0:# and self.login():
            return self.list_favourites(url)
        url = self._url(url)
        util.init_urllib()
        page = util.request(url)
        # confirm (or decline) the adult-content dialog per addon setting
        adult = '0'
        if __settings__('hellspy_adult') == 'true':
            adult = '1'
        if page.find('adultWarn-') > 0:
            page = util.request(url + '&adultControl-state=' + adult + '&do=adultControl-confirmed')
        data = util.substr(page,'<div class=\"file-list file-list-horizontal','<div id=\"layout-push')
        result = []
        for m in re.finditer('<div class=\"file-entry.+?<div class="preview.+?<div class=\"data.+?</div>',data, re.IGNORECASE|re.DOTALL):
            entry = m.group(0)
            item = self.video_item()
            murl = re.search('<[hH]3><a href=\"(?P<url>[^\"]+)[^>]+>(?P<name>[^<]+)',entry)
            if murl:
                item['url'] = murl.group('url')
                item['title'] = murl.group('name')
            mimg = re.search('<img src=\"(?P<img>[^\"]+)',entry)
            if mimg:
                item['img'] = mimg.group('img')
            msize = re.search('<span class=\"file-size[^>]+>(?P<size>[^<]+)',entry)
            if msize:
                item['size'] = msize.group('size').strip()
            mtime = re.search('<span class=\"duration[^>]+>(?P<time>[^<]+)',entry)
            if mtime:
                item['length'] = mtime.group('time').strip()
            self._filter(result,item)
        # page navigation
        data = util.substr(page,'<div class=\"paginator','</div')
        mprev = re.search('<li class=\"prev[^<]+<a href=\"(?P<url>[^\"]+)',data)
        if mprev:
            item = self.dir_item()
            item['type'] = 'prev'
            item['url'] = mprev.group('url')
            result.append(item)
        mnext = re.search('<li class=\"next[^<]+<a href=\"(?P<url>[^\"]+)',data)
        if mnext:
            item = self.dir_item()
            item['type'] = 'next'
            item['url'] = mnext.group('url').replace('&amp;','&')
            result.append(item)
        return result

    def categories(self):
        """Scrape the site menu; only menu entries 2-4 are real categories."""
        result = []
        page = util.request(self.base_url)
        data = util.substr(page,'<div id=\"layout-menu','</div')
        index = 0
        for m in re.finditer('<a href=\"(?P<url>[^\"]+)[^>]+>(?P<title>[^<]+)',data):
            # skip the first link and anything past the third category
            if index > 0 and index <= 3:
                item = self.dir_item()
                item['title'] = m.group('title')
                item['url'] = m.group('url')
                result.append(item)
            index +=1
        return result

    def resolve(self,item,captcha_cb=None,select_cb=None):
        """Resolve *item* into a direct download URL.

        Appends download=1 to the file page URL and extracts the target from
        the launchFullDownload() call. Returns None when the file is gone.
        """
        item = item.copy()
        url = self._url(item['url'])
        # if not self.login():
        #     util.error('[hellspy] login failed, unable to resolve')
        if url.find('?') > 0:
            url+='&download=1'
        else:
            url+='?download=1'
        data = util.request(url)
        if data.find('Soubor nenalezen') > 0:
            util.error('[hellspy] - page with movie was not found on server')
            return
        m = re.search('launchFullDownload\(\'(?P<url>[^\']+)',data)
        if m:
            item['url'] = m.group('url')
            item['surl'] = url
            return item

    def to_downloads(self,url):
        """Add the file at *url* to the account's downloads (best effort)."""
        # if not self.login():
        #     util.error('[hellspy] login failed, unable to add to downloads')
        util.info('adding to downloads')
        try:
            util.request(self._url(url+'&do=downloadControl-favourite'))
        except urllib2.HTTPError:
            traceback.print_exc()
            util.error('[hellspy] failed to add to downloads')
            return
        util.info('added, DONE')

BIN
resources/lib/hellspy.pyo Normal file

Binary file not shown.

469
resources/lib/ulozto.py Normal file
View File

@@ -0,0 +1,469 @@
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2013 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import sys
import urllib
import ulozto_search
# Defaults for ulozto_search.search(), used by UloztoContentProvider.search()
query = "your_query"  # placeholder only; search() substitutes the real keyword
file_type = "documents|videos|images|archives|audios" # optional
kwargs = {
    "insecure": False, # disables SSL check, optional, default False
    "includeApproximate": False # also return approximate results
}
#Python 2
try:
import cookielib
import urllib2
#import sys
reload(sys) # Reload does the trick!
sys.setdefaultencoding('UTF8')
#Python 3
except:
import http.cookiejar
cookielib = http.cookiejar
urllib2 = urllib.request
import re,random,util,os,traceback
import json
from base64 import b64decode
from provider import ContentProvider
from provider import ResolveException
from provider import cached
def loadurl(url, req, headers=None):
    """POST the form dict *req* to *url* and return the response body.

    Returns the raw response bytes on HTTP 200, ``False`` when reading the
    response fails, and ``None`` for any other status code. Exceptions from
    opening the connection itself propagate to the caller (as before).
    """
    print(url)
    body = urllib.parse.urlencode(req).encode('utf-8')
    # fix: the original rebound the 'req' parameter twice; use distinct names
    if headers:
        request = urllib.request.Request(url, body, headers=headers)
    else:
        request = urllib.request.Request(url, body)
    with urllib.request.urlopen(request) as f:
        try:
            if f.getcode() == 200:
                return f.read()
        except OSError:
            # fix: was a bare 'except' that silently swallowed every error;
            # URLError and socket errors are all OSError subclasses
            return False
class UloztoContentProvider(ContentProvider):
    """Content provider for the uloz.to file-sharing service."""
    def __init__(self,username=None,password=None,filter=None):
        ContentProvider.__init__(self,'ulozto.cz','https://www.ulozto.cz/',username,password,filter)
        self.search_type=''
        # cookie jar + custom redirect handler shared by every request;
        # rh.throw/rh.location are used by resolve() to capture redirect targets
        self.cp = urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar())
        self.rh = UloztoHTTPRedirectHandler()
        self.rh.throw = False
        self.rh.location = None
        self.init_urllib()
    def init_urllib(self):
        # (re)install the global opener with our cookie/redirect handlers
        opener = urllib2.build_opener(self.cp,self.rh)
        urllib2.install_opener(opener)
    def capabilities(self):
        # features this provider implements (queried by the plugin frontend)
        return ['login','search','resolve','categories']
def categories(self):
    """Top-level entries: the user's favourites and home folder.

    Returns an empty list when login fails. Folder URLs are prefixed with
    '#fm#' so the file-manager code path can recognize them later.
    """
    result = []
    if not self.login():
        return result
    data = util.request(self.base_url+'m/'+self.username)
    fav = re.search('<li id=\"fmFavoritesFolder.+?href=\"(?P<url>[^\"]+)[^>]*>(?P<title>[^<]+)',data,re.IGNORECASE|re.DOTALL)
    if fav:
        item = self.dir_item()
        item['url'] = '#fm#'+fav.group('url')
        item['title'] = fav.group('title')
        result.append(item)
    myfiles = re.search('<a class=\"fmHomeFolder.+?href=\"(?P<url>[^\"]+)[^>]*>(?P<title>[^<]+)',data,re.IGNORECASE|re.DOTALL)
    if myfiles:
        item = self.dir_item()
        item['url'] = '#fm#' + myfiles.group('url')
        item['title'] = myfiles.group('title')
        result.append(item)
    return result
def search(self, keyword):
    """Search uloz.to via the external ulozto_search helper module.

    Uses the module-level file_type filter and kwargs options and feeds the
    raw hit list into list() for conversion to provider items.
    """
    #https://ulozto.sk/hledej?type=videos&q=matrix
    # brb = self.base_url+'hledej?'+self.search_type+'q='+urllib.parse.quote(keyword)
    # self.info (brb)
    # return self.list('https://ulozto.cz/hledej?'+self.search_type+'q='+urllib.parse.quote(keyword))
    query = keyword
    l = ulozto_search.search(query, 1, file_type, **kwargs)
    return self.list( l )
def login(self):
    """Report login success without actually logging in.

    The previous form-based login flow stopped working against the current
    uloz.to site and was disabled; the provider now proceeds as an anonymous
    user. The dead, commented-out implementation was removed — see version
    control history if it needs to be revived.
    """
    return True
def list_folder(self,url):
    """List one folder of the logged-in user's file manager.

    Returns directory items ('#fm#'-prefixed URLs) for sub-folders and video
    items for files.
    """
    self.login()
    result = []
    page = util.request(self._url(url))
    page = util.substr(page,'<div id=\"fmItems','</ul')
    # sub-folders
    for m in re.finditer('<div class=\"fmFolder(.+?)</em',page,re.IGNORECASE | re.DOTALL):
        data = m.group(1)
        item = self.dir_item()
        item['url'] = '#fm#' + re.search('data-href=\"([^\"]+)',data).group(1)
        item['title'] = re.search('data-name=\"([^\"]+)',data).group(1)
        item['img'] = re.search('<img src=\"([^\"]+)',data).group(1)
        result.append(item)
    # files (title is rebuilt from name + extension attributes)
    for m in re.finditer('<div class=\"fmFile(.+?)</em>',page,re.IGNORECASE | re.DOTALL):
        data = m.group(1)
        item = self.video_item()
        item['url'] = re.search('data-href=\"([^\"]+)',data).group(1)
        item['title'] = '%s.%s' % (re.search('data-name=\"([^\"]+)',data).group(1),re.search('data-ext=\"([^\"]+)',data).group(1))
        item['img'] = re.search('<img src=\"([^\"]+)',data).group(1)
        result.append(item)
    return result
@cached(1)
def list(self, url):
    """Convert ulozto_search results into provider video items.

    NOTE(review): despite its name, *url* is the list of result dicts
    returned by ulozto_search.search() — each entry carries 'name', 'link',
    'length' and 'size' keys. The parameter name is kept for interface
    compatibility with the other providers.
    """
    self.info(url)
    entries = url
    result = []
    for entry in entries:
        item = self.video_item()
        item['title'] = entry['name']
        item['url'] = entry['link']
        item['length'] = entry['length']
        item['size'] = entry['size']
        self._filter(result, item)
    # fix: the original built 'result' but never returned it (the
    # 'return result' line was commented out), so list() returned None and
    # every caller saw an empty listing. Dead commented-out scraping code
    # for the old HTML search was removed — see version control history.
    return result
@cached(48)
def decr_url(self,url):
    """Decrypt a '#'-prefixed URL via its remote decryption endpoint.

    The part after '#' is fetched; the JSON reply's base64-encoded 'result'
    field is the real URL. Non-prefixed URLs are returned unchanged.
    """
    if url.startswith('#'):
        ret = json.loads(util.request(url[1:]))
        #if ret.has_key('result'):
        if 'result' in ret.keys():
            # NOTE(review): on Python 3 b64decode returns bytes — confirm
            # that self._url() copes with a bytes argument
            url = b64decode(ret['result'])
            url = self._url(url)
    return url
def resolve(self, item, captcha_cb=None):
    """Resolve *item* into a direct stream URL.

    VIP accounts go through the quickDownload endpoint (the redirect target
    is captured by UloztoHTTPRedirectHandler); anonymous users go through
    the free-download captcha form via _get_file_url_anonymous(). Returns
    the updated item, or None on failure.
    """
    item = item.copy()
    url = item['url']
    if url.startswith('http://www.ulozto.sk'):
        # normalize the Slovak mirror onto the configured base URL
        url = self.base_url + url[20:]
    url = self.decr_url(url)
    url = self._url(url)
    if url.startswith('#'):
        util.error('[uloz.to] - url was not correctly decoded')
        return
    self.init_urllib()
    self.login()
    self.info('Resolving %s' % url)
    if not 'vip' in item.keys():
        item['vip'] = False
    vip = item['vip']
    if vip:
        page = util.request(url)
    else:
        try:
            request = urllib2.Request(url)
            response = urllib2.urlopen(request)
            page = response.read().decode('utf-8')
            response.close()
        except urllib2.HTTPError:
            traceback.print_exc()
            return
    self.info(page)
    if page.find('Stránka nenalezena!') > 0:
        self.error('page with movie was not found on server')
        return
    if vip:
        url = self._url('quickDownload/' + 'e2ZbEck4nD1E')
        self.info(url)
        if url:
            try:
                # ask our redirect handler to raise instead of following
                self.rh.throw = True
                resp = urllib2.urlopen(urllib2.Request(url))
            except RedirectionException:
                # this is what we need, our redirect handler raises this
                pass
            except urllib2.HTTPError:
                # this is not OK, something went wrong
                traceback.print_exc()
                self.error('Cannot resolve stream url, server did not redirected us')
                # fix: the original logged the undefined name 'post_url'
                # here, raising NameError inside this error path
                self.info('POST url:' + url)
                return
            stream = self.rh.location
            item['url'] = self._fix_stream_url(stream)
            item['surl'] = url
            return item
    else:
        m = re.search('data-href=\"(/download-dialog/free/download\?fileSlug=\S+)\"', page)
        self.info(m)
        if m:
            self.info(m.group(1))
            self.rh.throw = True
            stream_url = self._get_file_url_anonymous(page, self._url(m.group(1)), response.headers, captcha_cb)
            if stream_url:
                item['url'] = stream_url
                # free ulozto allows seeking but doesn't allow multiple connections.
                # kodi opens a second connection when seeking is possible, which
                # breaks playback; the special 'seekable' header disables seeking
                # so only one connection is used - see
                # http://forum.kodi.tv/showthread.php?tid=236411
                item['headers'] = {'seekable': '0'}
                item['surl'] = url
                return item
def _get_file_url_anonymous(self, page, post_url, headers, captcha_cb):
    """Submit the free-download captcha form and return the stream URL.

    *page* is the HTML of the file page, *post_url* the form submit URL and
    *headers* the response headers of the page request (source of the
    session cookie). Returns None on cancelled captcha or parse/HTTP
    failure; raises ResolveException when the server rejects the captcha.
    """
    # fetch the captcha descriptor (image + sound + server-side hash/salt)
    data = util.request(self._url('reloadXapca.php'))
    capdata = json.loads(data)
    captcha = capdata['image']
    if not captcha.startswith('http'):
        captcha = 'http:' + captcha
    sound = capdata['sound']
    if not sound.startswith('http'):
        sound = 'http:' + sound
    # ask callback to provide captcha code
    self.info('Asking for captcha img %s' % captcha)
    code = captcha_cb({'id': captcha, 'img': captcha, 'snd': sound})
    if not code:
        self.info('Captcha not provided, done')
        return
    self.info(page)
    ts = re.search('<input type=\"hidden\" name=\"ts\".+?value=\"([^\"]+)"', page, re.IGNORECASE | re.DOTALL)
    cid = re.search('<input type=\"hidden\" name=\"cid\".+?value=\"([^\"]+)"', page, re.IGNORECASE | re.DOTALL)
    sign = re.search('<input type=\"hidden\" name=\"sign\".+?value=\"([^\"]+)"', page, re.IGNORECASE | re.DOTALL)
    sign_a = re.search('<input type=\"hidden\" name=\"sign_a\".+?value=\"([^\"]+)"', page, re.IGNORECASE | re.DOTALL)
    has = capdata['hash']
    salt = capdata['salt']
    timestamp = capdata['timestamp']
    token = re.search('<input type=\"hidden\" name=\"_token_\".+?value=\"([^\"]+)"', page, re.IGNORECASE | re.DOTALL)
    # fix: sign_a was missing from this guard although it is dereferenced below
    if not (sign and sign_a and ts and cid and has and token):
        util.error('[uloz.to] - unable to parse required params from page, plugin needs fix')
        return
    request = {
        'captcha_type': 'xapca',
        'hash': has,
        'salt': salt,
        'timestamp': timestamp,
        'ts': ts.group(1),
        'cid': '',
        'sign': sign.group(1),
        'sign_a': sign_a.group(1),
        'captcha_value': code,
        '_do': 'download-freeDownloadTab-freeDownloadForm-submit',
        '_token_': token.group(1),
        'adi': 'f'
    }
    # fix: urllib.urlencode does not exist on Python 3; use urllib.parse
    # (already used elsewhere in this module) and send the body as bytes
    req = urllib2.Request(post_url, urllib.parse.urlencode(request).encode('utf-8'))
    req.add_header('User-Agent', util.UA)
    req.add_header('Referer', post_url)
    req.add_header('Accept', 'application/json')
    req.add_header('X-Requested-With', 'XMLHttpRequest')
    sessid = []
    # fix: Set-Cookie may be absent — guard against None
    for cookie in re.finditer('(ULOSESSID=[^\;]+)', headers.get('Set-Cookie') or '', re.IGNORECASE | re.DOTALL):
        sessid.append(cookie.group(1))
    if not sessid:
        util.error('[uloz.to] - no session cookie, cannot submit captcha')
        return
    req.add_header('Cookie', 'nomobile=1; uloztoid=' + cid.group(1) + '; uloztoid2=' + cid.group(1) + '; ' + sessid[-1])
    util.info(req.headers)
    util.info(request)
    try:
        resp = urllib2.urlopen(req)
        page = resp.read()
        headers = resp.headers
    except urllib2.HTTPError:
        # this is not OK, something went wrong
        traceback.print_exc()
        util.error('[uloz.to] cannot resolve stream url, server did not redirected us')
        util.info('[uloz.to] POST url:' + post_url)
        return
    try:
        result = json.loads(page)
    except ValueError:
        # fix: was a bare except; json.loads failures are ValueError
        raise ResolveException('Unexpected error, addon needs fix')
    if not 'status' in result.keys():
        raise ResolveException('Unexpected error, addon needs fix')
    if result['status'] == 'ok':
        return self._fix_stream_url(result['url'])
    elif result['status'] == 'error':
        # the only known state is wrong captcha for now
        util.error('Captcha validation failed, please try playing/downloading again')
        util.error(result)
        raise ResolveException('Captcha failed, try again')
def _fix_stream_url(self, stream):
    """Percent-encode the filename component of *stream*.

    Only the last path segment is quoted; an optional query string after
    '?' is left untouched. URLs without a '/' are returned unchanged.
    """
    index = stream.rfind('/')
    if index > 0:
        fn = stream[index:]
        index2 = fn.find('?')
        if index2 > 0:
            # quote only the path part, keep the query string as-is
            # fix: urllib.quote does not exist on Python 3; this module
            # already uses urllib.parse elsewhere
            fn = urllib.parse.quote(fn[:index2]) + fn[index2:]
        else:
            fn = urllib.parse.quote(fn)
        stream = stream[:index] + fn
    return stream
def _regex(url):
return re.search('(#(.*)|ulozto\.cz|uloz\.to)',url,re.IGNORECASE | re.DOTALL)
class UloztoHTTPRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that can be armed (``self.throw = True``) to capture
    a 302 target instead of following it.

    When armed, the Location header is stored in ``self.location`` and
    RedirectionException is raised so the caller can pick the URL up;
    otherwise the default redirect behaviour applies.
    """
    def http_error_302(self, req, fp, code, msg, headers):
        if self.throw:
            # Fix: headers.getheader() is Python-2-only; .get() works on both
            # the py2 rfc822.Message and the py3 http.client.HTTPMessage.
            self.location = headers.get('Location')
            raise RedirectionException()
        else:
            return urllib2.HTTPRedirectHandler.http_error_302(self, req, fp, code, msg, headers)
class RedirectionException(Exception):
    """Signals that UloztoHTTPRedirectHandler intercepted a 302 redirect."""

BIN
resources/lib/ulozto.pyo Normal file

Binary file not shown.

View File

@@ -0,0 +1,203 @@
# *
# * ulozto-search
# * https://github.com/ZDVokoun/ulozto-search
# * MIT License
# *
# *
from math import floor
from requests import get as getRequest
#from urllib.request import Request as getRequest
from json import loads as loadjson
from bs4 import BeautifulSoup
from urllib.parse import unquote as unquoteURI
import re
#import urllib
#getRequest("https://uloz.to/hledej", params=queryPayload, verify=not(insecure), headers={"X-Requested-With": "XMLHttpRequest"})
# def getRequest(url, params, verify, headers):
# req = params
# class d:
# def __init__(self, text):
# self.text = text.decode('utf-8')
# print (type(text))
# req = urllib.parse.urlencode(req).encode('utf-8')
# req = urllib.request.Request(url, req, headers=headers)
# with urllib.request.urlopen(req) as f:
# try:
# if f.getcode() == 200:
# response = f.read()
# return d(response)
# except:
# return None
class _decoder:
    """Decryptor for uloz.to's obfuscated search-result payload.

    The constant tables match Blowfish's standard pi-derived P-array
    (numberSet1) and the four S-boxes (numberSet2..numberSet5); the
    constructor key-schedules them from *inputKey*, and decode() then
    decrypts hex-encoded 64-bit blocks.  Ported from the site's JavaScript.
    """
    def __init__(self, inputKey):
        """Run the key schedule: fold *inputKey* (at most 56 chars used)
        into the P-array, then regenerate the P-array and all four S-boxes
        by repeated encryption of the running (initially zero) block."""
        # Blowfish P-array: 18 x 32-bit subkeys.
        self.numberSet1 = [608135816, 2242054355, 320440878, 57701188, 2752067618, 698298832, 137296536, 3964562569, 1160258022, 953160567, 3193202383, 887688300, 3232508343, 3380367581, 1065670069, 3041331479, 2450970073, 2306472731];
        # S-boxes (256 entries each).  NOTE(review): a few entries appear as
        # float literals (e.g. 290971e4, 3786409e3) — copied verbatim from the
        # JS source; xorUnsigned() coerces with int(), so they behave as ints.
        self.numberSet2 = [3509652390, 2564797868, 805139163, 3491422135, 3101798381, 1780907670, 3128725573, 4046225305, 614570311, 3012652279, 134345442, 2240740374, 1667834072, 1901547113, 2757295779, 4103290238, 227898511, 1921955416, 1904987480, 2182433518, 2069144605, 3260701109, 2620446009, 720527379, 3318853667, 677414384, 3393288472, 3101374703, 2390351024, 1614419982, 1822297739, 2954791486, 3608508353, 3174124327, 2024746970, 1432378464, 3864339955, 2857741204, 1464375394, 1676153920, 1439316330, 715854006, 3033291828, 289532110, 2706671279, 2087905683, 3018724369, 1668267050, 732546397, 1947742710, 3462151702, 2609353502, 2950085171, 1814351708, 2050118529, 680887927, 999245976, 1800124847, 3300911131, 1713906067, 1641548236, 4213287313, 1216130144, 1575780402, 4018429277, 3917837745, 3693486850, 3949271944, 596196993, 3549867205, 258830323, 2213823033, 772490370, 2760122372, 1774776394, 2652871518, 566650946, 4142492826, 1728879713, 2882767088, 1783734482, 3629395816, 2517608232, 2874225571, 1861159788, 326777828, 3124490320, 2130389656, 2716951837, 967770486, 1724537150, 2185432712, 2364442137, 1164943284, 2105845187, 998989502, 3765401048, 2244026483, 1075463327, 1455516326, 1322494562, 910128902, 469688178, 1117454909, 936433444, 3490320968, 3675253459, 1240580251, 122909385, 2157517691, 634681816, 4142456567, 3825094682, 3061402683, 2540495037, 79693498, 3249098678, 1084186820, 1583128258, 426386531, 1761308591, 1047286709, 322548459, 995290223, 1845252383, 2603652396, 3431023940, 2942221577, 3202600964, 3727903485, 1712269319, 422464435, 3234572375, 1170764815, 3523960633, 3117677531, 1434042557, 442511882, 3600875718, 1076654713, 1738483198, 4213154764, 2393238008, 3677496056, 1014306527, 4251020053, 793779912, 2902807211, 842905082, 4246964064, 1395751752, 1040244610, 2656851899, 3396308128, 445077038, 3742853595, 3577915638, 679411651, 2892444358, 2354009459, 1767581616, 3150600392, 3791627101, 3102740896, 284835224, 4246832056, 1258075500, 768725851, 2589189241, 3069724005, 3532540348, 1274779536, 3789419226, 2764799539, 1660621633, 3471099624, 4011903706, 913787905, 3497959166, 737222580, 2514213453, 2928710040, 3937242737, 1804850592, 3499020752, 2949064160, 2386320175, 2390070455, 2415321851, 4061277028, 2290661394, 2416832540, 1336762016, 1754252060, 3520065937, 3014181293, 791618072, 3188594551, 3933548030, 2332172193, 3852520463, 3043980520, 413987798, 3465142937, 3030929376, 4245938359, 2093235073, 3534596313, 375366246, 2157278981, 2479649556, 555357303, 3870105701, 2008414854, 3344188149, 4221384143, 3956125452, 2067696032, 3594591187, 2921233993, 2428461, 544322398, 577241275, 1471733935, 610547355, 4027169054, 1432588573, 1507829418, 2025931657, 3646575487, 545086370, 48609733, 2200306550, 1653985193, 298326376, 1316178497, 3007786442, 2064951626, 458293330, 2589141269, 3591329599, 3164325604, 727753846, 2179363840, 146436021, 1461446943, 4069977195, 705550613, 3059967265, 3887724982, 4281599278, 3313849956, 1404054877, 2845806497, 146425753, 1854211946];
        self.numberSet3 = [1266315497, 3048417604, 3681880366, 3289982499, 290971e4, 1235738493, 2632868024, 2414719590, 3970600049, 1771706367, 1449415276, 3266420449, 422970021, 1963543593, 2690192192, 3826793022, 1062508698, 1531092325, 1804592342, 2583117782, 2714934279, 4024971509, 1294809318, 4028980673, 1289560198, 2221992742, 1669523910, 35572830, 157838143, 1052438473, 1016535060, 1802137761, 1753167236, 1386275462, 3080475397, 2857371447, 1040679964, 2145300060, 2390574316, 1461121720, 2956646967, 4031777805, 4028374788, 33600511, 2920084762, 1018524850, 629373528, 3691585981, 3515945977, 2091462646, 2486323059, 586499841, 988145025, 935516892, 3367335476, 2599673255, 2839830854, 265290510, 3972581182, 2759138881, 3795373465, 1005194799, 847297441, 406762289, 1314163512, 1332590856, 1866599683, 4127851711, 750260880, 613907577, 1450815602, 3165620655, 3734664991, 3650291728, 3012275730, 3704569646, 1427272223, 778793252, 1343938022, 2676280711, 2052605720, 1946737175, 3164576444, 3914038668, 3967478842, 3682934266, 1661551462, 3294938066, 4011595847, 840292616, 3712170807, 616741398, 312560963, 711312465, 1351876610, 322626781, 1910503582, 271666773, 2175563734, 1594956187, 70604529, 3617834859, 1007753275, 1495573769, 4069517037, 2549218298, 2663038764, 504708206, 2263041392, 3941167025, 2249088522, 1514023603, 1998579484, 1312622330, 694541497, 2582060303, 2151582166, 1382467621, 776784248, 2618340202, 3323268794, 2497899128, 2784771155, 503983604, 4076293799, 907881277, 423175695, 432175456, 1378068232, 4145222326, 3954048622, 3938656102, 3820766613, 2793130115, 2977904593, 26017576, 3274890735, 3194772133, 1700274565, 1756076034, 4006520079, 3677328699, 720338349, 1533947780, 354530856, 688349552, 3973924725, 1637815568, 332179504, 3949051286, 53804574, 2852348879, 3044236432, 1282449977, 3583942155, 3416972820, 4006381244, 1617046695, 2628476075, 3002303598, 1686838959, 431878346, 2686675385, 1700445008, 1080580658, 1009431731, 832498133, 3223435511, 2605976345, 2271191193, 2516031870, 1648197032, 4164389018, 2548247927, 300782431, 375919233, 238389289, 3353747414, 2531188641, 2019080857, 1475708069, 455242339, 2609103871, 448939670, 3451063019, 1395535956, 2413381860, 1841049896, 1491858159, 885456874, 4264095073, 4001119347, 1565136089, 3898914787, 1108368660, 540939232, 1173283510, 2745871338, 3681308437, 4207628240, 3343053890, 4016749493, 1699691293, 1103962373, 3625875870, 2256883143, 3830138730, 1031889488, 3479347698, 1535977030, 4236805024, 3251091107, 2132092099, 1774941330, 1199868427, 1452454533, 157007616, 2904115357, 342012276, 595725824, 1480756522, 206960106, 497939518, 591360097, 863170706, 2375253569, 3596610801, 1814182875, 2094937945, 3421402208, 1082520231, 3463918190, 2785509508, 435703966, 3908032597, 1641649973, 2842273706, 3305899714, 1510255612, 2148256476, 2655287854, 3276092548, 4258621189, 236887753, 3681803219, 274041037, 1734335097, 3815195456, 3317970021, 1899903192, 1026095262, 4050517792, 356393447, 2410691914, 3873677099, 3682840055];
        self.numberSet4 = [3913112168, 2491498743, 4132185628, 2489919796, 1091903735, 1979897079, 3170134830, 3567386728, 3557303409, 857797738, 1136121015, 1342202287, 507115054, 2535736646, 337727348, 3213592640, 1301675037, 2528481711, 1895095763, 1721773893, 3216771564, 62756741, 2142006736, 835421444, 2531993523, 1442658625, 3659876326, 2882144922, 676362277, 1392781812, 170690266, 3921047035, 1759253602, 3611846912, 1745797284, 664899054, 1329594018, 3901205900, 3045908486, 2062866102, 2865634940, 3543621612, 3464012697, 1080764994, 553557557, 3656615353, 3996768171, 991055499, 499776247, 1265440854, 648242737, 3940784050, 980351604, 3713745714, 1749149687, 3396870395, 4211799374, 3640570775, 1161844396, 3125318951, 1431517754, 545492359, 4268468663, 3499529547, 1437099964, 2702547544, 3433638243, 2581715763, 2787789398, 1060185593, 1593081372, 2418618748, 4260947970, 69676912, 2159744348, 86519011, 2512459080, 3838209314, 1220612927, 3339683548, 133810670, 1090789135, 1078426020, 1569222167, 845107691, 3583754449, 4072456591, 1091646820, 628848692, 1613405280, 3757631651, 526609435, 236106946, 48312990, 2942717905, 3402727701, 1797494240, 859738849, 992217954, 4005476642, 2243076622, 3870952857, 3732016268, 765654824, 3490871365, 2511836413, 1685915746, 3888969200, 1414112111, 2273134842, 3281911079, 4080962846, 172450625, 2569994100, 980381355, 4109958455, 2819808352, 2716589560, 2568741196, 3681446669, 3329971472, 1835478071, 660984891, 3704678404, 4045999559, 3422617507, 3040415634, 1762651403, 1719377915, 3470491036, 2693910283, 3642056355, 3138596744, 1364962596, 2073328063, 1983633131, 926494387, 3423689081, 2150032023, 4096667949, 1749200295, 3328846651, 309677260, 2016342300, 1779581495, 3079819751, 111262694, 1274766160, 443224088, 298511866, 1025883608, 3806446537, 1145181785, 168956806, 3641502830, 3584813610, 1689216846, 3666258015, 3200248200, 1692713982, 2646376535, 4042768518, 1618508792, 1610833997, 3523052358, 4130873264, 2001055236, 3610705100, 2202168115, 4028541809, 2961195399, 1006657119, 2006996926, 3186142756, 1430667929, 3210227297, 1314452623, 4074634658, 4101304120, 2273951170, 1399257539, 3367210612, 3027628629, 1190975929, 2062231137, 2333990788, 2221543033, 2438960610, 1181637006, 548689776, 2362791313, 3372408396, 3104550113, 3145860560, 296247880, 1970579870, 3078560182, 3769228297, 1714227617, 3291629107, 3898220290, 166772364, 1251581989, 493813264, 448347421, 195405023, 2709975567, 677966185, 3703036547, 1463355134, 2715995803, 1338867538, 1343315457, 2802222074, 2684532164, 233230375, 2599980071, 2000651841, 3277868038, 1638401717, 4028070440, 3237316320, 6314154, 819756386, 300326615, 590932579, 1405279636, 3267499572, 3150704214, 2428286686, 3959192993, 3461946742, 1862657033, 1266418056, 963775037, 2089974820, 2263052895, 1917689273, 448879540, 3550394620, 3981727096, 150775221, 3627908307, 1303187396, 508620638, 2975983352, 2726630617, 1817252668, 1876281319, 1457606340, 908771278, 3720792119, 3617206836, 2455994898, 1729034894, 1080033504];
        self.numberSet5 = [976866871, 3556439503, 2881648439, 1522871579, 1555064734, 1336096578, 3548522304, 2579274686, 3574697629, 3205460757, 3593280638, 3338716283, 3079412587, 564236357, 2993598910, 1781952180, 1464380207, 3163844217, 3332601554, 1699332808, 1393555694, 1183702653, 3581086237, 1288719814, 691649499, 2847557200, 2895455976, 3193889540, 2717570544, 1781354906, 1676643554, 2592534050, 3230253752, 1126444790, 2770207658, 2633158820, 2210423226, 2615765581, 2414155088, 3127139286, 673620729, 2805611233, 1269405062, 4015350505, 3341807571, 4149409754, 1057255273, 2012875353, 2162469141, 2276492801, 2601117357, 993977747, 3918593370, 2654263191, 753973209, 36408145, 2530585658, 25011837, 3520020182, 2088578344, 530523599, 2918365339, 1524020338, 1518925132, 3760827505, 3759777254, 1202760957, 3985898139, 3906192525, 674977740, 4174734889, 2031300136, 2019492241, 3983892565, 4153806404, 3822280332, 352677332, 2297720250, 60907813, 90501309, 3286998549, 1016092578, 2535922412, 2839152426, 457141659, 509813237, 4120667899, 652014361, 1966332200, 2975202805, 55981186, 2327461051, 676427537, 3255491064, 2882294119, 3433927263, 1307055953, 942726286, 933058658, 2468411793, 3933900994, 4215176142, 1361170020, 2001714738, 2830558078, 3274259782, 1222529897, 1679025792, 2729314320, 3714953764, 1770335741, 151462246, 3013232138, 1682292957, 1483529935, 471910574, 1539241949, 458788160, 3436315007, 1807016891, 3718408830, 978976581, 1043663428, 3165965781, 1927990952, 4200891579, 2372276910, 3208408903, 3533431907, 1412390302, 2931980059, 4132332400, 1947078029, 3881505623, 4168226417, 2941484381, 1077988104, 1320477388, 886195818, 18198404, 3786409e3, 2509781533, 112762804, 3463356488, 1866414978, 891333506, 18488651, 661792760, 1628790961, 3885187036, 3141171499, 876946877, 2693282273, 1372485963, 791857591, 2686433993, 3759982718, 3167212022, 3472953795, 2716379847, 445679433, 3561995674, 3504004811, 3574258232, 54117162, 3331405415, 2381918588, 3769707343, 4154350007, 1140177722, 4074052095, 668550556, 3214352940, 367459370, 261225585, 2610173221, 4209349473, 3468074219, 3265815641, 314222801, 3066103646, 3808782860, 282218597, 3406013506, 3773591054, 379116347, 1285071038, 846784868, 2669647154, 3771962079, 3550491691, 2305946142, 453669953, 1268987020, 3317592352, 3279303384, 3744833421, 2610507566, 3859509063, 266596637, 3847019092, 517658769, 3462560207, 3443424879, 370717030, 4247526661, 2224018117, 4143653529, 4112773975, 2788324899, 2477274417, 1456262402, 2901442914, 1517677493, 1846949527, 2295493580, 3734397586, 2176403920, 1280348187, 1908823572, 3871786941, 846861322, 1172426758, 3287448474, 3383383037, 1655181056, 3139813346, 901632758, 1897031941, 2986607138, 3066810236, 3447102507, 1393639104, 373351379, 950779232, 625454576, 3124240540, 4148612726, 2007998917, 544563296, 2244738638, 2330496472, 2058025392, 1291430526, 424198748, 50039436, 29584100, 3605783033, 2429876329, 2791104160, 1057563949, 3255363231, 3075367218, 3463963227, 1469046755, 985887462];
        if len(str(inputKey)) == 0:
            # NOTE(review): raising a string is a TypeError on Python 3;
            # should be e.g. ValueError('empty key'). Left unchanged here.
            raise "error"
        # Use at most the first 56 characters of the key (Blowfish limit).
        key = inputKey[0:56] if len(inputKey) > 56 else inputKey
        n = 0
        # Fold the key into the P-array, 4 key bytes (one 32-bit word) at a
        # time, cycling over the key as needed.
        for i in range(18):
            r = ord(key[n])
            for index in range(1,4):
                r = 256 * r + ord(key[(n + index) % len(key)])
            self.numberSet1[i] = self.xorUnsigned(self.numberSet1[i], r)
            n = (n + 4) % len(key)
        # Regenerate P-array and S-boxes by repeatedly encrypting the running
        # (first, second) block, starting from all zeros — standard schedule.
        self.first, self.second = 0, 0
        for i in range(0,18,2):
            self.multipleRound()
            self.numberSet1[i] = self.first
            self.numberSet1[i + 1] = self.second
        for i in range(0,256,2):
            self.multipleRound()
            self.numberSet2[i] = self.first
            self.numberSet2[i + 1] = self.second
        for i in range(0,256,2):
            self.multipleRound()
            self.numberSet3[i] = self.first
            self.numberSet3[i + 1] = self.second
        for i in range(0,256,2):
            self.multipleRound()
            self.numberSet4[i] = self.first
            self.numberSet4[i + 1] = self.second
        for i in range(0,256,2):
            self.multipleRound()
            self.numberSet5[i] = self.first
            self.numberSet5[i + 1] = self.second
    def modulosAfterDivision (self, n):
        """Return the four base-256 digits (bytes) of *n*, least-significant
        byte first."""
        array = []
        for i in range(4):
            array.append(floor(n / (256**i)) % 256)
        return array
    def xorUnsigned(self,x,y):
        """XOR *x* and *y* as unsigned 32-bit values.

        The int() coercion also absorbs the float entries present in the
        constant tables; a negative result is wrapped into unsigned range.
        """
        result = int(x)^int(y)
        if result < 0:
            result += (1 << 32)
        return result
    def partialDecode (self, string):
        """Turn a hex string into raw characters (two hex digits -> chr).

        Accepts digits 0-9 and letters a-f / A-F.
        """
        result = ""
        for i in range(0, len(string), 2):
            x,y = ord(string[i]), ord(string[i + 1])
            # Map ASCII '0'-'9' / 'a'-'f' / 'A'-'F' to their nibble values.
            x -= 48 if x < 58 else (87 if x > 96 else 55)
            y -= 48 if y < 58 else (87 if y > 96 else 55)
            result += chr(16 * x + y)
        return result
    def decode (self, encoded):
        """Decrypt the hex-encoded ciphertext *encoded*; return plaintext.

        Processes 16 hex digits (one 64-bit block) per iteration: each
        32-bit half is packed big-endian, run through the inverse rounds,
        re-hex-dumped, and the accumulated hex is finally decoded to chars.
        """
        # NOTE(review): this appends len(encoded) % 16 extra '0' characters,
        # which only completes a block when the remainder is 0 or 8; kept
        # as-is to mirror the upstream JavaScript implementation.
        for i in range(len(encoded) % 16):
            encoded += "0"
        string = ""
        for i in range(0, len(encoded), 16):
            a = self.partialDecode(encoded[i:i+8])
            b = self.partialDecode(encoded[i+8:i+16])
            # Pack four chars big-endian into each 32-bit half.  The x < 0
            # guards mirror the JS source; ord() never yields negatives here.
            x = (ord(a[3])) | (ord(a[2]) << 8) | (ord(a[1]) << 16) | (ord(a[0]) << 24)
            x += 1 << 32 if x < 0 else 0
            y = ord(b[3]) | (ord(b[2]) << 8) | (ord(b[1]) << 16) | (ord(b[0]) << 24)
            y += 1 << 32 if y < 0 else 0
            self.first = x
            self.second = y
            self.reverseMultipleRound()
            string += self.toHexadecimal(self.first) + self.toHexadecimal(self.second)
        return self.partialDecode(string)
    def toHexadecimal (self, n):
        """Render the 32-bit word *n* as 8 uppercase hex digits, big-endian."""
        string, modulos = "", self.modulosAfterDivision(n)
        for i in range (3,-1, -1):
            x = floor(modulos[i] / 16)
            y = modulos[i] % 16
            # 48 = '0', 55 offsets 10..15 to 'A'..'F'.
            x += 48 if x < 10 else 55
            y += 48 if y < 10 else 55
            string += chr(x) + chr(y)
        return string
    def round (self,x,y,i):
        """One Feistel round: run *y* through the S-boxes (the F function),
        mix in subkey *i*, and XOR the result into *x*."""
        modulos = self.modulosAfterDivision(y)
        a = self.xorUnsigned(self.numberSet2[modulos[3]] + self.numberSet3[modulos[2]], self.numberSet4[modulos[1]])
        b = self.xorUnsigned(a + self.numberSet5[modulos[0]], self.numberSet1[i])
        result = self.xorUnsigned(x,b)
        return result
    def multipleRound (self):
        """Encrypt-direction pass over (self.first, self.second) using
        subkeys 0..17; used by the key schedule in __init__."""
        x = self.xorUnsigned(self.first, self.numberSet1[0])
        y = self.second
        for i in range(1,17):
            if i % 2 == 0:
                x = self.round(x,y,i)
            else:
                y = self.round(y,x,i)
        y = self.xorUnsigned(y, self.numberSet1[17])
        # Halves are swapped on output, as the cipher requires.
        self.first = y
        self.second = x
    def reverseMultipleRound (self):
        """Decrypt-direction pass: same structure as multipleRound() but
        with the subkeys applied in reverse order (17..0)."""
        x = self.xorUnsigned(self.first, self.numberSet1[17])
        y = self.second
        for i in range(16,0,-1):
            if i % 2 == 1:
                x = self.round(x,y,i)
            else:
                y = self.round(y,x,i)
        y = self.xorUnsigned(y, self.numberSet1[0])
        self.first = y
        self.second = x
def searchHTML(query, page=1, fileType="", *, insecure=False, includeApproximate=False):
    """Query the uloz.to search endpoint and return the decrypted result
    HTML snippets as a list of strings.

    An empty list means the site answered with a "flash" notice (no usable
    results).  *fileType* is forwarded only when it is one of the known
    category names; *insecure* disables TLS verification.
    """
    payload = {"q": query, "page": page}
    known_types = ("documents", "videos", "images", "archives", "audios")
    if fileType in known_types:
        payload["type"] = fileType
    response = getRequest(
        "https://uloz.to/hledej",
        params=payload,
        verify=not(insecure),
        headers={"X-Requested-With": "XMLHttpRequest"},
    )
    # A "flash" notice in the returned markup means no usable results.
    if re.search('class=[\\\]"flash', response.text) is not None:
        return []
    # The payload is embedded in a pg.push(...) call inside a script tag:
    # element [0] is a dict of encrypted snippets, element [1] the key.
    res = loadjson(response.json()["items"].split("pg.push(")[1].split(");\n</script>")[0])
    decoder = _decoder(res[1])
    decoded_items = []
    for item_key in res[0].keys():
        raw = decoder.decode(res[0][item_key])
        # Escaped bytes come back as \xNN; turn them into %NN and URL-unquote
        # to restore the original diacritics.
        escaped = raw.encode('ascii', 'backslashreplace').decode()
        decoded_items.append(unquoteURI(re.sub("\\\\x", "%", escaped)))
    return decoded_items
def search(query, page=1, fileType="", *, insecure=False, includeApproximate=False):
    """Search uloz.to and return a list of result dicts.

    Each dict carries 'name', 'link' (absolute URL), 'length' (duration
    text or None when the item has none) and 'size' (human-readable text).
    All parameters are forwarded to searchHTML(); an empty list means no
    results.

    Fixes over the original: removed the unused local ``item_len`` and the
    commented-out debug clutter, and added guards so malformed result
    markup is skipped instead of raising AttributeError/IndexError.
    """
    html = "".join(searchHTML(query, page, fileType, insecure=insecure, includeApproximate=includeApproximate))
    soup = BeautifulSoup(html, "html.parser")
    results = []
    for result in soup.select(".js-result-item"):
        filenameEl = result.find("a", class_="js-file-name")
        info = result.find("div", class_="info")
        if filenameEl is None or info is None:
            # Defensive: skip items whose markup lacks the expected elements.
            continue
        spans = info.find_all('span', "")
        if not spans:
            continue
        # Two spans mean "<length> <size>"; a single span carries only size.
        info_length = None
        if len(spans) == 2:
            info_length = spans[0].text
            info_size = spans[1].text
        else:
            info_size = spans[0].text
        results.append({"name": filenameEl.text.strip(),
                        "link": "https://uloz.to" + filenameEl["href"],
                        "length": info_length,
                        "size": info_size,
                        })
    return results
# def search_for_kodi(query, fileType="", *, insecure=False, includeApproximate=False):
# html = "".join(searchHTML(query, fileType, insecure=insecure, includeApproximate=includeApproximate))
# # soup = BeautifulSoup(html, "html.parser")
# return html

164
resources/lib/webshare.py Normal file
View File

@@ -0,0 +1,164 @@
# -*- coding: UTF-8 -*-
#/*
# * Copyright (C) 2013 Libor Zoubek
# *
# *
# * This Program is free software; you can redistribute it and/or modify
# * it under the terms of the GNU General Public License as published by
# * the Free Software Foundation; either version 2, or (at your option)
# * any later version.
# *
# * This Program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; see the file COPYING. If not, write to
# * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# * http://www.gnu.org/copyleft/gpl.html
# *
# */
import re,random,util,sys,os,traceback,hashlib
import urllib
#Python 2
try:
import cookielib
import urllib2
import urlparse
import elementtree.ElementTree as ET
#import sys
# reload(sys) # Reload does the trick!
# sys.setdefaultencoding('UTF8')
#Python 3
except:
import http.cookiejar
cookielib = http.cookiejar
urllib2 = urllib.request
urlparse = urllib.parse
# urllib = urllib.parse
import xml.etree.ElementTree as ET
from provider import ContentProvider
from provider import ResolveException
from crypto.md5crypt import md5crypt
def loadurl(url, req, headers):
    """POST the form dict *req* to *url* and return the response body (bytes).

    Returns None on any failure (network error, HTTP error status, or a
    non-200 answer) so callers can treat a falsy result as "request failed".

    Fixes over the original: urlopen() itself is now inside the try (HTTP
    and URL errors previously escaped to the caller because only read()
    was guarded), the bare ``except`` is narrowed to Exception with the
    traceback logged, and the failure value is consistently None instead
    of sometimes False and sometimes an implicit None.
    """
    data = urllib.parse.urlencode(req).encode('utf-8')
    request = urllib.request.Request(url, data, headers=headers)
    try:
        with urllib.request.urlopen(request) as f:
            if f.getcode() == 200:
                return f.read()
    except Exception:
        traceback.print_exc()
        return None
    # Reached on a non-200 status.
    return None
class WebshareContentProvider(ContentProvider):
    """Content provider for webshare.cz.

    Implements token-based login, search and stream-URL resolving on top
    of the Webshare XML API endpoints (api/salt, api/login, api/search,
    api/file_link).
    """
    def __init__(self,username=None,password=None,filter=None,tmp_dir='.'):
        """Set up the provider and install a cookie-aware global opener."""
        ContentProvider.__init__(self,'webshare.cz','https://webshare.cz/',username,password,filter,tmp_dir)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.LWPCookieJar()))
        urllib2.install_opener(opener)
        # Session token filled in by login(); empty string = anonymous use.
        self.token = ''
    def capabilities(self):
        """Advertise the operations this provider supports."""
        return ['login', 'search', 'resolve']
    def search(self, keyword):
        """Search webshare.cz for *keyword*; returns list() items."""
        return self.list('what=%s'%urllib.parse.quote(keyword))
    def _create_request(self, url, base):
        """Build (headers, request-dict) for an XML API call.

        *url* is a query string whose values override the matching keys of
        the defaults dict *base*; keys not present in *base* are ignored.
        """
        args = dict(urlparse.parse_qsl(url))
        headers = {'X-Requested-With':'XMLHttpRequest','Accept':'text/xml; charset=UTF-8','Referer':self.base_url}
        req = base.copy()
        for key in req:
            if key in args.keys():
                req[key] = args[key]
        return headers, req
    def login(self):
        """Log in to webshare.cz and store the session token.

        With no credentials configured, returns True (free/anonymous
        account).  Otherwise fetches the account salt, derives the
        sha1(md5crypt(password, salt)) hash plus an md5 digest, posts them
        to api/login and keeps the returned token in self.token.
        Returns False on any server-side error.
        """
        if not self.username and not self.password:
            return True # fall back to free account
        elif self.username and self.password and len(self.username)>0 and len(self.password)>0:
            self.info('Login user=%s, pass=*****' % (self.username))
            # get salt
            headers,req = self._create_request('',{'username_or_email':self.username})
            data = util.post(self._url('api/salt/'),req,headers=headers)
            xml = ET.fromstring(data)
            if not xml.find('status').text == 'OK':
                self.error('Server returned error status, response: %s' % data)
                return False
            salt = xml.find('salt').text
            # create hashes
            password = hashlib.sha1(md5crypt(self.password, salt).encode('utf-8')).hexdigest()
            digest = hashlib.md5((self.username + ':Webshare:' + self.password).encode('utf-8')).hexdigest()
            # login
            headers,req = self._create_request('',{'username_or_email':self.username,'password':password,'digest':digest,'keep_logged_in':1})
            data = util.post(self._url('api/login/'),req,headers=headers)
            xml = ET.fromstring(data)
            if not xml.find('status').text == 'OK':
                self.error('Server returned error status, response: %s' % data)
                return False
            self.token = xml.find('token').text
            self.info('Login successfull')
            return True
        return False
    def list(self, url):
        """POST a search to api/search and return the parsed result items.

        *url* is a query string merged over the default paging parameters
        (offset 0, limit 25).  Each <file> element becomes a video item;
        when more results remain, a 'next' directory item carrying the
        next offset is appended.  Returns [] on a server error.
        """
        self.info(' ==== list ==========')
        result = []
        headers, req = self._create_request( url, {'what':'','offset':0,'limit':25,'category':'','sort':'','wst':self.token} )
        self.info(' ==== headers ==========' )
        self.info(headers)
        self.info(type(headers))
        self.info(' ==== req ==========')
        self.info(req)
        self.info(type(req))
        data = loadurl(self._url('api/search/'), req, headers=headers)
        # with urllib.request.urlopen(req, data=data) as f:
        #     resp = f.read()
        self.info(data)
        xml = ET.fromstring(data)
        if not xml.find('status').text == 'OK':
            self.error('Server returned error status, response: %s' % data)
            return []
        total = int(xml.find('total').text)
        for file in xml.findall('file'):
            item = self.video_item()
            item['title'] = file.find('name').text
            # 'ident=<id>' is later consumed by resolve() via _create_request.
            item['url'] = 'ident=%s' % file.find('ident').text
            size = int(file.find('size').text)
            item['size'] = '%d MB' % (int(size)/1024/1024)
            img = file.find('img').text
            if img:
                item['img'] = self._url(img)
            self._filter(result,item)
        listed = int(req['limit']) + int(req['offset'])
        if total > listed:
            req['offset'] = listed
            item = self.dir_item()
            item['type'] = 'next'
            item['url'] = urllib.parse.urlencode(req)
            result.append(item)
        return result
    def resolve(self,item,captcha_cb=None,select_cb=None):
        """Resolve a listed item's 'ident=...' URL into a direct stream link.

        Posts to api/file_link and replaces item['url'] with the returned
        link.  Raises ResolveException with the server message on error.
        The captcha/select callbacks are accepted for interface
        compatibility but unused here.
        """
        item = item.copy()
        util.init_urllib()
        headers, req = self._create_request(item['url'],{'ident':'','wst':self.token})
        #data = util.post(self._url('api/file_link/'),req,headers=headers)
        data = loadurl(self._url('api/file_link/'), req, headers=headers)
        self.info(data)
        xml = ET.fromstring(data)
        if not xml.find('status').text == 'OK':
            self.error('Server returned error status, response: %s' % data)
            raise ResolveException(xml.find('message').text)
        item['url'] = xml.find('link').text
        return item

BIN
resources/lib/webshare.pyo Normal file

Binary file not shown.