from spider import Spider, SpiderItemType, SpiderSource, SpiderDanmaku, SpiderItem, SpiderPlayURL
from proxy import get_proxy_url
from urllib.parse import urlparse
from danmaku import get_danmaku_url
import requests
import hashlib
import time
import re
import json
import base64
import urllib
import difflib
import threading
from bs4 import BeautifulSoup
from utils import get_image_path
import xbmcaddon
_ADDON = xbmcaddon.Addon()
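# Query parameters that pose as the official Android client of the kunyu77
# API; `devid` is a throwaway device id derived from the current time.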
base_params = {
'pcode': '010110005',
'version': '2.0.5',
'devid': hashlib.md5(str(time.time()).encode()).hexdigest(),
'sys': 'android',
'sysver': 11,
'brand': 'google',
'model': 'Pixel_3_XL',
'package': 'com.sevenVideo.app.android'
}
base_headers = {
'User-Agent': 'okhttp/3.12.0',
}
class Spideryingshi(Spider):
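    """影视 (yingshi) data source: scrapes beiwo360.com listings and the
    kunyu77 mobile API ('pf' == 'qq') for episode details."""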
def name(self):
return '影视'
def logo(self):
return get_image_path('yingshi.png')
def hide(self):
return not _ADDON.getSettingBool('data_source_yingshi_switch')
def is_searchable(self):
return True
def list_items(self, parent_item=None, page=1):
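        """Return (items, has_next_page) for the given parent item and page."""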
if parent_item is None:
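            # Top level: the four fixed category directories.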
            items = []
            for cid, cname in (('dianying', '电影'), ('lianxuju', '剧集'),
                               ('dongman', '动漫'), ('zongyi', '综艺')):
                items.append(
                    SpiderItem(
                        type=SpiderItemType.Directory,
                        id=cid,
                        name=cname,
                        params={
                            'type': 'category',
                            'pf': 'bw'
                        },
                    ))
            return items, False
elif parent_item['params']['type'] == 'category':
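            # Category listing: scrape one page of the beiwo360.com video list.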
items = []
            url = 'https://beiwo360.com/bs/{0}/page/{1}/'.format(parent_item['id'], page)
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36"
}
r = requests.get(url, headers=header)
soup = BeautifulSoup(r.text, 'html.parser')
data = soup.select('ul.myui-vodlist.clearfix > li')
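            # The mobile pagination element reads "current/total"; keep the total.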
maxpage = int(soup.select('ul.myui-page.text-center.clearfix > li.visible-xs')[0].get_text().split('/')[-1])
for video in data:
vid = video.select('a')[0].get('href')
cover = video.select('a')[0].get('data-original')
name = video.select('a')[0].get('title').strip()
                remark = video.select('a > span.pic-text.text-right')[0].get_text().strip().replace(' ', '|')
items.append(
SpiderItem(
type=SpiderItemType.Directory,
name='{0}/[{1}]'.format(name, remark),
id=vid,
cover=cover,
params={
'type': 'video',
'pf': 'bw'
},
))
            has_next_page = page < maxpage
            return items, has_next_page
elif parent_item['params']['type'] == 'video':
if parent_item['params']['pf'] == 'qq':
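                # kunyu77 endpoints expect a timestamp ('t' header and 'sj' param)
                # plus a per-URL TK signature computed by self._get_tk.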
ts = int(time.time())
params = base_params.copy()
params['ids'] = parent_item['id']
params['sj'] = ts
headers = base_headers.copy()
headers['t'] = str(ts)
url = 'http://api.kunyu77.com/api.php/provide/videoDetail'
headers['TK'] = self._get_tk(url, params, ts)
r = requests.get(url, params=params, headers=headers)
detail = r.json()['data']
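                # The playlist endpoint reuses the same params but needs its own TK.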
url = 'http://api.kunyu77.com/api.php/provide/videoPlaylist'
headers['TK'] = self._get_tk(url, params, ts)
r = requests.get(url, params=params, headers=headers)
episodes = r.json()['data']['episodes']
items = []
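                # Build one playable item per episode; each upstream player
                # becomes a selectable source.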
for episode in episodes:
sources = []
danmakus = []
for playurl in episode['playurls']:
sources.append(
SpiderSource(
playurl['playfrom'],
{
'playfrom': playurl['playfrom'],
'url': playurl['playurl'],
'pf': 'qq'
},
))
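                        # Only build danmaku (bullet comment) URLs for platforms
                        # that get_danmaku_url knows how to handle.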
if playurl['playfrom'] in [
'qq', 'mgtv', 'qiyi', 'youku', 'bilibili'
]:
danmakus.append(
SpiderDanmaku(
playurl['playfrom'],
get_danmaku_url(playurl['playurl']),
)
)
items.append(
SpiderItem(
type=SpiderItemType.File,
name=episode['title'].strip(),
cover=detail['videoCover'],
                            description=detail['brief'].replace('\u3000', '').replace('<p>', '').replace('</p>', '').strip(),
cast=detail['actor'].replace(' ', '').split('/'),
director=detail['director'],
area=detail['area'].strip(),
year=int(detail['year'].strip()),
sources=sources,
danmakus=danmakus,
))
return items, False
            elif parent_item['params']['pf'] == 'bw':
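                # beiwo360 detail page: metadata is scraped straight from the HTML.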
items = []
url = 'https://beiwo360.com{}'.format(parent_item['id'])
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Referer": 'https://beiwo360.com/'
}
r = requests.get(url, headers=header)
soup = BeautifulSoup(r.text, 'html.parser')
cover = parent_item['cover']
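                # Paragraphs in the synopsis are separated by doubled full-width spaces.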
desc = soup.select('div.col-pd.text-collapse.content > span.data')[0].get_text().strip().replace('\u3000\u3000', '\n')
            infos = soup.select('div.myui-content__detail > p.data')
            # Fallback defaults in case a detail row is missing from the page.
            dire = ''
            cast = []
            area = ''
            year = ''
            for info in infos:
                content = info.get_text()
                if content.startswith('导演'):
                    dire = content.strip()[3:].strip('\xa0')
                elif content.startswith('主演'):
                    cast = content.strip()[3:].strip('\xa0').split('\xa0')
                elif content.startswith('分类'):
                    # The row runs its fields together ("分类:…地区:…年份:…"),
                    # so the slice drops the "年份" label glued onto the area value.
                    area = content.strip().split(':')[2][:-2]
                    year = content.strip().split(':')[3]