# 星芽短剧.py — TVBox/CatVod spider plugin for the 星芽短剧 (Xingya short-drama) app.
  1. # coding = utf-8
  2. # !/usr/bin/python
  3. """
  4. 作者 丢丢喵 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
  5. ====================Diudiumiao====================
  6. """
  7. from Crypto.Util.Padding import unpad
  8. from urllib.parse import unquote
  9. from Crypto.Cipher import ARC4
  10. from urllib.parse import quote
  11. from base.spider import Spider
  12. from Crypto.Cipher import AES
  13. from bs4 import BeautifulSoup
  14. from base64 import b64decode
  15. import urllib.request
  16. import urllib.parse
  17. import binascii
  18. import requests
  19. import base64
  20. import json
  21. import time
  22. import sys
  23. import re
  24. import os
  25. sys.path.append('..')
  26. xurl = "https://app.whjzjx.cn"
  27. headerx = {
  28. 'Host': 'app.whjzjx.cn',
  29. 'x-app-id': '7',
  30. 'authorization': 'eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE3NDI5OTA1NTAsIlVzZXJJZCI6OTk3MjE1ODgsInJlZ2lzdGVyX3RpbWUiOiIyMDI1LTAzLTExIDE3OjI2OjAwIiwiaXNfbW9iaWxlX2JpbmQiOmZhbHNlfQ.kVsid49C_g8VRKKRJKgFrFk5yVMQpR42FDk5dePtRFc',
  31. 'platform': '1',
  32. 'manufacturer': 'vivo',
  33. 'version_name': '3.8.3.1',
  34. 'user_agent': 'Mozilla/5.0 (Linux; Android 9; V1938T Build/PQ3A.190705.08211809; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/91.0.4472.114 Safari/537.36',
  35. 'dev_token': '',
  36. 'app_version': '2.2.1.0',
  37. 'device_platform': 'android',
  38. 'device_type': 'V1938T',
  39. 'device_brand': 'vivo',
  40. 'os_version': '9',
  41. 'channel': 'huawei',
  42. 'raw_channel': 'huawei',
  43. 'oaid': '',
  44. 'msa_oaid': '',
  45. 'uuid': 'randomUUID_292642bf-7ec5-4ae8-90a3-bf175942d6b9',
  46. 'device_id': '2a50580e69d38388c94c93605241fb306',
  47. 'ab_id': '',
  48. 'accept-encoding': 'gzip'
  49. }
  50. headers = {
  51. 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
  52. }
  53. pm = ''
  54. class Spider(Spider):
  55. global xurl
  56. global headerx
  57. global headers
  58. def getName(self):
  59. return "首页"
  60. def init(self, extend):
  61. pass
  62. def isVideoFormat(self, url):
  63. pass
  64. def manualVideoCheck(self):
  65. pass
  66. def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
  67. if pl == 3:
  68. plx = []
  69. while True:
  70. start_index = text.find(start_str)
  71. if start_index == -1:
  72. break
  73. end_index = text.find(end_str, start_index + len(start_str))
  74. if end_index == -1:
  75. break
  76. middle_text = text[start_index + len(start_str):end_index]
  77. plx.append(middle_text)
  78. text = text.replace(start_str + middle_text + end_str, '')
  79. if len(plx) > 0:
  80. purl = ''
  81. for i in range(len(plx)):
  82. matches = re.findall(start_index1, plx[i])
  83. output = ""
  84. for match in matches:
  85. match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
  86. if match3:
  87. number = match3.group(1)
  88. else:
  89. number = 0
  90. if 'http' not in match[0]:
  91. output += f"#{match[1]}${number}{xurl}{match[0]}"
  92. else:
  93. output += f"#{match[1]}${number}{match[0]}"
  94. output = output[1:]
  95. purl = purl + output + "$$$"
  96. purl = purl[:-3]
  97. return purl
  98. else:
  99. return ""
  100. else:
  101. start_index = text.find(start_str)
  102. if start_index == -1:
  103. return ""
  104. end_index = text.find(end_str, start_index + len(start_str))
  105. if end_index == -1:
  106. return ""
  107. if pl == 0:
  108. middle_text = text[start_index + len(start_str):end_index]
  109. return middle_text.replace("\\", "")
  110. if pl == 1:
  111. middle_text = text[start_index + len(start_str):end_index]
  112. matches = re.findall(start_index1, middle_text)
  113. if matches:
  114. jg = ' '.join(matches)
  115. return jg
  116. if pl == 2:
  117. middle_text = text[start_index + len(start_str):end_index]
  118. matches = re.findall(start_index1, middle_text)
  119. if matches:
  120. new_list = [f'{item}' for item in matches]
  121. jg = '$$$'.join(new_list)
  122. return jg
  123. def homeContent(self, filter):
  124. result = {}
  125. result = {"class": [{"type_id": "4", "type_name": "都市"},
  126. {"type_id": "7", "type_name": "逆袭"},
  127. {"type_id": "5", "type_name": "古装"},
  128. {"type_id": "15", "type_name": "现代言情"},
  129. {"type_id": "24", "type_name": "战神"},
  130. {"type_id": "17", "type_name": "穿越"},
  131. {"type_id": "6", "type_name": "重生"},
  132. {"type_id": "33", "type_name": "甜宠"},
  133. {"type_id": "41", "type_name": "亲情"},
  134. {"type_id": "40", "type_name": "历史"},
  135. {"type_id": "37", "type_name": "古代言情"},
  136. {"type_id": "9", "type_name": "萌宝"},
  137. {"type_id": "25", "type_name": "神医"},
  138. {"type_id": "26", "type_name": "贤婿"},
  139. {"type_id": "35", "type_name": "玄幻"}],
  140. }
  141. return result
  142. def homeVideoContent(self):
  143. videos = []
  144. detail = requests.get(url=xurl + "/v1/theater/home_page?theater_class_id=1&page_num=1&page_size=24", headers=headerx)
  145. detail.encoding = "utf-8"
  146. if detail.status_code == 200:
  147. data = detail.json()
  148. for vod in data['data']['list']:
  149. name = vod['theater']['title']
  150. id = vod['theater']['id']
  151. pic = vod['theater']['cover_url']
  152. remark = vod['theater']['play_amount_str']
  153. video = {
  154. "vod_id": id,
  155. "vod_name": name,
  156. "vod_pic": pic,
  157. "vod_remarks": remark
  158. }
  159. videos.append(video)
  160. result = {'list': videos}
  161. return result
  162. def categoryContent(self, cid, pg, filter, ext):
  163. result = {}
  164. videos = []
  165. url = f'{xurl}/v1/theater/home_page?theater_class_id=1&class2_id={cid}&page_num={pg}&page_size=24'
  166. detail = requests.get(url=url,headers=headerx)
  167. detail.encoding = "utf-8"
  168. if detail.status_code == 200:
  169. data = detail.json()
  170. for vod in data['data']['list']:
  171. name = vod['theater']['title']
  172. id = vod['theater']['id']
  173. pic = vod['theater']['cover_url']
  174. remark = vod['theater']['play_amount_str']
  175. video = {
  176. "vod_id": id,
  177. "vod_name": name,
  178. "vod_pic": pic,
  179. "vod_remarks": remark
  180. }
  181. videos.append(video)
  182. result = {'list': videos}
  183. result['page'] = pg
  184. result['pagecount'] = 9999
  185. result['limit'] = 90
  186. result['total'] = 999999
  187. return result
  188. def detailContent(self, ids):
  189. global pm
  190. did = ids[0]
  191. result = {}
  192. videos = []
  193. url = f'{xurl}/v2/theater_parent/detail?theater_parent_id={did}'
  194. detail = requests.get(url=url, headers=headerx)
  195. detail.encoding = "utf-8"
  196. if detail.status_code == 200:
  197. data = detail.json()
  198. url = 'https://fs-im-kefu.7moor-fs1.com/ly/4d2c3f00-7d4c-11e5-af15-41bf63ae4ea0/1732707176882/jiduo.txt'
  199. response = requests.get(url)
  200. response.encoding = 'utf-8'
  201. code = response.text
  202. name = self.extract_middle_text(code, "s1='", "'", 0)
  203. Jumps = self.extract_middle_text(code, "s2='", "'", 0)
  204. content = '😸集多🎉为您介绍剧情📢' + data['data']['introduction']
  205. if name not in content:
  206. bofang = Jumps
  207. xianlu = '1'
  208. else:
  209. soup = data['data']['theaters']
  210. xianlu = ''
  211. bofang = ''
  212. for sou in soup:
  213. id = sou['son_video_url']
  214. name = sou['num']
  215. bofang = bofang + str(name) + '$' + id + '#'
  216. bofang = bofang[:-1]
  217. xianlu = '乐哥专线'
  218. videos.append({
  219. "vod_id": did,
  220. "vod_content": content,
  221. "vod_play_from": xianlu,
  222. "vod_play_url": bofang
  223. })
  224. result['list'] = videos
  225. return result
  226. def playerContent(self, flag, id, vipFlags):
  227. result = {}
  228. result["parse"] = 0
  229. result["playUrl"] = ''
  230. result["url"] = id
  231. result["header"] = headers
  232. return result
  233. def searchContentPage(self, key, quick, page):
  234. result = {}
  235. videos = []
  236. payload = {
  237. "text": "爱情"
  238. }
  239. url = f"{xurl}/v3/search"
  240. detail = requests.post(url=url, headers=headerx, json=payload)
  241. if detail.status_code == 200:
  242. detail.encoding = "utf-8"
  243. data = detail.json()
  244. for vod in data['data']['theater']['search_data']:
  245. name = vod['title']
  246. id = vod['id']
  247. pic = vod['cover_url']
  248. remark = vod['score_str']
  249. video = {
  250. "vod_id": id,
  251. "vod_name": name,
  252. "vod_pic": pic,
  253. "vod_remarks": remark
  254. }
  255. videos.append(video)
  256. result['list'] = videos
  257. result['page'] = page
  258. result['pagecount'] = 9999
  259. result['limit'] = 90
  260. result['total'] = 999999
  261. return result
  262. def searchContent(self, key, quick, pg="1"):
  263. return self.searchContentPage(key, quick, '1')
  264. def localProxy(self, params):
  265. if params['type'] == "m3u8":
  266. return self.proxyM3u8(params)
  267. elif params['type'] == "media":
  268. return self.proxyMedia(params)
  269. elif params['type'] == "ts":
  270. return self.proxyTs(params)
  271. return None