# iptv345.py
# -*- coding: utf-8 -*-
# @Author : Doubebly
# @Time : 2025/3/23 21:55
import base64
import sys
import time
import json
import requests
import re  # added import of the re module (currently unused)
sys.path.append('..')

from base.spider import Spider
from bs4 import BeautifulSoup
class Spider(Spider):
    def getName(self):
        return "Litv"

    def init(self, extend):
        self.extend = extend
        try:
            self.extendDict = json.loads(extend)
        except (TypeError, ValueError):
            self.extendDict = {}
        proxy = self.extendDict.get('proxy', None)
        if proxy is None:
            self.proxy = None  # keep the attribute defined so get_ts can always pass it
            self.is_proxy = False
        else:
            self.proxy = proxy
            self.is_proxy = True

    def getDependence(self):
        return []

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass
    def liveContent(self, url):
        channel_list = ["#EXTM3U"]
        try:
            base_url = "https://iptv345.com/"
            # "label,tid" pairs (央视 = CCTV, 卫视 = satellite TV, 综合 = general,
            # 体育 = sports, 电影 = movies, 其他 = other)
            fenlei = ["央视,ys", "卫视,ws", "综合,itv", "体育,ty", "电影,movie", "其他,other"]
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36'
            }
            for group in fenlei:
                try:
                    group_name, group_id = group.split(",")
                    api_url = f"{base_url.rstrip('/')}/?tid={group_id}"
                    response = requests.get(api_url, headers=headers, timeout=10)
                    response.raise_for_status()
                    soup = BeautifulSoup(response.text, 'html.parser')
                    ul_tag = soup.find('ul', {
                        'data-role': 'listview',
                        'data-inset': 'true',
                        'data-divider-theme': 'a'
                    })
                    if not ul_tag:
                        print(f"Warning: channel list for category {group_name} not found")
                        continue
                    for li in ul_tag.find_all('li'):
                        a_tag = li.find('a')
                        if not a_tag:
                            continue
                        channel_path = a_tag.get('href', '').strip()
                        if not channel_path:
                            continue
                        full_url = requests.compat.urljoin(base_url, channel_path)
                        name = a_tag.text.strip()
                        m3u_entry = (
                            f'#EXTINF:-1 tvg-id="{name}" '
                            f'tvg-name="{name}" '
                            f'tvg-logo="https://logo.doube.eu.org/{name}.png" '
                            f'group-title="{group_name}",{name}\n'
                            f'video://{full_url}'
                        )
                        channel_list.append(m3u_entry)
                except requests.exceptions.RequestException as e:
                    print(f"Request failed for category {group_name}: {str(e)}")
                except Exception as e:
                    print(f"Error while processing category {group_name}: {str(e)}")
        except Exception as e:
            print(f"Top-level exception: {str(e)}")
        return '\n'.join(channel_list)
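    # For reference, one generated playlist entry looks like this (the channel
    # name and href are illustrative placeholders, not taken from a live page):
    #
    #   #EXTINF:-1 tvg-id="CCTV1" tvg-name="CCTV1" tvg-logo="https://logo.doube.eu.org/CCTV1.png" group-title="央视",CCTV1
    #   video://https://iptv345.com/...
    #
    # The video:// scheme is a hint for the host player; the path after it is
    # whatever href the site page carried.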
    def homeContent(self, filter):
        return {}

    def homeVideoContent(self):
        return {}

    def categoryContent(self, cid, page, filter, ext):
        return {}

    def detailContent(self, did):
        return {}

    def searchContent(self, key, quick, page='1'):
        return {}

    def searchContentPage(self, keywords, quick, page):
        return {}

    def playerContent(self, flag, pid, vipFlags):
        return {}
    def localProxy(self, params):
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        if params['type'] == "ts":
            return self.get_ts(params)
        return [302, "text/plain", None, {'Location': 'https://sf1-cdn-tos.huoshanstatic.com/obj/media-fe/xgplayer_doc_video/mp4/xgplayer-demo-720p.mp4'}]
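    # The host app is expected to route local URLs of the form
    #   http://127.0.0.1:9978/proxy?do=py&type=ts&url=<base64>
    # to localProxy (that exact shape is what proxyM3u8 below emits);
    # type=m3u8 with a pid parameter presumably follows the same pattern.
    # Anything else falls through to the 302 redirect to a demo MP4.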
    def proxyM3u8(self, params):
        # pid carries three comma-separated ids that fill the CDN URL template below
        pid = params['pid']
        info = pid.split(',')
        a = info[0]
        b = info[1]
        c = info[2]
        timestamp = int(time.time() / 4 - 355017625)  # current 4-second segment index
        t = timestamp * 4
        m3u8_text = f'#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X-TARGETDURATION:4\n#EXT-X-MEDIA-SEQUENCE:{timestamp}\n'
        for i in range(10):
            url = f'https://ntd-tgc.cdn.hinet.net/live/pool/{a}/litv-pc/{a}-avc1_6000000={b}-mp4a_134000_zho={c}-begin={t}0000000-dur=40000000-seq={timestamp}.ts'
            if self.is_proxy:
                # wrap the segment URL so the host fetches it via get_ts (and self.proxy)
                url = f'http://127.0.0.1:9978/proxy?do=py&type=ts&url={self.b64encode(url)}'
            m3u8_text += f'#EXTINF:4,\n{url}\n'
            timestamp += 1
            t += 4
        return [200, "application/vnd.apple.mpegurl", m3u8_text]
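    # Note on the constants above: 4 * 355017625 = 1420070500, close to the
    # Unix time of 2015-01-01, so `timestamp` is presumably the index of the
    # current 4-second segment counted from that origin, and `t` its start
    # offset in seconds. begin={t}0000000 appends seven zeros (t * 10^7), and
    # dur=40000000 is 4 s in the same units, which suggests the CDN counts in
    # 100-nanosecond ticks. Ten segments make a rolling 40-second live window.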
    def get_ts(self, params):
        url = self.b64decode(params['url'])
        headers = {'User-Agent': 'Mozilla/5.0'}
        response = requests.get(url, headers=headers, stream=True, proxies=self.proxy)
        return [206, "application/octet-stream", response.content]

    def destroy(self):
        return 'Destroying'

    def b64encode(self, data):
        return base64.b64encode(data.encode('utf-8')).decode('utf-8')

    def b64decode(self, data):
        return base64.b64decode(data.encode('utf-8')).decode('utf-8')
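
# A minimal usage sketch, assuming a TVBox-style host supplies base.spider;
# the extend JSON and the pid below are hypothetical placeholders:
#
#   spider = Spider()
#   spider.init('{"proxy": {"http": "http://127.0.0.1:7890", "https": "http://127.0.0.1:7890"}}')
#   playlist = spider.liveContent('')  # M3U text with video:// channel links
#   status, mime, m3u8 = spider.proxyM3u8({'pid': 'channel,videoId,audioId'})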
if __name__ == '__main__':
    # Standalone smoke test of the channel-list scraping (duplicates the
    # liveContent logic without the Spider host).
    base_url = "https://iptv345.com/"
    fenlei = ["央视,ys", "卫视,ws", "综合,itv", "体育,ty", "电影,movie", "其他,other"]
    channel_list = []
    for i in fenlei:
        group_name, group_id = i.split(",")
        api_url = f"{base_url.rstrip('/')}/?tid={group_id}"
        response = requests.get(api_url)
        if response.status_code == 200:
            print("Request succeeded!")
            # print(response.text)  # dump the raw page for debugging
            soup = BeautifulSoup(response.text, 'html.parser')
            # locate the target <ul> by its data attributes
            ul_tag = soup.find('ul', {
                'data-role': 'listview',
                'data-inset': 'true',
                'data-divider-theme': 'a'
            })
            if not ul_tag:
                continue
            for li in ul_tag.find_all('li'):
                a_tag = li.find('a')
                if a_tag:
                    # resolve relative links against the site root
                    channel_url = base_url.rstrip('/') + '/' + a_tag['href'].lstrip('/')
                    channel_list.append(f"{a_tag.text.strip()},{channel_url}")
        else:
            print("Request failed, status code:", response.status_code)
    # print the collected "name,url" pairs
    for channel in channel_list:
        print(channel)