DengDai
2025-12-08 14:47:24 +08:00
commit 644b5aaaf8
21 changed files with 1543 additions and 0 deletions

15 pt_gen/services/cache.py Normal file

@@ -0,0 +1,15 @@
import redis.asyncio as redis
from typing import Optional


class RedisCache:
    """Thin async wrapper around redis-py's asyncio client, used as a response cache."""

    def __init__(self, host: str, port: int, db: int):
        # decode_responses=True makes get() return str instead of bytes.
        self.client = redis.Redis(host=host, port=port, db=db, decode_responses=True)

    async def get(self, key: str) -> Optional[str]:
        return await self.client.get(key)

    async def set(self, key: str, value: str, ttl: int):
        # SETEX stores the value with a time-to-live of `ttl` seconds.
        await self.client.setex(key, ttl, value)

    async def close(self):
        await self.client.close()
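
A minimal usage sketch, assuming a Redis server on localhost:6379 (the key name is made up for illustration):

import asyncio

async def main():
    cache = RedisCache(host="localhost", port=6379, db=0)
    await cache.set("douban:1292052", "cached description", ttl=3600)  # hypothetical key
    print(await cache.get("douban:1292052"))
    await cache.close()

asyncio.run(main())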

126 pt_gen/services/douban.py Normal file

@@ -0,0 +1,126 @@
# pt_gen/services/douban.py
import re
import httpx
from bs4 import BeautifulSoup, NavigableString
from typing import Optional, Dict, List
class DoubanScraper:
def __init__(self, cookie: Optional[str] = None):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8'
}
if cookie:
self.headers['Cookie'] = cookie
    def _get_info_text(self, soup_tag: BeautifulSoup, label: str) -> Optional[str]:
        """
        Helper that looks up a field in the #info block by its label text (e.g. '导演' for director).
        It finds the <span class="pl"> element whose text matches the label, then returns the
        text content that follows it.
        """
        tag = soup_tag.find('span', class_='pl', string=re.compile(label))
        if tag:
            # next_sibling may be a NavigableString or a Tag;
            # keep walking until we find a node with meaningful text.
            next_node = tag.next_sibling
            while next_node:
                if isinstance(next_node, NavigableString) and next_node.strip():
                    return next_node.strip().strip(':').strip()
                # If it is a Tag, try its text content instead.
                if hasattr(next_node, 'get_text') and next_node.get_text(strip=True):
                    return next_node.get_text(strip=True)
                next_node = next_node.next_sibling
        return None

    def _split_info(self, text: Optional[str]) -> List[str]:
        """Split a string delimited by ' / ' into a list."""
        if not text:
            return []
        return [item.strip() for item in text.split(' / ')]
    async def scrape_movie_info(self, douban_id: str) -> Dict:
        url = f"https://movie.douban.com/subject/{douban_id}/"
        try:
            async with httpx.AsyncClient(headers=self.headers, follow_redirects=True) as client:
                response = await client.get(url, timeout=20)
                response.raise_for_status()
        except httpx.HTTPStatusError as e:
            print(f"Request to the Douban page failed: {e.response.status_code}")
            if e.response.status_code == 403:
                print("Access denied (403 Forbidden). Check whether your cookie is still valid or your IP is being throttled.")
            return {}
        except httpx.RequestError as e:
            print(f"Network error while requesting Douban: {e}")
            return {}
        soup = BeautifulSoup(response.text, 'html.parser')
        info = {}
        # 1. Title and year
        title_tag = soup.find('h1')
        if title_tag:
            title_span = title_tag.find('span', property='v:itemreviewed')
            if title_span:  # guard: avoid AttributeError when the span is missing
                info['chinese_title'] = title_span.text.strip()
            year_span = title_tag.find('span', class_='year')
            if year_span:
                year_match = re.search(r'\((\d{4})\)', year_span.text)
                if year_match:  # guard: the year text may not match the expected "(YYYY)" shape
                    info['year'] = year_match.group(1)
        # 2. Poster (swap the small-ratio path for the large-ratio one)
        poster_img = soup.find('img', rel='v:image')
        if poster_img and poster_img.get('src'):
            info['poster_url'] = poster_img['src'].replace('/s_ratio_poster/', '/l_ratio_poster/')
        # 3. Structured parsing of the #info block
        info_div = soup.find('div', id='info')
        if info_div:
            # Use the label-lookup and split helpers to extract each field.
            info['directors'] = self._split_info(self._get_info_text(info_div, '导演'))
            info['writers'] = self._split_info(self._get_info_text(info_div, '编剧'))
            # The cast is wrapped in <a> tags, so handle it separately.
            actors_tag = info_div.find('span', class_='actor')
            if actors_tag:
                actors_list = [a.text.strip() for a in actors_tag.find_all('a')]
                info['actors'] = actors_list[:15]  # keep at most 15 leading actors
            else:  # fallback: plain-text cast list
                info['actors'] = self._split_info(self._get_info_text(info_div, '主演'))[:15]
            info['countries'] = self._split_info(self._get_info_text(info_div, '制片国家/地区'))
            info['spoken_languages'] = self._split_info(self._get_info_text(info_div, '语言'))
            release_date_text = self._get_info_text(info_div, '上映日期')
            if release_date_text:
                # Keep only the first release date.
                info['release_date'] = self._split_info(release_date_text)[0]
            info['runtime'] = self._get_info_text(info_div, '片长')
            info['aka_titles'] = self._split_info(self._get_info_text(info_div, '又名'))
            imdb_link_text = self._get_info_text(info_div, 'IMDb')
            if imdb_link_text:
                imdb_match = re.search(r'(tt\d+)', imdb_link_text)
                if imdb_match:
                    info['imdb_id'] = imdb_match.group(1)
        # 4. Genres - the spans with property='v:genre' are the most reliable source
        info['genres'] = [g.get_text(strip=True) for g in soup.find_all('span', property='v:genre')]
        # 5. Synopsis
        synopsis_span = soup.find('span', property='v:summary')
        if synopsis_span:
            info['synopsis'] = synopsis_span.get_text(strip=True).replace('\u3000', '')
        else:  # fallback: the full synopsis hidden behind the "expand" toggle
            hidden_synopsis = soup.find('span', class_='all hidden')
            if hidden_synopsis:
                info['synopsis'] = hidden_synopsis.get_text(strip=True).replace('\u3000', '')
            else:
                info['synopsis'] = ""
        # 6. Rating
        rating_strong = soup.find('strong', property='v:average')
        info['douban_rating'] = f"{rating_strong.text}/10" if rating_strong and rating_strong.text else "N/A"
        # 7. Awards
        awards_ul = soup.find('ul', class_='award')
        info['awards'] = [li.get_text(strip=True, separator=' ') for li in awards_ul.find_all('li')] if awards_ul else []
        return info
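
A minimal usage sketch for the scraper. The subject ID 1292052 is illustrative, and a valid logged-in cookie may be needed once Douban starts answering with 403s:

import asyncio

async def main():
    scraper = DoubanScraper(cookie=None)  # pass a real cookie string if anonymous requests get blocked
    info = await scraper.scrape_movie_info("1292052")  # example Douban subject ID
    print(info.get('chinese_title'), info.get('year'), info.get('imdb_id'))

asyncio.run(main())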

109 pt_gen/services/tmdb.py Normal file

@@ -0,0 +1,109 @@
import httpx
from typing import Optional, List, Dict, Any
class TMDBClient:
BASE_URL = "https://api.themoviedb.org/3"
def __init__(self, api_key: str):
self.api_key = api_key
async def _make_request(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Optional[Dict[str, Any]]:
if params is None:
params = {}
        # Always include the api_key and request zh-CN localized data.
params['api_key'] = self.api_key
params['language'] = 'zh-CN'
url = f"{self.BASE_URL}{endpoint}"
async with httpx.AsyncClient() as client:
try:
response = await client.get(url, params=params)
response.raise_for_status()
return response.json()
            except httpx.HTTPStatusError as e:
                print(f"TMDB API request failed: {e.response.status_code} for URL: {e.request.url}")
                return None
            except httpx.RequestError as e:
                print(f"TMDB request error: {e}")
                return None
async def find_by_imdb_id(self, imdb_id: str) -> Optional[Dict[str, Any]]:
endpoint = f"/find/{imdb_id}"
return await self._make_request(endpoint, params={'external_source': 'imdb_id'})
    # --- refactored get_movie_details: fetch and normalize in one call ---
async def get_movie_details(self, tmdb_id: str) -> Optional[Dict[str, Any]]:
params = {
'append_to_response': 'credits,alternative_titles'
}
endpoint = f"/movie/{tmdb_id}"
data = await self._make_request(endpoint, params=params)
if not data:
return None
return self._parse_movie_details(data)
    def _parse_movie_details(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Parse the raw TMDB API payload into a clean, flat dictionary."""
        directors = []
        if 'credits' in data and 'crew' in data['credits']:
            for member in data['credits']['crew']:
                if member.get('job') == 'Director':
                    directors.append(member['name'])
        writers = []
        if 'credits' in data and 'crew' in data['credits']:
            for member in data['credits']['crew']:
                # Include everyone in the Writing department (Writer, Screenplay, Story, ...),
                # taking care not to add the same person twice.
                if member.get('department') == 'Writing' and member['name'] not in writers:
                    writers.append(member['name'])
actors = []
if 'credits' in data and 'cast' in data['credits']:
for member in data['credits']['cast']:
actors.append(member['name'])
genres = [genre['name'] for genre in data.get('genres', [])]
countries = [country['name'] for country in data.get('production_countries', [])]
aka_titles = []
if 'alternative_titles' in data and 'titles' in data['alternative_titles']:
for item in data['alternative_titles']['titles']:
if item.get('iso_3166_1') == 'CN':
aka_titles.append(item['title'])
runtime_min = data.get('runtime')
runtime = f"{runtime_min}分钟" if runtime_min else None
spoken_languages = [lang['english_name'] for lang in data.get('spoken_languages', [])]
parsed_data = {
"id": data.get('id'),
"imdb_id": data.get('imdb_id'),
"title": data.get('title'),
"original_title": data.get('original_title'),
"overview": data.get('overview'),
"release_date": data.get('release_date', ''),
"runtime": runtime,
"spoken_languages": spoken_languages,
"tagline": data.get('tagline'),
"tagline": data.get('tagline'),
"poster_path": data.get('poster_path'),
"vote_average": data.get('vote_average', 0),
"directors": directors,
"writers": writers,
"actors": actors,
"genres": genres,
"countries": countries,
"aka_titles": aka_titles
}
return parsed_data
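
A minimal usage sketch. YOUR_TMDB_KEY is a placeholder, tt0111161 is IMDb's ID for The Shawshank Redemption, and movie_results is the key TMDB's /find endpoint uses for movie matches:

import asyncio

async def main():
    client = TMDBClient(api_key="YOUR_TMDB_KEY")  # placeholder key
    found = await client.find_by_imdb_id("tt0111161")
    movies = (found or {}).get('movie_results', [])
    if movies:
        details = await client.get_movie_details(str(movies[0]['id']))
        print(details['title'], details['directors'])

asyncio.run(main())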

@@ -0,0 +1,34 @@
import httpx
class ImageUploader:
def __init__(self, api_url: str, api_key: str):
self.api_url = api_url
self.api_key = api_key
    async def upload(self, image_url: str) -> str:
        """
        Download an image from a URL, then re-upload it to your own image host.
        Adapt this method to your image host's API.
        """
        try:
            async with httpx.AsyncClient() as client:
                # 1. Download the image
                get_resp = await client.get(image_url)
                get_resp.raise_for_status()
                image_bytes = get_resp.content
                # 2. Upload to your image host
                # Generic example: adjust `files` and `headers` to match your host's API.
                files = {'file': ('poster.jpg', image_bytes, 'image/jpeg')}
                headers = {'Authorization': f'Bearer {self.api_key}'}
                post_resp = await client.post(self.api_url, files=files, headers=headers)
                post_resp.raise_for_status()
                # 3. Parse the response and return the new image URL,
                #    assuming the host replies with JSON like {"data": {"url": "..."}}
                return post_resp.json()['data']['url']
        except Exception as e:
            print(f"Image upload failed: {e}")
            return image_url  # fall back to the original URL on failure
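
A minimal usage sketch. The endpoint, key, and source image URL are all placeholders, and the {"data": {"url": ...}} response shape must match what your host actually returns:

import asyncio

async def main():
    uploader = ImageUploader(api_url="https://img.example.com/api/upload",  # placeholder endpoint
                             api_key="YOUR_IMAGE_HOST_KEY")                 # placeholder key
    new_url = await uploader.upload("https://example.com/poster.jpg")       # placeholder source image
    print(new_url)

asyncio.run(main())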