DengDai
2025-12-08 14:45:14 +08:00
commit 519589f8f5
60 changed files with 8191 additions and 0 deletions

View File

@@ -0,0 +1,76 @@
import os
import json
import time
import logging

log = logging.getLogger(__name__)

# Cache directory
CACHE_DIR = os.path.join(os.path.dirname(__file__), '..', 'cache')
os.makedirs(CACHE_DIR, exist_ok=True)  # create the directory automatically


class CacheManager:
    """
    A generic JSON file cache manager.
    """
    def __init__(self, cache_file_name: str, ttl_seconds: int = 3600):
        """
        :param cache_file_name: cache file name, e.g., 'plex_library.json'
        :param ttl_seconds: cache time-to-live (default: 1 hour)
        """
        self.cache_path = os.path.join(CACHE_DIR, cache_file_name)
        self.ttl = ttl_seconds

    def _is_stale(self) -> bool:
        """Check whether the cache has expired."""
        if not os.path.exists(self.cache_path):
            return True  # file does not exist: treat as stale
        file_mod_time = os.path.getmtime(self.cache_path)
        if (time.time() - file_mod_time) > self.ttl:
            return True  # file exists but has exceeded the TTL: treat as stale
        return False

    def read_cache(self) -> list | dict | None:
        """Read the cache from disk."""
        try:
            with open(self.cache_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (FileNotFoundError, json.JSONDecodeError):
            return None

    def write_cache(self, data: list | dict):
        """Write data to the cache file."""
        try:
            with open(self.cache_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=4)
        except Exception as e:
            log.error(f"Failed to write cache file {self.cache_path}: {e}")

    def get_data(self, fetch_func, *args, **kwargs) -> list | dict:
        """
        Main entry point for retrieving data. Returns cached data while the
        cache is still fresh; otherwise calls fetch_func to obtain new data.
        :param fetch_func: a callable that fetches live data (e.g., plex_manager.get_items)
        :param args: positional arguments passed to fetch_func
        :param kwargs: keyword arguments passed to fetch_func
        :return: the data
        """
        if not self._is_stale():
            log.debug(f"Loading data from cache: {self.cache_path}")
            cached_data = self.read_cache()
            if cached_data is not None:
                return cached_data

        log.info(f"Cache missing or stale, fetching fresh data from source: {fetch_func.__name__}")
        try:
            fresh_data = fetch_func(*args, **kwargs)
            self.write_cache(fresh_data)
            return fresh_data
        except Exception as e:
            log.error(f"Call to fetch_func ({fetch_func.__name__}) failed: {e}", exc_info=True)
            # If fetching fresh data fails, fall back to the old cache (even if
            # stale); that is better than returning an error.
            log.warning("Fetching fresh data failed, falling back to the stale cache.")
            old_data = self.read_cache()
            return old_data if old_data is not None else []
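
A minimal usage sketch of CacheManager (not part of the commit), showing how a caller wraps an expensive fetch with get_data. The import path, the fetch_user_list helper, and the URL are hypothetical stand-ins for a real data source.

# Hypothetical usage example of CacheManager; names below are illustrative only.
from services.cache_service import CacheManager  # package path assumed

def fetch_user_list(source_url: str) -> list:
    # Placeholder for a real network or database call.
    return [{"id": 1, "name": "alice"}, {"id": 2, "name": "bob"}]

# Cache the result for 10 minutes; the first call fetches, later calls hit the JSON file.
cache = CacheManager("user_list.json", ttl_seconds=600)
users = cache.get_data(fetch_user_list, source_url="https://example.invalid/users")
print(f"Loaded {len(users)} users")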

View File

@@ -0,0 +1,40 @@
import logging

from config import config_manager
from utils.plex_utils import PlexManager
from .cache_service import CacheManager

log = logging.getLogger(__name__)


def update_all_plex_libraries_cache():
    """
    Refresh the caches of all configured Plex libraries.
    This function is called by the scheduled job.
    """
    log.info("Starting scheduled job: updating Plex library caches...")
    plex_config = config_manager.get('plex')
    if not plex_config or not plex_config.get('host'):
        log.info("Plex is not configured, skipping the update job.")
        return

    try:
        manager = PlexManager(plex_config)
        libraries = manager.get_libraries()
        for lib in libraries:
            lib_id = lib['id']
            # Use a separate cache file per library
            cache_mgr = CacheManager(f"plex_lib_{lib_id}.json")
            # Call get_data with the real fetch function; the cache file is
            # rewritten once it has gone stale.
            log.debug(f"Refreshing cache for Plex library '{lib['name']}' ({lib_id})...")
            cache_mgr.get_data(manager.get_library_items, library_id=lib_id)
        log.info("Plex library cache update job finished.")
    except Exception as e:
        log.error(f"Error while updating the Plex cache: {e}", exc_info=True)


# TODO: add other update functions here, for example:
# def update_local_skits_cache():
#     log.info("Updating the local short-drama cache...")
#     ...
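
A hedged sketch of how the TODO above might be filled in, reusing the same CacheManager pattern. The scan_skits_directory helper and the 'local_skits' config key are placeholders, not part of this commit.

# Hypothetical sketch mirroring update_all_plex_libraries_cache; not part of this commit.
import os

def scan_skits_directory(root_path: str) -> list:
    # Placeholder fetch function: treat each sub-directory as one short drama.
    return [{"name": name} for name in sorted(os.listdir(root_path))
            if os.path.isdir(os.path.join(root_path, name))]

def update_local_skits_cache():
    log.info("Starting scheduled job: updating the local short-drama cache...")
    skits_config = config_manager.get('local_skits')  # config key assumed
    if not skits_config or not skits_config.get('path'):
        log.info("Local short dramas are not configured, skipping the update job.")
        return
    cache_mgr = CacheManager("local_skits.json")
    cache_mgr.get_data(scan_skits_directory, skits_config['path'])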

View File

@@ -0,0 +1,35 @@
import logging
import atexit

from apscheduler.schedulers.background import BackgroundScheduler

from .data_updater import update_all_plex_libraries_cache

log = logging.getLogger(__name__)

scheduler = BackgroundScheduler(daemon=True)


def init_scheduler():
    """Initialize and start the background job scheduler."""
    if scheduler.running:
        log.warning("The scheduler is already running.")
        return

    try:
        # Register scheduled jobs here
        scheduler.add_job(
            func=update_all_plex_libraries_cache,
            trigger='interval',
            hours=1,  # run once every hour
            id='update_plex_job',
            replace_existing=True
        )
        # TODO: add more jobs
        # scheduler.add_job(...)

        scheduler.start()
        log.info("Background job scheduler started.")
        # Make sure the scheduler shuts down gracefully when the app exits
        atexit.register(lambda: scheduler.shutdown())
    except Exception as e:
        log.error(f"Failed to start the scheduler: {e}", exc_info=True)