import uvicorn
from multiprocessing import Process, Event, freeze_support, Manager
from loguru import logger
from helper import PromptQueue, LiveChatConfig
from liveMan import DouyinLiveWebFetcher, DouyinLiveWebReply

# Rotate the log file so a long-running live session can't grow it unbounded.
logger.add("log.log", encoding="utf-8", rotation="500MB")


def start_fastapi_server(shared_config):
    """Run the FastAPI control server in its own process.

    The app is imported lazily so the web stack is only loaded inside the
    child process; the Manager-backed dict is attached to the app object so
    request handlers can read/write cross-process state.
    """
    from app import app
    app.shared_config = shared_config
    uvicorn.run(app, host="0.0.0.0", port=8000)


def fetch_user_chat_content(ws_open_event, queue):
    """Fetch Douyin live-chat messages and push them onto *queue*.

    *ws_open_event* is presumably set by the fetcher once its websocket is
    open (TODO confirm in DouyinLiveWebFetcher); the parent waits on it
    before starting the replier.
    """
    fetcher = DouyinLiveWebFetcher(ws_open_event, queue)
    fetcher.start()


def reply_user_chat_content(queue, config):
    """Consume chat messages from *queue* and produce replies."""
    reply = DouyinLiveWebReply(queue, config)
    reply()


if __name__ == "__main__":
    freeze_support()  # required for multiprocessing in frozen (e.g. PyInstaller) builds
    LiveChatConfig().update_chat_enable_status("启动中")
    with Manager() as manager:
        prediction_queue = manager.list()  # shared FIFO of prediction times (strings)
        shared_config = manager.dict({
            "is_speaking": False,
            "predicted_time": None,       # latest prediction used for the time-diff calculation
            "speaking_false_time": None,  # real time when is_speaking flipped to False
            "time_diff_seconds": None,    # real time - predicted time, in seconds
            "prediction_queue": prediction_queue  # FIFO queue, avoids overwriting values
        })
        queue = PromptQueue(10)
        ws_open_event = Event()

        api_process = Process(
            target=start_fastapi_server,
            args=(shared_config,),
            name="FastAPI Server"
        )
        fetch_process = Process(
            target=fetch_user_chat_content,
            args=(ws_open_event, queue),
            name="Fetcher"
        )
        reply_process = Process(
            target=reply_user_chat_content,
            args=(queue, shared_config),
            name="Replier"
        )
        processes = [api_process, fetch_process, reply_process]

        try:
            api_process.start()
            fetch_process.start()
            # Do not start replying until the fetcher signals readiness.
            # NOTE(review): no timeout here — if the fetcher dies before
            # setting the event this blocks forever; consider wait(timeout=...).
            ws_open_event.wait()
            reply_process.start()

            fetch_process.join()
            reply_process.join()
            api_process.join()
        except KeyboardInterrupt:
            # Fix: previously Ctrl-C in the parent left the child processes
            # (uvicorn server, fetcher, replier) running as orphans.
            logger.info("Interrupted; terminating child processes")
            for p in processes:
                if p.is_alive():
                    p.terminate()
            for p in processes:
                if p.pid is not None:  # join only processes that were started
                    p.join()