From d48c86fbd1006004af2a027f35a1dd4fa80680fe Mon Sep 17 00:00:00 2001
From: fanpt <320622572@qq.com>
Date: Fri, 5 Sep 2025 12:26:09 +0800
Subject: [PATCH] Add the sender's user name to danmaku replies
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 liveMan.py           | 54 ++++++++++++++++++++------------------
 message_processor.py |  2 +-
 2 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/liveMan.py b/liveMan.py
index 1ed92cd..9dcb7aa 100644
--- a/liveMan.py
+++ b/liveMan.py
@@ -352,7 +352,7 @@ class DouyinLiveWebReply:
                     buffer += char
                     if char in self.punctuation:
                         if len(buffer.strip()) < 10:
-                            continue  # 不够长,继续缓冲
+                            continue
                         yield buffer.strip()
                         buffer = ''
                 if buffer.strip():
@@ -388,10 +388,6 @@
         )

     def post_to_human_sync(self, text: str):
-        """
-        同步调用post_to_human
-        :param text: 要发送的文本内容
-        """
         response = requests.post(
             f'{self.live_chat_config.livetalking_address}/human',
             json={
@@ -408,17 +404,13 @@
         message_count = self.live_chat_config.messages(batch_number)
         if message_count == 0:
             logger.info(f'生成系统文案,batch_number: {batch_number}')
-            # 结合原始样例话术,拼接提示词,调用Ollama,生成洗稿后的话术
             system_messages = self.live_chat_config.system_messages
             llm_prompt = self.live_chat_config.refine_system_message_prompt.format(
                 content=system_messages)
             logger.info(f'llm_prompt: {llm_prompt}')
             reply_messages = self._llm(llm_prompt, False)
-            # 处理reply_messages,先转换为json对象,将key和value分别对应type和message存入sqlite message表中,并统一给batch_number赋值为0
-            # 正则匹配处理reply_messages,只保留大括号及其范围内的字符串
             reply_messages = re.findall(r'\{.*?\}', reply_messages, re.DOTALL)[0]
             reply_messages = json.loads(reply_messages)
-            # 遍历reply_messages对象,insert message
             for _type, message in reply_messages.items():
                 self.live_chat_config.insert_message(message, _type, batch_number)
                 logger.info(f'入库文案:{_type} | {message}')
@@ -427,10 +419,11 @@
         """
         优先从用户交互队列中取提示词,如果没有用户交互的数据,则输出系统提示词
         """
-        live_chat_config.update_chat_enable_status('已启动')
+        # Corrected to use self.live_chat_config here
+        self.live_chat_config.update_chat_enable_status('已启动')
         logger.info(f'livetalking address -> {self.live_chat_config.livetalking_address}')
         logger.info(f'ollama_address -> {self.live_chat_config.ollama_address}')
-        # 加一个计数器,统计is_speaking连续为False的次数,如果超过10次,才算真正的未在说话
+
         while True:
             try:
                 is_speaking = self.config['is_speaking']
                 if is_speaking:
                     prompt_data = self.queue.get(False)
                     if prompt_data is not None:
                         product_name, product_specification, product_description = self.live_chat_config.product_info
-                        # live_chat: 弹幕
-                        message_type, prompt, live_chat = prompt_data
+
+                        # Accept both the old and the new queue payload layouts:
+                        # old: (message_type, prompt, live_chat)
+                        # new: (message_type, prompt, live_chat, user_name)
+                        if isinstance(prompt_data, tuple) and len(prompt_data) == 4:
+                            message_type, prompt, live_chat, sender_name = prompt_data
+                        else:
+                            message_type, prompt, live_chat = prompt_data
+                            sender_name = None
+
                         if message_type == MessageType.ENTER_LIVE_ROOM.value:
                             if random.random() >= self.live_chat_config.enter_live_room_prob / 100:
                                 continue
@@ -475,32 +476,33 @@
                             prompt = prompt.format(product_name=product_name,
                                                    product_specification=product_specification,
                                                    product_description=product_description)
+
                         if live_chat is not None:
-                            logger.info(f'弹幕: {live_chat}')
+                            logger.info(f'弹幕({sender_name}): {live_chat}')
                             llm_output = self._llm(
                                 self.live_chat_config.product_related_prompt.format(content=live_chat))
                             logger.info(f'判断弹幕是否违反中国大陆法律和政策: {llm_output}')
                             if llm_output != '否':
                                 continue
+
                             reply_message = self._llm(prompt, False)
-                            self.response_queue.put(reply_message)
+                            # Put the reply text and the sender's nickname into response_queue together
+                            self.response_queue.put((reply_message, sender_name))
                         else:
-                            # 用户交互队列为空,输出系统文案和文案
                             if not self.live_chat_config.next_reply_message:
                                 logger.info('文案已用完,重新生成文案')
                                 self.live_chat_config.flush_message()
                                 self.generate_messages(1)
-
                                 continue
                 else:
-                    # 调用Livetalking说话
-                    # 判断response_queue是否为空,如果不为空,则取出回复内容并调用livetalking,否则从数据库中取出文案
-                    reply_message = ''
-                    # 判断是否有需要回复的弹幕
+                    # Not speaking -> fetch one message from the queue / database and play it
                     if not self.response_queue.empty():
-                        reply_message = self.response_queue.get()
+                        reply_message, from_user = self.response_queue.get()
                         reply_message = self.reply_message_postprocess(reply_message)
-                        logger.info(f"开始播放回复弹幕:{reply_message}")
+                        if from_user:
+                            logger.info(f"开始播放回复弹幕({from_user}):{reply_message}")
+                        else:
+                            logger.info(f"开始播放回复弹幕:{reply_message}")
                     else:
                         precedence_message = self.live_chat_config.precedence_reply_message
                         if not precedence_message:
@@ -511,23 +513,17 @@
                                 self.generate_messages(1)
                                 continue
                             reply_message, _id = message
-                            # 状态改为1
                             logger.info(f'更新文案id:{_id}状态为: 1')
                             self.live_chat_config.update_next_reply_status(1, _id)
                         else:
                             reply_message, _id = precedence_message
-                            # 置空优先文案
                             self.live_chat_config.flush_precedence_reply_message()
                             logger.info(f"开始播放系统文案:{reply_message}")
-                        # 判断self.response_queue.empty(),true则打印开始播放弹幕回复,false则打印开始播放系统文案
-                        # logger.info(f'开始播放{"弹幕回复" if not self.response_queue.empty() else "系统文案"}: {reply_message}')
                     self.post_to_human_sync(reply_message)
-                    # 等0.5秒再检测
                     time.sleep(1)
             except Exception:
-                # 发生异常,输出系统文案
                 logger.error(traceback.format_exc())
                 time.sleep(5)
                 system_messages = self.live_chat_config.system_messages
diff --git a/message_processor.py b/message_processor.py
index 70fc4f9..a42b56c 100644
--- a/message_processor.py
+++ b/message_processor.py
@@ -18,7 +18,7 @@ def parse_chat_msg(payload, queue):
     if not content.strip():
         return
     prompt = live_chat_config.chat_prompt.format(content=content, product_name='{product_name}', product_specification='{product_specification}', product_description='{product_description}')
-    queue.put((MessageType.CHAT.value, prompt, content))
+    queue.put((MessageType.CHAT.value, prompt, content, user_name))
     # logger.info(f"【聊天msg】[{user_id}]{user_name}: {content}")
     # logger.info(f"队列数量: {queue.qsize()}")
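
Note on the payload change (illustration, not part of the patch): parse_chat_msg now enqueues a 4-tuple ending with user_name, while older producers may still enqueue the 3-tuple, which is why the reply loop unpacks defensively, and response_queue now carries (reply_message, sender_name). The minimal, standalone sketch below shows the same tolerant unpacking using only the Python standard library; the MessageType enum and the unpack_prompt_data helper here are illustrative stand-ins, not identifiers taken from this repository.

# compat_sketch.py -- standalone illustration of the backward-compatible unpacking
import queue
from enum import Enum


class MessageType(Enum):
    # Stand-in for the project's MessageType; only CHAT is needed for the demo.
    CHAT = 1


def unpack_prompt_data(prompt_data):
    """Normalize a queue item to (message_type, prompt, live_chat, sender_name)."""
    if isinstance(prompt_data, tuple) and len(prompt_data) == 4:
        return prompt_data
    # Old 3-tuple layout: no sender name available.
    message_type, prompt, live_chat = prompt_data
    return message_type, prompt, live_chat, None


if __name__ == '__main__':
    q = queue.Queue()
    # New-style producer (like the patched parse_chat_msg) includes the user name.
    q.put((MessageType.CHAT.value, 'reply prompt', '这个产品怎么卖?', '观众A'))
    # Old-style producer still works; sender_name falls back to None.
    q.put((MessageType.CHAT.value, 'reply prompt', '发货快吗?'))

    while not q.empty():
        _type, _prompt, live_chat, sender_name = unpack_prompt_data(q.get(False))
        label = f'弹幕({sender_name})' if sender_name else '弹幕'
        print(f'{label}: {live_chat}')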