main
戴潇逸 2 weeks ago
parent 9552dcaba3
commit 2dc7b0dfc8

@@ -1,12 +1,11 @@
 pyinstaller --onefile ^
---icon ./douyin.icon ^
---add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer;py_mini_racer" ^
---add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/betterproto;betterproto" ^
---add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/betterproto-2.0.0b6.dist-info;betterproto-2.0.0b6.dist-info" ^
---add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/grpclib;grpclib" ^
---add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/sign.js;." ^
---add-binary "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer/mini_racer.dll;." ^
---add-binary "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer/icudtl.dat;." ^
---add-binary "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer/snapshot_blob.bin;." ^
+--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer;py_mini_racer" ^
+--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/betterproto;betterproto" ^
+--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/betterproto-2.0.0b6.dist-info;betterproto-2.0.0b6.dist-info" ^
+--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/grpclib;grpclib" ^
+--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/sign.js;." ^
+--add-binary "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer/mini_racer.dll;." ^
+--add-binary "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer/icudtl.dat;." ^
+--add-binary "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer/snapshot_blob.bin;." ^
 --name chat ^
 main.py
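
Note on the bundled files: PyInstaller's one-file mode unpacks everything passed via --add-data / --add-binary into a temporary directory exposed as sys._MEIPASS at runtime, so sign.js and the py_mini_racer binaries have to be resolved relative to that directory instead of the source tree. A minimal sketch of that lookup (the helper name is illustrative, not from this repository):

    import sys
    from pathlib import Path

    def bundled_path(relative: str) -> Path:
        # In a PyInstaller one-file build, bundled data is extracted under sys._MEIPASS;
        # during normal development the files sit next to the source instead.
        base = Path(getattr(sys, '_MEIPASS', Path(__file__).parent))
        return base / relative

    SIGN_JS = bundled_path('sign.js')  # shipped via --add-data ".../sign.js;."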

@@ -141,6 +141,14 @@ class LiveChatConfig:
         cursor.close()
         return count
+
+    @property
+    def precedence_reply_message(self):
+        cursor = self.conn.cursor()
+        cursor.execute('select message, id from message where status = 2 and batch_number = 0 order by id limit 1')
+        count = cursor.fetchone()
+        cursor.close()
+        return count
 
     @property
     def next_reply_message(self):
         cursor = self.conn.cursor()
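
The new precedence_reply_message property returns the oldest row with status = 2 and batch_number = 0, or None when no such row exists. The queries in this class imply a message table with at least id, message, type, status and batch_number columns; the schema itself is not part of this diff, so the sketch below is an assumption for illustration:

    import sqlite3

    conn = sqlite3.connect('live_chat.db')  # illustrative path
    conn.execute("""
        CREATE TABLE IF NOT EXISTS message (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            message TEXT NOT NULL,
            type TEXT,
            status INTEGER DEFAULT 0,       -- 0 = pending, 1 = spoken, 2 = priority (inferred from this diff)
            batch_number INTEGER DEFAULT 0
        )
    """)
    # Same query as precedence_reply_message; fetchone() returns None when no priority row exists.
    row = conn.execute(
        'select message, id from message where status = 2 and batch_number = 0 order by id limit 1'
    ).fetchone()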
@@ -187,6 +195,12 @@ class LiveChatConfig:
         self.conn.commit()
         cursor.close()
 
+    def flush_precedence_reply_message(self):
+        cursor = self.conn.cursor()
+        cursor.execute("UPDATE message SET status = CASE WHEN id > (SELECT MIN(id) FROM message WHERE status = 2) THEN 0 WHEN id <= (SELECT MIN(id) FROM message WHERE status = 2) THEN 1 ELSE status END;")
+        self.conn.commit()
+        cursor.close()
+
     def update_next_reply_status(self, status, _id):
         cursor = self.conn.cursor()
         cursor.execute("update message set status = ? where id = ?", (status, _id))
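
flush_precedence_reply_message rewrites every status in one UPDATE: rows up to and including the first status-2 row are marked 1 (spoken), rows after it are reset to 0 (pending), and nothing changes when no status-2 row exists because the CASE comparisons against a NULL MIN(id) fall through to ELSE. A small in-memory demonstration of that behaviour:

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE message (id INTEGER PRIMARY KEY, message TEXT, status INTEGER, batch_number INTEGER)')
    conn.executemany('INSERT INTO message VALUES (?, ?, ?, 0)',
                     [(1, 'a', 1), (2, 'b', 2), (3, 'c', 0), (4, 'd', 1)])
    conn.execute(
        "UPDATE message SET status = CASE "
        "WHEN id > (SELECT MIN(id) FROM message WHERE status = 2) THEN 0 "
        "WHEN id <= (SELECT MIN(id) FROM message WHERE status = 2) THEN 1 "
        "ELSE status END;"
    )
    print(conn.execute('SELECT id, status FROM message ORDER BY id').fetchall())
    # [(1, 1), (2, 1), (3, 0), (4, 0)] -> everything after the first status-2 row becomes pending again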
@@ -212,7 +226,6 @@ class LiveChatConfig:
         self.conn.commit()
         cursor.close()
-
 
 class PromptQueue:
     def __init__(self, maxsize=0):
         self.queue = Queue(maxsize)

@@ -329,7 +329,7 @@ class DouyinLiveWebReply:
                 }
             ],
             "options": {
-                "temperature": 0.5
+                "temperature": 0.9
             },
             "stream": False,
             "filterThink": True
@@ -363,7 +363,7 @@ class DouyinLiveWebReply:
         response = requests.post(
             f'{self.live_chat_config.ollama_address}/live-digital-avatar-manage/ollama/generate', json=payload,
             headers={'Authorization': f'Bearer {self.live_chat_config.backend_token}'},
-            timeout=10).content.decode()[5:]
+            timeout=30).content.decode()[5:]
         response = json.loads(response)
         return response['message']['content']
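
The timeout on the generate call goes from 10 s to 30 s, giving slower LLM responses room before the loop's outer except falls back to a system script. A hedged sketch of guarding the same call explicitly against a timeout (endpoint, header and the 5-character prefix strip are taken from the code above; the None fallback is illustrative):

    import json
    from typing import Optional

    import requests

    def generate_reply(base_url: str, token: str, payload: dict) -> Optional[str]:
        try:
            raw = requests.post(
                f'{base_url}/live-digital-avatar-manage/ollama/generate',
                json=payload,
                headers={'Authorization': f'Bearer {token}'},
                timeout=30,                        # the new 30 s budget from this commit
            ).content.decode()[5:]                 # same prefix strip as the original code
            return json.loads(raw)['message']['content']
        except requests.exceptions.Timeout:
            return None                            # illustrative; the real loop relies on its outer except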
@@ -411,6 +411,7 @@ class DouyinLiveWebReply:
         system_messages = self.live_chat_config.system_messages
         llm_prompt = self.live_chat_config.refine_system_message_prompt.format(
             content=system_messages)
+        logger.info(f'llm_prompt: {llm_prompt}')
         reply_messages = self._llm(llm_prompt, False)
         # Process reply_messages: convert it to a JSON object, map each key/value pair to type/message, store the rows in the sqlite message table, and set batch_number to 0 for all of them
         # Regex-match reply_messages and keep only the curly braces and the text inside them
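
The two comments above describe the post-processing of the LLM output: trim it down to the outermost pair of braces with a regex, then parse the remainder as JSON so each key/value pair can be stored as type/message. A minimal sketch of that step (the helper name and the sample input are illustrative):

    import json
    import re

    def parse_reply_messages(raw: str) -> dict:
        # Keep only the first '{' through the last '}' so any prose around the JSON is dropped,
        # then parse the result into a dict of {type: message}.
        match = re.search(r'\{.*\}', raw, re.S)
        if not match:
            raise ValueError('no JSON object found in LLM output')
        return json.loads(match.group(0))

    # parse_reply_messages('noise {"opening": "Welcome to the stream"} noise')
    # -> {'opening': 'Welcome to the stream'}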
@@ -419,7 +420,7 @@ class DouyinLiveWebReply:
         # Iterate over the reply_messages object and insert each message
         for _type, message in reply_messages.items():
             self.live_chat_config.insert_message(message, _type, batch_number)
-            logger.info(f'Stored backup system script: {_type} | {message}')
+            logger.info(f'Stored script: {_type} | {message}')
 
     def __call__(self):
         """
@@ -429,14 +430,12 @@ class DouyinLiveWebReply:
         logger.info(f'livetalking address -> {self.live_chat_config.livetalking_address}')
         logger.info(f'ollama_address -> {self.live_chat_config.ollama_address}')
         # Add a counter for consecutive is_speaking == False results; only treat the avatar as really not speaking after more than 10 in a row
-        is_not_speaking_count = 0
         while True:
             try:
                 is_speaking = requests.post(f'{self.live_chat_config.livetalking_address}/is_speaking',
                                             json={'sessionid': self.live_chat_config.livetalking_sessionid},
                                             timeout=5).json()['data']
                 if is_speaking:
-                    time.sleep(0.1)
                     prompt_data = self.queue.get(False)
                     if prompt_data is not None:
                         product_name, product_specification, product_description = self.live_chat_config.product_info
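
The main loop polls Livetalking's /is_speaking endpoint with the session id and reads the boolean out of the response's data field; this commit drops the extra sleeps and the not-speaking counter around that poll. A small standalone version of the same check (URL, payload and response shape come from this diff; the helpers themselves are illustrative):

    import time

    import requests

    def is_speaking(livetalking_address: str, sessionid: int, timeout: float = 5) -> bool:
        # POST /is_speaking with the session id; the boolean lives in the 'data' field.
        resp = requests.post(f'{livetalking_address}/is_speaking',
                             json={'sessionid': sessionid}, timeout=timeout)
        return bool(resp.json()['data'])

    def wait_until_silent(livetalking_address: str, sessionid: int, interval: float = 0.1) -> None:
        # Illustrative helper: block until the avatar stops speaking, polling every `interval` seconds.
        while is_speaking(livetalking_address, sessionid):
            time.sleep(interval)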
@@ -486,43 +485,44 @@
                             continue
                         reply_message = self._llm(prompt, False)
                         self.response_queue.put(reply_message)
-                        # is_speaking is False at this point; wait a while before querying again
-                        time.sleep(0.5)
                     else:
-                        # The user-interaction queue is empty; play the system scripts and the backup system scripts
+                        # The user-interaction queue is empty; play the system scripts and the scripts
                         if not self.live_chat_config.next_reply_message:
-                            logger.info('Backup system scripts are used up; regenerating backup system scripts')
+                            logger.info('Scripts are used up; regenerating scripts')
                             self.live_chat_config.flush_message()
                             self.generate_messages(1)
                             continue
                 else:
-                    time.sleep(0.1)
-                    is_not_speaking_count += 1
-                    if is_not_speaking_count == 20:
-                        logger.info('Livetalking reported not speaking for 20 consecutive requests; starting to reply')
                     # Call Livetalking to speak
-                    # Check whether response_queue is empty: if not, pop the reply and call livetalking; otherwise fetch a backup system script from the database
+                    # Check whether response_queue is empty: if not, pop the reply and call livetalking; otherwise fetch a script from the database
                     reply_message = ''
+                    # Check whether there are danmaku comments that need a reply
                     if not self.response_queue.empty():
                         reply_message = self.response_queue.get()
                         reply_message = self.reply_message_postprocess(reply_message)
                     else:
-                        reply_message_data = self.live_chat_config.next_reply_message
-                        if not reply_message_data:
-                            logger.info('Backup system scripts are used up; regenerating backup system scripts')
+                        precedence_message = self.live_chat_config.precedence_reply_message
+                        if not precedence_message:
+                            message = self.live_chat_config.next_reply_message
+                            if not message:
+                                logger.info('Scripts are used up; regenerating scripts')
                                 self.generate_messages(0)
                                 self.generate_messages(1)
                                 continue
-                            reply_message, _id = reply_message_data
-                            # After it has been spoken, set the status to 1
-                            logger.info(f'Updating backup system script id {_id} status to: 1')
+                            reply_message, _id = message
+                            # Set the status to 1
+                            logger.info(f'Updating script id {_id} status to: 1')
                             self.live_chat_config.update_next_reply_status(1, _id)
-                    # asyncio.run(self.post_to_human(reply_message))
-                    logger.info(f'Now playing: {reply_message}')
+                        else:
+                            reply_message, _id = precedence_message
+                            # Clear the priority script
+                            self.live_chat_config.flush_precedence_reply_message()
+                    # Log whether a danmaku reply or a system script is about to be played, based on self.response_queue.empty()
+                    logger.info(f'Now playing {"danmaku reply" if not self.response_queue.empty() else "system script"}: {reply_message}')
                     self.post_to_human_sync(reply_message)
-                    is_not_speaking_count = 0
+                    # Wait 0.1 s before checking again
+                    time.sleep(0.1)
             except Exception:
                 # An exception occurred; play a system script
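
After this change the reply source is picked in a fixed order: a queued danmaku reply first, then the priority script (status = 2, which also resets the backlog via flush_precedence_reply_message), then the next regular script (marked status = 1 once chosen), with both batches regenerated when everything is used up. A condensed, illustrative sketch of that decision order (method names come from this diff; the wrapper itself is not part of the code):

    def pick_reply(self):
        # 1. A generated danmaku reply waiting in response_queue wins.
        if not self.response_queue.empty():
            return self.reply_message_postprocess(self.response_queue.get())
        # 2. Otherwise the priority script, which also rewrites the surrounding statuses.
        precedence = self.live_chat_config.precedence_reply_message
        if precedence:
            reply, _id = precedence
            self.live_chat_config.flush_precedence_reply_message()
            return reply
        # 3. Otherwise the next regular script, marked as spoken once chosen.
        nxt = self.live_chat_config.next_reply_message
        if nxt:
            reply, _id = nxt
            self.live_chat_config.update_next_reply_status(1, _id)
            return reply
        # 4. Nothing left: regenerate both batches and let the caller retry.
        self.generate_messages(0)
        self.generate_messages(1)
        return None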
