main
戴潇逸 1 week ago
parent 9552dcaba3
commit 2dc7b0dfc8

@ -1,12 +1,11 @@
pyinstaller --onefile ^
--icon ./douyin.icon ^
--add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer;py_mini_racer" ^
--add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/betterproto;betterproto" ^
--add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/betterproto-2.0.0b6.dist-info;betterproto-2.0.0b6.dist-info" ^
--add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/grpclib;grpclib" ^
--add-data "E:/workspace/live-digital-avatar/live-digital-avatar-chat/sign.js;." ^
--add-binary "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer/mini_racer.dll;." ^
--add-binary "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer/icudtl.dat;." ^
--add-binary "E:/workspace/live-digital-avatar/live-digital-avatar-chat/venv/Lib/site-packages/py_mini_racer/snapshot_blob.bin;." ^
--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer;py_mini_racer" ^
--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/betterproto;betterproto" ^
--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/betterproto-2.0.0b6.dist-info;betterproto-2.0.0b6.dist-info" ^
--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/grpclib;grpclib" ^
--add-data "D:/code/live-digital-avatar/live-digital-avatar-chat/sign.js;." ^
--add-binary "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer/mini_racer.dll;." ^
--add-binary "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer/icudtl.dat;." ^
--add-binary "D:/code/live-digital-avatar/live-digital-avatar-chat/.venv/Lib/site-packages/py_mini_racer/snapshot_blob.bin;." ^
--name chat ^
main.py

@ -141,6 +141,14 @@ class LiveChatConfig:
cursor.close()
return count
@property
def precedence_reply_message(self):
    """Return the highest-priority pending reply, or None.

    Fetches the oldest ``message`` row with status 2 (priority) and
    batch_number 0 as a ``(message, id)`` tuple; returns ``None`` when no
    such row exists (``fetchone`` on an empty result).

    Fix vs. original: the cursor is now closed in a ``finally`` block so it
    is not leaked when ``execute``/``fetchone`` raises, and the misleading
    local name ``count`` (the value is a row, not a count) is renamed.
    """
    cursor = self.conn.cursor()
    try:
        cursor.execute('select message, id from message where status = 2 and batch_number = 0 order by id limit 1')
        row = cursor.fetchone()
    finally:
        cursor.close()
    return row
@property
def next_reply_message(self):
cursor = self.conn.cursor()
@ -187,6 +195,12 @@ class LiveChatConfig:
self.conn.commit()
cursor.close()
def flush_precedence_reply_message(self):
    """Consume the current priority message and reset the playback order.

    With M = MIN(id) of rows whose status is 2 (the priority message just
    played): rows with id <= M are marked 1 (already spoken, includes the
    priority row itself) and rows with id > M are marked 0 (pending).
    When no status-2 row exists, M is NULL, both comparisons evaluate to
    NULL, and the ELSE branch keeps every status unchanged — the statement
    is deliberately a no-op in that case.

    Fix vs. original: the cursor is closed in a ``finally`` block so it is
    not leaked if ``execute``/``commit`` raises.
    """
    cursor = self.conn.cursor()
    try:
        cursor.execute("UPDATE message SET status = CASE WHEN id > (SELECT MIN(id) FROM message WHERE status = 2) THEN 0 WHEN id <= (SELECT MIN(id) FROM message WHERE status = 2) THEN 1 ELSE status END;")
        self.conn.commit()
    finally:
        cursor.close()
def update_next_reply_status(self, status, _id):
cursor = self.conn.cursor()
cursor.execute("update message set status = ? where id = ?", (status, _id))
@ -212,7 +226,6 @@ class LiveChatConfig:
self.conn.commit()
cursor.close()
class PromptQueue:
def __init__(self, maxsize=0):
self.queue = Queue(maxsize)

@ -329,7 +329,7 @@ class DouyinLiveWebReply:
}
],
"options": {
"temperature": 0.5
"temperature": 0.9
},
"stream": False,
"filterThink": True
@ -363,7 +363,7 @@ class DouyinLiveWebReply:
response = requests.post(
f'{self.live_chat_config.ollama_address}/live-digital-avatar-manage/ollama/generate', json=payload,
headers={'Authorization': f'Bearer {self.live_chat_config.backend_token}'},
timeout=10).content.decode()[5:]
timeout=30).content.decode()[5:]
response = json.loads(response)
return response['message']['content']
@ -411,6 +411,7 @@ class DouyinLiveWebReply:
system_messages = self.live_chat_config.system_messages
llm_prompt = self.live_chat_config.refine_system_message_prompt.format(
content=system_messages)
logger.info(f'llm_prompt: {llm_prompt}')
reply_messages = self._llm(llm_prompt, False)
# 处理reply_messages先转换为json对象将key和value分别对应type和message存入sqlite message表中并统一给batch_number赋值为0
# 正则匹配处理reply_messages只保留大括号及其范围内的字符串
@ -419,7 +420,7 @@ class DouyinLiveWebReply:
# 遍历reply_messages对象insert message
for _type, message in reply_messages.items():
self.live_chat_config.insert_message(message, _type, batch_number)
logger.info(f'入库备用系统文案:{_type} | {message}')
logger.info(f'入库文案:{_type} | {message}')
def __call__(self):
"""
@ -429,14 +430,12 @@ class DouyinLiveWebReply:
logger.info(f'livetalking address -> {self.live_chat_config.livetalking_address}')
logger.info(f'ollama_address -> {self.live_chat_config.ollama_address}')
# 加一个计数器统计is_speaking连续为False的次数如果超过10次才算真正的未在说话
is_not_speaking_count = 0
while True:
try:
is_speaking = requests.post(f'{self.live_chat_config.livetalking_address}/is_speaking',
json={'sessionid': self.live_chat_config.livetalking_sessionid},
timeout=5).json()['data']
if is_speaking:
time.sleep(0.1)
prompt_data = self.queue.get(False)
if prompt_data is not None:
product_name, product_specification, product_description = self.live_chat_config.product_info
@ -486,43 +485,44 @@ class DouyinLiveWebReply:
continue
reply_message = self._llm(prompt, False)
self.response_queue.put(reply_message)
# is_speaking此时是False需要等一段时间再查询
time.sleep(0.5)
else:
# 用户交互队列为空,输出系统文案和备用系统文案
# 用户交互队列为空,输出系统文案和文案
if not self.live_chat_config.next_reply_message:
logger.info('备用系统文案已用完,重新生成备用系统文案')
logger.info('文案已用完,重新生成文案')
self.live_chat_config.flush_message()
self.generate_messages(1)
continue
else:
time.sleep(0.1)
is_not_speaking_count += 1
if is_not_speaking_count == 20:
logger.info('连续20次请求Livetalking未在说话开始回复')
# 调用Livetalking说话
# 判断response_queue是否为空如果不为空则取出回复内容并调用livetalking否则从数据库中取出备用系统文案
reply_message = ''
if not self.response_queue.empty():
reply_message = self.response_queue.get()
reply_message = self.reply_message_postprocess(reply_message)
else:
reply_message_data = self.live_chat_config.next_reply_message
if not reply_message_data:
logger.info('备用系统文案已用完,重新生成备用系统文案')
# 调用Livetalking说话
# 判断response_queue是否为空如果不为空则取出回复内容并调用livetalking否则从数据库中取出文案
reply_message = ''
# 判断是否有需要回复的弹幕
if not self.response_queue.empty():
reply_message = self.response_queue.get()
reply_message = self.reply_message_postprocess(reply_message)
else:
precedence_message = self.live_chat_config.precedence_reply_message
if not precedence_message:
message = self.live_chat_config.next_reply_message
if not message:
logger.info('文案已用完,重新生成文案')
self.generate_messages(0)
self.generate_messages(1)
continue
reply_message, _id = reply_message_data
# 说完之后把状态改为1
logger.info(f'更新备用系统文案id:{_id}状态为: 1')
reply_message, _id = message
# 状态改为1
logger.info(f'更新文案id:{_id}状态为: 1')
self.live_chat_config.update_next_reply_status(1, _id)
# asyncio.run(self.post_to_human(reply_message))
logger.info(f'开始播放: {reply_message}')
self.post_to_human_sync(reply_message)
is_not_speaking_count = 0
else:
reply_message, _id = precedence_message
# 置空优先文案
self.live_chat_config.flush_precedence_reply_message()
# 判断self.response_queue.empty()true则打印开始播放弹幕回复false则打印开始播放系统文案
logger.info(f'开始播放{"弹幕回复" if not self.response_queue.empty() else "系统文案"}: {reply_message}')
self.post_to_human_sync(reply_message)
# 等0.1秒再检测
time.sleep(0.1)
except Exception:
# 发生异常,输出系统文案

Loading…
Cancel
Save