@@ -329,7 +329,7 @@ class DouyinLiveWebReply:
                 }
             ],
             "options": {
-                "temperature": 0.5
+                "temperature": 0.9
             },
             "stream": False,
             "filterThink": True
@@ -363,7 +363,7 @@ class DouyinLiveWebReply:
         response = requests.post(
             f'{self.live_chat_config.ollama_address}/live-digital-avatar-manage/ollama/generate', json=payload,
             headers={'Authorization': f'Bearer {self.live_chat_config.backend_token}'},
-            timeout=10).content.decode()[5:]
+            timeout=30).content.decode()[5:]
         response = json.loads(response)
         return response['message']['content']
 
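
Here the timeout for the /live-digital-avatar-manage/ollama/generate proxy call is raised from 10 s to 30 s, giving slow generations room to finish instead of raising requests.exceptions.Timeout. The body is decoded, the first five characters are dropped (the [5:] slice), and the rest is parsed as JSON before message.content is returned. Not part of the patch, a sketch of the same call with basic error handling added; the function name and the empty-string fallback are illustrative:

import json

import requests


def call_generate(base_url, token, payload, timeout=30):
    # Mirrors the request in the hunk, including the [5:] prefix strip,
    # but returns '' instead of raising when the proxy is slow or down.
    try:
        resp = requests.post(
            f'{base_url}/live-digital-avatar-manage/ollama/generate',
            json=payload,
            headers={'Authorization': f'Bearer {token}'},
            timeout=timeout)
        resp.raise_for_status()
        return json.loads(resp.content.decode()[5:])['message']['content']
    except (requests.RequestException, ValueError, KeyError):
        return ''
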
@@ -411,6 +411,7 @@ class DouyinLiveWebReply:
         system_messages = self.live_chat_config.system_messages
         llm_prompt = self.live_chat_config.refine_system_message_prompt.format(
             content=system_messages)
+        logger.info(f'llm_prompt: {llm_prompt}')
         reply_messages = self._llm(llm_prompt, False)
         # 处理reply_messages, 先转换为json对象, 将key和value分别对应type和message存入sqlite message表中, 并统一给batch_number赋值为0
         # 正则匹配处理reply_messages, 只保留大括号及其范围内的字符串
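
The only change here is a new logger.info call that records the fully formatted refine prompt before it goes to the LLM, which makes bad generated copy much easier to debug. The surrounding comments describe the next steps: the raw reply is reduced to the substring between the outermost braces with a regular expression, parsed as JSON, and each type/message pair is written to the sqlite message table with batch_number 0. Not part of the patch, a hedged sketch of that brace-extraction step; the exact pattern the project uses is not shown in the diff:

import json
import re


def extract_json_object(raw_reply):
    # Keep only the outermost {...} so prose the model wraps around the
    # JSON does not break json.loads.
    match = re.search(r'\{.*\}', raw_reply, re.S)
    if match is None:
        raise ValueError('no JSON object found in LLM reply')
    return json.loads(match.group(0))
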
@@ -419,7 +420,7 @@ class DouyinLiveWebReply:
         # 遍历reply_messages对象, insert message
         for _type, message in reply_messages.items():
             self.live_chat_config.insert_message(message, _type, batch_number)
-            logger.info(f'入库 备用系统文案:{_type} | {message}')
+            logger.info(f'入库 文案:{_type} | {message}')
 
     def __call__(self):
         """
@@ -429,14 +430,12 @@ class DouyinLiveWebReply:
         logger.info(f'livetalking address -> {self.live_chat_config.livetalking_address}')
         logger.info(f'ollama_address -> {self.live_chat_config.ollama_address}')
         # 加一个计数器, 统计is_speaking连续为False的次数, 如果超过10次, 才算真正的未在说话
-        is_not_speaking_count = 0
         while True:
             try:
                 is_speaking = requests.post(f'{self.live_chat_config.livetalking_address}/is_speaking',
                                             json={'sessionid': self.live_chat_config.livetalking_sessionid},
                                             timeout=5).json()['data']
                 if is_speaking:
-                    time.sleep(0.1)
                     prompt_data = self.queue.get(False)
                     if prompt_data is not None:
                         product_name, product_specification, product_description = self.live_chat_config.product_info
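
In __call__ the patch deletes the is_not_speaking_count initialiser and the time.sleep(0.1) inside the speaking branch: the old loop only replied after 20 consecutive polls reported silence (see the last hunk), while the new loop reacts as soon as /is_speaking returns False. Note that the counter comment above while True survives even though the counter is gone. Not part of the patch, a small sketch of the poll the loop issues each iteration; the wrapper name is illustrative:

import requests


def poll_is_speaking(livetalking_address, sessionid, timeout=5):
    # True while LiveTalking reports the avatar is still talking.
    resp = requests.post(f'{livetalking_address}/is_speaking',
                         json={'sessionid': sessionid},
                         timeout=timeout)
    return bool(resp.json()['data'])
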
@@ -486,43 +485,44 @@ class DouyinLiveWebReply:
                             continue
                         reply_message = self._llm(prompt, False)
                         self.response_queue.put(reply_message)
+                        # is_speaking此时是False, 需要等一段时间再查询
+                        time.sleep(0.5)
                     else:
-                        # 用户交互队列为空,输出系统文案和备用系统文案
+                        # 用户交互队列为空,输出系统文案和文案
                         if not self.live_chat_config.next_reply_message:
-                            logger.info('备用系统文案已用完,重新生成备用系统文案')
+                            logger.info('文案已用完,重新生成文案')
                             self.live_chat_config.flush_message()
                             self.generate_messages(1)
                             continue
                 else:
-                    time.sleep(0.1)
-                    is_not_speaking_count += 1
-                    if is_not_speaking_count == 20:
-                        logger.info('连续20次请求Livetalking未在说话, 开始回复')
-                        # 调用Livetalking说话
-                        # 判断response_queue是否为空, 如果不为空, 则取出回复内容并调用livetalking, 否则从数据库中取出备用系统文案
-                        reply_message = ''
-                        if not self.response_queue.empty():
-                            reply_message = self.response_queue.get()
-                            reply_message = self.reply_message_postprocess(reply_message)
-                        else:
-                            reply_message_data = self.live_chat_config.next_reply_message
-                            if not reply_message_data:
-                                logger.info('备用系统文案已用完,重新生成备用系统文案')
+                    # 调用Livetalking说话
+                    # 判断response_queue是否为空, 如果不为空, 则取出回复内容并调用livetalking, 否则从数据库中取出文案
+                    reply_message = ''
+                    # 判断是否有需要回复的弹幕
+                    if not self.response_queue.empty():
+                        reply_message = self.response_queue.get()
+                        reply_message = self.reply_message_postprocess(reply_message)
+                    else:
+                        precedence_message = self.live_chat_config.precedence_reply_message
+                        if not precedence_message:
+                            message = self.live_chat_config.next_reply_message
+                            if not message:
+                                logger.info('文案已用完,重新生成文案')
                                 self.generate_messages(0)
                                 self.generate_messages(1)
                                 continue
-                            reply_message, _id = reply_message_data
-                            # 说完之后把状态改为1
-                            logger.info(f'更新 备用系统文案id:{_id} 状态为: 1')
+                            reply_message, _id = message
+                            # 状态改为1
+                            logger.info(f'更新 文案id:{_id} 状态为: 1')
                             self.live_chat_config.update_next_reply_status(1, _id)
-                        # asyncio.run(self.post_to_human(reply_message))
-                        logger.info(f'开始播放: {reply_message}')
-                        self.post_to_human_sync(reply_message)
-                        is_not_speaking_count = 0
+                        else:
+                            reply_message, _id = precedence_message
+                            # 置空优先文案
+                            self.live_chat_config.flush_precedence_reply_message()
+                    # 判断self.response_queue.empty(), true则打印开始播放弹幕回复, false则打印开始播放系统文案
+                    logger.info(f'开始播放{"弹幕回复" if not self.response_queue.empty() else "系统文案"}: {reply_message}')
+                    self.post_to_human_sync(reply_message)
+                    # 等0.1秒再检测
+                    time.sleep(0.1)
             except Exception:
                 # 发生异常,输出系统文案
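
Taken together, the last hunk reworks the idle branch: the 20-poll counter and the commented-out asyncio.run call are removed, 备用系统文案 ('backup system copy') is consistently shortened to 文案, reply_message_data becomes message, and a new precedence_reply_message ('优先文案', an operator-set priority text) is played ahead of the stored copy and cleared afterwards. The loop also sleeps 0.5 s after queuing a danmaku reply, logs whether it is about to play a danmaku reply or system copy, and waits 0.1 s before polling again. Not part of the patch, a condensed sketch of the selection order the new branch implements; it reuses the attribute names from the diff but drops logging and the regeneration path:

def choose_reply(response_queue, live_chat_config, postprocess):
    # 1) a queued danmaku reply wins; 2) then the precedence copy, which is
    # cleared after use; 3) otherwise the next stored copy, marked as spoken.
    # Returns None when the copy pool is exhausted so the caller can run
    # generate_messages() again.
    if not response_queue.empty():
        return postprocess(response_queue.get())
    precedence = live_chat_config.precedence_reply_message
    if precedence:
        reply, _id = precedence
        live_chat_config.flush_precedence_reply_message()
        return reply
    data = live_chat_config.next_reply_message
    if not data:
        return None
    reply, _id = data
    live_chat_config.update_next_reply_status(1, _id)
    return reply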