修改参数 (Modify parameters)

main
fanpt 2 days ago
parent 687df82bc6
commit 80b10ca970

@@ -335,8 +335,8 @@ if __name__ == '__main__':
     parser.add_argument('--customvideo_config', type=str, default='', help="custom action json")
     parser.add_argument('--tts', type=str, default='edgetts', help="tts service type") #xtts gpt-sovits cosyvoice
-    parser.add_argument('--REF_FILE', type=str, default="zh-CN-YunxiaNeural")
-    parser.add_argument('--REF_TEXT', type=str, default=None)
+    parser.add_argument('--REF_FILE', type=str, default="input/doubao.wav")
+    parser.add_argument('--REF_TEXT', type=str, default="刚进直播间的宝子们,左上角先点个关注,点亮咱们家的粉丝灯牌!我是你们的主播陈婉婉,今天给大家准备了超级重磅的福利")
     parser.add_argument('--TTS_SERVER', type=str, default='http://127.0.0.1:9880') # http://localhost:9000
     # parser.add_argument('--CHARACTER', type=str, default='test')
     # parser.add_argument('--EMOTION', type=str, default='default')
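The new REF_FILE/REF_TEXT defaults only make sense for a voice-cloning TTS backend (gpt-sovits / cosyvoice in the --tts comment above): REF_FILE becomes a reference audio clip and REF_TEXT its transcript, whereas the old default was an EdgeTTS voice name. As an illustration only, not code from this repo, a request to a GPT-SoVITS server on the default port 9880 would typically carry those two values as shown below; the field names assume the classic GPT-SoVITS api.py schema and may differ in other API versions.

# Illustration (assumption: classic GPT-SoVITS api.py fields; verify against the API version you run).
import requests

resp = requests.post(
    "http://127.0.0.1:9880",                   # --TTS_SERVER default from the diff
    json={
        "refer_wav_path": "input/doubao.wav",  # --REF_FILE: reference audio to clone
        "prompt_text": "刚进直播间的宝子们,左上角先点个关注,点亮咱们家的粉丝灯牌!我是你们的主播陈婉婉,今天给大家准备了超级重磅的福利",  # --REF_TEXT: its transcript
        "prompt_language": "zh",
        "text": "欢迎来到直播间",                # text to synthesize
        "text_language": "zh",
    },
)
open("out.wav", "wb").write(resp.content)      # the server returns wav audio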
@@ -370,9 +370,9 @@ if __name__ == '__main__':
     elif opt.model == 'wav2lip':
         from lipreal import LipReal,load_model,load_avatar,warm_up
         logger.info(opt)
-        model = load_model("./models/wav2lip.pth")
+        model = load_model("./models/wav2lip384.pth")
         avatar = load_avatar(opt.avatar_id)
-        warm_up(opt.batch_size,model,256)
+        warm_up(opt.batch_size,model,384)
     elif opt.model == 'ultralight':
         from lightreal import LightReal,load_model,load_avatar,warm_up
         logger.info(opt)

@@ -36,7 +36,7 @@ import torch.multiprocessing as mp
 from lipasr import LipASR
 import asyncio
 from av import AudioFrame, VideoFrame
-from wav2lip.models import Wav2Lip
+from wav2lip.models384.wav2lip import Wav2Lip
 from basereal import BaseReal
 #from imgcache import ImgCache
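The two hunks above move the wav2lip path from the 256-px checkpoint to a 384-px one: the checkpoint file, the warm-up resolution, and the imported model class all change together, otherwise the generator's output size would not match the face crops pasted back into the avatar frames. Below is a minimal, hypothetical sketch of such a warm-up pass (it is not the repository's warm_up); the tensor shapes follow the public Wav2Lip model, where the forward pass takes mel windows of 1x80x16 and 6-channel face crops.

# Hypothetical warm-up sketch: one dummy forward pass at the new 384x384 resolution
# so CUDA kernels and cuDNN algorithms are selected before real frames stream in.
# Assumes the standard Wav2Lip forward signature: model(mel_batch, face_batch).
import torch

@torch.no_grad()
def warm_up_sketch(batch_size, model, img_size=384, device="cuda"):
    mel = torch.zeros(batch_size, 1, 80, 16, device=device)                # dummy mel-spectrogram window
    faces = torch.zeros(batch_size, 6, img_size, img_size, device=device)  # masked frame + reference frame, channel-concatenated
    model = model.to(device).eval()
    return model(mel, faces)                                               # (B, 3, img_size, img_size) lip-synced crops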

@@ -1,48 +1,66 @@
+import re
 import time
-import os
+import requests
+import json
 from basereal import BaseReal
 from logger import logger
 
-def llm_response(message,nerfreal:BaseReal):
+def llm_response(message: str, nerfreal: BaseReal):
     start = time.perf_counter()
-    from openai import OpenAI
-    client = OpenAI(
-        # If you have not set the environment variable, replace this with your API Key
-        api_key=os.getenv("DASHSCOPE_API_KEY"),
-        # base_url for the DashScope SDK
-        base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
-    )
-    end = time.perf_counter()
-    logger.info(f"llm Time init: {end-start}s")
-    completion = client.chat.completions.create(
-        model="qwen-plus",
-        messages=[{'role': 'system', 'content': 'You are a helpful assistant.'},
-                  {'role': 'user', 'content': message}],
-        stream=True,
-        # With this setting, token usage is reported on the last line of the streamed output
-        stream_options={"include_usage": True}
-    )
+    logger.info(f"message:{message}")
+    url = "http://192.168.10.70:11434/api/chat"
+    data = {
+        "model": "qwen3:30b-a3b",
+        "messages": [
+            {"role": "system", "content": "你叫小苏,贵州人,带有轻微贵州口音,现在正在直播,你的人设是带货主播,你现在卖的产品是酱酒,任务是消除消费者对假酒的顾虑。"},
+            {"role": "user", "content": f"{message}\n不可以输出表情包/no_think"}
+        ],
+        "stream": True
+    }
+
+    response = requests.post(url, json=data, stream=True)
+    logger.info(f"Ollama init time: {time.perf_counter() - start:.2f}s")
+
     result = ""
+    full_answer = ""
     first = True
-    for chunk in completion:
-        if len(chunk.choices)>0:
-            #print(chunk.choices[0].delta.content)
-            if first:
-                end = time.perf_counter()
-                logger.info(f"llm Time to first chunk: {end-start}s")
-                first = False
-            msg = chunk.choices[0].delta.content
-            lastpos = 0
-            #msglist = re.split('[,.!;:,。!?]',msg)
-            for i, char in enumerate(msg):
-                if char in ",.!;:,。!?:;" :
-                    result = result+msg[lastpos:i+1]
-                    lastpos = i+1
-                    if len(result) > 10:
-                        logger.info(result)
-                        nerfreal.put_msg_txt(result)
-                        result = ""
-            result = result+msg[lastpos:]
-    end = time.perf_counter()
-    logger.info(f"llm Time to last chunk: {end-start}s")
-    nerfreal.put_msg_txt(result)
+    think_tag_pattern = re.compile(r"</?think>", re.IGNORECASE)
+    punctuation = ",.!;:,。!?:;"
+
+    for line in response.iter_lines():
+        if not line:
+            continue
+        try:
+            json_data = json.loads(line.decode("utf-8"))
+        except json.JSONDecodeError:
+            continue
+        # If a done flag is present, it could be checked here to break out
+        # if json_data.get("done"):
+        #     break
+        msg = json_data.get("message", {}).get("content", "")
+        msg = think_tag_pattern.sub("", msg)
+        if not msg:
+            continue
+        full_answer += msg
+        if first:
+            logger.info(f"Ollama time to first chunk: {time.perf_counter() - start:.2f}s")
+            first = False
+        lastpos = 0
+        for i, char in enumerate(msg):
+            if char in punctuation:
+                result += msg[lastpos:i+1]
+                lastpos = i+1
+                if len(result) > 10:
+                    logger.info(result)
+                    nerfreal.put_msg_txt(result)
+                    result = ""
+        result += msg[lastpos:]
+
+    if result:
+        nerfreal.put_msg_txt(result)
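The rewritten function streams the chat reply from a local Ollama /api/chat endpoint instead of DashScope, strips qwen3 <think> tags, splits the text at punctuation, and pushes segments longer than 10 characters to the avatar via put_msg_txt. A minimal way to exercise it outside the full pipeline is sketched below; this is not part of the commit, it assumes the function lives in llm.py (as in upstream LiveTalking), that importing llm works standalone on your setup, and that the Ollama host and qwen3:30b-a3b model from the diff are reachable. The stub implements only put_msg_txt, the single BaseReal method the function calls.

# Minimal test sketch (assumptions noted above): print each sentence segment as it is flushed.
from llm import llm_response

class PrintSink:
    """Stand-in for BaseReal: llm_response only needs put_msg_txt()."""
    def put_msg_txt(self, text):
        print("segment:", text)

if __name__ == "__main__":
    llm_response("这款酱酒怎么保证不是假酒?", PrintSink())  # hypothetical test question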