(new) 聚合前端;musetalk过渡优化;bugfix (#407)

* 1. 修复了musetalk方案中,当数字人说话状态变化时,嘴部画面跳变问题;
2. 新增现代美观的前端dashboard.html,集成了对话与朗读功能;
3. 修复了“'weights_only' is an invalid keyword argument for load()”报错。

* bugfix:修复视频连接状态不更新的bug

---------

Co-authored-by: marstaos <liu.marstaos@outlook.com>
main
Marstaos 4 months ago committed by GitHub
parent a267e87f1f
commit 777a89d20b
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194

5
.gitignore vendored

@ -15,4 +15,7 @@ pretrained
*.mp4 *.mp4
.DS_Store .DS_Store
workspace/log_ngp.txt workspace/log_ngp.txt
.idea .idea
keep_gpu.py
models/
*.log

@ -473,6 +473,7 @@ if __name__ == '__main__':
elif opt.transport=='rtcpush': elif opt.transport=='rtcpush':
pagename='rtcpushapi.html' pagename='rtcpushapi.html'
logger.info('start http server; http://<serverip>:'+str(opt.listenport)+'/'+pagename) logger.info('start http server; http://<serverip>:'+str(opt.listenport)+'/'+pagename)
logger.info('如果使用webrtc推荐访问webrtc集成前端: http://<serverip>:'+str(opt.listenport)+'/dashboard.html')
def run_server(runner): def run_server(runner):
loop = asyncio.new_event_loop() loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop) asyncio.set_event_loop(loop)

@ -267,23 +267,44 @@ class MuseReal(BaseReal):
def process_frames(self,quit_event,loop=None,audio_track=None,video_track=None): def process_frames(self,quit_event,loop=None,audio_track=None,video_track=None):
# 新增状态跟踪变量
self.last_speaking = False
self.transition_start = time.time()
self.transition_duration = 0.1 # 过渡时间
self.last_silent_frame = None # 静音帧缓存
self.last_speaking_frame = None # 说话帧缓存
while not quit_event.is_set(): while not quit_event.is_set():
try: try:
res_frame,idx,audio_frames = self.res_frame_queue.get(block=True, timeout=1) res_frame,idx,audio_frames = self.res_frame_queue.get(block=True, timeout=1)
except queue.Empty: except queue.Empty:
continue continue
if audio_frames[0][1]!=0 and audio_frames[1][1]!=0: #全为静音数据只需要取fullimg
# 检测状态变化
current_speaking = not (audio_frames[0][1]!=0 and audio_frames[1][1]!=0)
if current_speaking != self.last_speaking:
logger.info(f"状态切换:{'静音' if self.last_speaking else '说话'}{'说话' if current_speaking else '静音'}")
self.transition_start = time.time()
self.last_speaking = current_speaking
if audio_frames[0][1]!=0 and audio_frames[1][1]!=0:
self.speaking = False self.speaking = False
audiotype = audio_frames[0][1] audiotype = audio_frames[0][1]
if self.custom_index.get(audiotype) is not None: #有自定义视频 if self.custom_index.get(audiotype) is not None:
mirindex = self.mirror_index(len(self.custom_img_cycle[audiotype]),self.custom_index[audiotype]) mirindex = self.mirror_index(len(self.custom_img_cycle[audiotype]),self.custom_index[audiotype])
combine_frame = self.custom_img_cycle[audiotype][mirindex] target_frame = self.custom_img_cycle[audiotype][mirindex]
self.custom_index[audiotype] += 1 self.custom_index[audiotype] += 1
# if not self.custom_opt[audiotype].loop and self.custom_index[audiotype]>=len(self.custom_img_cycle[audiotype]):
# self.curr_state = 1 #当前视频不循环播放,切换到静音状态
else: else:
combine_frame = self.frame_list_cycle[idx] target_frame = self.frame_list_cycle[idx]
# 说话→静音过渡
if time.time() - self.transition_start < self.transition_duration and self.last_speaking_frame is not None:
alpha = min(1.0, (time.time() - self.transition_start) / self.transition_duration)
combine_frame = cv2.addWeighted(self.last_speaking_frame, 1-alpha, target_frame, alpha, 0)
else:
combine_frame = target_frame
# 缓存静音帧
self.last_silent_frame = combine_frame.copy()
else: else:
self.speaking = True self.speaking = True
bbox = self.coord_list_cycle[idx] bbox = self.coord_list_cycle[idx]
@ -291,20 +312,26 @@ class MuseReal(BaseReal):
x1, y1, x2, y2 = bbox x1, y1, x2, y2 = bbox
try: try:
res_frame = cv2.resize(res_frame.astype(np.uint8),(x2-x1,y2-y1)) res_frame = cv2.resize(res_frame.astype(np.uint8),(x2-x1,y2-y1))
except: except Exception as e:
logger.warning(f"resize error: {e}")
continue continue
mask = self.mask_list_cycle[idx] mask = self.mask_list_cycle[idx]
mask_crop_box = self.mask_coords_list_cycle[idx] mask_crop_box = self.mask_coords_list_cycle[idx]
#combine_frame = get_image(ori_frame,res_frame,bbox)
#t=time.perf_counter()
combine_frame = get_image_blending(ori_frame,res_frame,bbox,mask,mask_crop_box)
#print('blending time:',time.perf_counter()-t)
image = combine_frame #(outputs['image'] * 255).astype(np.uint8) # 静音→说话过渡
current_frame = get_image_blending(ori_frame,res_frame,bbox,mask,mask_crop_box)
if time.time() - self.transition_start < self.transition_duration and self.last_silent_frame is not None:
alpha = min(1.0, (time.time() - self.transition_start) / self.transition_duration)
combine_frame = cv2.addWeighted(self.last_silent_frame, 1-alpha, current_frame, alpha, 0)
else:
combine_frame = current_frame
# 缓存说话帧
self.last_speaking_frame = combine_frame.copy()
image = combine_frame
new_frame = VideoFrame.from_ndarray(image, format="bgr24") new_frame = VideoFrame.from_ndarray(image, format="bgr24")
asyncio.run_coroutine_threadsafe(video_track._queue.put((new_frame,None)), loop) asyncio.run_coroutine_threadsafe(video_track._queue.put((new_frame,None)), loop)
self.record_video_data(image) self.record_video_data(image)
#self.recordq_video.put(new_frame)
for audio_frame in audio_frames: for audio_frame in audio_frames:
frame,type,eventpoint = audio_frame frame,type,eventpoint = audio_frame
@ -312,12 +339,8 @@ class MuseReal(BaseReal):
new_frame = AudioFrame(format='s16', layout='mono', samples=frame.shape[0]) new_frame = AudioFrame(format='s16', layout='mono', samples=frame.shape[0])
new_frame.planes[0].update(frame.tobytes()) new_frame.planes[0].update(frame.tobytes())
new_frame.sample_rate=16000 new_frame.sample_rate=16000
# if audio_track._queue.qsize()>10:
# time.sleep(0.1)
asyncio.run_coroutine_threadsafe(audio_track._queue.put((new_frame,eventpoint)), loop) asyncio.run_coroutine_threadsafe(audio_track._queue.put((new_frame,eventpoint)), loop)
self.record_audio_data(frame) self.record_audio_data(frame)
#self.notify(eventpoint)
#self.recordq_audio.put(new_frame)
logger.info('musereal process_frames thread stop') logger.info('musereal process_frames thread stop')
def render(self,quit_event,loop=None,audio_track=None,video_track=None): def render(self,quit_event,loop=None,audio_track=None,video_track=None):

@ -80,7 +80,7 @@ class Resnet18(nn.Module):
return feat8, feat16, feat32 return feat8, feat16, feat32
def init_weight(self, model_path): def init_weight(self, model_path):
state_dict = torch.load(model_path, weights_only=False) #modelzoo.load_url(resnet18_url) state_dict = torch.load(model_path) #modelzoo.load_url(resnet18_url)
self_state_dict = self.state_dict() self_state_dict = self.state_dict()
for k, v in state_dict.items(): for k, v in state_dict.items():
if 'fc' in k: continue if 'fc' in k: continue

@ -0,0 +1,772 @@
<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>livetalking数字人交互平台</title>
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/css/bootstrap.min.css" rel="stylesheet">
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/bootstrap-icons@1.10.0/font/bootstrap-icons.css">
<style>
:root {
--primary-color: #4361ee;
--secondary-color: #3f37c9;
--accent-color: #4895ef;
--background-color: #f8f9fa;
--card-bg: #ffffff;
--text-color: #212529;
--border-radius: 10px;
--box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
body {
font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
background-color: var(--background-color);
color: var(--text-color);
min-height: 100vh;
padding-top: 20px;
}
.dashboard-container {
max-width: 1400px;
margin: 0 auto;
padding: 20px;
}
.card {
background-color: var(--card-bg);
border-radius: var(--border-radius);
box-shadow: var(--box-shadow);
border: none;
margin-bottom: 20px;
overflow: hidden;
}
.card-header {
background-color: var(--primary-color);
color: white;
font-weight: 600;
padding: 15px 20px;
border-bottom: none;
}
.video-container {
position: relative;
width: 100%;
background-color: #000;
border-radius: var(--border-radius);
overflow: hidden;
display: flex;
justify-content: center;
align-items: center;
}
video {
max-width: 100%;
max-height: 100%;
display: block;
border-radius: var(--border-radius);
}
.controls-container {
padding: 20px;
}
.btn-primary {
background-color: var(--primary-color);
border-color: var(--primary-color);
}
.btn-primary:hover {
background-color: var(--secondary-color);
border-color: var(--secondary-color);
}
.btn-outline-primary {
color: var(--primary-color);
border-color: var(--primary-color);
}
.btn-outline-primary:hover {
background-color: var(--primary-color);
color: white;
}
.form-control {
border-radius: var(--border-radius);
padding: 10px 15px;
border: 1px solid #ced4da;
}
.form-control:focus {
border-color: var(--accent-color);
box-shadow: 0 0 0 0.25rem rgba(67, 97, 238, 0.25);
}
.status-indicator {
width: 10px;
height: 10px;
border-radius: 50%;
display: inline-block;
margin-right: 5px;
}
.status-connected {
background-color: #28a745;
}
.status-disconnected {
background-color: #dc3545;
}
.status-connecting {
background-color: #ffc107;
}
.asr-container {
height: 300px;
overflow-y: auto;
padding: 15px;
background-color: #f8f9fa;
border-radius: var(--border-radius);
border: 1px solid #ced4da;
}
.asr-text {
margin-bottom: 10px;
padding: 10px;
background-color: white;
border-radius: var(--border-radius);
box-shadow: 0 1px 3px rgba(0, 0, 0, 0.1);
}
.user-message {
background-color: #e3f2fd;
border-left: 4px solid var(--primary-color);
}
.system-message {
background-color: #f1f8e9;
border-left: 4px solid #8bc34a;
}
.recording-indicator {
position: absolute;
top: 15px;
right: 15px;
background-color: rgba(220, 53, 69, 0.8);
color: white;
padding: 5px 10px;
border-radius: 20px;
font-size: 0.8rem;
display: none;
}
.recording-indicator.active {
display: flex;
align-items: center;
}
.recording-indicator .blink {
width: 10px;
height: 10px;
background-color: #fff;
border-radius: 50%;
margin-right: 5px;
animation: blink 1s infinite;
}
@keyframes blink {
0% { opacity: 1; }
50% { opacity: 0.3; }
100% { opacity: 1; }
}
.mode-switch {
margin-bottom: 20px;
}
.nav-tabs .nav-link {
color: var(--text-color);
border: none;
padding: 10px 20px;
border-radius: var(--border-radius) var(--border-radius) 0 0;
}
.nav-tabs .nav-link.active {
color: var(--primary-color);
background-color: var(--card-bg);
border-bottom: 3px solid var(--primary-color);
font-weight: 600;
}
.tab-content {
padding: 20px;
background-color: var(--card-bg);
border-radius: 0 0 var(--border-radius) var(--border-radius);
}
.settings-panel {
padding: 15px;
background-color: #f8f9fa;
border-radius: var(--border-radius);
margin-top: 15px;
}
.footer {
text-align: center;
margin-top: 30px;
padding: 20px 0;
color: #6c757d;
font-size: 0.9rem;
}
.voice-record-btn {
width: 60px;
height: 60px;
border-radius: 50%;
background-color: var(--primary-color);
color: white;
display: flex;
justify-content: center;
align-items: center;
cursor: pointer;
transition: all 0.2s ease;
box-shadow: 0 2px 5px rgba(0,0,0,0.2);
margin: 0 auto;
}
.voice-record-btn:hover {
background-color: var(--secondary-color);
transform: scale(1.05);
}
.voice-record-btn:active {
background-color: #dc3545;
transform: scale(0.95);
}
.voice-record-btn i {
font-size: 24px;
}
.voice-record-label {
text-align: center;
margin-top: 10px;
font-size: 14px;
color: #6c757d;
}
.video-size-control {
margin-top: 15px;
}
.recording-pulse {
animation: pulse 1.5s infinite;
}
@keyframes pulse {
0% {
box-shadow: 0 0 0 0 rgba(220, 53, 69, 0.7);
}
70% {
box-shadow: 0 0 0 15px rgba(220, 53, 69, 0);
}
100% {
box-shadow: 0 0 0 0 rgba(220, 53, 69, 0);
}
}
</style>
</head>
<body>
<div class="dashboard-container">
<div class="row">
<div class="col-12">
<h1 class="text-center mb-4">livetalking数字人交互平台</h1>
</div>
</div>
<div class="row">
<!-- 视频区域 -->
<div class="col-lg-8">
<div class="card">
<div class="card-header d-flex justify-content-between align-items-center">
<div>
<span class="status-indicator status-disconnected" id="connection-status"></span>
<span id="status-text">未连接</span>
</div>
</div>
<div class="card-body p-0">
<div class="video-container">
<video id="video" autoplay playsinline></video>
<div class="recording-indicator" id="recording-indicator">
<div class="blink"></div>
<span>录制中</span>
</div>
</div>
<div class="controls-container">
<div class="row">
<div class="col-md-6 mb-3">
<button class="btn btn-primary w-100" id="start">
<i class="bi bi-play-fill"></i> 开始连接
</button>
<button class="btn btn-danger w-100" id="stop" style="display: none;">
<i class="bi bi-stop-fill"></i> 停止连接
</button>
</div>
<div class="col-md-6 mb-3">
<div class="d-flex">
<button class="btn btn-outline-primary flex-grow-1 me-2" id="btn_start_record">
<i class="bi bi-record-fill"></i> 开始录制
</button>
<button class="btn btn-outline-danger flex-grow-1" id="btn_stop_record" disabled>
<i class="bi bi-stop-fill"></i> 停止录制
</button>
</div>
</div>
</div>
<div class="row">
<div class="col-12">
<div class="video-size-control">
<label for="video-size-slider" class="form-label">视频大小调节: <span id="video-size-value">100%</span></label>
<input type="range" class="form-range" id="video-size-slider" min="50" max="150" value="100">
</div>
</div>
</div>
<div class="settings-panel mt-3">
<div class="row">
<div class="col-md-12">
<div class="form-check form-switch mb-3">
<input class="form-check-input" type="checkbox" id="use-stun">
<label class="form-check-label" for="use-stun">使用STUN服务器</label>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
</div>
<!-- 右侧交互 -->
<div class="col-lg-4">
<div class="card">
<div class="card-header">
<ul class="nav nav-tabs card-header-tabs" id="interaction-tabs" role="tablist">
<li class="nav-item" role="presentation">
<button class="nav-link active" id="chat-tab" data-bs-toggle="tab" data-bs-target="#chat" type="button" role="tab" aria-controls="chat" aria-selected="true">对话模式</button>
</li>
<li class="nav-item" role="presentation">
<button class="nav-link" id="tts-tab" data-bs-toggle="tab" data-bs-target="#tts" type="button" role="tab" aria-controls="tts" aria-selected="false">朗读模式</button>
</li>
</ul>
</div>
<div class="card-body">
<div class="tab-content" id="interaction-tabs-content">
<!-- 对话模式 -->
<div class="tab-pane fade show active" id="chat" role="tabpanel" aria-labelledby="chat-tab">
<div class="asr-container mb-3" id="chat-messages">
<div class="asr-text system-message">
系统: 欢迎使用livetalking,请点击"开始连接"按钮开始对话。
</div>
</div>
<form id="chat-form">
<div class="input-group mb-3">
<textarea class="form-control" id="chat-message" rows="3" placeholder="输入您想对数字人说的话..."></textarea>
<button class="btn btn-primary" type="submit">
<i class="bi bi-send"></i> 发送
</button>
</div>
</form>
<!-- 按住说话按钮 -->
<div class="voice-record-btn" id="voice-record-btn">
<i class="bi bi-mic-fill"></i>
</div>
<div class="voice-record-label">按住说话,松开发送</div>
</div>
<!-- 朗读模式 -->
<div class="tab-pane fade" id="tts" role="tabpanel" aria-labelledby="tts-tab">
<form id="echo-form">
<div class="mb-3">
<label for="message" class="form-label">输入要朗读的文本</label>
<textarea class="form-control" id="message" rows="6" placeholder="输入您想让数字人朗读的文字..."></textarea>
</div>
<button type="submit" class="btn btn-primary w-100">
<i class="bi bi-volume-up"></i> 朗读文本
</button>
</form>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="footer">
<p>Made with ❤️ by Marstaos | Frontend & Performance Optimization</p>
</div>
</div>
<!-- 隐藏的会话ID -->
<input type="hidden" id="sessionid" value="0">
<script src="client.js"></script>
<script src="srs.sdk.js"></script>
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.0/dist/js/bootstrap.bundle.min.js"></script>
<script src="https://code.jquery.com/jquery-3.6.0.min.js"></script>
<script>
$(document).ready(function() {
// Live video scaling: mirror the slider value (50–150) into the
// percentage label and onto the <video> element's width.
$('#video-size-slider').on('input', function() {
    const percent = $(this).val();
    $('#video-size-value').text(percent + '%');
    $('#video').css('width', percent + '%');
});
// Reflect the connection state in the status dot and its label.
// status: 'connected' | 'connecting' | anything else (treated as disconnected).
function updateConnectionStatus(status) {
    const states = {
        connected:    { css: 'status-connected',    label: '已连接' },
        connecting:   { css: 'status-connecting',   label: '连接中...' },
        disconnected: { css: 'status-disconnected', label: '未连接' }
    };
    const state = states[status] || states.disconnected;
    const indicator = $('#connection-status');
    indicator.removeClass('status-connected status-disconnected status-connecting');
    indicator.addClass(state.css);
    $('#status-text').text(state.label);
}
// 添加聊天消息
// Append one chat bubble to the conversation pane and keep the pane
// scrolled to the newest entry. type 'user' renders the blue "您"
// bubble; anything else renders the system "数字人" style.
function addChatMessage(message, type = 'user') {
    const isUser = type === 'user';
    const messageClass = isUser ? 'user-message' : 'system-message';
    const sender = isUser ? '您' : '数字人';
    const container = $('#chat-messages');
    container.append($(`
        <div class="asr-text ${messageClass}">
            ${sender}: ${message}
        </div>
    `));
    container.scrollTop(container[0].scrollHeight);
}
// 开始/停止按钮
// "Start" button: kick off the WebRTC session (start() comes from
// client.js), swap the visible button, and poll the <video> element
// until actual frames arrive before reporting "connected".
$('#start').click(function() {
    updateConnectionStatus('connecting');
    start();
    $(this).hide();
    $('#stop').show();
    // readyState >= 3 plus a non-zero width means decodable video data.
    const poll = setInterval(function() {
        const video = document.getElementById('video');
        if (video.readyState >= 3 && video.videoWidth > 0) {
            updateConnectionStatus('connected');
            clearInterval(poll);
        }
    }, 2000); // check every 2 seconds
    // Stop polling after 60s if the stream never became ready.
    setTimeout(function() {
        if (poll) {
            clearInterval(poll);
        }
    }, 60000);
});
// "Stop" button: tear down the session (stop() from client.js),
// reset the status indicator, and restore the start button.
$('#stop').click(function() {
    stop();
    updateConnectionStatus('disconnected');
    $(this).hide();
    $('#start').show();
});
// 录制功能
// Both recording buttons POST the same /record payload shape and apply
// mirror-image UI state; the duplicated fetch logic is factored into one
// helper. Only the command type, log strings and target state differ.
function sendRecordCommand(commandType, okMsg, failMsg, recordingActive) {
    fetch('/record', {
        body: JSON.stringify({
            type: commandType,
            sessionid: parseInt(document.getElementById('sessionid').value),
        }),
        headers: {
            'Content-Type': 'application/json'
        },
        method: 'POST'
    }).then(function(response) {
        if (response.ok) {
            console.log(okMsg);
            // While recording: start disabled, stop enabled, indicator shown.
            $('#btn_start_record').prop('disabled', recordingActive);
            $('#btn_stop_record').prop('disabled', !recordingActive);
            $('#recording-indicator').toggleClass('active', recordingActive);
        } else {
            console.error(failMsg);
        }
    }).catch(function(error) {
        console.error('Error:', error);
    });
}
$('#btn_start_record').click(function() {
    console.log('Starting recording...');
    sendRecordCommand('start_record', 'Recording started.', 'Failed to start recording.', true);
});
$('#btn_stop_record').click(function() {
    console.log('Stopping recording...');
    sendRecordCommand('end_record', 'Recording stopped.', 'Failed to stop recording.', false);
});
// Read-aloud mode: POST the textarea content to /human as an 'echo'
// request (interrupting any current speech), then clear the input and
// confirm in the chat pane.
$('#echo-form').on('submit', function(e) {
    e.preventDefault();
    const message = $('#message').val();
    if (!message.trim()) {
        return; // ignore empty / whitespace-only input
    }
    console.log('Sending echo message:', message);
    const payload = {
        text: message,
        type: 'echo',
        interrupt: true,
        sessionid: parseInt(document.getElementById('sessionid').value),
    };
    fetch('/human', {
        body: JSON.stringify(payload),
        headers: {
            'Content-Type': 'application/json'
        },
        method: 'POST'
    });
    $('#message').val('');
    addChatMessage(`已发送朗读请求: "${message}"`, 'system');
});
// 聊天模式表单提交
// Chat mode: POST the message to /human as a 'chat' request
// (interrupting current speech), echo it into the chat pane, and
// clear the input box.
$('#chat-form').on('submit', function(e) {
    e.preventDefault();
    const message = $('#chat-message').val();
    if (!message.trim()) {
        return; // ignore empty / whitespace-only input
    }
    console.log('Sending chat message:', message);
    const payload = {
        text: message,
        type: 'chat',
        interrupt: true,
        sessionid: parseInt(document.getElementById('sessionid').value),
    };
    fetch('/human', {
        body: JSON.stringify(payload),
        headers: {
            'Content-Type': 'application/json'
        },
        method: 'POST'
    });
    addChatMessage(message, 'user');
    $('#chat-message').val('');
});
// 按住说话功能
// --- Push-to-talk state ---
let mediaRecorder;      // MediaRecorder capturing the microphone stream
let audioChunks = [];   // raw audio chunks from the current recording
let isRecording = false;    // true while the mic button is held down
let recognition;        // SpeechRecognition instance (undefined if unsupported)
// Check whether the browser supports speech recognition
const isSpeechRecognitionSupported = 'webkitSpeechRecognition' in window || 'SpeechRecognition' in window;
if (isSpeechRecognitionSupported) {
    recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
    recognition.continuous = true;      // keep listening until explicitly stopped
    recognition.interimResults = true;  // receive partial hypotheses as they form
    recognition.lang = 'zh-CN';
    // Accumulate results; only finalized transcripts are written into the
    // chat textarea (interim ones are collected but not displayed).
    recognition.onresult = function(event) {
        let interimTranscript = '';
        let finalTranscript = '';
        for (let i = event.resultIndex; i < event.results.length; ++i) {
            if (event.results[i].isFinal) {
                finalTranscript += event.results[i][0].transcript;
            } else {
                interimTranscript += event.results[i][0].transcript;
            }
        }
        if (finalTranscript) {
            $('#chat-message').val(finalTranscript);
        }
    };
    recognition.onerror = function(event) {
        console.error('语音识别错误:', event.error);
    };
}
// 按住说话按钮事件
// Push-to-talk gestures: press (mouse or touch) starts capturing;
// release — or dragging off the button — stops and sends.
$('#voice-record-btn')
    .on('mousedown touchstart', function(e) {
        e.preventDefault();
        startRecording();
    })
    .on('mouseup mouseleave touchend', function() {
        if (!isRecording) return;
        stopRecording();
    });
// 开始录音
// Begin a push-to-talk capture: request microphone access, start a
// MediaRecorder for the raw audio plus (when available) the browser
// speech recognizer, and switch the button to its "recording" look.
// No-op if a capture is already in progress.
function startRecording() {
    if (isRecording) return;
    navigator.mediaDevices.getUserMedia({ audio: true })
        .then(function(stream) {
            audioChunks = [];
            mediaRecorder = new MediaRecorder(stream);
            mediaRecorder.ondataavailable = function(event) {
                if (event.data.size > 0) {
                    audioChunks.push(event.data);
                }
            };
            mediaRecorder.start();
            isRecording = true;
            $('#voice-record-btn')
                .addClass('recording-pulse')
                .css('background-color', '#dc3545');
            if (recognition) {
                recognition.start();
            }
        })
        .catch(function(error) {
            console.error('无法访问麦克风:', error);
            alert('无法访问麦克风,请检查浏览器权限设置。');
        });
}
// End a push-to-talk capture: stop the recorder and release the mic
// tracks, restore the button visuals, stop speech recognition, then —
// after a short delay so the recognizer's final result can land in the
// textarea — send the recognized text as a chat message.
function stopRecording() {
    if (!isRecording) return;
    mediaRecorder.stop();
    isRecording = false;
    // Release all microphone tracks
    mediaRecorder.stream.getTracks().forEach(track => track.stop());
    // Restore the button's idle appearance
    $('#voice-record-btn')
        .removeClass('recording-pulse')
        .css('background-color', '');
    if (recognition) {
        recognition.stop();
    }
    // Grab the recognized text once final results have been written
    setTimeout(function() {
        const recognizedText = $('#chat-message').val().trim();
        if (!recognizedText) return;
        fetch('/human', {
            body: JSON.stringify({
                text: recognizedText,
                type: 'chat',
                interrupt: true,
                sessionid: parseInt(document.getElementById('sessionid').value),
            }),
            headers: {
                'Content-Type': 'application/json'
            },
            method: 'POST'
        });
        addChatMessage(recognizedText, 'user');
        $('#chat-message').val('');
    }, 500);
}
// WebRTC 相关功能
// Chain onto whatever WebRTC lifecycle callbacks client.js may already
// have installed, so the status indicator tracks the real connection
// state without clobbering existing handlers.
const previousOnConnected =
    (typeof window.onWebRTCConnected === 'function') ? window.onWebRTCConnected : null;
window.onWebRTCConnected = function() {
    updateConnectionStatus('connected');
    if (previousOnConnected) previousOnConnected();
};
// Same chaining for the disconnect side.
const previousOnDisconnected =
    (typeof window.onWebRTCDisconnected === 'function') ? window.onWebRTCDisconnected : null;
window.onWebRTCDisconnected = function() {
    updateConnectionStatus('disconnected');
    if (previousOnDisconnected) previousOnDisconnected();
};
// SRS WebRTC播放功能
var sdk = null; // global SRS handle so a re-play can clean up the previous session
// Pull the SRS WHEP stream (app=live, stream=livestream) from port 1985
// into the <video> element via srs.sdk.js.
function startPlay() {
    // Tear down any previous connection first
    if (sdk) {
        sdk.close();
    }
    sdk = new SrsRtcWhipWhepAsync();
    $('#video').prop('srcObject', sdk.stream);
    const url = "http://" + window.location.hostname + ":1985/rtc/v1/whep/?app=live&stream=livestream";
    sdk.play(url).then(function(session) {
        console.log('WebRTC播放已启动会话ID:', session.sessionid);
    }).catch(function(reason) {
        sdk.close();
        console.error('WebRTC播放失败:', reason);
    });
}
});
</script>
</body>
</html>
Loading…
Cancel
Save