@@ -104,8 +104,13 @@ RESP: None
 
 import argparse
 import os
-import signal
 import sys
+
+now_dir = os.getcwd()
+sys.path.append(now_dir)
+sys.path.append("%s/GPT_SoVITS" % (now_dir))
+
+import signal
 from time import time as ttime
 import torch
 import librosa
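For orientation (not part of the patch): the hunk above adds the `sys.path` setup ahead of the remaining imports, and relocates `import signal` after it, so that modules under `GPT_SoVITS` resolve when `api.py` is launched from the repository root. A minimal equivalent sketch, using `os.path.join` instead of `%`-formatting purely for illustration:

```python
import os
import sys

# Assumes the script is started from the repository root, as api.py expects.
now_dir = os.getcwd()
for p in (now_dir, os.path.join(now_dir, "GPT_SoVITS")):
    if p not in sys.path:  # avoid piling up duplicate entries on re-run
        sys.path.append(p)
```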
@@ -439,6 +444,8 @@ def handle(refer_wav_path, prompt_text, prompt_language, text, text_language):
     wav.seek(0)
 
     torch.cuda.empty_cache()
-    torch.mps.empty_cache()
+    if device == "mps":
+        print('executed torch.mps.empty_cache()')
+        torch.mps.empty_cache()
     return StreamingResponse(wav, media_type="audio/wav")
 
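The guard above only touches the MPS allocator when the server is actually running on an Apple GPU; calling `torch.mps.empty_cache()` unconditionally can fail on builds without MPS support. A minimal sketch of the same pattern as a standalone helper (the name `empty_device_cache` is illustrative, not from the patch), assuming a PyTorch build recent enough to ship `torch.mps`:

```python
import torch

def empty_device_cache(device: str) -> None:
    # Release cached allocator memory for the backend actually in use.
    if device == "cuda" and torch.cuda.is_available():
        torch.cuda.empty_cache()
    elif device == "mps" and torch.backends.mps.is_available():
        torch.mps.empty_cache()
    # CPU has no allocator cache to clear.
```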