add ASR_Model Select
parent 3a03e8115d
commit 67f87d6306
@@ -0,0 +1,31 @@
import os


def check_fw_local_models():
    '''
    Check at startup whether Faster Whisper models are present locally.
    '''
    model_size_list = [
        "tiny", "tiny.en",
        "base", "base.en",
        "small", "small.en",
        "medium", "medium.en",
        "large", "large-v1",
        "large-v2", "large-v3"]
    for i, size in enumerate(model_size_list):
        if os.path.exists(f'tools/asr/models/faster-whisper-{size}'):
            model_size_list[i] = size + '(local)'
    return model_size_list


asr_dict = {
    "达摩 ASR (中文)": {  # DAMO ASR (Chinese)
        'lang': ['zh'],
        'size': ['large'],
        'path': 'funasr_asr.py',
    },
    "Faster Whisper (多语种)": {  # Faster Whisper (multilingual)
        'lang': ['auto', 'zh', 'en', 'ja'],
        'size': check_fw_local_models(),
        'path': 'fasterwhisper_asr.py'
    }
}
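The asr_dict table is what the rest of the tool can dispatch on: each entry maps a UI label to the languages it supports, the model sizes it offers, and the script that implements it. A minimal sketch of such a caller, assuming the scripts live under tools/asr/ and are launched as subprocesses; run_asr and its argument names are hypothetical and not part of this commit.

import subprocess
import sys

from config import asr_dict  # the module added in the hunk above


def run_asr(tool_name, input_folder, output_folder, model_size, language):
    # Look up the selected ASR backend and launch its CLI script.
    entry = asr_dict[tool_name]
    if language not in entry['lang'] or model_size not in entry['size']:
        raise ValueError("unsupported language or model size for this backend")
    cmd = [
        sys.executable, f"tools/asr/{entry['path']}",  # assumed location of the scripts
        "-i", input_folder,
        "-o", output_folder,
        "-s", model_size,
        "-l", language,
    ]
    subprocess.run(cmd, check=True)

# Example: run_asr("Faster Whisper (多语种)", "input/wavs", "output/asr_opt", "large-v3", "auto")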
@@ -0,0 +1,97 @@
import argparse
import os
import traceback
import requests
from glob import glob

from faster_whisper import WhisperModel
from tqdm import tqdm

from config import check_fw_local_models

os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

language_code_list = [
    "af", "am", "ar", "as", "az",
    "ba", "be", "bg", "bn", "bo",
    "br", "bs", "ca", "cs", "cy",
    "da", "de", "el", "en", "es",
    "et", "eu", "fa", "fi", "fo",
    "fr", "gl", "gu", "ha", "haw",
    "he", "hi", "hr", "ht", "hu",
    "hy", "id", "is", "it", "ja",
    "jw", "ka", "kk", "km", "kn",
    "ko", "la", "lb", "ln", "lo",
    "lt", "lv", "mg", "mi", "mk",
    "ml", "mn", "mr", "ms", "mt",
    "my", "ne", "nl", "nn", "no",
    "oc", "pa", "pl", "ps", "pt",
    "ro", "ru", "sa", "sd", "si",
    "sk", "sl", "sn", "so", "sq",
    "sr", "su", "sv", "sw", "ta",
    "te", "tg", "th", "tk", "tl",
    "tr", "tt", "uk", "ur", "uz",
    "vi", "yi", "yo", "zh", "yue",
    "auto"]


def execute_asr(input_folder, output_folder, model_size, language):
    if 'local' in model_size:
        model_size = model_size.split('(')[0]
        model_path = f'tools/asr/models/faster-whisper-{model_size}'
    else:
        model_path = model_size
    if language == 'auto':
        language = None  # Leave the language unset; the model outputs the most probable one.

    try:
        model = WhisperModel(model_path, device="cuda", compute_type="float16")
    except:
        return print(traceback.format_exc())

    output = []
    output_file_name = os.path.basename(input_folder)
    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')

    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    for file in tqdm(glob(os.path.join(input_folder, '**/*.wav'), recursive=True)):
        try:
            segments, info = model.transcribe(
                audio=file,
                beam_size=5,
                vad_filter=True,
                vad_parameters=dict(min_silence_duration_ms=700),
                language=language)
            text = ''
            for segment in segments:
                text += segment.text
            output.append(f"{file}|{output_file_name}|{info.language.upper()}|{text}")
        except:
            return print(traceback.format_exc())

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
        print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")  # "ASR task finished -> annotation file path: ..."
    return output_file_path


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True,
                        help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default='large-v3',
                        choices=check_fw_local_models(),
                        help="Model Size of Faster Whisper")
    parser.add_argument("-l", "--language", type=str, default='zh',
                        choices=language_code_list,
                        help="Language of the audio files.")

    cmd = parser.parse_args()
    output_file_path = execute_asr(
        input_folder=cmd.input_folder,
        output_folder=cmd.output_folder,
        model_size=cmd.model_size,
        language=cmd.language,
    )
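execute_asr above hard-codes device="cuda" with float16, so the WhisperModel constructor fails on machines without a CUDA GPU and the script only prints the traceback. A minimal sketch of a device fallback, assuming torch is installed alongside faster-whisper; load_whisper_model is a hypothetical helper, not part of this commit.

import torch
from faster_whisper import WhisperModel


def load_whisper_model(model_path):
    # Prefer GPU with float16; fall back to CPU with int8 quantization otherwise.
    if torch.cuda.is_available():
        return WhisperModel(model_path, device="cuda", compute_type="float16")
    return WhisperModel(model_path, device="cpu", compute_type="int8")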
@@ -0,0 +1,66 @@
# -*- coding:utf-8 -*-

import argparse
import os
import traceback
from tqdm import tqdm

from funasr import AutoModel

path_asr = 'tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad = 'tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc = 'tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr = path_asr if os.path.exists(path_asr) else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

model = AutoModel(
    model=path_asr,
    model_revision="v2.0.4",
    vad_model=path_vad,
    vad_model_revision="v2.0.4",
    punc_model=path_punc,
    punc_model_revision="v2.0.4",
)


def execute_asr(input_folder, output_folder, model_size, language):
    input_file_names = os.listdir(input_folder)
    input_file_names.sort()

    output = []
    output_file_name = os.path.basename(input_folder)

    for name in tqdm(input_file_names):
        try:
            text = model.generate(input="%s/%s" % (input_folder, name))[0]["text"]
            output.append(f"{input_folder}/{name}|{output_file_name}|{language.upper()}|{text}")
        except:
            return print(traceback.format_exc())

    output_folder = output_folder or "output/asr_opt"
    os.makedirs(output_folder, exist_ok=True)
    output_file_path = os.path.abspath(f'{output_folder}/{output_file_name}.list')

    with open(output_file_path, "w", encoding="utf-8") as f:
        f.write("\n".join(output))
        print(f"ASR 任务完成->标注文件路径: {output_file_path}\n")  # "ASR task finished -> annotation file path: ..."
    return output_file_path


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True,
                        help="Output folder to store transcriptions.")
    parser.add_argument("-s", "--model_size", type=str, default='large',
                        help="Model Size of FunASR is Large")
    parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh'],
                        help="Language of the audio files.")

    cmd = parser.parse_args()
    execute_asr(
        input_folder=cmd.input_folder,
        output_folder=cmd.output_folder,
        model_size=cmd.model_size,
        language=cmd.language,
    )
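Both fasterwhisper_asr.py and funasr_asr.py write one "wav_path|folder_name|LANGUAGE|text" line per audio file into a .list annotation file. A minimal sketch of reading that format back for downstream processing; parse_list and the example path are illustrative assumptions only.

def parse_list(list_path):
    # Each line: audio path | dataset/folder name | language code | transcription.
    entries = []
    with open(list_path, encoding="utf-8") as f:
        for line in f:
            wav_path, name, lang, text = line.rstrip("\n").split("|", 3)
            entries.append({"wav": wav_path, "name": name, "lang": lang, "text": text})
    return entries

# Example: entries = parse_list("output/asr_opt/my_dataset.list")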
@@ -1,42 +0,0 @@
import os
import argparse
import os
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
from glob import glob
from faster_whisper import WhisperModel


def main(input_folder, output_folder, output_filename, language):
    model = WhisperModel("large-v3", device="cuda", compute_type="float16")

    output_file = os.path.join(output_folder, output_filename)
    if not os.path.exists(output_folder):
        os.makedirs(output_folder)

    with open(output_file, 'w', encoding='utf-8') as f:
        for file in glob(os.path.join(input_folder, '**/*.wav'), recursive=True):
            segments, _ = model.transcribe(file, beam_size=10, vad_filter=True,
                                           vad_parameters=dict(min_silence_duration_ms=700), language=language)
            segments = list(segments)

            filename = os.path.basename(file).replace('.wav', '')
            directory = os.path.dirname(file)

            result_line = f"{file}|{language.upper()}|{segments[0].text}\n"
            f.write(result_line)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input_folder", type=str, required=True,
                        help="Path to the folder containing WAV files.")
    parser.add_argument("-o", "--output_folder", type=str, required=True, help="Output folder to store transcriptions.")
    parser.add_argument("-f", "--output_filename", type=str, default="transcriptions.txt", help="Name of the output text file.")
    parser.add_argument("-l", "--language", type=str, default='zh', choices=['zh', 'en', ...],
                        help="Language of the audio files.")

    cmd = parser.parse_args()

    input_folder = cmd.input_folder
    output_folder = cmd.output_folder
    output_filename = cmd.output_filename
    language = cmd.language
    main(input_folder, output_folder, output_filename, language)
@@ -1,39 +0,0 @@
# -*- coding:utf-8 -*-

import sys, os, traceback

from funasr import AutoModel

dir = sys.argv[1]
if (dir[-1] == "/"): dir = dir[:-1]
# opt_name=dir.split("\\")[-1].split("/")[-1]
opt_name = os.path.basename(dir)

path_asr = 'tools/damo_asr/models/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch'
path_vad = 'tools/damo_asr/models/speech_fsmn_vad_zh-cn-16k-common-pytorch'
path_punc = 'tools/damo_asr/models/punc_ct-transformer_zh-cn-common-vocab272727-pytorch'
path_asr = path_asr if os.path.exists(path_asr) else "iic/speech_paraformer-large_asr_nat-zh-cn-16k-common-vocab8404-pytorch"
path_vad = path_vad if os.path.exists(path_vad) else "iic/speech_fsmn_vad_zh-cn-16k-common-pytorch"
path_punc = path_punc if os.path.exists(path_punc) else "iic/punc_ct-transformer_zh-cn-common-vocab272727-pytorch"

model = AutoModel(model=path_asr, model_revision="v2.0.4",
                  vad_model=path_vad,
                  vad_model_revision="v2.0.4",
                  punc_model=path_punc,
                  punc_model_revision="v2.0.4",
                  )


opt = []
file_names = os.listdir(dir)
file_names.sort()
for name in file_names:
    try:
        text = model.generate(input="%s/%s" % (dir, name))[0]["text"]
        opt.append("%s/%s|%s|ZH|%s" % (dir, name, opt_name, text))
    except:
        print(traceback.format_exc())

opt_dir = "output/asr_opt"
os.makedirs(opt_dir, exist_ok=True)
with open("%s/%s.list" % (opt_dir, opt_name), "w", encoding="utf-8") as f:
    f.write("\n".join(opt))