From 90ebefa78fd544da36eebe0b2003620879c921b0 Mon Sep 17 00:00:00 2001
From: KamioRinn <63162909+KamioRinn@users.noreply.github.com>
Date: Fri, 27 Jun 2025 10:41:52 +0800
Subject: [PATCH] make sure ort providers available (#2489)

---
 GPT_SoVITS/text/g2pw/onnx_api.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/GPT_SoVITS/text/g2pw/onnx_api.py b/GPT_SoVITS/text/g2pw/onnx_api.py
index 52eed44..1d5e423 100644
--- a/GPT_SoVITS/text/g2pw/onnx_api.py
+++ b/GPT_SoVITS/text/g2pw/onnx_api.py
@@ -93,13 +93,13 @@ class G2PWOnnxConverter:
         sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
         sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL
         sess_options.intra_op_num_threads = 2 if torch.cuda.is_available() else 0
-        try:
+        if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
             self.session_g2pW = onnxruntime.InferenceSession(
                 os.path.join(uncompress_path, "g2pW.onnx"),
                 sess_options=sess_options,
                 providers=["CUDAExecutionProvider", "CPUExecutionProvider"],
             )
-        except:
+        else:
             self.session_g2pW = onnxruntime.InferenceSession(
                 os.path.join(uncompress_path, "g2pW.onnx"),
                 sess_options=sess_options,
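
Note: for reference, below is a minimal standalone sketch of the provider-selection pattern this patch introduces: query onnxruntime.get_available_providers() up front instead of relying on a bare try/except around session creation. The model path "model.onnx" and the variable names are illustrative placeholders, not part of the patch.

    # Minimal sketch, assuming onnxruntime is installed and "model.onnx" exists.
    # Check which execution providers this onnxruntime build actually ships
    # before asking for CUDA, rather than catching the failure afterwards.
    import onnxruntime

    sess_options = onnxruntime.SessionOptions()
    sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL
    sess_options.execution_mode = onnxruntime.ExecutionMode.ORT_SEQUENTIAL

    if "CUDAExecutionProvider" in onnxruntime.get_available_providers():
        # GPU-enabled build: prefer CUDA, keep CPU as a fallback provider
        providers = ["CUDAExecutionProvider", "CPUExecutionProvider"]
    else:
        # CPU-only build: requesting CUDAExecutionProvider here would fail or warn
        providers = ["CPUExecutionProvider"]

    session = onnxruntime.InferenceSession(
        "model.onnx",          # placeholder path for illustration
        sess_options=sess_options,
        providers=providers,
    )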