添加代码注释

V0.1.0
jiangxt 2 years ago
parent c00546eedb
commit 933ae42228

@ -15,23 +15,31 @@ warnings.filterwarnings("ignore")
class PP_TSMv2(object):
"""PP-TSMv2模型中常用的参数初始化"""
def __init__(self,use_gpu=True,batch_size=1,ir_optim=True,\
disable_glog=False,save_name=None,enable_mklddn=False,\
precision="fp32",gpu_mem=8000,cpu_threads=None,time_test_file=False):
precision="fp32",gpu_mem=8000,cpu_threads=None):
self.use_gpu = use_gpu
self.cpu_threads = cpu_threads
self.use_gpu = use_gpu #是否使用GPU
self.cpu_threads = cpu_threads #cpu线程数
self.batch_size = batch_size
self.ir_optim = ir_optim
self.ir_optim = ir_optim #是否开启IR优化
self.disable_glog = disable_glog
self.gpu_mem = gpu_mem
self.enable_mkldnn = enable_mklddn
self.precision = precision
self.save_name = save_name
self.time_test_file = time_test_file
self.gpu_mem = gpu_mem #GPU存储大小
self.enable_mkldnn = enable_mklddn #是否开启mkldnn
self.precision = precision #mkldnn精度
self.save_name = save_name #转化推理模型存放名称
def create_paddle_predictor(self,model_f,pretr_p,cfg):
"""
创建推理引擎
model_f:可推理模型存放的路径+配置文件
pretr_p:训练后的参数存放文件
cfg:模型配置文件
"""
config = Config(model_f,pretr_p)
if self.use_gpu:
config.enable_use_gpu(self.gpu_mem,0)
@ -60,10 +68,17 @@ class PP_TSMv2(object):
def exportmodel(self,config,pretr_p,output_p):
"""
加载训练后的模型参数生成可推理预测的模型
pretr_p:训练后的参数存放文件
output_p:转化为可推理模型的存放路径
"""
cfg, model_name = trim_config(get_config(config, overrides=None, show=False))
# pretr_p = str(pretr_p)
print(f"Building model({model_name})...")
#创建推理模型
model = build_model(cfg.MODEL)
assert osp.isfile(
pretr_p
@ -73,6 +88,8 @@ class PP_TSMv2(object):
os.makedirs(output_p)
print(f"Loading params from ({pretr_p})...")
# 加载推理模型参数
params = paddle.load(pretr_p)
model.set_dict(params)
@ -83,6 +100,8 @@ class PP_TSMv2(object):
layer.rep()
input_spec = get_input_spec(cfg.INFERENCE, model_name)
#将模型转化为静态图以及模型的保存
model = to_static(model, input_spec=input_spec)
paddle.jit.save(model,osp.join(output_p, model_name if self.save_name is None else self.save_name))
print(f"model ({model_name}) has been already saved in ({output_p}).")
@ -90,12 +109,24 @@ class PP_TSMv2(object):
def predict(self,config,input_f,batch_size,model_f,params_f):
"""
推理模型,对数据进行推理预测
config :PP-TSMv2模型的配置文件
input_f:待推理数据集的存放路径
batch_size:模型推理中所取数据的多少,default = 1
model_f:可推理模型存放的路径+配置文件
params_f:可推理模型的参数
"""
cfg = get_config(config, overrides=None, show=False)
model_name = cfg.model_name
print(f"Inference model({model_name})...")
#创建推理模型
InferenceHelper = build_inference_helper(cfg.INFERENCE)
_ , predictor = self.create_paddle_predictor(model_f,params_f,cfg) # 要改 model_f,pretr_p,cfg
#创建推理引擎
_ , predictor = self.create_paddle_predictor(model_f,params_f,cfg)
# get input_tensor and output_tensor
input_names = predictor.get_input_names()
@ -113,15 +144,19 @@ class PP_TSMv2(object):
for st_idx in range(0, len(files), batch_num):
ed_idx = min(st_idx + batch_num, len(files))
#输出数据预处理
batched_inputs = InferenceHelper.preprocess_batch(files[st_idx:ed_idx])
for i in range(len(input_tensor_list)):
input_tensor_list[i].copy_from_cpu(batched_inputs[i])
#推理引擎开始推理
predictor.run()
batched_outputs = []
for j in range(len(output_tensor_list)):
batched_outputs.append(output_tensor_list[j].copy_to_cpu())
#输出推理结果
InferenceHelper.postprocess(batched_outputs,True)
@ -129,16 +164,16 @@ class PP_TSMv2(object):
def main():
config='/home/xznsh/data/PaddleVideo/configs/recognition/pptsm/v2/pptsm_lcnet_k400_16frames_uniform.yaml' #配置文件地址
input_file='/home/xznsh/data/PaddleVideo/data/dataset/video_seg_re_hand' #推理数据集存放的地址
pretrain_params='/home/xznsh/data/PaddleVideo/output/ppTSMv2/ppTSMv2_best.pdparams' #训练后模型参数文件存放
output_path='/home/xznsh/data/PaddleVideo/inference/infer1' #推理模型存放地址
model_file='/home/xznsh/data/PaddleVideo/inference/infer1/ppTSMv2.pdmodel' #推理模型存放地址
params_file='/home/xznsh/data/PaddleVideo/inference/infer1/ppTSMv2.pdiparams' #推理模型参数存放地址
config='/home/xznsh/data/PaddleVideo/configs/recognition/pptsm/v2/pptsm_lcnet_k400_16frames_uniform.yaml' #配置文件地址
input_file='/home/xznsh/data/PaddleVideo/data/dataset/video_seg_re_hand' #推理数据集存放的地址
pretrain_params='/home/xznsh/data/PaddleVideo/output/ppTSMv2/ppTSMv2_best.pdparams' #训练后模型参数文件存放
output_path='/home/xznsh/data/PaddleVideo/inference/infer1' #推理模型存放地址
model_file='/home/xznsh/data/PaddleVideo/inference/infer1/ppTSMv2.pdmodel' #推理模型存放地址
params_file='/home/xznsh/data/PaddleVideo/inference/infer1/ppTSMv2.pdiparams' #推理模型参数存放地址
batch_size= 1
PP_TSMv2().exportmodel(config,pretrain_params,output_path) #输出推理模型
PP_TSMv2().exportmodel(config,pretrain_params,output_path) #输出推理模型
time.sleep(2)
PP_TSMv2().predict(config,input_file,batch_size,model_file,params_file)
PP_TSMv2().predict(config,input_file,batch_size,model_file,params_file) #推理模型推理、预测
if __name__ == "__main__":

Loading…
Cancel
Save