import cv2
import os
import time
import mediapipe as mp
from ultralytics import YOLO
import queue
import threading
from config import Q_SZ
from personDet import analysis_yolov8
import tools_function
from holisticDet import MediapipeProcess
import mediapipe_detection_image
from PP_TSMv2_infer import PP_TSMv2_predict
import shutil
import json
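
# Pipeline overview (as wired up in DealVideo below):
#   get_video_list            -> videoQueue       : collect source video paths
#   get_video_frame           -> cutbboxQueue     : YOLOv8 person detection per frame, emit crop jobs
#   write_video               -> videoQueue2      : cut/crop the requested clips and save them
#   select_video_path         -> videoQueue3 / videodetQueue : route saved clips by label ('person' / 'hands')
#   head_hands_det            -> cutbboxQueue     : MediaPipe head/hand detection on person clips
#   video_select_dect         -> videoreturnQueue : PP-TSMv2 action classification on hand clips
#   analysis_return_meassage                      : merge clip annotations back into a per-video JSON file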


class DealVideo():

    def __init__(self, video_file, video_save_file, person_model, mediapipe_model, pptsmv2_model):
        '''
        Load the input/output paths and models, and set up the queues and worker threads.
        '''
        self.video_file = video_file
        self.video_save_file = video_save_file

        # Initialize models
        self.person_model = person_model
        self.mediapipe_model = mediapipe_model
        self.predictor = pptsmv2_model[1]
        self.infer = pptsmv2_model[0]
        self.batch_size = 1

        # Queues
        self.videoQueue = queue.Queue(maxsize=Q_SZ)       # source video paths
        self.videoQueue2 = queue.Queue(maxsize=Q_SZ)      # saved clips waiting to be routed by label
        self.cutbboxQueue = queue.Queue(maxsize=0)        # crop jobs for write_video
        self.videodetQueue = queue.Queue(maxsize=0)       # clips waiting for PP-TSMv2 classification
        self.videoQueue3 = queue.Queue(maxsize=0)         # person clips waiting for head/hand detection
        self.videoreturnQueue = queue.Queue(maxsize=0)    # classified clips waiting for result analysis

        # Threads
        self.get_video_listThread = threading.Thread(target=self.get_video_list)
        self.get_video_frameThread = threading.Thread(target=self.get_video_frame)
        self.write_videoThread = threading.Thread(target=self.write_video)
        self.head_hands_detThread = threading.Thread(target=self.head_hands_det)
        self.video_select_dectThread = threading.Thread(target=self.video_select_dect)
        self.select_video_pathThread = threading.Thread(target=self.select_video_path)
        self.analysis_return_meassageThread = threading.Thread(target=self.analysis_return_meassage)
    def get_video_list(self):
        '''
        Collect the video files to process.
        '''
        if os.path.isdir(self.video_file):

            video_ext = [".mp4", ".avi", ".MP4"]

            for maindir, subdir, file_name_list in os.walk(self.video_file):
                for filename in file_name_list:
                    apath = os.path.join(maindir, filename)
                    ext = os.path.splitext(apath)[1]
                    if ext in video_ext:
                        self.videoQueue.put(apath)

        else:
            self.videoQueue.put(self.video_file)
    def get_video_frame(self):
        '''
        Split each video into frames and run person detection frame by frame.
        '''
        while True:

            if self.videoQueue.empty():
                time.sleep(1)
            else:
                video_path = self.videoQueue.get()

                cap = cv2.VideoCapture(video_path)
                # Note: despite the name, video_fps holds the total frame count.
                video_fps = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

                count_fps = 0
                frame_result_contact = []
                count_fps_del = 0

                while cap.isOpened():
                    success, frame = cap.read()
                    if not success:
                        print(video_path, "Ignoring empty camera frame.")
                        break

                    imgsize = frame.shape

                    # YOLOv8 person detection on the current frame
                    person_det = analysis_yolov8(frame=frame,
                                                 model_coco=self.person_model,
                                                 confidence_set=0.5)

                    person_list = tools_function.get_dict_values(person_det)

                    if frame_result_contact:
                        start_fps = frame_result_contact[0]['fps']
                    else:
                        start_fps = count_fps

                    # Flag whether this is the last frame of the video
                    if count_fps == (video_fps - 1):
                        video_end = True
                    else:
                        video_end = False

                    if person_list:

                        count_fps_del_re, updata_result_contact = self.analysis_by_bbox(imgsize=imgsize,
                                                                                        detect_result=person_list,
                                                                                        dertpara=4,
                                                                                        start_fps=start_fps,
                                                                                        now_fps=count_fps,
                                                                                        label_name='person',
                                                                                        video_path=video_path,
                                                                                        frame_result_contact=frame_result_contact,
                                                                                        parameter_fps=150,
                                                                                        count_fps_del=count_fps_del,
                                                                                        video_end=video_end)
                        count_fps_del = count_fps_del_re
                        frame_result_contact = updata_result_contact

                    count_fps += 1

                # Release the capture once the video has been fully read
                cap.release()
    def head_hands_det(self):
        '''
        Run MediaPipe head/hand detection on the cropped person clips.
        '''
        while True:

            if self.videoQueue3.empty():
                time.sleep(1)
            else:
                t0 = time.time()
                video_path = self.videoQueue3.get()

                cap = cv2.VideoCapture(video_path)
                # Note: despite the name, video_fps holds the total frame count.
                video_fps = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

                count_fps = 0

                head_result_contact = []
                hands_result_contact = []
                count_fps_del_head = 0
                count_fps_del_hand = 0

                while cap.isOpened():
                    success, frame = cap.read()
                    if not success:
                        print(video_path, "Ignoring empty camera frame.")
                        break

                    imgsize = frame.shape

                    # Model inference
                    hh_result = MediapipeProcess.mediapipe_det(image=frame,
                                                               holistic=self.mediapipe_model)
                    hh_result_dict = MediapipeProcess.get_analysis_result(image=frame, results=hh_result)

                    # Current bbox lists for face and hands
                    head_result = hh_result_dict['face_bbox']
                    head_result_1 = tools_function.select_list(head_result)
                    hands_result = hh_result_dict['hand_bbox']
                    hands_result_1 = tools_function.select_list(hands_result)

                    # Flag whether this is the last frame of the video
                    if count_fps == (video_fps - 1):
                        video_end = True
                    else:
                        video_end = False

                    # Correct the coordinates and analyse heads and hands separately
                    if head_result_1:

                        if head_result_contact:
                            start_fps = head_result_contact[0]['fps']
                        else:
                            start_fps = count_fps

                        count_fps_del_re, updata_result_contact = self.analysis_by_bbox(imgsize=imgsize,
                                                                                        detect_result=head_result_1,
                                                                                        dertpara=1,
                                                                                        start_fps=start_fps,
                                                                                        now_fps=count_fps,
                                                                                        label_name='head',
                                                                                        video_path=video_path,
                                                                                        frame_result_contact=head_result_contact,
                                                                                        parameter_fps=80,
                                                                                        count_fps_del=count_fps_del_head,
                                                                                        video_end=video_end)
                        count_fps_del_head = count_fps_del_re
                        head_result_contact = updata_result_contact

                    if hands_result_1:

                        if hands_result_contact:
                            start_fps = hands_result_contact[0]['fps']
                        else:
                            start_fps = count_fps

                        count_fps_del_re, updata_result_contact = self.analysis_by_bbox(imgsize=imgsize,
                                                                                        detect_result=hands_result_1,
                                                                                        dertpara=2,
                                                                                        start_fps=start_fps,
                                                                                        now_fps=count_fps,
                                                                                        label_name='hands',
                                                                                        video_path=video_path,
                                                                                        frame_result_contact=hands_result_contact,
                                                                                        parameter_fps=80,
                                                                                        count_fps_del=count_fps_del_hand,
                                                                                        video_end=video_end)
                        count_fps_del_hand = count_fps_del_re
                        hands_result_contact = updata_result_contact

                    count_fps += 1

                cap.release()
    def video_select_dect(self):
        '''
        Classify the hand clips with PP-TSMv2 and keep the confident results.
        '''
        while True:

            if self.videodetQueue.empty():
                time.sleep(5)
            else:
                video_path = self.videodetQueue.get()

                try:
                    result_list = PP_TSMv2_predict().predict(input_f=video_path,
                                                             batch_size=self.batch_size,
                                                             predictor=self.predictor,
                                                             InferenceHelper=self.infer)

                    if result_list['topk_scores'] > 0.9:

                        # Move the clip into a folder named after the predicted class
                        video_base_name = os.path.basename(video_path)
                        video_save_select_path = self.video_save_file + '/' + 'video_select_dect/' + str(result_list['topk_class'])
                        os.makedirs(video_save_select_path, exist_ok=True)
                        video_save = os.path.join(video_save_select_path, video_base_name)

                        os.rename(video_path, video_save)

                        self.videoreturnQueue.put(video_save)

                        print("result_list_video_select_dect:", result_list)

                except Exception as e:
                    print(e)
    def analysis_by_bbox(self, imgsize, detect_result, dertpara, start_fps, now_fps, label_name, video_path, frame_result_contact, parameter_fps, count_fps_del, video_end):
        '''
        imgsize: size of the image
        detect_result: detection results for the image (bbox list)
        dertpara: scale factor; values > 1 enlarge the box, values < 1 shrink it
        start_fps: start frame of the comparison list
        now_fps: current frame index
        label_name: detection class being analysed
        video_path: path of the video
        frame_result_contact: comparison list of tracked detections
        parameter_fps: cut-off duration (in frames) for the statistics
        count_fps_del: number of consecutive frames the target has been missing
        video_end: whether the current frame is the last one of the video
        '''
        bbox_list = tools_function.para_list_correction(images_size=imgsize, bbox_list=detect_result, dertpara=dertpara)

        count_fps_del_re, update_frame_result_contact = self.get_cut_message(fps1=now_fps,
                                                                             label_name=label_name,
                                                                             re_list=bbox_list,
                                                                             video_path=video_path,
                                                                             frame_result_contact=frame_result_contact,
                                                                             parameter_fps=parameter_fps,
                                                                             count_fps_del=count_fps_del,
                                                                             video_end=video_end)

        return count_fps_del_re, update_frame_result_contact
    def get_cut_message(self, fps1, label_name, re_list, video_path, frame_result_contact, parameter_fps, count_fps_del, video_end):
        '''
        Compare the current detections with the tracked ones and decide which
        segments should be cut out and queued for saving.
        '''
        if not frame_result_contact:

            bbox_list_all = tools_function.change_list_dict(fps1=fps1, re_list=re_list)

            frame_result_contact = bbox_list_all

        else:

            example_dict_list = frame_result_contact

            cut_list, example_lst, re_dict_lst = tools_function.analysis_re01_list(example_list=example_dict_list,
                                                                                   result_list=re_list)
            # Some tracked targets are no longer detected
            if example_lst:

                start_fps = example_lst[0]['fps']

                if count_fps_del <= 5:
                    # Tolerate a few missing frames before cutting
                    frame_result_contact = frame_result_contact
                    count_fps_del = count_fps_del + 1

                if (fps1 - start_fps) < 15:
                    # Segment too short, keep tracking
                    frame_result_contact = frame_result_contact
                else:
                    # Cut the segment and queue it for saving
                    cut_dict = {'video_path': video_path, 'label_name': label_name, "stop_fps": fps1, 'bbox_list': example_lst}
                    frame_result_contact = [item for item in frame_result_contact if item not in example_lst]
                    self.cutbboxQueue.put(cut_dict)

            # New targets appeared
            if re_dict_lst:

                # Update the comparison list
                update_list = tools_function.change_list_dict(fps1=fps1, re_list=re_dict_lst)

                frame_result_contact = frame_result_contact + update_list

            # Targets tracked longer than the cut-off duration
            time_out_list = tools_function.statistics_fps(fps_now=fps1, re_list=frame_result_contact, parameter=parameter_fps)

            if time_out_list:

                # Cut the segment and queue it for saving
                cut_dict = {'video_path': video_path, 'label_name': label_name, "stop_fps": fps1, 'bbox_list': time_out_list}

                self.cutbboxQueue.put(cut_dict)

                # Update the comparison list
                frame_result_contact = [item for item in frame_result_contact if item not in time_out_list]

            if video_end:
                # Flush everything still being tracked at the end of the video
                cut_dict = {'video_path': video_path, 'label_name': label_name, "stop_fps": fps1, 'bbox_list': frame_result_contact}

                self.cutbboxQueue.put(cut_dict)

                frame_result_contact.clear()

        return count_fps_del, frame_result_contact
    def analysis_return_meassage(self):
        '''
        Merge the annotations encoded in the classified clip names back into a
        per-video JSON file.
        '''
        while True:

            if self.videoreturnQueue.empty():
                time.sleep(5)
            else:
                video_message_path = self.videoreturnQueue.get()

                directory = os.path.dirname(video_message_path)
                labels_pptsm = directory.split('/')[-1]

                video_basename = os.path.basename(video_message_path).split('.')[0]

                small_anno_infor = video_basename.split('__')[-1]
                big_anno_infor = video_basename.split('__')[-2]
                video_base_name = video_basename.split('__')[0]

                # Path of the JSON file the annotations are merged into
                file_path = self.video_save_file + '/' + video_base_name + '.json'

                # Parse the frame range and bbox encoded in the small (head/hand) crop name
                small_startfps, small_stopfps, small_fps = small_anno_infor.split('_')[0].split('-')
                small_bbox_0, small_bbox_1, small_bbox_2, small_bbox_3 = small_anno_infor.split('_')[1].split('-')

                # Parse the frame range and bbox encoded in the big (person) crop name
                big_startfps, big_stopfps, big_fps = big_anno_infor.split('_')[0].split('-')
                big_bbox_0, big_bbox_1, big_bbox_2, big_bbox_3 = big_anno_infor.split('_')[1].split('-')

                # Map the small-crop annotation back into the coordinates of the big crop
                big_add_startfps = int(big_startfps) + int(small_startfps)
                big_add_stopfps = int(big_startfps) + int(small_stopfps)
                big_add_bbox_0 = int(big_bbox_0) + int(small_bbox_0)
                big_add_bbox_1 = int(big_bbox_1) + int(small_bbox_1)
                big_add_bbox_2 = int(big_bbox_0) + int(small_bbox_2)
                big_add_bbox_3 = int(big_bbox_1) + int(small_bbox_3)

                big_add_dict = {'labels': labels_pptsm, 'startfps': big_add_startfps, 'stopfps': big_add_stopfps, 'bbox': [big_add_bbox_0, big_add_bbox_1, big_add_bbox_2, big_add_bbox_3]}
                big_person_dict = {'labels': 'person', 'startfps': big_startfps, 'stopfps': big_stopfps, 'bbox': [big_bbox_0, big_bbox_1, big_bbox_2, big_bbox_3]}

                if os.path.isfile(file_path):
                    # The file already exists: read it and append the new annotations
                    with open(file_path, "r") as json_file:
                        data = json.load(json_file)
                        data['big_dict'].append(big_add_dict)

                        if tools_function.compare_dicts(data['big_dict'], big_person_dict):
                            data['big_dict'].append(big_person_dict)

                    with open(file_path, "w") as json_file:
                        json.dump(data, json_file)

                else:
                    # The file does not exist yet: create it with the new annotations
                    bbox_dict = {'big_dict': [big_add_dict, big_person_dict]}
                    with open(file_path, "w") as json_file:
                        json.dump(bbox_dict, json_file)
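
    # Shape of the JSON file written above (keys taken from the code; values are illustrative):
    #   {"big_dict": [
    #       {"labels": <pptsm class>, "startfps": ..., "stopfps": ..., "bbox": [x1, y1, x2, y2]},
    #       {"labels": "person",      "startfps": ..., "stopfps": ..., "bbox": [x1, y1, x2, y2]}
    #   ]}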
    def write_video(self):
        '''
        Cut and crop the requested segments and save them as video clips.
        '''
        while True:
            if self.cutbboxQueue.empty():
                time.sleep(2)
            else:
                video_frame_dict = self.cutbboxQueue.get()

                # Source video path
                video_path = video_frame_dict['video_path']
                video_basename = os.path.basename(video_path).split('.')[0]
                file_name = video_frame_dict['label_name']

                # Frame rate and frame count of the source video
                cap = cv2.VideoCapture(video_path)
                fps = cap.get(cv2.CAP_PROP_FPS)
                video_fps = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

                # Frame at which the segment stops
                stop_fps = video_frame_dict['stop_fps']
                # Crop information (one entry per tracked bbox)
                result_list = video_frame_dict['bbox_list']

                if cap.isOpened():

                    for i, bbox_dict in enumerate(result_list):
                        start_fps = bbox_dict['fps']

                        if start_fps >= stop_fps:
                            break
                        else:
                            bbox_list = bbox_dict['result']
                            bbox_int_list = [int(bbox_list[0]), int(bbox_list[1]), int(bbox_list[2]), int(bbox_list[3])]
                            w = bbox_int_list[2] - bbox_int_list[0]
                            h = bbox_int_list[3] - bbox_int_list[1]
                            size = [w, h]
                            if tools_function.determine_zero(size):
                                size = (w, h)

                                # Save the clip under a folder named after its label
                                video_name_save = '{}__{}-{}-{}_{}-{}-{}-{}.avi'.format(video_basename, start_fps, stop_fps, video_fps, int(bbox_list[0]), int(bbox_list[1]), int(bbox_list[2]), int(bbox_list[3]))
                                video_save_file = self.video_save_file + '/' + file_name
                                os.makedirs(video_save_file, exist_ok=True)
                                video_save_path = os.path.join(video_save_file, video_name_save)
                                videoWriter = cv2.VideoWriter(video_save_path, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), fps, size)

                                tools_function.save_seg_video(video_name=video_path,
                                                              frameToStart=start_fps,
                                                              frametoStop=stop_fps,
                                                              videoWriter=videoWriter,
                                                              bbox=bbox_int_list,
                                                              size=size)
                                videoWriter.release()
                                self.videoQueue2.put(video_save_path)

                            else:
                                print('write_video: invalid crop size, skipping', size)

                    cap.release()

                else:
                    # Could not open the source video; stop the writer loop
                    print(video_path)
                    break
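
    # Clip naming scheme produced by write_video and parsed back by analysis_return_meassage:
    #   <basename>__<start_fps>-<stop_fps>-<total_frames>_<x1>-<y1>-<x2>-<y2>.avi
    # e.g. (illustrative) 02__120-300-3600_100-80-400-360.avi; clips cut from an already
    # cropped clip accumulate one such "__..." block per cutting stage.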
    def select_video_path(self):
        '''
        Route the saved clips to the next stage according to their label folder.
        '''
        while True:

            if self.videoQueue2.empty():
                time.sleep(5)
            else:
                video_path = self.videoQueue2.get()
                directory = os.path.dirname(video_path)
                labels = directory.split('/')[-1]

                if labels == 'person':
                    # Person clips go to MediaPipe head/hand detection
                    self.videoQueue3.put(video_path)

                if labels == 'hands':
                    # Hand clips go to PP-TSMv2 classification
                    self.videodetQueue.put(video_path)

                else:
                    pass
    def run(self):
        self.get_video_listThread.start()
        self.get_video_frameThread.start()
        self.write_videoThread.start()
        self.head_hands_detThread.start()
        self.video_select_dectThread.start()
        self.select_video_pathThread.start()
        self.analysis_return_meassageThread.start()


if __name__ == '__main__':

    t1 = time.time()

    video = "E:/Bank_files/Bank_02/dataset/video_kf/02.mp4"
    video_save = 'test_video'

    # Initialize the object detector
    person_model = YOLO("model_file/yolov8x.pt")

    # Initialize PP-TSMv2
    config = 'model_file/inference/pptsm_lcnet_k400_16frames_uniform.yaml'   # config file path
    model_file = 'model_file/inference_hands_2/ppTSMv2.pdmodel'              # inference model path
    params_file = 'model_file/inference_hands_2/ppTSMv2.pdiparams'
    infer, predictor = PP_TSMv2_predict().create_inference_model(config, model_file, params_file)

    # Initialize MediaPipe holistic
    mp_holistic = mp.solutions.holistic
    holistic = mp_holistic.Holistic(
        min_detection_confidence=0.5,
        min_tracking_confidence=0.5)

    deal = DealVideo(video_file=video,
                     video_save_file=video_save,
                     person_model=person_model,
                     mediapipe_model=holistic,
                     pptsmv2_model=[infer, predictor])
    deal.run()
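
    # Note: the worker threads loop forever, so deal.run() returns immediately and the
    # timing below only measures setup. A minimal sketch, if you wanted the main thread
    # to block on the pipeline instead (not part of the original flow), would be e.g.:
    #
    #     deal.get_video_frameThread.join()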

    t2 = time.time()

    # print('Total time:', t2 - t1)