@@ -72,7 +72,7 @@ class FrameToVideo(Thread):
                target['frame'].append(frame_split)
                target['count'] += 1
                target['flag'] = True
                new_target_list.append(target)
                # new_target_list.append(target)
                match_flag = True
                break
            else:
@@ -129,10 +129,6 @@ class FrameToVideo(Thread):
    def frame_analysis(self):
        video_capture = cv2.VideoCapture(cfg_dict['test_video_path'])  # for local testing only
        while True:
            # for x in self.head_target_list:
            #     print(len(x['frame']))

            result, frame_img = video_capture.read()  # for local testing only
            # try:
            #     frame_img = self.queue_img.get_nowait()
@@ -149,6 +145,7 @@ class FrameToVideo(Thread):
                                           confidence=cfg_dict['confidence']['head'])

            frame_y, frame_x, _ = frame_img.shape
            logger.debug(f'frame size, y:{frame_y}, x:{frame_x}')
            self.target_analysis(self.person_target_list, new_person_target_list, person_coord_list,
                                 frame_x, frame_y, frame_img, 'person')
            self.target_analysis(self.head_target_list, new_head_target_list, head_coord_list, frame_x,