@@ -67,6 +67,8 @@ class FrameToVideo(Thread):
             if all([abs(coord[n] - target['coord'][n]) <= 5 for n in range(4)]):  # tolerance check
                 frame_split = frame_img[target['split_y']['min']:target['split_y']['max'],
                               target['split_x']['min']:target['split_x']['max']]
+                # cv2.imshow('yang', frame_split)
+                # cv2.waitKey(2000)
                 target['frame'].append(frame_split)
                 target['count'] += 1
                 target['flag'] = True
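For context: the test above treats a new detection as the same target when each of the four box coordinates differs by at most 5 px, and the crop is a plain NumPy slice in (row, column) order, i.e. y before x. A minimal runnable sketch of the same two steps, with made-up frame and box values (the (x1, y1, x2, y2) coordinate order is an assumption):

    import numpy as np

    frame_img = np.zeros((1080, 1920, 3), dtype=np.uint8)  # dummy frame, H x W x C
    target = {'coord': [100, 200, 300, 400],               # previously matched box
              'split_y': {'min': 180, 'max': 420},
              'split_x': {'min': 80, 'max': 320},
              'frame': [], 'count': 0, 'flag': False}
    coord = [103, 198, 302, 401]                           # detection in the current frame

    # same tolerance test as the diff: every coordinate within 5 px
    if all(abs(coord[n] - target['coord'][n]) <= 5 for n in range(4)):
        # NumPy slices rows first: frame[y1:y2, x1:x2]
        frame_split = frame_img[target['split_y']['min']:target['split_y']['max'],
                                target['split_x']['min']:target['split_x']['max']]
        target['frame'].append(frame_split)
        target['count'] += 1
        target['flag'] = True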
@@ -80,7 +82,11 @@ class FrameToVideo(Thread):
     def target_analysis(self, target_list, new_target_list, person_coord_list, frame_x, frame_y, frame_img, label):
         if not target_list:
             for line in person_coord_list:
-                coord = line['person']
+                # the self-defined label may differ from the model's label
+                if label == 'head':
+                    coord = line['head']
+                else:
+                    coord = line['person']
                 split_x, split_y = self.boundary_treat(frame_x, frame_y, coord)
                 # crop one size larger, with a fixed crop range
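`boundary_treat` is not part of this diff; per the comment above it produces a crop window one size larger than the detection box, clamped to the frame. A hedged sketch of what such a helper could look like (the margin value, coordinate order, and return shape are assumptions, not the project's actual code):

    def boundary_treat(frame_x, frame_y, coord, margin=20):
        # coord assumed to be (x1, y1, x2, y2); grow the box by `margin`
        # and clamp it so the later slice never leaves the frame
        x1, y1, x2, y2 = coord
        split_x = {'min': max(x1 - margin, 0), 'max': min(x2 + margin, frame_x)}
        split_y = {'min': max(y1 - margin, 0), 'max': min(y2 + margin, frame_y)}
        return split_x, split_y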
@@ -90,7 +96,7 @@ class FrameToVideo(Thread):
                                         'split_y': split_y, 'flag': False})
         else:
             for line in person_coord_list:
-                coord = line['person']
+                coord = line[label]
                 match_flag = self.target_match(target_list, coord, frame_img, new_target_list)
                 if not match_flag:
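Judging from the continuation line at the top of this hunk and the fields used elsewhere in the diff, each tracked target is a dict shaped roughly like this (values here are illustrative only):

    target = {
        'coord': [100, 200, 300, 400],       # last matched detection box
        'split_x': {'min': 80, 'max': 320},  # fixed crop window from boundary_treat
        'split_y': {'min': 180, 'max': 420},
        'frame': [],                         # cropped frames collected so far
        'count': 0,                          # number of frames matched
        'flag': False,                       # True when matched in the current frame
    }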
@@ -103,22 +109,31 @@ class FrameToVideo(Thread):
         # check the frame count and generate the video
         for target in target_list:
             if len(target['frame']) == cfg_dict['video_length']:
-                print(target)
                 frame_w = target['split_x']['max'] - target['split_x']['min']
                 frame_h = target['split_y']['max'] - target['split_y']['min']
+                logger.info(f'start writing video: {label}')
                 self.save_video(target['frame'], cfg_dict['fps'], cfg_dict['video_encoding'],
                                 cfg_dict['video_path'][label] + self.camera + str(int(time.time())) + '.mp4v',
                                 (frame_w, frame_h))
+                logger.info(f'finished writing video: {label}')
                 continue
             # filter out interrupted targets that found no match
             if target['flag']:
                 target['flag'] = False
                 new_target_list.append(target)
+
+        if label == 'person':
+            self.person_target_list = new_target_list
+        else:
+            self.head_target_list = new_target_list
+
 
     def frame_analysis(self):
-        video_caputre = cv2.VideoCapture(cfg_dict['test_video_path'])  # for local testing
+        video_capture = cv2.VideoCapture(cfg_dict['test_video_path'])  # for local testing
         while True:
-            result, frame_img = video_caputre.read()  # for local testing
+            # for x in self.head_target_list:
+            #     print(len(x['frame']))
+            result, frame_img = video_capture.read()  # for local testing
             # try:
             #     frame_img = self.queue_img.get_nowait()
             # except Empty:
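`save_video` itself is outside this diff. Two things worth double-checking at the call site above: the output name ends in '.mp4v', but 'mp4v' is a FourCC codec tag while the container extension is normally '.mp4'; and OpenCV's writer expects the size as (width, height), which matches the (frame_w, frame_h) tuple passed in. A minimal sketch of how such a helper is commonly written with cv2.VideoWriter (the signature mirrors the call site minus self; the body is an assumption, not the project's actual code):

    import cv2

    def save_video(frames, fps, encoding, path, size):
        # `encoding` is a four-character FourCC string such as 'mp4v'
        fourcc = cv2.VideoWriter_fourcc(*encoding)
        writer = cv2.VideoWriter(path, fourcc, fps, size)  # size = (width, height)
        for frame in frames:
            writer.write(frame)
        writer.release()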
@@ -138,8 +153,6 @@ class FrameToVideo(Thread):
                                  frame_x, frame_y, frame_img, 'person')
             self.target_analysis(self.head_target_list, new_head_target_list, head_coord_list, frame_x,
                                  frame_y, frame_img, 'head')
-            self.person_target_list = new_person_target_list
-            self.head_target_list = new_head_target_list
 
     def run(self):
         self.frame_analysis()
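Since `run` only delegates to `frame_analysis`, the worker is driven through the standard threading API; a hypothetical wiring (the real constructor arguments, e.g. the camera id and frame queue, are not shown in this diff):

    # hypothetical: constructor arguments are not part of this diff
    worker = FrameToVideo()
    worker.daemon = True  # let the process exit even though frame_analysis loops forever
    worker.start()        # Thread.start() invokes run() on the new thread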