diff --git a/xznsh_flow/cfg.json b/xznsh_flow/cfg.json
index 3ddeeac..db8eb37 100644
--- a/xznsh_flow/cfg.json
+++ b/xznsh_flow/cfg.json
@@ -1 +1 @@
-{"log_path": "xznsh.log", "frame": 0.05, "camera": {"camera_01": "rtsp://admin:@192.168.10.18", "camera_02": "rtsp://admin:@192.168.10.12"}, "video_path": {"person": "video/person/", "head": "video/head/"}, "username": "eWF4aW4=", "password": "eWF4aW5AMTIz", "url": "", "model_path": {"person": "person.pt", "head": "person_cut_hh.pt", "phone": "xxx", "action": "xxx"}, "confidence": {"person": 0.5, "head": 0.5, "phone": 0.5}, "video_encoding": "MP42", "video_length": 50, "error_x": 200, "error_y": 200, "fps": 30, "test_video_path": "0711-4.mp4"}
\ No newline at end of file
+{"log_path": "xznsh.log", "frame": 0.05, "camera": {"camera_01": "rtsp://admin:@192.168.10.18", "camera_02": "rtsp://admin:@192.168.10.12"}, "video_path": {"person": "video/person/", "head": "video/head/"}, "username": "eWF4aW4=", "password": "eWF4aW5AMTIz", "url": "", "model_path": {"person": "person.pt", "head": "person_cut_hh.pt", "phone": "xxx", "action": "xxx"}, "confidence": {"person": 0.5, "head": 0.5, "phone": 0.5}, "video_encoding": "MP42", "video_length": 90, "error_x": 200, "error_y": 200, "fps": 30, "test_video_path": "0711-4.mp4"}
\ No newline at end of file
diff --git a/xznsh_flow/xznsh_main.py b/xznsh_flow/xznsh_main.py
index d6e421d..a52e50a 100644
--- a/xznsh_flow/xznsh_main.py
+++ b/xznsh_flow/xznsh_main.py
@@ -67,6 +67,8 @@ class FrameToVideo(Thread):
             if all([abs(coord[n] - target['coord'][n]) <= 5 for n in range(4)]):  # tolerance check
                 frame_split = frame_img[target['split_y']['min']:target['split_y']['max'],
                                         target['split_x']['min']:target['split_x']['max']]
+                # cv2.imshow('yang', frame_split)
+                # cv2.waitKey(2000)
                 target['frame'].append(frame_split)
                 target['count'] += 1
                 target['flag'] = True
@@ -80,7 +82,11 @@ class FrameToVideo(Thread):
 
     def target_analysis(self, target_list, new_target_list, person_coord_list, frame_x, frame_y, frame_img, label):
         if not target_list:
             for line in person_coord_list:
-                coord = line['person']
+                # the self-defined label may differ from the model's label
+                if label == 'head':
+                    coord = line['head']
+                else:
+                    coord = line['person']
 
                 split_x, split_y = self.boundary_treat(frame_x, frame_y, coord)  # crop a slightly larger, fixed range
@@ -90,7 +96,7 @@ class FrameToVideo(Thread):
                                      'split_y': split_y, 'flag': False})
         else:
             for line in person_coord_list:
-                coord = line['person']
+                coord = line[label]
 
                 match_flag = self.target_match(target_list, coord, frame_img, new_target_list)
                 if not match_flag:
@@ -103,22 +109,31 @@ class FrameToVideo(Thread):
         # once enough frames are buffered, generate a video
         for target in target_list:
            if len(target['frame']) == cfg_dict['video_length']:
-                print(target)
                frame_w = target['split_x']['max'] - target['split_x']['min']
                frame_h = target['split_y']['max'] - target['split_y']['min']
+                logger.info(f'start writing video: {label}')
                self.save_video(target['frame'], cfg_dict['fps'], cfg_dict['video_encoding'],
                                cfg_dict['video_path'][label] + self.camera + str(int(time.time())) + '.mp4v',
                                (frame_w, frame_h))
+                logger.info(f'finished writing video: {label}')
                continue
            # filter out interrupted targets that were not matched
            if target['flag']:
                target['flag'] = False
                new_target_list.append(target)
+        if label == 'person':
+            self.person_target_list = new_target_list
+        else:
+            self.head_target_list = new_target_list
 
     def frame_analysis(self):
-        video_caputre = cv2.VideoCapture(cfg_dict['test_video_path'])  # for local testing
+        video_capture = cv2.VideoCapture(cfg_dict['test_video_path'])  # for local testing
         while True:
-            result, frame_img = video_caputre.read()  # for local testing
+            # for x in self.head_target_list:
+            #     print(len(x['frame']))
+
+
+            result, frame_img = video_capture.read()  # for local testing
             # try:
             #     frame_img = self.queue_img.get_nowait()
             # except Empty:
@@ -138,8 +153,6 @@ class FrameToVideo(Thread):
                                  frame_x, frame_y, frame_img, 'person')
             self.target_analysis(self.head_target_list, new_head_target_list, head_coord_list,
                                  frame_x, frame_y, frame_img, 'head')
-            self.person_target_list = new_person_target_list
-            self.head_target_list = new_head_target_list
 
     def run(self):
         self.frame_analysis()
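
Note: save_video's body is not part of this diff. The sketch below is only a guess at how the buffered frames get flushed, based on the call site above and the cfg.json values ('MP42' as video_encoding, fps 30); the name and parameter order mirror the call, everything else is assumed.

import cv2

def save_video(frames, fps, encoding, out_path, size):
    # Hypothetical sketch, not the code under review.
    # `encoding` is the fourcc string from cfg.json ('MP42'); `size` is (width, height).
    fourcc = cv2.VideoWriter_fourcc(*encoding)
    writer = cv2.VideoWriter(out_path, fourcc, fps, size)
    for frame in frames:
        writer.write(frame)  # every buffered frame must already match `size`
    writer.release()

With video_length raised from 50 to 90 and fps fixed at 30, each emitted clip now spans 3 seconds of footage instead of roughly 1.7.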
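
The coord = line[label] change assumes each detection record keys its bounding box by the same label string ('person' or 'head') that is passed to target_analysis. Purely illustrative records, not taken from the repo:

# Hypothetical shapes for the per-frame detection lists.
person_coord_list = [{'person': (120, 80, 360, 420)}]  # (x_min, y_min, x_max, y_max) assumed
head_coord_list = [{'head': (180, 60, 260, 140)}]
# target_analysis(..., label='head') now reads line['head'] where it previously
# always read line['person'], which is what this patch fixes.

Relatedly, moving the self.person_target_list / self.head_target_list assignments into target_analysis (and deleting them from frame_analysis) commits each new_target_list as soon as it is built, keyed by the same label.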