@@ -23,10 +23,14 @@ class FrameToVideo(Thread):
         super(FrameToVideo, self).__init__()
         self.camera = camera_name
         # self.queue_img = CAMERA_QUEUE[camera_name]
-        self.yolo_model = {'person': YOLO(cfg_dict['model_path']['person']),
-                           'head': YOLO(cfg_dict['model_path']['head'])}
+        # self.yolo_model = {'person': YOLO(cfg_dict['model_path']['person']),
+        #                    'head': YOLO(cfg_dict['model_path']['head'])}
+        self.yolo_model = YOLO(cfg_dict['model_path']['all'])
         self.person_target_list = []
         self.head_target_list = []
+        self.phone_target_list = []

     @staticmethod
     def save_video(frames, fps, fourcc, video_path, w_h_size):
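Note on the config this hunk now leans on: the constructor drops the separate 'person' and 'head' weights in favour of a single cfg_dict['model_path']['all'] entry, while cfg_dict['confidence'] apparently stays a per-class mapping, since the detection hunk further down hands the whole sub-dict to analysis_yolov8. A minimal sketch of the assumed shape; the file path, the threshold values, and the 'phone' key are illustrative assumptions, not values from the repo:

# Hypothetical cfg_dict shape implied by this diff (values are placeholders).
cfg_dict = {
    'model_path': {
        'all': 'weights/yolov8_person_head_phone.pt',  # single combined model (assumed path)
    },
    'confidence': {  # per-class thresholds, now passed whole to analysis_yolov8
        'person': 0.5,
        'head': 0.5,
        'phone': 0.5,
    },
}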
@@ -85,8 +89,10 @@ class FrameToVideo(Thread):
         # self-defined labels may not match the model's label names
         if label == 'head':
             coord = line['head']
-        else:
+        elif label == 'person':
             coord = line['person']
+        else:
+            coord = line['phone']
         split_x, split_y = self.boundary_treat(frame_x, frame_y, coord)

         # crop slightly larger than the detection box, with a fixed crop range
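Since each detection entry is keyed by its label string (see the coord lists built in the next hunk), the if/elif/else above could collapse into a direct lookup. A sketch of that alternative, assuming every line dict really does carry a key equal to its label:

# Suggested equivalent dispatch; assumes `line` always holds a key named after `label`.
try:
    coord = line[label]
except KeyError as exc:
    raise ValueError(f'unexpected label {label!r} in target line') from exc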
@@ -138,11 +144,16 @@ class FrameToVideo(Thread):
         new_person_target_list = []
         new_head_target_list = []
+        new_phone_target_list = []
         # run the model, detecting frame by frame
-        person_coord_list = analysis_yolov8(frame=frame_img, model_coco=self.yolo_model['person'],
-                                            confidence=cfg_dict['confidence']['person'])
-        head_coord_list = analysis_yolov8(frame=frame_img, model_coco=self.yolo_model['head'],
-                                          confidence=cfg_dict['confidence']['head'])
+        # person_coord_list = analysis_yolov8(frame=frame_img, model_coco=self.yolo_model['person'],
+        #                                     confidence=cfg_dict['confidence']['person'])
+        # head_coord_list = analysis_yolov8(frame=frame_img, model_coco=self.yolo_model['head'],
+        #                                   confidence=cfg_dict['confidence']['head'])
+        person_coord_list, head_coord_list, phone_coord_list = analysis_yolov8(frame=frame_img,
+                                                                               model_coco=self.yolo_model,
+                                                                               confidence=cfg_dict['confidence'])

         frame_y, frame_x, _ = frame_img.shape
         logger.debug(f'Frame size, y:{frame_y}, x:{frame_x}')
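The repository's analysis_yolov8 is not part of this diff, so the following is only a rough sketch of the three-list contract the new call expects, written against the public ultralytics API. The class names ('person', 'head', 'phone'), the 0.5 fallback threshold, and the per-detection dict shape (inferred from the line['head'] / line['person'] / line['phone'] lookups in the earlier hunk) are assumptions, not the repo's actual implementation:

from ultralytics import YOLO  # same dependency the diff already relies on


def analysis_yolov8(frame, model_coco, confidence):
    """Sketch only: run one combined YOLOv8 model and split its detections
    into person/head/phone coordinate lists. Not the repo's implementation."""
    split = {'person': [], 'head': [], 'phone': []}   # assumed class names
    results = model_coco(frame, verbose=False)        # ultralytics inference
    for box in results[0].boxes:
        name = model_coco.names[int(box.cls)]         # class id -> label string
        if name in split and float(box.conf) >= confidence.get(name, 0.5):
            # keep each detection keyed by its label so the upstream
            # coord = line[label] style lookups continue to work
            split[name].append({name: box.xyxy[0].tolist()})  # [x1, y1, x2, y2]
    return split['person'], split['head'], split['phone']

Returning dicts keyed by the label keeps the existing line['person'] / line['head'] / line['phone'] access pattern unchanged downstream.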
@@ -150,6 +161,8 @@ class FrameToVideo(Thread):
                              frame_x, frame_y, frame_img, 'person')
         self.target_analysis(self.head_target_list, new_head_target_list, head_coord_list, frame_x,
                              frame_y, frame_img, 'head')
+        self.target_analysis(self.phone_target_list, new_phone_target_list, phone_coord_list, frame_x,
+                             frame_y, frame_img, 'phone')

     def run(self):
         self.frame_analysis()
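With three near-identical target_analysis calls now in place, a table-driven loop would keep person, head, and phone in step if more classes are added later. A sketch of that consolidation, offered purely as a suggestion on top of the names already visible in this hunk:

# Possible consolidation of the three parallel calls (suggestion only;
# same arguments as above, just driven by a mapping).
targets = [
    (self.person_target_list, new_person_target_list, person_coord_list, 'person'),
    (self.head_target_list, new_head_target_list, head_coord_list, 'head'),
    (self.phone_target_list, new_phone_target_list, phone_coord_list, 'phone'),
]
for old_list, new_list, coord_list, label in targets:
    self.target_analysis(old_list, new_list, coord_list, frame_x, frame_y, frame_img, label)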