From af2a1868d465b87e59e7c523b4923fdafd5830c4 Mon Sep 17 00:00:00 2001
From: wangying <wangying@supervision.ltd>
Date: Thu, 10 Aug 2023 09:33:10 +0800
Subject: [PATCH] 0810: add saving of detection results; add drawing of detection labels onto video
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../detect_process/tools_function.py          |  58 +++++--
 .../detect_process/video_draw_labels.py       |  79 ++++++++++
 .../detect_process/video_process.py           | 149 +++++++++++-------
 3 files changed, 221 insertions(+), 65 deletions(-)
 create mode 100644 Bank_second_part/detect_process/video_draw_labels.py

diff --git a/Bank_second_part/detect_process/tools_function.py b/Bank_second_part/detect_process/tools_function.py
index 14eb9a3..7ffab51 100644
--- a/Bank_second_part/detect_process/tools_function.py
+++ b/Bank_second_part/detect_process/tools_function.py
@@ -1,5 +1,6 @@
 import cv2
 import os
+from deepdiff import DeepDiff
 
 
 
@@ -37,8 +38,8 @@ def get_video_list(path):
 
     # cap.release()
 
-    # 截取裁剪需要的视频帧
-def save_seg_video(video_name,frameToStart,frametoStop,videoWriter,bbox):
+# 截取裁剪需要的视频帧
+def save_seg_video(video_name,frameToStart,frametoStop,videoWriter,bbox,size):
 
     cap = cv2.VideoCapture(video_name)
     count = 0
@@ -50,10 +51,14 @@ def save_seg_video(video_name,frameToStart,frametoStop,videoWriter,bbox):
             # print('correct= ', count)
 
             #裁剪视频画面
-            frame_target = frame[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]  # (split_height, split_width)
-            frame_target = cv2.resize(frame_target,(200,200))
-
-            videoWriter.write(frame_target)
+            frame_target = frame[bbox[1]:bbox[3], bbox[0]:bbox[2]]  # (split_height, split_width)
+            try:
+                frame_target = cv2.resize(frame_target,size)
+                videoWriter.write(frame_target)
+
+            except Exception as e:
+                print('----------------------------------',size,'----------------------------------')
+                print(e)
 
         if not success or count >= frametoStop:
             break
@@ -61,8 +66,6 @@ def save_seg_video(video_name,frameToStart,frametoStop,videoWriter,bbox):
 
     videoWriter.release()
     cap.release()
-
-
 
 # 获得字典中所有values值(这个值是列表)
 def get_dict_values(lst):
@@ -224,6 +227,35 @@ def para_correction(images_size,bbox,dertpara):
     bbox_list = [bbox_list_x[0],bbox_list_y[0],bbox_list_x[1],bbox_list_y[1]]
 
     return bbox_list
+
+def para_correction_back(x1, y1, x2, y2):
+
+    '''
+    修正检测后标注框还原
+
+    '''
+
+    # if dertpara:
+    #     pass
+    # else:
+    #     x1, y1, x2, y2 = bbox[0], bbox[1],bbox[2],bbox[3]  # 原始坐标
+    width = x2 - x1
+    height = y2 - y1
+
+    scaled_width = int(width * 1.2)
+    scaled_height = int(height * 1.2)
+
+    restored_width = int(scaled_width / 1.2)
+    restored_height = int(scaled_height / 1.2)
+
+    restored_x1 = x1 + (width - restored_width) // 2
+    restored_y1 = y1 + (height - restored_height) // 2
+    restored_x2 = restored_x1 + restored_width
+    restored_y2 = restored_y1 + restored_height
+
+    bbox_list = [restored_x1,restored_y1,restored_x2,restored_y2]
+
+    return bbox_list
 
 def para_list_correction(images_size,bbox_list,dertpara):
 
@@ -350,4 +382,12 @@ def select_bbox(bbox_list):
 
     # print('bbox_list:',bbox_list_return)
     return bbox_list_return
-    
\ No newline at end of file
+
+# 对比两字典中的值是否完全一致
+def compare_dicts(dict1, list_of_dicts):
+    for d in list_of_dicts:
+        diff = DeepDiff(dict1, d)
+
+        if not diff:
+            return False
+    return True
\ No newline at end of file
diff --git a/Bank_second_part/detect_process/video_draw_labels.py b/Bank_second_part/detect_process/video_draw_labels.py
new file mode 100644
index 0000000..624e7c2
--- /dev/null
+++ b/Bank_second_part/detect_process/video_draw_labels.py
@@ -0,0 +1,79 @@
+import cv2
+import json
+import os
+
+def draw_video(video_path,labels_file,video_save):
+
+    cap = cv2.VideoCapture(video_path)
+    fps = int(cap.get(cv2.CAP_PROP_FPS))
+    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+    size = (width,height)
+    video_basename = os.path.basename(video_path)
+
+    labels_pptsm_list = ["Nodding",'Not_Playing_Mobile','Not_sleet','Playing_Mobile','slppe']
+
+    with open(labels_file, "r") as json_file:
+        data_dict = json.load(json_file)
+        labels_list = data_dict['big_dict']
+
+    video_save_file = video_save
+    os.makedirs(video_save_file, exist_ok=True)
+    video_save_path = os.path.join(video_save_file, video_basename)
+    videoWriter =cv2.VideoWriter(video_save_path,cv2.VideoWriter_fourcc('X','V','I','D'),fps,size)
+
+    count_fps = 0
+    while cap.isOpened():
+        success, frame = cap.read()
+        print('count_fps:',count_fps)
+        if not success:
+            print(video_path,"Ignoring empty camera frame.")
+            # print('video_fps:',video_fps,'count_fps:',count_fps)
+            break
+
+        re_anno_list = get_bbox_list(count_fps=count_fps,bbox_dict_list=labels_list)
+
+        for re_dic in re_anno_list:
+
+            re_txt = re_dic[0]
+            re_bbox = re_dic[1]
+
+            cv2.rectangle(frame, (int(re_bbox[0]), int(re_bbox[1])),(int(re_bbox[2]), int(re_bbox[3])), (0, 255, 255), 2)
+            cv2.putText(frame, re_txt, (int(re_bbox[0]) - 10, int(re_bbox[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
+
+            if re_txt != 'person':
+                re_txt_1 = labels_pptsm_list[int(re_txt)]
+                cv2.putText(frame, re_txt_1, (int(50), (int(50) + int(re_txt)*30)), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 0, 255), 3)
+
+        videoWriter.write(frame)
+
+        count_fps += 1
+
+
+
+    videoWriter.release()
+    cap.release()
+
+def get_bbox_list(count_fps,bbox_dict_list):
+
+    re_list = []
+
+    for i,bbox_dict in enumerate(bbox_dict_list):
+
+        startfps = int(bbox_dict['startfps'])
+        stopfps = int(bbox_dict['stopfps'])
+
+        if count_fps in range(startfps,stopfps):
+
+            labels = bbox_dict['labels']
+            bbox_list = bbox_dict['bbox']
+
+            re_list.append([labels,bbox_list])
+
+    return re_list
+
+if __name__ == '__main__':
+
+    draw_video(video_path='E:/Bank_files/Bank_02/dataset/video_kf/02.mp4',
+               labels_file='E:/Bank_files/Bank_02/process_file/test_video/02.json',
+               video_save='E:/Bank_files/Bank_02/process_file/test_video')
\ No newline at end of file
diff --git a/Bank_second_part/detect_process/video_process.py b/Bank_second_part/detect_process/video_process.py
index 5c6ced6..ac12767 100644
--- a/Bank_second_part/detect_process/video_process.py
+++ b/Bank_second_part/detect_process/video_process.py
@@ -15,6 +15,7 @@ from holisticDet import MediapipeProcess
 import mediapipe_detection_image
 from PP_TSMv2_infer import PP_TSMv2_predict
 import shutil
+import json
 
 
 
@@ -43,6 +44,7 @@ class DealVideo():
         self.cutbboxQueue = queue.Queue(maxsize=0)
         self.videodetQueue = queue.Queue(maxsize=0)
         self.videoQueue3 = queue.Queue(maxsize=0)
+        self.videoreturnQueue = queue.Queue(maxsize=0)
 
         #线程
         self.get_video_listThread = threading.Thread(target=self.get_video_list)
@@ -51,6 +53,7 @@ class DealVideo():
         self.head_hands_detThread = threading.Thread(target=self.head_hands_det)
         self.video_select_dectThread = threading.Thread(target=self.video_select_dect)
         self.select_video_pathThread = threading.Thread(target=self.select_video_path)
+        self.analysis_return_meassageThread = threading.Thread(target=self.analysis_return_meassage)
 
 
 
@@ -87,16 +90,15 @@ class DealVideo():
             else:
 
-                t1 = time.time()
                 video_path = self.videoQueue.get()
 
                 # video_basename = os.path.basename(video_path).split('.')[0]
 
-                print('video_path:',video_path)
+                # print('video_path:',video_path)
 
                 cap = cv2.VideoCapture(video_path)
                 video_fps = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-                
+
                 # frame_list = []
                 count_fps = 0
                 frame_result_contact = []
@@ -106,7 +108,7 @@ class DealVideo():
                 success, frame = cap.read()
                 if not success:
                     print(video_path,"Ignoring empty camera frame.")
-                    print('video_fps:',video_fps,'count_fps:',count_fps)
+                    # print('video_fps:',video_fps,'count_fps:',count_fps)
                     break
 
 
@@ -143,7 +145,7 @@ class DealVideo():
                                                                     label_name='person',
                                                                     video_path=video_path,
                                                                     frame_result_contact=frame_result_contact,
-                                                                    parameter_fps=200,
+                                                                    parameter_fps=50,
                                                                     count_fps_del=count_fps_del,
                                                                     video_end=video_end
                                                                     )
@@ -152,11 +154,9 @@ class DealVideo():
 
             count_fps += 1
 
-
-
     def head_hands_det(self):
 
-        print('head_hands_detaohgaogh')
+        # print('head_hands_detaohgaogh')
 
         while True:
 
@@ -169,7 +169,7 @@ class DealVideo():
 
                 video_path = self.videoQueue3.get()
 
-                print('video_path_head_hands_det:',video_path)
+                # print('video_path_head_hands_det:',video_path)
 
                 cap = cv2.VideoCapture(video_path)
                 video_fps = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
@@ -186,7 +186,7 @@ class DealVideo():
                     success, frame = cap.read()
                     if not success:
                         print(video_path,"Ignoring empty camera frame.")
-                        print('count_fps:',count_fps,'video_fps:',video_fps)
+                        # print('count_fps:',count_fps,'video_fps:',video_fps)
                         break
 
                     # print('count_fps_read_video=',count_fps)
@@ -206,7 +206,7 @@ class DealVideo():
 
                     if count_fps == (video_fps - 1):
 
-                        print('count_fps:',count_fps,'video_fps:',video_fps)
+                        # print('count_fps:',count_fps,'video_fps:',video_fps)
                         video_end = True
 
                     else:
@@ -228,7 +228,7 @@ class DealVideo():
                                                                         label_name='head',
                                                                         video_path=video_path,
                                                                         frame_result_contact=head_result_contact,
-                                                                        parameter_fps=50,
+                                                                        parameter_fps=25,
                                                                         count_fps_del=count_fps_del_head,
                                                                         video_end=video_end
                                                                         )
@@ -250,7 +250,7 @@ class DealVideo():
                                                                         label_name='hands',
                                                                         video_path=video_path,
                                                                         frame_result_contact=hands_result_contact,
-                                                                        parameter_fps=50,
+                                                                        parameter_fps=25,
                                                                         count_fps_del=count_fps_del_hand,
                                                                         video_end=video_end
                                                                         )
@@ -284,6 +284,8 @@ class DealVideo():
 
                     os.rename(video_path, video_save)
 
+                    self.videoreturnQueue.put(video_save)
+
                     print("result_list_video_select_dect:",result_list)
 
                 except Exception as e:
@@ -331,7 +333,6 @@ class DealVideo():
 
         # continue_para = False
 
-
         if not frame_result_contact:
 
             bbox_list_all = tools_function.change_list_dict(fps1=fps1,re_list=re_list)
@@ -351,24 +352,25 @@ class DealVideo():
                 # 截图保存视频
                 # continue_para = True
 
-                cut_dict = {'video_path':video_path,'label_name':label_name,"stop_fps":fps1,'bbox_list':example_lst}
+                # cut_dict = {'video_path':video_path,'label_name':label_name,"stop_fps":fps1,'bbox_list':example_lst}
 
-                start_fps = example_lst[0]['fps']
+                # start_fps = example_lst[0]['fps']
 
-                if count_fps_del <= 3:
+                if count_fps_del <= 2:
 
                     frame_result_contact = frame_result_contact
                     count_fps_del = count_fps_del + 1
 
-                else:
+                # else:
 
-                    if (fps1 - start_fps) < 10:
+                #     if (fps1 - start_fps) < 5:
 
-                        frame_result_contact = frame_result_contact
-                    else:
-
-                        frame_result_contact = [item for item in frame_result_contact if item not in example_lst]
-                        self.cutbboxQueue.put(cut_dict)
+                #         frame_result_contact = frame_result_contact
+                else:
+
+                    cut_dict = {'video_path':video_path,'label_name':label_name,"stop_fps":fps1,'bbox_list':example_lst}
+                    frame_result_contact = [item for item in frame_result_contact if item not in example_lst]
+                    self.cutbboxQueue.put(cut_dict)
 
             # 有新添加目标情况
             if re_dict_lst:
@@ -407,38 +409,72 @@ class DealVideo():
 
         return count_fps_del,frame_result_contact
 
-    # def get_continue_keys(self,count_fps_del,continue_para,start_fps,now_fps,frame_result_contact,update_frame_result_contact):
+    def analysis_return_meassage(self):
+
+        # big_add_list = []
+        # big_list = []
 
-        # # 判断是否有偶然没检测到的情况
-        # if continue_para:
+        while True:
+
+            if self.videoreturnQueue.empty():
 
-            # dert_fps = now_fps - start_fps
+                time.sleep(5)
+            else:
+                video_message_path = self.videoreturnQueue.get()
 
-            # print('dert_fps:',dert_fps)
+                directory = os.path.dirname(video_message_path)
+                labels_pptsm = directory.split('/')[-1]
 
-            # if dert_fps <= 20:
+                video_basename = os.path.basename(video_message_path).split('.')[0]
 
-                # count_fps_del = count_fps_del + 1
+                small_anno_infor = video_basename.split('__')[-1]
+                big_anno_infor = video_basename.split('__')[-2]
+                video_base_name = video_basename.split('__')[0]
 
-                # if count_fps_del <= 3:
+                #保存的json文件格式
+                file_path = self.video_save_file + '/' + video_base_name + '.json'
 
-                    # frame_result_contact = frame_result_contact
-
-                # else:
+                # 对小图上的坐标和帧率进行分析
+                small_startfps,small_stopfps,small_fps = small_anno_infor.split('_')[0].split('-')
+                small_bbox_0,small_bbox_1,small_bbox_2,small_bbox_3 = small_anno_infor.split('_')[1].split('-')
 
-                    # frame_result_contact = update_frame_result_contact
-                    # count_fps_del = 0
+                big_startfps,big_stopfps,big_fps = big_anno_infor.split('_')[0].split('-')
+                big_bbox_0,big_bbox_1,big_bbox_2,big_bbox_3 = big_anno_infor.split('_')[1].split('-')
 
-            # else:
-                # count_fps_del = 0
-
-        # else:
+                big_add_startfps = int(big_startfps) + int(small_startfps)
+                big_add_stopfps = int(big_startfps) + int(small_stopfps)
+                big_add_bbox_0 = int(big_bbox_0) + int(small_bbox_0)
+                big_add_bbox_1 = int(big_bbox_1) + int(small_bbox_1)
+                big_add_bbox_2 = int(big_bbox_0) + int(small_bbox_2)
+                big_add_bbox_3 = int(big_bbox_1) + int(small_bbox_3)
 
-            # frame_result_contact = update_frame_result_contact
+                big_add_dict = {'labels':labels_pptsm,'startfps':big_add_startfps,'stopfps':big_add_stopfps,'bbox':[big_add_bbox_0,big_add_bbox_1,big_add_bbox_2,big_add_bbox_3]}
+                big_person_dict = {'labels':'person','startfps':big_startfps,'stopfps':big_stopfps,'bbox':[big_bbox_0,big_bbox_1,big_bbox_2,big_bbox_3]}
 
-        # return count_fps_del,frame_result_contact
+                if os.path.isfile(file_path):
+                    # 如果文件已存在,读取其中的字典数据
+                    with open(file_path, "r") as json_file:
+                        data = json.load(json_file)
+                        data['big_dict'].append(big_add_dict)
+
+                        if tools_function.compare_dicts(data['big_dict'], big_person_dict):
+                            data['big_dict'].append(big_person_dict)
+
+                    with open(file_path, "w") as json_file:
+
+                        json.dump(data, json_file)
+
+                    # # 访问和处理字典数据
+                    # print(data)
+                else:
+                    # 如果文件不存在,创建一个新的字典并保存到文件中
+                    bbox_dict = {'big_dict':[big_add_dict,big_person_dict]}
+                    with open(file_path, "w") as json_file:
+                        json.dump(bbox_dict, json_file)
+
+
     def write_video(self):
 
         # print('write_videoafagragr')
@@ -460,7 +496,7 @@ class DealVideo():
                 fps = cap.get(cv2.CAP_PROP_FPS)
                 video_fps = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
-                print(video_path,'fps:',fps,'video_fps:',video_fps)
+                # print(video_path,'fps:',fps,'video_fps:',video_fps)
                 # 获得起始
                 stop_fps = video_frame_dict['stop_fps']
 
                 # 裁剪信息
@@ -472,16 +508,18 @@ class DealVideo():
 
                     if start_fps >= stop_fps:
 
-                        print('start_fps:',start_fps,'stop_fps:',stop_fps)
+                        # print('start_fps:',start_fps,'stop_fps:',stop_fps)
                         break
                     else:
 
                        bbox_list = bbox_dict['result']
 
-                        # w = int(bbox_list[2]) - int(bbox_list[0])
-                        # h = int(bbox_list[3]) - int(bbox_list[1])
-                        size = (200,200)
+                        bbox_int_list = [int(bbox_list[0]),int(bbox_list[1]),int(bbox_list[2]),int(bbox_list[3])]
+                        w = bbox_int_list[2] - bbox_int_list[0]
+                        h = bbox_int_list[3] - bbox_int_list[1]
+                        size = (w,h)
 
                         # 根据标签保存不同视频分类
-                        video_name_save = video_basename + '_' + str(start_fps) + '_' + str(stop_fps) + '_' + str(i) + '.avi'
+                        # bbox_name = '{}-{}-{}_{}'.format(int(bbox_list[0]), int(bbox_list[1]), int(bbox_list[2]), int(bbox_list[3]))
+                        video_name_save = '{}__{}-{}-{}_{}-{}-{}-{}.avi'.format(video_basename, start_fps, stop_fps, video_fps,int(bbox_list[0]), int(bbox_list[1]), int(bbox_list[2]), int(bbox_list[3]))
                         video_save_file = self.video_save_file + '/' + file_name
                         os.makedirs(video_save_file, exist_ok=True)
                         video_save_path = os.path.join(video_save_file, video_name_save)
@@ -491,7 +529,8 @@ class DealVideo():
                                                          frameToStart=start_fps,
                                                          frametoStop=stop_fps,
                                                          videoWriter=videoWriter,
-                                                         bbox=bbox_list)
+                                                         bbox=bbox_int_list,
+                                                         size=size)
                         videoWriter.release()
 
                         self.videoQueue2.put(video_save_path)
@@ -502,8 +541,6 @@ class DealVideo():
 
                         break
 
-
-
     def select_video_path(self):
 
         while True:
@@ -515,7 +552,7 @@ class DealVideo():
                 directory = os.path.dirname(video_path)
                 labels = directory.split('/')[-1]
 
-                print('video_pathagfg:',video_path)
+                # print('video_pathagfg:',video_path)
 
                 # print(labels)
@@ -535,20 +572,20 @@ class DealVideo():
         self.get_video_listThread.start()
         self.get_video_frameThread.start()
         self.write_videoThread.start()
-        # self.write_videoThread.join()
         self.head_hands_detThread.start()
         self.video_select_dectThread.start()
         self.select_video_pathThread.start()
+        self.analysis_return_meassageThread.start()
 
 
 if __name__ == '__main__':
 
     t1 = time.time()
-    video = "E:/Bank_files/Bank_02/dataset/video_test/1min/0711-7_4.avi"
+    video = "test_video/test/0711ahgh.avi"
     video_save = 'test_video'
 
     # 初始化目标检测
-    person_model = YOLO("model_file/yolov8n.pt")
+    person_model = YOLO("model_file/yolov8x.pt")
 
     # 初始化pptsmv2
     config = 'model_file/inference/pptsm_lcnet_k400_16frames_uniform.yaml' # 配置文件地址