diff --git a/Bank_second_part/xbank_detect_process/add_xml.py b/Bank_second_part/xbank_detect_process/add_xml.py
new file mode 100644
index 0000000..754da18
--- /dev/null
+++ b/Bank_second_part/xbank_detect_process/add_xml.py
@@ -0,0 +1,95 @@
+
+from xml.etree.ElementTree import ElementTree, Element
+
+
+# pretty-print helper: insert line breaks and tab indentation into an XML tree
+
+
+def indent(elem, level=0):
+    i = "\n" + level*"\t"
+    if len(elem):
+        if not elem.text or not elem.text.strip():
+            elem.text = i + "\t"
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+        for elem in elem:
+            indent(elem, level+1)
+        if not elem.tail or not elem.tail.strip():
+            elem.tail = i
+    else:
+        if level and (not elem.tail or not elem.tail.strip()):
+            elem.tail = i
+
+
+def add_xml(inforsDict, xmlFilePath):
+
+    result = inforsDict
+
+    for re in result:
+        # if re['score'] > 0.5:
+
+        # get the annotation info
+        ObjName = list(re.keys())[0]
+        xmin = int(list(re.values())[0][0])
+        ymin = int(list(re.values())[0][1])
+        xmax = int(list(re.values())[0][2])
+        ymax = int(list(re.values())[0][3])
+        # xmax = xmin + r
+        # ymax = ymin + z
+
+        # if ObjName == 'person':
+
+        tree = ElementTree()
+        tree.parse(xmlFilePath)
+
+        # get the root element
+        root = tree.getroot()
+
+        # create the first-level elements
+        elementObj = Element('object')
+
+        elementBox = Element('bndbox')
+
+        # create the second-level elements
+        one = Element('name')
+        one.text = ObjName  # value of the second-level element
+        elementObj.append(one)  # attach the second-level element to the first-level one
+
+        two = Element('pose')
+        two.text = "Unspecified"
+        elementObj.append(two)
+
+        three = Element('truncated')
+        three.text = "0"
+        elementObj.append(three)
+
+        four = Element('difficult')
+        four.text = "0"
+        elementObj.append(four)
+
+        five = Element('xmin')
+        five.text = str(xmin)
+        elementBox.append(five)
+
+        six = Element('xmax')
+        six.text = str(xmax)
+        elementBox.append(six)
+
+        seven = Element('ymin')
+        seven.text = str(ymin)
+        elementBox.append(seven)
+
+        eight = Element('ymax')
+        eight.text = str(ymax)
+        elementBox.append(eight)
+
+        # attach the new object element to the root
+        elementObj.append(elementBox)
+        root.append(elementObj)
+        # fix line breaks and indentation
+        indent(elementObj)
+        indent(elementBox)
+        # write the result back into the file
+        tree.write(xmlFilePath, encoding='utf-8', xml_declaration=True)
+
diff --git a/Bank_second_part/xbank_detect_process/config_person.yaml b/Bank_second_part/xbank_detect_process/config_person.yaml
new file mode 100644
index 0000000..dc2df2c
--- /dev/null
+++ b/Bank_second_part/xbank_detect_process/config_person.yaml
@@ -0,0 +1,30 @@
+# load model file
+model: /home/xbank/xbank_poc_test_use/model_file/yolov8.onnx
+model_cache: /home/xbank/xbank_poc_test_use/tensort_cache/yolov8.trt
+# label and bbox message set
+model_parameter:
+  device : gpu
+  label_names: ["person","sleep"] # model labels
+  compara_label_names: ["person"] #
+  compara_relevancy: False # 'object_num'
+  relevancy_para : False
+  object_num_min : 5
+  confidence : 0.5
+
+# save_path : /home/xbank/xbank_poc_test_use/save_path/sleep
+# save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/sleep
+# test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/sleep
+save_path_original : False
+test_path : False
+save_annotations : False
+save_path : False
+
+# save videos
+save_videos : /home/xbank/xbank_poc_test_use/video_save_path/person
+
+# detect time set
+detect_time : 60
+detect_time_small : 5
+detect_ratio : 0.9
+
diff --git a/Bank_second_part/xbank_detect_process/config_phone.yaml b/Bank_second_part/xbank_detect_process/config_phone.yaml
index e7b383d..9055fba 100644
--- a/Bank_second_part/xbank_detect_process/config_phone.yaml
+++ b/Bank_second_part/xbank_detect_process/config_phone.yaml
@@ -1,21 +1,35 @@
 # load model file
-model: ./model_file/yolov5.onnx
-model_cache: ./tensort_cache/yolov5.trt
+model: /home/xbank/xbank_poc_test_use/model_file/yolov5.onnx
+model_cache: /home/xbank/xbank_poc_test_use/tensort_cache/yolov5.trt
 # label and bbox message set
 model_parameter:
-  device : gpu
+  device : gpu
   label_names: ["Keypad","hands","keyboard", "mouse","phone"] # model labels
   compara_label_names: ["hands","phone"] #
-  compara_relevancy: 'overlap' # 'in_bbox'
+  compara_relevancy: overlap # 'in_bbox','overlap'
   relevancy_para : 0
+  object_num_min : False
   confidence : 0.2
 
-save_path : ./save_path/hands
+# temporarily save images
+# save_path : E:/Bank_files/Bank_03/xbank_poc_test_use/save_path/hands/det
 # save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/hands
-# test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/hands
-save_path_original : False
+# test_path : E:/Bank_files/Bank_03/xbank_poc_test_use/save_path/hands/nodet
+save_path : False
 test_path : False
+save_path_original : False
+
+# save detect info
+save_annotations : False
+
+# save videos
+save_videos : /home/xbank/xbank_poc_test_use/video_save_path/hands
+
+# detect time set
+detect_time : 60
+detect_time_small : 5
+detect_ratio : 0.5
diff --git a/Bank_second_part/xbank_detect_process/config_sleep.yaml b/Bank_second_part/xbank_detect_process/config_sleep.yaml
index 023a921..5c17117 100644
--- a/Bank_second_part/xbank_detect_process/config_sleep.yaml
+++ b/Bank_second_part/xbank_detect_process/config_sleep.yaml
@@ -1,20 +1,31 @@
 # load model file
-model: ./model_file/yolov8.onnx
-model_cache: ./tensort_cache/yolov8.trt
+model: /home/xbank/xbank_poc_test_use/model_file/yolov8.onnx
+model_cache: /home/xbank/xbank_poc_test_use/tensort_cache/yolov8.trt
 # label and bbox message set
 model_parameter:
-  device : gpu
+  device : gpu
   label_names: ["person","sleep"] # model labels
-  compara_label_names: ["person","sleep"] #
+  compara_label_names: ["sleep"] #
   compara_relevancy: False # 'in_bbox'
   relevancy_para : False
-  confidence : 0.2
+  object_num_min : False
+  confidence : 0.5
 
-save_path : ./save_path/sleep
+# save_path : /home/xbank/xbank_poc_test_use/save_path/sleep
 # save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/sleep
 # test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/sleep
 save_path_original : False
 test_path : False
+save_annotations : False
+save_path : False
+
+# save videos
+save_videos : /home/xbank/xbank_poc_test_use/video_save_path/sleep
+
+# detect time set
+detect_time : 60
+detect_time_small : 5
+detect_ratio : 0.5
diff --git a/Bank_second_part/xbank_detect_process/create_xml.py b/Bank_second_part/xbank_detect_process/create_xml.py
new file mode 100644
index 0000000..873b424
--- /dev/null
+++ b/Bank_second_part/xbank_detect_process/create_xml.py
@@ -0,0 +1,53 @@
+
+from lxml.etree import Element, SubElement, tostring
+
+
+def create_xml(boxs, img_shape, xml_path):
+    """
+    Create an XML annotation file and write the required Pascal VOC fields in order.
+    :param boxs: boxes read from the txt file
+    :param img_shape: image info; width/height/depth (WHC) are written into the XML
+    :return:
+    """
+    node_root = Element('annotation')
+    node_folder = SubElement(node_root, 'folder')
+    node_folder.text = 'Images'
+    node_filename = SubElement(node_root, 'filename')
+    node_filename.text = str(img_shape[3])
+    node_size = SubElement(node_root, 'size')
+    node_width = SubElement(node_size, 'width')
+    node_width.text = str(img_shape[1])
+    node_height = SubElement(node_size, 'height')
+    node_height.text = str(img_shape[0])
+    node_depth = SubElement(node_size, 'depth')
+    node_depth.text = str(img_shape[2])
+
+    if len(boxs) >= 1:  # write each box in turn
+        for box in boxs:
+            node_object = SubElement(node_root, 'object')
+            node_name = SubElement(node_object, 'name')
+            # if str(list_[4]) == "person":  # filter which labels get annotated, e.g. keep only "person"; otherwise skip
+            #     node_name.text = str(list_[4])
+            # else:
+            #     continue
+            node_name.text = str(list(box.keys())[0])
+            node_difficult = SubElement(node_object, 'difficult')
+            node_difficult.text = '0'
+            node_bndbox = SubElement(node_object, 'bndbox')
+            node_xmin = SubElement(node_bndbox, 'xmin')
+            node_xmin.text = str(int(list(box.values())[0][0]))
+            node_ymin = SubElement(node_bndbox, 'ymin')
+            node_ymin.text = str(int(list(box.values())[0][1]))
+            node_xmax = SubElement(node_bndbox, 'xmax')
+            node_xmax.text = str(int(list(box.values())[0][2]))
+            node_ymax = SubElement(node_bndbox, 'ymax')
+            node_ymax.text = str(int(list(box.values())[0][3]))
+
+    xml = tostring(node_root, pretty_print=True)  # pretty-print: insert line breaks where needed
+
+    # file_name = img_shape[3].split(".")[0]
+    # filename = xml_path+"/{}.xml".format(file_name)
+
+    with open(xml_path, "wb") as f:
+        f.write(xml)
diff --git a/Bank_second_part/xbank_detect_process/main_process.py b/Bank_second_part/xbank_detect_process/main_process.py
index d0aba64..8f6f1e4 100644
--- a/Bank_second_part/xbank_detect_process/main_process.py
+++ b/Bank_second_part/xbank_detect_process/main_process.py
@@ -1,128 +1,256 @@
 from analysis_result.get_model_result import det_img
 from analysis_result.same_model_img import same_model_img_analysis_labels, model_labels_selet
-from model_load.model_load import load_model
+from model_load.model_load import Load_model
 from drawing_img.drawing_img import drawing_frame
-from analysis_data.data_dir_file import get_dir_file
+from analysis_data.data_rtsp import rtsp_para
+from analysis_data.data_dir_file import get_dir_file, get_imgframe
 from analysis_data.config_load import get_configs
-from utils import is_image_file, is_rtsp_or_video
+from add_xml import add_xml
+from create_xml import create_xml
+import yaml
 import cv2
 import os
+from pathlib import Path
 import time
 from datetime import datetime
+import glob
+import json


 def data_load(args):

-    # print('running process', msg)
-    # print(args)
     source = args[0]
     model_yaml = args[1]

     # load the data
-    rtsp_or_video_source = is_rtsp_or_video(source)
+    rtsp_source = rtsp_para(source)
     dir_source = os.path.isdir(source)
-    img_source = is_image_file(source)
+    file_source = os.path.isfile(source)

     # # load the model
     model_data = get_configs(model_yaml)
-    model_inference = load_model(model_file=model_data["model"],
+    model_inference = Load_model(model_file=model_data["model"],
                                  device=model_data["model_parameter"]['device'],
                                  cache_file=model_data["model_cache"])

-    if rtsp_or_video_source:
-        start_point = time.perf_counter()
+    if rtsp_source:

-        cap = cv2.VideoCapture(source)
+        rtsp_detect_process(source=source, model_data=model_data,
+                            model_inference=model_inference)

-        try:
-            i = 0
-            while True:
-                ret, frame = cap.read()
+    if dir_source:
+        dir_source_process(source, model_inference, model_data)

-                if not ret:
-
-                    # the stream can be unstable:
-                    # if a frame was not read successfully, keep reading the next one
-                    if source.startswith('rtsp'):
-                        continue
-                    else:
-                        break
+    if file_source:

-                print(source, datetime.today(), i)
-                # if source == 'rtsp://admin:@192.168.10.18':
+        file_source_process(source, model_inference, model_data)

-                # cv2.imshow('18',frame)
-                img_frame_dict = {"path": source, 'frame': frame}
-                images_update = img_process(
-                    img_frame_dict, model_inference, model_data)
-
-                # print(type(images_update['frame']))

+def rtsp_detect_process(source, model_data, model_inference):

-                # if source == 'rtsp://admin:@192.168.10.18':
-                #     cv2.namedWindow('18',0)
-                #     cv2.imshow('18',images_update['frame'])
+    cap = cv2.VideoCapture(source)

-                i = i+1
-
+    # video stream info
+    fps = int(cap.get(cv2.CAP_PROP_FPS))
+    fps_num = fps*model_data['detect_time']
+    fps_num_small = fps*model_data['detect_time_small']
+    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
+            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

+    try:
+        i = 0
+        j = 0

-        except Exception as e:
-            # handle exceptions and errors
-            print(str(e))
+        det_t_num = 0
+        nodet_t_num = 0

-        cap.release()
+        det_img_list = []  # renamed from det_img so the imported det_img() is not shadowed

-        end_point = time.perf_counter()
-        print(f"video processing time: {(end_point - start_point)}s")
+        video_name_time = 0
+        det_fps_time = []

-        exit(0)
+        while True:
+            ret, frame = cap.read()

-    if dir_source:
+            t1 = time.time()
+
+            if not ret:
+                continue  # if a frame was not read successfully, keep reading the next one
+
+            i = i + 1
+            j = j + 1
+
+            # timestamp when the current frame was read
+            data_now = datetime.now()
+            get_time = str(data_now.strftime("%H")) + \
+                str(data_now.strftime("%M")) + str(data_now.strftime("%S")) + \
+                str(data_now.strftime("%f"))
+
+            # open a new output video when a clip starts
+            if video_name_time == 0:
+
+                video_name_time = get_time
+                savePath = os.path.join(model_data['save_videos'], (str(data_now.strftime(
+                    "%Y")) + str(data_now.strftime("%m")) + str(data_now.strftime("%d"))))
+
+                if not os.path.exists(savePath):
+                    os.makedirs(savePath)
+
+                video_path = os.path.join(
+                    savePath, video_name_time + '.avi')
+                print('video_path:', video_path)
+
+                out_video = cv2.VideoWriter(
+                    video_path, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
+
+            print(source, data_now, i, j, video_path)
+
+            # inference
+            imgframe_dict = {"path": source, 'frame': frame,
+                             'get_fps': j, 'get_time': get_time}  # images_save() expects 'get_time'
+
+            images_det_result = img_process(
+                imgframe_dict, model_inference, model_data)
+
+            images_update = save_process(
+                imgframe_dict, images_det_result, model_data)
+
+            # print('images_det_result:', len(images_det_result))
+
+            # result check over the small window t
+            if images_det_result:
+
+                det_t_num = det_t_num + 1
+
+                if len(det_img_list) == 0:
+                    img_dict = images_update.copy()
+                    del img_dict['frame']
+                    det_img_list.append(img_dict)
+
+            if not images_det_result and len(det_img_list) > 0:
+
+                nodet_t_num = nodet_t_num + 1
+
+            if (det_t_num + nodet_t_num) >= fps_num_small:
+
+                para = determine_time(
+                    det_num=det_t_num, nodet_num=nodet_t_num, ratio_set=model_data['detect_ratio'])
+
+                if para:
+
+                    first_fps_time = det_img_list[0]
+                    first_fps_time.update(
+                        {"dert_fps": (j-int(first_fps_time['get_fps'])+1)})
+                    det_fps_time.append(first_fps_time)
+
+                det_img_list.clear()
+                det_t_num = 0
+                nodet_t_num = 0

-        img_ext = [".jpg", ".JPG", ".bmp"]
-        video_ext = [".mp4", ".avi", ".MP4"]
+            # write the frame into the output video
+            out_video.write(images_update['frame'])

-        img_list = get_dir_file(source, img_ext)
-        video_list = get_dir_file(source, video_ext)
+            # result check over the long window T
+            if j >= fps_num:

-        if img_list:
+                out_video.release()

-            for img in img_list:
+                # the T window has ended; judge the pending t-window result
+                if det_img_list:
+                    para = determine_time(
+                        det_num=det_t_num, nodet_num=nodet_t_num, ratio_set=model_data['detect_ratio'])

-                t1 = time.time()
-                images = cv2.imread(img)
-
-                img_frame_dict = {"path": img, 'frame': images}
+                    first_fps_time = det_img_list[0]
+
+                    # print(j-int(first_fps_time['get_fps'])+1)
+                    # print(fps_num_small/2)
+
+                    if (j-int(first_fps_time['get_fps'])+1) >= (fps_num_small/2):
+
+                        first_fps_time.update(
+                            {"dert_fps": (j-int(first_fps_time['get_fps'])+1)})
+                        det_fps_time.append(first_fps_time)
+
+                # print('det_fps_time:', det_fps_time)
+
+                if det_fps_time:
+                    re_list = json_get(
+                        time_list=det_fps_time, video_path=video_path, fps=fps)
+                    json_save(re_list)
+
+                else:
+                    print(video_path)
+                    os.remove(video_path)
+                    print('----------------------------------------------clear videos-----------------------------------------------')
+
+                # reset for the next long window
+                print('----------------------------------------------next-----------------------------------------------')
+                det_img_list.clear()
+                det_fps_time.clear()
+                det_t_num = 0
+                nodet_t_num = 0
+                video_name_time = 0
+                j = 0
+
+            # print('det_fps_time:', det_fps_time, 'det_img_list:', det_img_list)
+
+            t2 = time.time()
+            tx = t2 - t1
+            print('time to detect one image:', tx)
+
+    except Exception as e:
+        # handle exceptions and errors
+        print(str(e))
+
+    cap.release()

-                images_update = img_process(
-                    img_frame_dict, model_inference, model_data)
-                t2 = time.time()
-                tx = t2 - t1
-                print('time to detect one image:', tx)
+def dir_source_process(source, model_inference, model_data):

+    img_ext = [".jpg", ".JPG", ".bmp"]
+    video_ext = [".mp4", ".avi", ".MP4"]

-        if video_list:
+    img_list = get_dir_file(source, img_ext)
+    video_list = get_dir_file(source, video_ext)

-            pass
+    if img_list:

-        if img_source:
+        for img in img_list:

-            img_para = True
+            t1 = time.time()
+            images = cv2.imread(img)

-            if img_para:
-                images = cv2.imread(source)
-                img_frame_dict = {"path": source, 'frame': images}
+            imgframe_dict = {"path": img, 'frame': images}

             images_update = img_process(
-                img_frame_dict, model_inference, model_data)
+                imgframe_dict, model_inference, model_data)
+            t2 = time.time()
+            tx = t2 - t1
+            print('time to detect one image:', tx)

-def img_process(images, model_inference, model_data):
+    if video_list:
+
+        pass
+
+
+def file_source_process(source, model_inference, model_data):
+
+    img_para = True
+
+    if img_para:
+        images = cv2.imread(source)
+        imgframe_dict = {"path": source, 'frame': images}
+
+        images_update = img_process(
+            imgframe_dict, model_inference, model_data)

-    start_point = time.perf_counter()
+def img_process(images, model_inference, model_data):
+
+    # t1 = time.time()

     # detect each frame and return the inference results
     results = det_img(model_inference=model_inference,
                       images_frame=images['frame'],
@@ -146,36 +274,56 @@ def img_process(images, model_inference, model_data):

     determine_bbox = select_labels_list

-    # return the judged info; here we draw, and the result can be returned
+    # print(determine_bbox)
+
+    if model_data['model_parameter']['object_num_min']:
+        if len(determine_bbox) >= model_data["model_parameter"]['object_num_min']:
+
+            # print(len(determine_bbox))
+            determine_bbox.clear()
+
+    # return the post-check detections
+    return determine_bbox
+
+
+def save_process(images, determine_bbox, model_data):
+
     if determine_bbox:

         images.update({"results": determine_bbox})

         img_save = drawing_frame(
             images_frame=images['frame'], result_list=determine_bbox)
-
+        images.update({"frame": img_save})

-        img_name = images_save(
-            images=images['frame'], save_path=model_data["save_path"])
+        if model_data["save_path"]:

-        print('sleep:', images['path'], img_name)
+            imgname = images_save(
+                images=images, save_path=model_data["save_path"])

         if model_data['save_path_original']:
-            images_save(images=images['frame'],
-                        save_path=model_data["save_path_original"])
-        else:
-            pass
+            imgname_original = images_save(images=images,
+                                           save_path=model_data["save_path_original"])
+
+        if model_data["save_annotations"]:
+
+            if not os.path.exists(model_data["save_annotations"]):
+
+                os.makedirs(model_data["save_annotations"])
+            save_annotations_xml(
+                xml_save_file=model_data["save_annotations"], save_infors=determine_bbox, images=images['path'])
+
+        else:
+            pass

     else:

         # whether to save the images with no detections
         if model_data["test_path"]:
-            img_name = images_save(
-                images=images['frame'], save_path=model_data["test_path"])
-            # print('no:',images['path'],img_name)
+            imgname = images_save(
+                images=images, save_path=model_data["test_path"])
+            # print('no:',images['path'],imgname)

-        else:
-            pass
-
     # display preview
     # if images['path'] == 'rtsp://admin:@192.168.10.11':
     #     cv2.namedWindow('11', cv2.WINDOW_NORMAL)
@@ -185,29 +333,120 @@ def img_process(images, model_inference, model_data):

     # t2 = time.time()

-    end_point = time.perf_counter()
-    cost = end_point - start_point
-
-    print(f"Predicted in {cost * 1000:.2f}ms. {1.0 / cost:.2f} FPS")
-
     return images


 def images_save(images, save_path):

+    # use the frame capture time as the image file name
-    data_now = datetime.today()
-    images_name = str(data_now.year) + str(data_now.month) + str(data_now.day) + str(data_now.hour) + \
-        str(data_now.minute) + str(data_now.second) + \
-        str(data_now.microsecond) + '.jpg'
-    img_save_path = save_path + '/' + str(
-        data_now.year) + '/' + str(data_now.month) + '_' + str(data_now.day) + '/'
+    # data_now = datetime.now()
+    # images_name = str(data_now.strftime("%Y")) + str(data_now.strftime("%m")) + str(data_now.strftime("%d")) + str(data_now.strftime("%H")) + \
+    #     str(data_now.strftime("%M")) + str(data_now.strftime("%S")) + \
+    #     str(data_now.strftime("%f")) + '.jpg'
+    # img_save_path = save_path + '/' + str(
+    #     data_now.year) + '/' + str(data_now.month) + '_' + str(data_now.day) + '/'
+    img_save_path = os.path.join(save_path, str(images['path'].split('.')[-1]))
+    images_name = images['get_time'] + '.jpg'

     if not os.path.exists(img_save_path):
         os.makedirs(img_save_path)

-    full_name = img_save_path + images_name
+    full_name = os.path.join(img_save_path, images_name)

-    cv2.imwrite(full_name, images)
+    cv2.imwrite(full_name, images['frame'])

     return full_name
+
+
+def save_annotations_xml(xml_save_file, save_infors, images):
+
+    results = save_infors
+    img = os.path.basename(images)
+    img_frame = cv2.imread(images)
+    xml_save_path = os.path.join(xml_save_file, img.split('.')[0] + '.xml')
+    h, w, d = img_frame.shape  # OpenCV shape order is (height, width, channels)
+    img_shape = (h, w, d, img)
+
+    if os.path.isfile(xml_save_path):
+
+        add_xml(inforsDict=results,
+                xmlFilePath=xml_save_path)
+    else:
+        create_xml(boxs=results,
+                   img_shape=img_shape,
+                   xml_path=xml_save_path)
+
+
+def determine_time(det_num, nodet_num, ratio_set):
+
+    ratio = det_num / (det_num + nodet_num)
+
+    # print(det_num, nodet_num, ratio)
+
+    if ratio >= ratio_set:
+
+        return True
+
+    else:
+
+        return False
+
+
+def video_synthesis(imglist, savePath, size, fps, videoname):
+
+    if not os.path.exists(savePath):
+        os.makedirs(savePath)
+
+    print(videoname)
+    video_path = os.path.join(savePath, videoname + '.avi')
+    out = cv2.VideoWriter(
+        video_path, cv2.VideoWriter_fourcc(*'DIVX'), fps, size)
+
+    sorted_list = sorted(imglist, key=lambda x: x['get_time'])
+
+    for filename in sorted_list:
+        out.write(filename['frame'])
+    out.release()
+
+
+def json_get(time_list, video_path, fps):
+
+    result_dict = {'info': {'video_path': video_path, 'fps': fps}}
+    re_dict = {}
+    for i, det_dict in enumerate(time_list):
+
+        list_hands = ["Keypad", "hands", "keyboard", "mouse", "phone"]
+        list_sleep = ["person", "sleep"]
+
+        if list(det_dict['results'][0].keys())[0] in list_hands:
+
+            result_lables = 'playing_phone'
+
+        if list(det_dict['results'][0].keys())[0] in list_sleep:
+
+            result_lables = "sleep"
+
+        fps_dict = {'time': det_dict['get_fps'],
+                    'duration': det_dict['dert_fps'], 'result': result_lables}
+        re_dict.update({('id_' + str(i)): fps_dict})

+    result_dict.update({'result': re_dict})
+
+    return result_dict
+
+
+def json_save(result_dict):
+
+    json_path = result_dict['info']['video_path'].split('.')[0] + '.json'
+
+    result = json.dumps(result_dict)
+
+    with open(json_path, 'w') as f:
+        f.write(result + '\n')
+
+
+# if __name__ == '__main__':
+
+#     data_load(['rtsp://admin:@192.168.10.203',
+#                'E:/Bank_files/Bank_03/xbank_poc_test_use/config_phone.yaml'])
diff --git a/Bank_second_part/xbank_detect_process/model_load/model_load.py b/Bank_second_part/xbank_detect_process/model_load/model_load.py
index 1b09eb5..d9a60e4 100644
--- a/Bank_second_part/xbank_detect_process/model_load/model_load.py
+++ b/Bank_second_part/xbank_detect_process/model_load/model_load.py
@@ -19,7 +19,7 @@ def build_option(device, backend, cache_file):
     return option


-def load_model(model_file, device, cache_file):
+def Load_model(model_file, device, cache_file):
     """
     Load the model's TensorRT engine
     model_file: model weights, format: ".onnx"
diff --git a/Bank_second_part/xbank_detect_process/readme.md b/Bank_second_part/xbank_detect_process/readme.md
index bc19902..92a56fa 100644
--- a/Bank_second_part/xbank_detect_process/readme.md
+++ b/Bank_second_part/xbank_detect_process/readme.md
@@ -6,22 +6,56 @@
 model: /home/yaxin/xbank/xbank_poc_test/config_phone.yaml  # config file of the model used for detection

 4. Model config file: config_<detection-name>.yaml
+
     # load model file
-    model: /home/yaxin/xbank/xbank_poc_test/model_file/yolov5.onnx  # absolute path to the model
-    model_cache: /home/yaxin/xbank/xbank_poc_test/tensort_cache/yolov5.trt  # cache location, no need to change
-
-    # label and bbox message set
-    model_parameter:  # parameters used during detection
-      device : gpu  # run detection on the GPU
-      label_names: ["Keypad","hands","keyboard", "mouse","phone"] # model labels  # label names from training; the order must match the dataset order used in training, do not change
-      compara_label_names: ["hands","phone"] #  # which detected classes to keep
-      compara_relevancy: 'overlap' # 'in_bbox'  # whether to check the relationship between the selected targets' boxes; set to False if not needed, usually unchanged
-      relevancy_para : 0  # parameter for the relevancy check
-      confidence : 0.2  # confidence threshold for filtering detections
-
-    save_path : /home/yaxin/xbank/xbank_poc_test/save_path/hands  # path for saving annotated images, usually unchanged
-    # save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/hands  # path for the original images of detections; use False to disable
-    # test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/hands  # images with no detections; set to False to disable, saved by default
+
+    # absolute path to the model
+    model: /home/yaxin/xbank/xbank_poc_test/model_file/yolov5_XXX.onnx
+    # cache location, no need to change
+    model_cache: /home/yaxin/xbank/xbank_poc_test/tensort_cache/yolov5_XXX.trt
+
+    # label and bbox message set
+
+    # parameters used during detection
+    model_parameter:
+
+      # run detection on the GPU
+      device : gpu
+      # label names from training; the order must match the dataset order used in training, do not change
+      label_names: ["Keypad","hands","keyboard", "mouse","phone"] # model labels
+      # which detected classes to keep
+      compara_label_names: ["hands","phone"]
+      # whether to check the relationship between the selected targets' boxes; set to False if not needed, usually unchanged
+      compara_relevancy: 'overlap' # 'in_bbox'
+      # parameter for the relevancy check
+      relevancy_para : 0
+      # confidence threshold for filtering detections
+      confidence : 0.2
+      # minimum number of detected targets to count; set to False if no count is needed
+      object_num_min : 5
+
+    # path for saving annotated images; use False to disable
+    save_path : /home/yaxin/xbank/xbank_poc_test/save_path/hands
+    # path for saving the original images of detections; use False to disable
+    save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/hands
+    # images with no detections; set to False to disable, saved by default
+    test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/hands
+    # whether to save detections as XML annotation files; set to False to disable
+    save_annotations : False
+
+    # save video clips that contain detections
+    # save videos
+    # video save path
+    save_videos : /home/xbank/xbank_poc_test_use/video_save_path/person
+
+    # time-window settings for detection
+    # detect time set
+    # length of each saved long video, in seconds
+    detect_time : 60
+    # length of the window used to judge whether the target behaviour occurred, in seconds
+    detect_time_small : 5
+    # within that window, the minimum ratio of target detections to all checked frames
+    detect_ratio : 0.9

 5. Use the local environment: conda activate fastdeploy
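
Note: to make the detect_time settings concrete, here is a small worked sketch of the t/T windowing that rtsp_detect_process implements. The 25 fps rate and the frame counts are illustrative assumptions, not values taken from the code:

    # a minimal sketch of the windowing, assuming a 25 fps stream
    fps = 25
    detect_time = 60        # long window T (seconds): one saved clip + one JSON decision
    detect_time_small = 5   # small window t (seconds): one behaviour judgement
    detect_ratio = 0.5

    fps_num = fps * detect_time              # 1500 frames per long window
    fps_num_small = fps * detect_time_small  # 125 frames per small window

    # determine_time() keeps a small window when the share of frames with
    # detections reaches detect_ratio (hypothetical counts shown here)
    det_num, nodet_num = 70, 55              # 70 of 125 judged frames had detections
    keep = det_num / (det_num + nodet_num) >= detect_ratio
    print(keep)  # True: 0.56 >= 0.5, so the window is recorded in det_fps_time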
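The JSON sidecar written by json_get/json_save sits next to each kept clip (same path, with .avi swapped for .json). A sketch of its shape, with hypothetical values; the keys mirror json_get above:

    import json

    # hypothetical example of the structure json_get returns
    result_dict = {
        'info': {'video_path': '/home/xbank/xbank_poc_test_use/video_save_path/sleep/20240101/101530123456.avi',
                 'fps': 25},
        'result': {
            'id_0': {'time': 380,        # 'get_fps': frame index where the behaviour started
                     'duration': 125,    # 'dert_fps': length of the behaviour in frames
                     'result': 'sleep'}  # label group mapped inside json_get
        }
    }
    print(json.dumps(result_dict))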
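Finally, a usage sketch following the commented-out __main__ at the bottom of main_process.py; the source may be an RTSP URL, a directory, or a single image file, and the paths shown are the ones from that comment:

    from main_process import data_load

    # args: [source, model_yaml_path]
    data_load(['rtsp://admin:@192.168.10.203',
               'E:/Bank_files/Bank_03/xbank_poc_test_use/config_phone.yaml'])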