0921 code update

V0.1.0
王莹
parent ab9b20f834
commit 87722c10ab

@ -0,0 +1,7 @@
import yaml
from pathlib import Path


def get_configs(yaml_files):
    # Resolve the YAML file relative to this module (an absolute path also works)
    yaml_path = Path(__file__).parent / yaml_files
    with yaml_path.open("r", encoding="utf-8") as f:
        return yaml.load(f, Loader=yaml.FullLoader)
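A minimal usage sketch of get_configs; the relative file name is only an illustration (the project passes absolute paths, which also work because pathlib keeps the right-hand operand when it is absolute):

# Sketch: load a YAML file that sits next to config_load.py (hypothetical name)
cfg = get_configs(yaml_files="config_det.yaml")
print(list(cfg.keys()))  # e.g. ['detect_0', 'detect_1', ...]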

@ -0,0 +1,25 @@
import cv2
import os


def get_dir_file(path, ext_list):
    # e.g. video_ext = [".mp4", ".avi", ".MP4"]
    file_names = []
    for maindir, subdir, file_name_list in os.walk(path):
        for filename in file_name_list:
            apath = os.path.join(maindir, filename)
            ext = os.path.splitext(apath)[1]
            if ext in ext_list:
                file_names.append(apath)
    return file_names


def get_imgframe(img_path_list):
    imgframe_list = []
    for img in img_path_list:
        images = cv2.imread(img)
        imgframe_dict = {"path": img, 'frame': images}
        imgframe_list.append(imgframe_dict)
    return imgframe_list
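A quick sketch of how these two helpers combine; the directory path is taken from the commented-out detect_6 entry further down:

# Collect all matching image files under a folder and read them into memory
img_paths = get_dir_file("/home/yaxin/xbank/xbank_poc_test/images_test/images_del", [".jpg", ".JPG", ".bmp"])
frames = get_imgframe(img_paths)
# frames: [{"path": <file path>, "frame": <numpy array from cv2.imread>}, ...]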

@ -0,0 +1,9 @@
def rtsp_para(source):
    # Return True if the source is an RTSP stream URL
    if source.split('://')[0] == 'rtsp':
        return True
    else:
        return False

@ -0,0 +1,22 @@
def det_img(model_inference, images_frame, confidence, label_name_list):
    # Run inference on one frame and keep detections above the confidence threshold
    result = model_inference.predict(images_frame)
    bbox_list = result.boxes
    labels_ids_list = result.label_ids
    scores_list = result.scores
    result_list = []
    for i in range(len(labels_ids_list)):
        if scores_list[i] > confidence:
            # Map the numeric label id back to its name: {label_name: bbox}
            result_dict = {label_name_list[int(labels_ids_list[i])]: bbox_list[i]}
            result_list.append(result_dict)
    return result_list
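For orientation, a sketch of what det_img returns, assuming a FastDeploy detection model loaded with the Load_model helper added later in this commit; the variables model and frame and the two-label list (mirroring config_sleep.yaml) are illustrative:

# Sketch: run detection on one frame and inspect the filtered results
results = det_img(model_inference=model,              # a FastDeploy detection model
                  images_frame=frame,                  # a BGR image from cv2
                  confidence=0.2,
                  label_name_list=["person", "sleep"])
# results: [{"person": [x1, y1, x2, y2]}, {"sleep": [x1, y1, x2, y2]}, ...]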

@ -1,4 +1,4 @@
from tools_analysis import select_bbox_by_labels, iou_result_two
from analysis_result.tools_analysis import select_bbox_by_labels, iou_result_two
def same_model_img_analysis_labels(example_list, result_dicts_list, relevancy, relevancy_para):
@ -9,6 +9,7 @@ def same_model_img_analysis_labels(example_list, result_dicts_list, relevancy, relevancy_para):
    # Collect the set of labels present in the current results
    result_labels_list = [list(re.keys())[0] for re in result_dicts_list]
    # print('result_labels_list:', result_labels_list)
    # Check whether all labels from example_list are present
    continue_para = False
@ -42,6 +43,6 @@ def model_labels_selet(example_list, result_dict_list):
    Get the target list directly from the result
    '''
    compara_labellist = [result_dict for result_dict in result_dict_list if list(
        result_dict.keys()) in example_list]
        result_dict.keys())[0] in example_list]
    return compara_labellist

@ -65,8 +65,8 @@ def select_bbox_by_labels(example_list, result_dicts_list):
    for label in example_list:
        bbox_list = []
        for result in result_dicts_list:
            if list(result.keys()) == label:
                bbox_list.append(list(result.values()))
            if list(result.keys())[0] == label:
                bbox_list.append(list(result.values())[0])
        result_dict_change = {label: bbox_list}
        result_dict_list_change.append(result_dict_change)
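A one-line illustration of why the added [0] index matters in both hunks above: each result dict has a single label key, so list(result.keys()) is a one-element list, never the label string itself (the bbox values below are made up):

result = {"hands": [10, 20, 110, 220]}
list(result.keys())       # ['hands']  -> never equal to the string 'hands'
list(result.keys())[0]    # 'hands'    -> matches the label as intended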

@ -0,0 +1,32 @@
detect_0:
  source : rtsp://admin:@192.168.10.18
  model: /home/yaxin/xbank/xbank_poc_test/config_phone.yaml

detect_1:
  source : rtsp://admin:@192.168.10.12
  model: /home/yaxin/xbank/xbank_poc_test/config_sleep.yaml

detect_2:
  source : rtsp://admin:@192.168.10.18
  model: /home/yaxin/xbank/xbank_poc_test/config_sleep.yaml

detect_3:
  source : rtsp://admin:@192.168.10.11
  model: /home/yaxin/xbank/xbank_poc_test/config_sleep.yaml

# detect_4:
#   source : rtsp://admin:@192.168.10.11
#   model: /home/yaxin/xbank/xbank_poc_test/config_phone.yaml
# detect_5:
#   source : rtsp://admin:@192.168.10.18
#   model: /home/yaxin/xbank/xbank_poc_test/config_phone.yaml
# detect_6:
#   source : /home/yaxin/xbank/xbank_poc_test/images_test/images_del
#   model: /home/yaxin/xbank/xbank_poc_test/config_phone.yaml
# detect_7:
#   source : /home/yaxin/xbank/xbank_poc_test/images_test/images_del
#   model: /home/yaxin/xbank/xbank_poc_test/config_sleep.yaml
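For reference, get_args_list in main.py (further down in this commit) flattens each detect_* entry into a [source, model] pair, so the configuration above becomes roughly:

args_data = get_configs(yaml_files='/home/yaxin/xbank/xbank_poc_test/config_det.yaml')
args_list = get_args_list(args_data)
# args_list ≈ [['rtsp://admin:@192.168.10.18', '/home/yaxin/xbank/xbank_poc_test/config_phone.yaml'],
#              ['rtsp://admin:@192.168.10.12', '/home/yaxin/xbank/xbank_poc_test/config_sleep.yaml'],
#              ...]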

@ -0,0 +1,21 @@
# load model file
model: /home/yaxin/xbank/xbank_poc_test/model_file/yolov5.onnx
model_cache: /home/yaxin/xbank/xbank_poc_test/tensort_cache/yolov5.trt

# label and bbox message set
model_parameter:
  device : gpu
  label_names: ["Keypad","hands","keyboard", "mouse","phone"] # model labels
  compara_label_names: ["hands","phone"] #
  compara_relevancy: 'overlap' # 'in_bbox'
  relevancy_para : 0
  confidence : 0.2

save_path : /home/yaxin/xbank/xbank_poc_test/save_path/hands
# save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/hands
# test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/hands
save_path_original : False
test_path : False

@ -0,0 +1,20 @@
# load model file
model: /home/yaxin/xbank/xbank_poc_test/model_file/yolov8.onnx
model_cache: /home/yaxin/xbank/xbank_poc_test/tensort_cache/yolov8.trt

# label and bbox message set
model_parameter:
  device : gpu
  label_names: ["person","sleep"] # model labels
  compara_label_names: ["person","sleep"] #
  compara_relevancy: False # 'in_bbox'
  relevancy_para : False
  confidence : 0.2

save_path : /home/yaxin/xbank/xbank_poc_test/save_path/sleep
# save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/sleep
# test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/sleep
save_path_original : False
test_path : False

@ -0,0 +1,34 @@
import cv2


def drawing_frame(images_frame,
                  result_list,
                  line_1=3,
                  line_2=1,
                  color_1=(0, 0, 255),
                  color_2=(0, 255, 255),
                  txt_num=0.75,
                  scores_list_id=False):
    '''
    result_list: a list of the form [{label1: bbox1}, {label2: bbox2}, ...]
    '''
    img = images_frame.copy()
    for result in result_list:  # do not shadow the list being iterated
        label_name = list(result.keys())[0]
        bbox_list = list(result.values())[0]
        if scores_list_id:
            scores_list = result['scores_list']
            labels = str(label_name) + "__" + str(scores_list)
        else:
            labels = str(label_name)
        # Draw the bounding box and its label on the copy of the frame
        cv2.rectangle(img, (int(bbox_list[0]), int(bbox_list[1])), (int(
            bbox_list[2]), int(bbox_list[3])), color_1, line_1)
        cv2.putText(img, labels, (int(bbox_list[0]) - 10, int(
            bbox_list[1]) - 10), cv2.FONT_HERSHEY_SIMPLEX, txt_num, color_2, line_2)
    return img
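A usage sketch tying detection and drawing together; the test image paths are hypothetical and model is assumed to be a loaded FastDeploy detection model (the label list mirrors config_phone.yaml):

import cv2
frame = cv2.imread("/home/yaxin/xbank/test.jpg")   # hypothetical test image
results = det_img(model_inference=model, images_frame=frame,
                  confidence=0.2,
                  label_name_list=["Keypad", "hands", "keyboard", "mouse", "phone"])
drawn = drawing_frame(images_frame=frame, result_list=results)
cv2.imwrite("/home/yaxin/xbank/test_drawn.jpg", drawn)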

@ -0,0 +1,37 @@
# coding: utf-8
from main_process import data_load
import multiprocessing
from analysis_data.config_load import get_configs
from model_load.model_load import Load_model
import time
from multiprocessing import Pool


def get_args_list(args_data):
    args_list = []
    for args in args_data:
        args_det = args_data[args]
        det_config = args_det['model']
        det_source = args_det['source']
        det_list = [det_source, det_config]
        args_list.append(det_list)
    return args_list


if __name__ == "__main__":
    # Load the detection configuration file
    args = '/home/yaxin/xbank/xbank_poc_test/config_det.yaml'
    args_data = get_configs(yaml_files=args)
    args_list = get_args_list(args_data)

    # One worker process per detection task
    process_num = len(args_list)
    with Pool(process_num) as pool:
        pool.map(data_load, args_list)

@ -0,0 +1,201 @@
from analysis_result.get_model_result import det_img
from analysis_result.same_model_img import same_model_img_analysis_labels, model_labels_selet
from model_load.model_load import Load_model
from drawing_img.drawing_img import drawing_frame
from analysis_data.data_rtsp import rtsp_para
from analysis_data.data_dir_file import get_dir_file, get_imgframe
from analysis_data.config_load import get_configs

import yaml
import cv2
import os
from pathlib import Path
import time
from datetime import datetime
def data_load(args):
    # print('running process:', msg)
    # print(args)
    source = args[0]
    model_yaml = args[1]

    # Work out what kind of data source this is
    rtsp_source = rtsp_para(source)
    dir_source = os.path.isdir(source)
    file_source = os.path.isfile(source)

    # Load the model
    model_data = get_configs(model_yaml)
    model_inference = Load_model(model_file=model_data["model"],
                                 device=model_data["model_parameter"]['device'],
                                 cache_file=model_data["model_cache"])

    if rtsp_source:
        cap = cv2.VideoCapture(source)
        try:
            i = 0
            while True:
                ret, frame = cap.read()
                if not ret:
                    continue  # if no frame was read, try the next one
                print(source, datetime.today(), i)
                # if source == 'rtsp://admin:@192.168.10.18':
                #     cv2.imshow('18', frame)
                t1 = time.time()
                imgframe_dict = {"path": source, 'frame': frame}
                images_update = img_process(
                    imgframe_dict, model_inference, model_data)
                # print(type(images_update['frame']))
                t2 = time.time()
                tx = t2 - t1
                print('Time to detect one frame:', tx)
                # if source == 'rtsp://admin:@192.168.10.18':
                #     cv2.namedWindow('18', 0)
                #     cv2.imshow('18', images_update['frame'])
                i = i + 1
        except Exception as e:
            # Handle errors and release the capture
            print(str(e))
            cap.release()

    if dir_source:
        img_ext = [".jpg", ".JPG", ".bmp"]
        video_ext = [".mp4", ".avi", ".MP4"]
        img_list = get_dir_file(source, img_ext)
        video_list = get_dir_file(source, video_ext)
        if img_list:
            for img in img_list:
                t1 = time.time()
                images = cv2.imread(img)
                imgframe_dict = {"path": img, 'frame': images}
                images_update = img_process(
                    imgframe_dict, model_inference, model_data)
                t2 = time.time()
                tx = t2 - t1
                print('Time to detect one image:', tx)
        if video_list:
            pass

    if file_source:
        img_para = True
        if img_para:
            images = cv2.imread(source)
            imgframe_dict = {"path": source, 'frame': images}
            images_update = img_process(
                imgframe_dict, model_inference, model_data)
def img_process(images, model_inference, model_data):
    # t1 = time.time()
    # Run inference on the frame and return the raw detections
    results = det_img(model_inference=model_inference,
                      images_frame=images['frame'],
                      confidence=model_data["model_parameter"]['confidence'],
                      label_name_list=model_data["model_parameter"]['label_names'])
    # print(images['path'])

    # Keep only the bounding boxes of the configured labels
    select_labels_list = model_labels_selet(example_list=model_data["model_parameter"]['compara_label_names'],
                                            result_dict_list=results)

    if model_data["model_parameter"]['compara_relevancy']:
        # Analyse the relationship between the selected boxes (e.g. overlap)
        determine_bbox = same_model_img_analysis_labels(example_list=model_data["model_parameter"]['compara_label_names'],
                                                        result_dicts_list=select_labels_list,
                                                        relevancy=model_data["model_parameter"]['compara_relevancy'],
                                                        relevancy_para=model_data["model_parameter"]['relevancy_para'])
    else:
        determine_bbox = select_labels_list

    # Act on the final result: here the boxes are drawn and the image is saved,
    # but the result could also simply be returned
    if determine_bbox:
        images.update({"results": determine_bbox})
        frame_original = images['frame']  # keep the undrawn frame for save_path_original
        img_save = drawing_frame(
            images_frame=images['frame'], result_list=determine_bbox)
        images.update({"frame": img_save})
        imgname = images_save(
            images=images['frame'], save_path=model_data["save_path"])
        print('saved:', images['path'], imgname)
        if model_data['save_path_original']:
            images_save(images=frame_original,
                        save_path=model_data["save_path_original"])
        else:
            pass
    else:
        # Optionally save images with no detections
        if model_data["test_path"]:
            imgname = images_save(
                images=images['frame'], save_path=model_data["test_path"])
            # print('no:', images['path'], imgname)
        else:
            pass

    # Debug display
    # if images['path'] == 'rtsp://admin:@192.168.10.11':
    #     cv2.namedWindow('11', cv2.WINDOW_NORMAL)
    #     cv2.imshow('11', images['frame'])
    #     cv2.waitKey(1)
    #     cv2.destroyAllWindows()
    # t2 = time.time()
    return images
def images_save(images, save_path):
    # Use the current timestamp as the image file name
    data_now = datetime.today()
    images_name = str(data_now.year) + str(data_now.month) + str(data_now.day) + str(data_now.hour) + \
        str(data_now.minute) + str(data_now.second) + \
        str(data_now.microsecond) + '.jpg'
    img_save_path = save_path + '/' + str(
        data_now.year) + '/' + str(data_now.month) + '_' + str(data_now.day) + '/'
    if not os.path.exists(img_save_path):
        os.makedirs(img_save_path)
    full_name = img_save_path + images_name
    cv2.imwrite(full_name, images)
    return full_name
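A minimal sketch of driving one detection task directly, without the multiprocessing pool in main.py; the source and model paths are taken from the commented-out detect_6 entry in config_det.yaml:

# Process every image in a folder with the phone model configuration
data_load(["/home/yaxin/xbank/xbank_poc_test/images_test/images_del",
           "/home/yaxin/xbank/xbank_poc_test/config_phone.yaml"])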

@ -0,0 +1,44 @@
import fastdeploy as fd


def build_option(device, backend, cache_file):
    """
    Create the Runtime options.
    device: "cpu" or "gpu"
    backend: "trt" for the TensorRT engine
    cache_file: path of the serialized TensorRT cache
    """
    option = fd.RuntimeOption()
    option.use_cpu()
    option.trt_option.serialize_file = cache_file

    if device.lower() == "gpu":
        option.use_gpu(0)

    if backend.lower() == "trt":
        assert device.lower() == "gpu", "TensorRT backend requires inference on GPU."
        option.use_trt_backend()

    return option


def Load_model(model_file, device, cache_file):
    """
    Load the model with a TensorRT engine.
    model_file: model weights in ".onnx" format
    device: device selection
    cache_file: TensorRT cache file
    """
    model_name = model_file.split("/")[-1].split(".")[0]
    runtime_option = build_option(
        device=device, backend="trt", cache_file=cache_file)

    model_inference = []
    if model_name == "yolov5":
        model = fd.vision.detection.YOLOv5(
            model_file, runtime_option=runtime_option)
        model_inference = model
    elif model_name == "yolov8":
        model = fd.vision.detection.YOLOv8(
            model_file, runtime_option=runtime_option)
        model_inference = model

    # print(model_inference)
    return model_inference
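A usage sketch, reusing the model and cache paths from config_phone.yaml; predict, boxes, scores and label_ids are the FastDeploy detection attributes already relied on in det_img, and frame is assumed to be an image loaded elsewhere:

model = Load_model(model_file="/home/yaxin/xbank/xbank_poc_test/model_file/yolov5.onnx",
                   device="gpu",
                   cache_file="/home/yaxin/xbank/xbank_poc_test/tensort_cache/yolov5.trt")
result = model.predict(frame)   # frame: a BGR image loaded with cv2.imread
print(result.boxes, result.scores, result.label_ids)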

@ -0,0 +1,29 @@
1. The main program is run from main.py.
2. In main.py, args points to the detection configuration file config_det.yaml; use an absolute path so the file is always found.
3. Configuration in config_det.yaml:
   detect_0: # arbitrary name, must not be repeated
     source : rtsp://admin:@192.168.10.18 # source: an RTSP stream or an image directory
     model: /home/yaxin/xbank/xbank_poc_test/config_phone.yaml # the model configuration file used for this detection task
4. Model configuration file config_<detection_name>.yaml:
   # load model file
   model: /home/yaxin/xbank/xbank_poc_test/model_file/yolov5.onnx # absolute path of the model
   model_cache: /home/yaxin/xbank/xbank_poc_test/tensort_cache/yolov5.trt # TensorRT cache, no need to change
   # label and bbox message set
   model_parameter: # parameters used during detection
     device : gpu # run detection on the GPU
     label_names: ["Keypad","hands","keyboard", "mouse","phone"] # model labels # label names used when the model was trained, in the same order as in the training dataset; do not change
     compara_label_names: ["hands","phone"] # classes to pick out of the detected categories
     compara_relevancy: 'overlap' # 'in_bbox' # whether to analyse the relationship between the selected boxes; set to False if not needed, normally left unchanged
     relevancy_para : 0 # parameter for the relationship check
     confidence : 0.2 # confidence threshold used to filter detections
   save_path : /home/yaxin/xbank/xbank_poc_test/save_path/hands # where detected images are saved, normally unchanged
   # save_path_original : /home/yaxin/xbank/xbank_poc_test/save_path_original/hands # where the original images of detections are saved; set to False to disable
   # test_path : /home/yaxin/xbank/xbank_poc_test/test_save_path/hands # where images without detections are saved; set to False to disable
5. Using the local environment:
   conda activate fastdeploy
   After editing the configuration files, run: python main.py