diff --git a/Bank_second_part/dataset_deal/video_list_split.py b/Bank_second_part/dataset_deal/video_list_split.py
new file mode 100644
index 0000000..efb41a3
--- /dev/null
+++ b/Bank_second_part/dataset_deal/video_list_split.py
@@ -0,0 +1,44 @@
+import os
+
+
+def save_file_paths_with_labels(folder_path, train_ratio, valid_ratio, test_ratio, label):
+    # Collect the paths of every file under the folder (recursively)
+    file_paths = []
+    for root, dirs, files in os.walk(folder_path):
+        for file in files:
+            file_path = os.path.join(root, file)
+            file_paths.append(file_path)
+
+    # Number of files that go into each split
+    total_files = len(file_paths)
+    train_files = int(total_files * train_ratio)
+    valid_files = int(total_files * valid_ratio)
+    test_files = int(total_files * test_ratio)
+
+    # Slice the path list according to the ratios
+    train_paths = file_paths[:train_files]
+    valid_paths = file_paths[train_files:train_files + valid_files]
+    test_paths = file_paths[train_files + valid_files:train_files + valid_files + test_files]
+
+    # Append the class label to every path ("<path> <label>")
+    train_paths_labeled = [path + ' ' + str(label) for path in train_paths]
+    valid_paths_labeled = [path + ' ' + str(label) for path in valid_paths]
+    test_paths_labeled = [path + ' ' + str(label) for path in test_paths]
+
+    # Write the labelled paths to txt files
+    save_to_txt(train_paths_labeled, 'train.txt')
+    save_to_txt(valid_paths_labeled, 'valid.txt')
+    save_to_txt(test_paths_labeled, 'test.txt')
+
+
+def save_to_txt(file_paths, filename):
+    with open(filename, 'w') as file:
+        for file_path in file_paths:
+            file.write(file_path + '\n')
+
+
+# Example usage
+folder_path = 'dataset/video_seg_re_hand'  # replace with the actual folder path
+train_ratio = 0.8   # training set ratio
+valid_ratio = 0.2   # validation set ratio
+test_ratio = 0.0    # test set ratio
+label = '1'         # replace with the actual label
+
+save_file_paths_with_labels(folder_path, train_ratio, valid_ratio, test_ratio, label)
\ No newline at end of file
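Note on video_list_split.py: the splits are taken by slicing the list in os.walk/directory order, so train and valid come from contiguous runs of files. If a randomized split is preferred, a minimal sketch is below; shuffled_split and the seed value are illustrative additions, not part of this patch.

    import random

    def shuffled_split(file_paths, train_ratio, valid_ratio, seed=42):
        # Shuffle a copy so the splits are not biased by directory order
        paths = list(file_paths)
        random.Random(seed).shuffle(paths)
        n_train = int(len(paths) * train_ratio)
        n_valid = int(len(paths) * valid_ratio)
        return paths[:n_train], paths[n_train:n_train + n_valid], paths[n_train + n_valid:]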
diff --git a/Bank_second_part/dataset_deal/video_model.py b/Bank_second_part/dataset_deal/video_model.py
new file mode 100644
index 0000000..efa8da5
--- /dev/null
+++ b/Bank_second_part/dataset_deal/video_model.py
@@ -0,0 +1,179 @@
+import cv2
+import os
+from ultralytics import YOLO
+
+from yolov8_det import analysis_yolov8
+
+# Local weights path -- adjust to the actual location
+model_yolo = YOLO("E:/Bank_files/Bank_02/model_files/all_labels.pt")
+
+
+# Collect all video files under a folder
+def get_video_list(path):
+    video_ext = [".mp4", ".avi", ".MP4"]
+    video_names = []
+    for maindir, subdir, file_name_list in os.walk(path):
+        for filename in file_name_list:
+            apath = os.path.join(maindir, filename)
+            ext = os.path.splitext(apath)[1]
+            if ext in video_ext:
+                video_names.append(apath)
+    return video_names
+
+
+# Cut out the required frame range and crop every frame to the detected bbox
+def save_seg_video(video_name, frameToStart, frametoStop, videoWriter, bbox):
+
+    cap = cv2.VideoCapture(video_name)
+    count = 0
+
+    while True:
+
+        success, frame = cap.read()
+
+        if success:
+
+            count += 1
+            if count <= frametoStop and count > frameToStart:  # keep only frames inside the segment
+                print('correct= ', count)
+
+                # Crop the frame to the bbox (rows = y, cols = x)
+                frame_target = frame[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
+
+                videoWriter.write(frame_target)
+
+        if not success or count >= frametoStop:
+            break
+
+    cap.release()
+    print('end')
+
+
+# Per-video processing
+def get_seg_video(video_file, video_save_path, dertTime):
+
+    # Check / create the output path
+    print("frame image save path:{}".format(video_save_path))
+    os.makedirs(video_save_path, exist_ok=True)
+
+    if os.path.isdir(video_file):
+        files = get_video_list(video_file)
+    else:
+        files = [video_file]
+
+    files.sort()
+    video_num = len(files)
+
+    for num in range(video_num):
+
+        # Video name
+        video_name = files[num]
+        print(video_name)
+        video_basename = os.path.basename(video_name).split('.')[0]
+
+        cap = cv2.VideoCapture(video_name)
+        # Frame rate
+        fps = cap.get(cv2.CAP_PROP_FPS)
+
+        success, frame = cap.read()
+
+        count_fps = 0
+        write_fps = 0
+        # Number of frames per segment
+        dertF = dertTime * fps
+
+        while success:
+
+            count_fps += 1
+
+            # Run the detector on the current frame
+            results_img = analysis_yolov8(frame=frame,
+                                          model_coco=model_yolo,
+                                          confidence=0.1)
+
+            num_det = len(results_img)
+
+            # If exactly one person is detected, start a segment here
+            if num_det == 1:
+
+                # Start / stop frame of the segment
+                write_fps = count_fps
+                stop_fps = write_fps + dertF
+
+                # Detection result: {label: [x1, y1, x2, y2]}
+                bbox = list(results_img[0].values())[0]
+                # Writer size must match the cropped frame exactly
+                w = int(bbox[2]) - int(bbox[0])
+                h = int(bbox[3]) - int(bbox[1])
+                size = (w, h)
+
+                # Save the cropped segment
+                video_name_save = video_save_path + '/' + video_basename + '_' + str(write_fps) + '.avi'
+                videoWriter = cv2.VideoWriter(video_name_save, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), fps, size)
+
+                save_seg_video(video_name, write_fps, stop_fps, videoWriter, bbox)
+                videoWriter.release()
+
+                break
+
+            # Otherwise read the next frame and keep searching
+            success, frame = cap.read()
+
+        cap.release()
+
+
+if __name__ == '__main__':
+
+    # Length of each saved segment (seconds)
+    dertTime = 5
+
+    video = "E:/Bank_files/Bank_02/dataset/vlc_0711/0711-1.mp4"
+    video_save = 'videos_codes_2'
+
+    get_seg_video(video_file=video, video_save_path=video_save, dertTime=dertTime)
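For reference, get_seg_video assumes analysis_yolov8 returns one dict per detection, keyed by label name. A minimal sketch of how a single detection is unpacked; the sample coordinates are made up.

    # Shape assumed by get_seg_video: a list of {label: [x1, y1, x2, y2]} dicts
    results_img = [{'person': [412.0, 180.5, 663.2, 540.0]}]  # made-up sample detection

    label = list(results_img[0].keys())[0]    # 'person'
    bbox = list(results_img[0].values())[0]   # [412.0, 180.5, 663.2, 540.0]
    w = int(bbox[2]) - int(bbox[0])           # crop width passed to the VideoWriter
    h = int(bbox[3]) - int(bbox[1])           # crop height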
diff --git a/Bank_second_part/dataset_deal/video_resize_xy.py b/Bank_second_part/dataset_deal/video_resize_xy.py
new file mode 100644
index 0000000..c4ac974
--- /dev/null
+++ b/Bank_second_part/dataset_deal/video_resize_xy.py
@@ -0,0 +1,92 @@
+import os.path
+import cv2
+
+
+def get_video_list(path):
+    video_ext = [".mp4", ".avi", ".MP4"]
+    video_names = []
+    for maindir, subdir, file_name_list in os.walk(path):
+        for filename in file_name_list:
+            apath = os.path.join(maindir, filename)
+            ext = os.path.splitext(apath)[1]
+            if ext in video_ext:
+                video_names.append(apath)
+    return video_names
+
+
+def split_video(input_video, output_video, size_file):
+
+    print("frame image save path:{}".format(output_video))
+    os.makedirs(output_video, exist_ok=True)
+
+    if os.path.isdir(input_video):
+        files = get_video_list(input_video)
+    else:
+        files = [input_video]
+
+    files.sort()
+    video_num = len(files)
+
+    print('files', files)
+
+    for i in range(video_num):
+
+        video_name = files[i]
+        video_basename = os.path.basename(video_name)
+
+        print('video_name:', video_name)
+
+        video_capture = cv2.VideoCapture(video_name)
+
+        # Source frame rate
+        fps = video_capture.get(cv2.CAP_PROP_FPS)
+
+        # Crop size: every written frame must have exactly this width/height,
+        # otherwise the output video cannot be played
+        split_width = int(size_file[2] - size_file[0])
+        split_height = int(size_file[3] - size_file[1])
+        size = (split_width, split_height)
+
+        print('size_file:', size_file)
+
+        fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
+        # Create the video writer
+        output_path = output_video + '/' + video_basename
+        video_write = cv2.VideoWriter(output_path, fourcc, fps, size)
+
+        print('output_path:', output_path)
+
+        print('Start!!!')
+        # Read frames; stop when the video ends or Esc is pressed
+        success, frame_src = video_capture.read()  # frame_src shape, e.g. (960, 2560, 3) = (height, width, channel)
+        while success and not cv2.waitKey(1) == 27:
+
+            # Crop to [ymin:ymax, xmin:xmax]; the result must match the size passed to the writer
+            frame_target = frame_src[size_file[1]:size_file[3], size_file[0]:size_file[2]]
+            # Write the cropped frame
+            video_write.write(frame_target)
+            # Read the next frame
+            success, frame_src = video_capture.read()
+
+        print("Finished!!!")
+        video_capture.release()
+        video_write.release()
+
+
+if __name__ == '__main__':
+
+    input_file = 'E:/Bank_files/Bank_02/dataset/video_seg_2s'
+    output_file = 'E:/Bank_files/Bank_02/dataset/dataset_01_set/video_seg_head'
+
+    # xmin, ymin, xmax, ymax
+    bbox = [500, 350, 900, 700]
+    split_video(input_video=input_file, output_video=output_file, size_file=bbox)
diff --git a/Bank_second_part/dataset_deal/video_seg.py b/Bank_second_part/dataset_deal/video_seg.py
new file mode 100644
index 0000000..99be502
--- /dev/null
+++ b/Bank_second_part/dataset_deal/video_seg.py
@@ -0,0 +1,134 @@
+import cv2
+import os
+
+
+def get_video_list(path):
+    video_ext = [".mp4", ".avi", ".MP4"]
+    video_names = []
+    for maindir, subdir, file_name_list in os.walk(path):
+        for filename in file_name_list:
+            apath = os.path.join(maindir, filename)
+            ext = os.path.splitext(apath)[1]
+            if ext in video_ext:
+                video_names.append(apath)
+    return video_names
+
+
+def save_seg_video(video_name, frameToStart, frametoStop, videoWriter):
+
+    cap = cv2.VideoCapture(video_name)
+    count = 0
+
+    while True:
+
+        success, frame = cap.read()
+
+        if success:
+
+            count += 1
+            if count <= frametoStop and count > frameToStart:  # keep only frames inside the segment
+                print('correct= ', count)
+                videoWriter.write(frame)
+
+        if not success or count >= frametoStop:
+            break
+
+    cap.release()
+    print('end')
+
+
+def get_seg_video(video_file, video_save_path, dertTime):
+
+    # Check / create the output path
+    print("frame image save path:{}".format(video_save_path))
+    os.makedirs(video_save_path, exist_ok=True)
+
+    if os.path.isdir(video_file):
+        files = get_video_list(video_file)
+    else:
+        files = [video_file]
+
+    files.sort()
+    video_num = len(files)
+
+    for num in range(video_num):
+
+        # Video name
+        video_name = files[num]
+        video_basename = os.path.basename(video_name).split('.')[0]
+
+        cap = cv2.VideoCapture(video_name)
+
+        # Frame rate
+        fps = int(cap.get(cv2.CAP_PROP_FPS))
+        print(fps)
+
+        # Original frame size
+        size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)), int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
+
+        # Total number of frames
+        total_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+
+        # Number of frames per segment
+        dertF = dertTime * fps
+        print('dertF:', dertF)
+
+        # Number of full segments
+        n = total_frame / dertF
+        print(int(n))
+
+        cap.release()
+
+        for i in range(int(n) + 1):
+
+            video_name_save = video_save_path + '/' + video_basename + '_' + str(i) + '.avi'
+            videoWriter = cv2.VideoWriter(video_name_save, cv2.VideoWriter_fourcc('X', 'V', 'I', 'D'), fps, size)
+
+            start_time = i * dertF
+
+            # The last (partial) segment runs to the end of the video
+            if i == int(n):
+                stop_time = total_frame
+            else:
+                stop_time = start_time + dertF
+
+            print(video_name)
+
+            save_seg_video(video_name=video_name, frameToStart=start_time, frametoStop=stop_time, videoWriter=videoWriter)
+            videoWriter.release()
+
+
+if __name__ == '__main__':
+
+    # Length of each segment (seconds)
+    dertTime = 5
+
+    video = "dataset/video"
+    video_save = 'dataset/video_seg_5s'
+
+    get_seg_video(video_file=video, video_save_path=video_save, dertTime=dertTime)
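Both save_seg_video variants above reopen the source video and decode it from frame 0 for every segment. If that becomes slow, OpenCV can seek to the start frame first. A minimal sketch is below; save_seg_video_seek is not part of this patch, and frame-accurate seeking depends on the codec.

    import cv2

    def save_seg_video_seek(video_name, frameToStart, frametoStop, videoWriter):
        # Same output as save_seg_video, but jump straight to the start frame
        cap = cv2.VideoCapture(video_name)
        cap.set(cv2.CAP_PROP_POS_FRAMES, frameToStart)
        count = frameToStart
        while count < frametoStop:
            success, frame = cap.read()
            if not success:
                break
            count += 1
            videoWriter.write(frame)
        cap.release()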
diff --git a/Bank_second_part/dataset_deal/yolov8_det.py b/Bank_second_part/dataset_deal/yolov8_det.py
new file mode 100644
index 0000000..25add17
--- /dev/null
+++ b/Bank_second_part/dataset_deal/yolov8_det.py
@@ -0,0 +1,47 @@
+def analysis_yolov8(frame, model_coco, confidence):
+
+    # Step 1: run inference with the COCO-style model
+    results_coco = model_coco(frame)
+
+    re_list = []
+
+    if results_coco:
+
+        for r in results_coco:
+
+            boxes = r.boxes
+
+            for box in boxes:
+
+                b = box.xyxy[0]  # box coordinates in (x1, y1, x2, y2) format
+                c = box.cls
+
+                # Keep the label name and coordinates as the return value
+                blist = b.tolist()
+                labels_name = model_coco.names[int(c)]
+
+                conf_score = round(float(box.conf), 2)
+
+                # Drop detections below the given confidence threshold
+                if conf_score < confidence:
+                    continue
+
+                # One detection: {label: [x1, y1, x2, y2]},
+                # matching how get_seg_video unpacks the result
+                re_dict = {labels_name: blist}
+
+                re_list.append(re_dict)
+
+    return re_list
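For quick manual testing, analysis_yolov8 can be exercised on a single image. A short sketch follows; the weights and image paths are placeholders, not files from this patch.

    import cv2
    from ultralytics import YOLO
    from yolov8_det import analysis_yolov8

    # Placeholder paths -- substitute your own weights and test image
    model = YOLO("model_files/all_labels.pt")
    frame = cv2.imread("test_frame.jpg")

    detections = analysis_yolov8(frame=frame, model_coco=model, confidence=0.5)
    # e.g. [{'person': [412.0, 180.5, 663.2, 540.0]}, ...]
    for det in detections:
        for label, (x1, y1, x2, y2) in det.items():
            print(label, int(x1), int(y1), int(x2), int(y2))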