From c1882253c967eb2217c9a20787019e821c4af6ae Mon Sep 17 00:00:00 2001 From: chunquansang <916920620@qq.com> Date: Mon, 8 Jan 2024 19:54:39 +0800 Subject: [PATCH] =?UTF-8?q?=E9=A6=96=E6=AC=A1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- CMakeLists.txt | 21 ++ multi_thread.cc | 903 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 924 insertions(+) create mode 100644 CMakeLists.txt create mode 100644 multi_thread.cc diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..8d39d08 --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,21 @@ +PROJECT(multi_thread_demo C CXX) +CMAKE_MINIMUM_REQUIRED (VERSION 3.10) + +# 指定下载解压后的fastdeploy库路径 +option(FASTDEPLOY_INSTALL_DIR "Path of downloaded fastdeploy sdk.") + +set(CMAKE_CXX_STANDARD 20) +SET(CMAKE_CXX_COMPILER "/usr/bin/g++") +set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS}" "-std=c++20" ) + +include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake) + +# 添加FastDeploy依赖头文件 +include_directories(${FASTDEPLOY_INCS}) + +add_executable(multi_thread_demo ${PROJECT_SOURCE_DIR}/multi_thread.cc) +# 添加FastDeploy库依赖 +target_link_libraries(multi_thread_demo ${FASTDEPLOY_LIBS} pthread) + +# find_package(pybind11 REQUIRED) +# pybind11_add_module(multi_thread_demo multi_thread.cc) \ No newline at end of file diff --git a/multi_thread.cc b/multi_thread.cc new file mode 100644 index 0000000..f84397b --- /dev/null +++ b/multi_thread.cc @@ -0,0 +1,903 @@ +// // // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. +// // // +// // // Licensed under the Apache License, Version 2.0 (the "License"); +// // // you may not use this file except in compliance with the License. +// // // You may obtain a copy of the License at +// // // +// // // http://www.apache.org/licenses/LICENSE-2.0 +// // // +// // // Unless required by applicable law or agreed to in writing, software +// // // distributed under the License is distributed on an "AS IS" BASIS, +// // // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// // // See the License for the specific language governing permissions and +// // // limitations under the License. + +// // #include +// // #include "fastdeploy/vision.h" +// // #include +// // #include +// // #ifdef WIN32 +// // const char sep = '\\'; +// // #else +// // const char sep = '/'; +// // #endif + +// // // void Predict(fastdeploy::vision::classification::PaddleClasModel *model, int thread_id, const std::vector& images) { +// // // for (auto const &image_file : images) { +// // // std::cout << image_file << std::endl; +// // // auto im = cv::imread(image_file); + +// // // fastdeploy::vision::ClassifyResult res; +// // // if (!model->Predict(im, &res)) { +// // // std::cerr << "Failed to predict." 
<< std::endl; +// // // return; +// // // } + +// // // // print res +// // // std::cout << "Thread Id: " << thread_id << std::endl; +// // // std::cout << res.Str() << std::endl; +// // // } +// // // } + + +// // void GetImageList(std::vector>* image_list, const std::string& image_file_path, int thread_num){ +// // std::vector images; +// // cv::glob(image_file_path, images, false); +// // // number of image files in images folder +// // size_t count = images.size(); +// // size_t num = count / thread_num; +// // for (int i = 0; i < thread_num; i++) { +// // std::vector temp_list; +// // if (i == thread_num - 1) { +// // for (size_t j = i*num; j < count; j++){ +// // temp_list.push_back(images[j]); +// // } +// // } else { +// // for (size_t j = 0; j < num; j++){ +// // temp_list.push_back(images[i * num + j]); +// // } +// // } +// // (*image_list)[i] = temp_list; +// // } +// // } + + +// // // SCRFD +// // void Predict(fastdeploy::vision::facedet::SCRFD *model, int thread_id, const std::vector& images, cv::Mat im) { +// // // auto im = cv::imread("ILSVRC2012_val_00000010.jpeg"); + +// // // fastdeploy::vision::DetectionResult res; +// // // if (!model->Predict(im, &res)) { +// // // std::cerr << "Failed to predict." << std::endl; +// // // return; +// // // } +// // // std::cout << res.Str() << std::endl; +// // auto start_time = std::chrono::high_resolution_clock::now(); + +// // // 获取时间戳 +// // auto timestamp = std::chrono::duration_cast(start_time.time_since_epoch()).count(); + +// // // 打印时间戳 +// // std::cout << "程序开始时间戳: " << timestamp << " 毫秒" << std::endl; +// // for (auto const &image_file : images) { +// // std::cout << image_file << std::endl; + +// // fastdeploy::vision::FaceDetectionResult res; +// // auto start_time = std::chrono::high_resolution_clock::now(); +// // if (!model->Predict(&im, &res)) { +// // std::cerr << "Failed to predict." << std::endl; +// // return; +// // } +// // auto end_time = std::chrono::high_resolution_clock::now(); +// // auto duration = std::chrono::duration_cast(end_time - start_time); +// // std::cout << "花费: " << duration.count() << " 毫秒" << std::endl; +// // // print res +// // std::cout << "Thread Id: " << thread_id << std::endl; +// // std::cout << res.Str() << std::endl; +// // } +// // } + + + +// // //SCRFD +// // void GpuInfer(const std::string& model_dir, const std::string& image_file_path, int thread_num) { +// // auto option1 = fastdeploy::RuntimeOption(); +// // option1.UseGpu(); +// // option1.UseTrtBackend(); +// // option1.SetTrtInputShape("images", {1, 3, 640, 640}); +// // option1.trt_option.serialize_file = "./tmp3.trt"; +// // option1.trt_option.enable_fp16 = true; +// // auto model1 = fastdeploy::vision::facedet::SCRFD("scrfd_500m_bnkps_shape640x640.onnx", "", option1); +// // if (!model1.Initialized()) { +// // std::cerr << "Failed to initialize." << std::endl; +// // return; +// // } + +// // auto im = cv::imread("test_face.jpg"); + +// // // fastdeploy::vision::DetectionResult res; +// // // if (!model1.Predict(im, &res)) { +// // // std::cerr << "Failed to predict." 
<< std::endl; +// // // return; +// // // } +// // // std::cout << res.Str() << std::endl; + +// // std::vector models; +// // for (int i = 0; i < thread_num; ++i) { +// // models.emplace_back(std::move(model1.Clone())); +// // } + +// // std::vector> image_list(thread_num); +// // GetImageList(&image_list, image_file_path, thread_num); + +// // std::vector threads; +// // for (int i = 0; i < thread_num; ++i) { +// // threads.emplace_back(Predict, models[i].get(), i, image_list[i], im); +// // } + +// // for (int i = 0; i < thread_num; ++i) { +// // threads[i].join(); +// // } +// // } + +// // // yolov8 +// // void PredictYolo(fastdeploy::vision::detection::YOLOv8 *model, int thread_id, const std::vector& images, cv::Mat im) { +// // // auto im = cv::imread("ILSVRC2012_val_00000010.jpeg"); + +// // // fastdeploy::vision::DetectionResult res; +// // // if (!model->Predict(im, &res)) { +// // // std::cerr << "Failed to predict." << std::endl; +// // // return; +// // // } +// // // std::cout << res.Str() << std::endl; +// // auto start_time = std::chrono::high_resolution_clock::now(); + +// // // 获取时间戳 +// // auto timestamp = std::chrono::duration_cast(start_time.time_since_epoch()).count(); + +// // // 打印时间戳 +// // std::cout << "程序开始时间戳: " << timestamp << " 毫秒" << std::endl; +// // for (auto const &image_file : images) { +// // std::cout << image_file << std::endl; +// // // auto im = cv::imread(image_file); + +// // fastdeploy::vision::DetectionResult res; +// // auto start_time = std::chrono::high_resolution_clock::now(); +// // if (!model->Predict(im, &res)) { +// // std::cerr << "Failed to predict." << std::endl; +// // return; +// // } +// // auto end_time = std::chrono::high_resolution_clock::now(); +// // auto duration = std::chrono::duration_cast(end_time - start_time); +// // std::cout << "&&&&& " << res.label_ids[0] << std::endl; +// // std::cout << "花费: " << duration.count() << " 毫秒" << std::endl; +// // // print res +// // std::cout << "Thread Id: " << thread_id << std::endl; +// // std::cout << res.Str() << std::endl; +// // auto vis_im = fastdeploy::vision::VisDetection(im, res); +// // cv::imwrite("vis_result.jpg", vis_im); +// // std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +// // } +// // } + + +// // // void PredictYolo1(fastdeploy::vision::detection::YOLOv8 *model, int thread_id, const std::vector& images, cv::Mat im) { +// // // // auto im = cv::imread("ILSVRC2012_val_00000010.jpeg"); + +// // // // fastdeploy::vision::DetectionResult res; +// // // // if (!model->Predict(im, &res)) { +// // // // std::cerr << "Failed to predict." << std::endl; +// // // // return; +// // // // } +// // // // std::cout << res.Str() << std::endl; +// // // auto start_time = std::chrono::high_resolution_clock::now(); + +// // // // 获取时间戳 +// // // auto timestamp = std::chrono::duration_cast(start_time.time_since_epoch()).count(); + +// // // // 打印时间戳 +// // // std::cout << "程序开始时间戳: " << timestamp << " 毫秒" << std::endl; + +// // // fastdeploy::vision::DetectionResult res; +// // // auto start_time = std::chrono::high_resolution_clock::now(); +// // // if (!model->Predict(im, &res)) { +// // // std::cerr << "Failed to predict." 
<< std::endl; +// // // return; +// // // } +// // // auto end_time = std::chrono::high_resolution_clock::now(); +// // // auto duration = std::chrono::duration_cast(end_time - start_time); +// // // std::cout << "&&&&& " << res.label_ids[0] << std::endl; +// // // std::cout << "花费: " << duration.count() << " 毫秒" << std::endl; +// // // // print res +// // // std::cout << "Thread Id: " << thread_id << std::endl; +// // // std::cout << res.Str() << std::endl; +// // // auto vis_im = fastdeploy::vision::VisDetection(im, res); +// // // cv::imwrite("vis_result.jpg", vis_im); +// // // std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl; +// // // } + + + +// // //yolov8 +// // void GpuInferYolo(const std::string& model_dir, const std::string& image_file_path, int thread_num) { +// // auto option1 = fastdeploy::RuntimeOption(); +// // option1.UseGpu(); +// // option1.UseTrtBackend(); +// // option1.SetTrtInputShape("images", {1, 3, 640, 640}); +// // option1.trt_option.serialize_file = "./tmp2.trt"; +// // option1.trt_option.enable_fp16 = true; +// // auto model1 = fastdeploy::vision::detection::YOLOv8("yolov8s.onnx", "", option1); +// // if (!model1.Initialized()) { +// // std::cerr << "Failed to initialize." << std::endl; +// // return; +// // } + +// // auto im = cv::imread("frame_6.png"); + +// // // fastdeploy::vision::DetectionResult res; +// // // if (!model1.Predict(im, &res)) { +// // // std::cerr << "Failed to predict." << std::endl; +// // // return; +// // // } +// // // std::cout << res.Str() << std::endl; + +// // std::vector models; +// // for (int i = 0; i < thread_num; ++i) { +// // models.emplace_back(std::move(model1.Clone())); +// // } + +// // std::vector> image_list(thread_num); +// // GetImageList(&image_list, image_file_path, thread_num); + +// // std::vector threads; +// // for (int i = 0; i < thread_num; ++i) { +// // threads.emplace_back(PredictYolo, models[i].get(), i, image_list[i], im); +// // } + +// // for (int i = 0; i < thread_num; ++i) { +// // threads[i].join(); +// // } +// // } + +// // // faceid +// // void PredictFaceId(fastdeploy::vision::faceid::PartialFC *model, int thread_id, const std::vector& images, cv::Mat im) { +// // // auto im = cv::imread("ILSVRC2012_val_00000010.jpeg"); + +// // // fastdeploy::vision::DetectionResult res; +// // // if (!model->Predict(im, &res)) { +// // // std::cerr << "Failed to predict." << std::endl; +// // // return; +// // // } +// // // std::cout << res.Str() << std::endl; +// // auto start_time = std::chrono::high_resolution_clock::now(); + +// // // 获取时间戳 +// // auto timestamp = std::chrono::duration_cast(start_time.time_since_epoch()).count(); + +// // // 打印时间戳 +// // std::cout << "程序开始时间戳: " << timestamp << " 毫秒" << std::endl; +// // for (auto const &image_file : images) { +// // std::cout << image_file << std::endl; +// // auto im = cv::imread(image_file); + +// // // auto face0 = cv::imread("face_0.jpg"); +// // // auto face1 = cv::imread("face_1.jpg"); +// // // auto face2 = cv::imread("face_2.jpg"); + +// // // fastdeploy::vision::FaceRecognitionResult res0; +// // // fastdeploy::vision::FaceRecognitionResult res1; +// // // fastdeploy::vision::FaceRecognitionResult res2; + + +// // // if ((!model->Predict(face0, &res0)) || (!model->Predict(face1, &res1)) || +// // // (!model->Predict(face2, &res2))) { +// // // std::cerr << "Prediction Failed." << std::endl; +// // // } +// // // std::cout << "Prediction Done!" 
<< std::endl; + +// // // std::cout << "--- [Face 0]:" << res0.Str(); +// // // std::cout << "--- [Face 1]:" << res1.Str(); +// // // std::cout << "--- [Face 2]:" << res2.Str(); +// // // float cosine01 = fastdeploy::vision::utils::CosineSimilarity( +// // // res0.embedding, res1.embedding, +// // // model->GetPostprocessor().GetL2Normalize()); +// // // float cosine02 = fastdeploy::vision::utils::CosineSimilarity( +// // // res0.embedding, res2.embedding, +// // // model->GetPostprocessor().GetL2Normalize()); +// // // std::cout << "Detect Done! Cosine 01: " << cosine01 +// // // << ", Cosine 02:" << cosine02 << std::endl; + + +// // fastdeploy::vision::FaceRecognitionResult res; +// // auto start_time = std::chrono::high_resolution_clock::now(); +// // if (!model->Predict(im, &res)) { +// // std::cerr << "Failed to predict." << std::endl; +// // return; +// // } +// // auto end_time = std::chrono::high_resolution_clock::now(); +// // auto duration = std::chrono::duration_cast(end_time - start_time); +// // std::cout << "花费: " << duration.count() << " 毫秒" << std::endl; +// // // print res +// // std::cout << "Thread Id: " << thread_id << std::endl; +// // std::cout << res.Str() << std::endl; +// // } +// // } + +// // // faceid +// // void GpuInferPredictFaceId(const std::string& model_dir, const std::string& image_file_path, int thread_num) { +// // auto option1 = fastdeploy::RuntimeOption(); +// // option1.UseGpu(); +// // option1.UseTrtBackend(); +// // option1.SetTrtInputShape("data", {1, 3, 112, 112}); +// // option1.trt_option.serialize_file = "./tmp_faceid.trt"; +// // option1.trt_option.enable_fp16 = true; +// // auto model1 = fastdeploy::vision::faceid::PartialFC("partial_fc_glint360k_r100.onnx", "", option1); +// // if (!model1.Initialized()) { +// // std::cerr << "Failed to initialize." << std::endl; +// // return; +// // } + +// // auto im = cv::imread("ILSVRC2012_val_00000010.jpeg"); + +// // std::vector models; +// // for (int i = 0; i < thread_num; ++i) { +// // models.emplace_back(std::move(model1.Clone())); +// // } + +// // std::vector> image_list(thread_num); +// // GetImageList(&image_list, image_file_path, thread_num); + +// // std::vector threads; +// // for (int i = 0; i < thread_num; ++i) { +// // threads.emplace_back(PredictFaceId, models[i].get(), i, image_list[i], im); +// // } + +// // for (int i = 0; i < thread_num; ++i) { +// // threads[i].join(); +// // } +// // } + + +// // // void GpuInfer(const std::string& model_dir, const std::string& image_file_path, int thread_num) { +// // // auto model_file = model_dir + sep + "inference.pdmodel"; +// // // auto params_file = model_dir + sep + "inference.pdiparams"; +// // // auto config_file = model_dir + sep + "inference_cls.yaml"; +// // // auto option = fastdeploy::RuntimeOption(); +// // // option.UseGpu(); +// // // option.UsePaddleBackend(); +// // // auto model = fastdeploy::vision::classification::PaddleClasModel( +// // // model_file, params_file, config_file, option); +// // // if (!model.Initialized()) { +// // // std::cerr << "Failed to initialize." 
<< std::endl; +// // // return; +// // // } + +// // // std::vector models; +// // // for (int i = 0; i < thread_num; ++i) { +// // // models.emplace_back(std::move(model.Clone())); +// // // } + +// // // std::vector> image_list(thread_num); +// // // GetImageList(&image_list, image_file_path, thread_num); + +// // // std::vector threads; +// // // for (int i = 0; i < thread_num; ++i) { +// // // threads.emplace_back(Predict, models[i].get(), i, image_list[i]); +// // // } + +// // // for (int i = 0; i < thread_num; ++i) { +// // // threads[i].join(); +// // // } +// // // } + + +// // // void TrtInfer(const std::string& model_dir, const std::string& image_file_path, int thread_num) { +// // // auto model_file = model_dir + sep + "inference.pdmodel"; +// // // auto params_file = model_dir + sep + "inference.pdiparams"; +// // // auto config_file = model_dir + sep + "inference_cls.yaml"; +// // // auto option = fastdeploy::RuntimeOption(); +// // // option.UseGpu(); +// // // option.UseTrtBackend(); +// // // option.SetTrtInputShape("inputs", {1, 3, 224, 224}); +// // // option.trt_option.serialize_file = "./tmp3.trt"; +// // // option.trt_option.enable_fp16 = true; + +// // // // for model.Clone() must SetTrtInputShape first +// // // option.SetTrtInputShape("inputs", {1, 3, 224, 224}); +// // // auto model = fastdeploy::vision::classification::PaddleClasModel( +// // // model_file, params_file, config_file, option); +// // // if (!model.Initialized()) { +// // // std::cerr << "Failed to initialize." << std::endl; +// // // return; +// // // } + +// // // std::vector models; +// // // for (int i = 0; i < thread_num; ++i) { +// // // models.emplace_back(std::move(model.Clone())); +// // // } + +// // // std::vector> image_list(thread_num); +// // // GetImageList(&image_list, image_file_path, thread_num); + +// // // std::vector threads; +// // // for (int i = 0; i < thread_num; ++i) { +// // // threads.emplace_back(Predict, models[i].get(), i, image_list[i]); +// // // } + +// // // for (int i = 0; i < thread_num; ++i) { +// // // threads[i].join(); +// // // } +// // // } + + + +// // int main(int argc, char **argv) { +// // std::vector> videoFrames; +// // auto start_time = std::chrono::high_resolution_clock::now(); + + +// // cv::VideoCapture cap("test.mp4"); + +// // // 检查视频是否成功打开 +// // if (!cap.isOpened()) { +// // std::cerr << "Error opening video file!" << std::endl; +// // return -1; +// // } + +// // // 获取视频的帧数和帧率 +// // int frameCount = cap.get(cv::CAP_PROP_FRAME_COUNT); +// // double fps = cap.get(cv::CAP_PROP_FPS); + +// // std::cout << "Number of frames: " << frameCount << std::endl; +// // std::cout << "Frames per second: " << fps << std::endl; + + +// // for (int i = 0; i < frameCount; ++i) { +// // cv::Mat frame; +// // cap >> frame; // 读取一帧 +// // std::cout << i << std::endl; +// // // 检查是否成功读取帧 +// // if (frame.empty()) { +// // std::cerr << "Error reading frame " << i << " from video!" 
<< std::endl;
+// //             break;
+// //         }
+// //         std::vector<uchar> compressedData;
+// //         cv::imencode(".jpg", frame, compressedData, {cv::IMWRITE_JPEG_QUALITY, 90});
+// //         videoFrames.push_back(compressedData);
+// //     }
+// //     std::cout << "size: " << videoFrames.size() << std::endl;
+// //     // int flag = std::atoi(argv[3]);
+// //     // if (flag == 0) {
+// //     //     GpuInfer(argv[1], argv[2], std::atoi(argv[4]));
+// //     // } else if (flag == 1){
+// //     //     GpuInferYolo(argv[1], argv[2], std::atoi(argv[4]));
+// //     // } else if (flag == 2) {
+// //     //     GpuInferPredictFaceId(argv[1], argv[2], std::atoi(argv[4]));
+// //     // } else {
+// //     // }
+// //     // auto end_time = std::chrono::high_resolution_clock::now();
+// //     // auto timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(end_time.time_since_epoch()).count();
+// //     // // Print the end timestamp
+// //     // std::cout << "Program end timestamp: " << timestamp << " ms" << std::endl;
+// //     // // Compute the total run time
+// //     // auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
+// //     // // Print the total run time
+// //     // std::cout << "Program run time: " << duration.count() << " ms" << std::endl;
+// //     return 0;
+// // }
+
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <cmath>
+#include <condition_variable>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <tuple>
+#include <vector>
+#include <dirent.h>
+
+#include "fastdeploy/vision.h"
+
+// #include <pybind11/pybind11.h>
+
+// namespace py = pybind11;
+
+// #include <filesystem>
+
+// namespace fs = std::filesystem;
+
+// Shared state between the detection worker threads.
+std::atomic<bool> hasCarOrPerson(false);
+std::mutex resultMutex;
+std::condition_variable resultCondition;
+
+class VideoSegmenter {
+public:
+    static const std::string FFMPEG_PATH;
+    static const std::string SEGMENT_TIME;
+    static const std::string TMP_OUTPUT_PATH;
+
+    // Splits the input video into fixed-length segments with ffmpeg and
+    // returns the directory holding the segments.
+    static std::string segmentVideo(const std::string& videoPath) {
+        auto start = std::chrono::high_resolution_clock::now();
+
+        // fs::path outputPath = fs::path(TMP_OUTPUT_PATH) / "%04d.mp4";
+        // fs::remove_all(outputPath.parent_path());
+        // fs::create_directories(outputPath.parent_path());
+        std::string outputPath = TMP_OUTPUT_PATH + "/%04d.mp4";
+
+        // Remove any previous output directory.
+        std::string parentPath = outputPath.substr(0, outputPath.find_last_of("/"));
+        std::string removeCommand = "rm -rf " + parentPath;
+        std::system(removeCommand.c_str());
+
+        // Create the output directory.
+        std::string createDirectoriesCommand = "mkdir -p " + parentPath;
+        std::system(createDirectoriesCommand.c_str());
+
+        // std::vector<std::string> command = {
+        //     FFMPEG_PATH,
+        //     "-i",
+        //     videoPath,
+        //     "-c",
+        //     "copy",
+        //     "-reset_timestamps",
+        //     "1",
+        //     "-segment_time",
+        //     SEGMENT_TIME,
+        //     "-f",
+        //     "segment",
+        //     outputPath
+        // };
+
+        try {
+            // Note: the command below hard-codes "ffmpeg", "00:10" and the output
+            // directory instead of using FFMPEG_PATH / SEGMENT_TIME / outputPath.
+            std::string command = "ffmpeg -i " + videoPath + " -c copy -reset_timestamps 1 -segment_time 00:10 -f segment ../SegmentationFiles/%04d.mp4";
+            int result = std::system(command.c_str());
+            if (result != 0) {
+                throw std::runtime_error("Failed to segment video");
+            }
+        } catch (const std::exception& e) {
+            throw std::runtime_error("Failed to segment video");
+        }
+
+        auto end = std::chrono::high_resolution_clock::now();
+        std::cout << "Segmentation completed in " << std::chrono::duration_cast<std::chrono::seconds>(end - start).count() << "s" << std::endl;
+
+        return parentPath;
+    }
+};
+
+const std::string VideoSegmenter::FFMPEG_PATH = "/usr/local/ffmpeg-6.0.1-amd64-static/ffmpeg";
+const std::string VideoSegmenter::SEGMENT_TIME = "00:10";
+const std::string VideoSegmenter::TMP_OUTPUT_PATH = "../SegmentationFiles";
+
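+// Segment files are written as 0000.mp4, 0001.mp4, ... so the plain lexical
+// std::sort in FastReader::read() below restores their temporal order. For
+// reference, a rough sketch of the shell command segmentVideo() ends up running
+// (assuming ffmpeg is on PATH and ../SegmentationFiles is writable):
+//
+//   ffmpeg -i <videoPath> -c copy -reset_timestamps 1 \
+//          -segment_time 00:10 -f segment ../SegmentationFiles/%04d.mp4
+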
+class FastReader {
+public:
+    // Decodes one segment and returns its frames as JPEG-encoded byte buffers.
+    // When skipFrame > 0, only every (skipFrame + 1)-th frame is kept.
+    static std::vector<std::vector<unsigned char>> readVideoFrames(const std::string& videoPath, int skipFrame = 0) {
+        cv::VideoCapture cap(videoPath);
+        std::vector<std::vector<unsigned char>> frames;
+        int count = 0;
+        while (true) {
+            ++count;
+            cv::Mat frame;
+            cap >> frame;
+            if (frame.empty()) {
+                break;
+            }
+            // Guard the debug print against a modulo by zero when skipFrame == 0.
+            std::cout << skipFrame << "####" << count << "###" << (skipFrame > 0 ? count % (skipFrame + 1) : 0) << std::endl;
+            if ((skipFrame > 0) && ((count % (skipFrame + 1)) != 0)) {
+                continue;
+            }
+            std::vector<unsigned char> encodedFrame;
+            cv::imencode(".jpg", frame, encodedFrame);
+            frames.push_back(encodedFrame);
+            // Additional image processing (e.g. resizing or grayscale conversion) could be added here.
+        }
+
+        return frames;
+    }
+
+    // Segments the video, decodes the segments in parallel and returns all
+    // kept frames in their original order.
+    static std::vector<std::vector<unsigned char>> read(const std::string& videoPath, int skipFrame = 0) {
+        auto start = std::chrono::high_resolution_clock::now();
+
+        std::string folderPath = VideoSegmenter::segmentVideo(videoPath);
+
+        std::vector<std::string> videoFiles;
+        // for (const auto& entry : fs::directory_iterator(folderPath)) {
+        //     if (entry.path().extension() == ".mp4") {
+        //         videoFiles.push_back(entry.path().string());
+        //     }
+        // }
+
+        DIR *dir;
+        struct dirent *ent;
+
+        if ((dir = opendir(folderPath.c_str())) != nullptr) {
+            while ((ent = readdir(dir)) != nullptr) {
+                std::string filePath = folderPath + "/" + ent->d_name;
+                size_t pos = filePath.find_last_of('.');
+
+                if (pos != std::string::npos && filePath.substr(pos) == ".mp4") {
+                    videoFiles.push_back(filePath);
+                }
+            }
+            closedir(dir);
+        } else {
+            std::cerr << "Error opening directory" << std::endl;
+        }
+
+        std::sort(videoFiles.begin(), videoFiles.end());
+
+        std::vector<std::vector<std::vector<unsigned char>>> caches(videoFiles.size());
+
+        std::vector<std::thread> threads;
+
+        for (size_t idx = 0; idx < videoFiles.size(); ++idx) {
+            threads.emplace_back([&, idx]() {
+                caches[idx] = readVideoFrames(videoFiles[idx], skipFrame);
+            });
+        }
+
+        for (auto& thread : threads) {
+            thread.join();
+        }
+
+        // Concatenate the cached frames into a single vector, in file order.
+        std::vector<std::vector<unsigned char>> framesList;
+        for (const auto& cache : caches) {
+            std::cout << cache.size() << std::endl;
+            for (const auto& frame : cache) {
+                framesList.push_back(frame);
+            }
+        }
+
+        std::cout << framesList.size() << std::endl;
+        auto end = std::chrono::high_resolution_clock::now();
+        std::cout << "Reading " << videoFiles.size() << " video files took " << std::chrono::duration_cast<std::chrono::seconds>(end - start).count() << " seconds." << std::endl;
+
+        return framesList;
+    }
+};
+
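+// Minimal usage sketch for the reader above (file name is illustrative):
+// frames come back JPEG-encoded and are decoded on demand, e.g.
+//
+//   auto frames = FastReader::read("test.mp4", /*skipFrame=*/10);
+//   if (!frames.empty()) {
+//       cv::Mat first = cv::imdecode(frames.front(), cv::IMREAD_COLOR);
+//   }
+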
+// yolov8
+// Runs YOLOv8 over frames [start, end) of the shared frame array and raises
+// hasCarOrPerson as soon as a detection with label id 0 or 1 is found.
+void PredictYolo(int thread_id, fastdeploy::vision::detection::YOLOv8* model, std::vector<unsigned char>* videos, size_t start, size_t end) {
+    for (size_t i = start; i < end; ++i) {
+        // Stop early if another worker has already found a target.
+        if (hasCarOrPerson) {
+            return;
+        }
+        auto im = cv::imdecode((videos[i]), cv::IMREAD_UNCHANGED);
+        fastdeploy::vision::DetectionResult res;
+        auto start_time = std::chrono::high_resolution_clock::now();
+        if (!model->Predict(im, &res)) {
+            std::cerr << "Failed to predict." << std::endl;
+            return;
+        }
+
+        // Label ids 0 and 1 are treated here as the person / vehicle classes of interest.
+        bool found = false;
+        for (size_t j = 0; j < res.label_ids.size(); ++j) {
+            if (res.label_ids[j] == 0 || res.label_ids[j] == 1) {
+                found = true;
+                break;
+            }
+        }
+
+        if (found) {
+            {
+                std::lock_guard<std::mutex> lock(resultMutex);
+                hasCarOrPerson = true;
+            }
+            resultCondition.notify_all(); // Let the other threads finish early.
+            return;
+        }
+
+        auto end_time = std::chrono::high_resolution_clock::now();
+        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
+        std::cout << i << " took: " << duration.count() << " ms" << std::endl;
+    }
+    // auto im = cv::imread("ILSVRC2012_val_00000010.jpeg");
+
+    // fastdeploy::vision::DetectionResult res;
+    // if (!model->Predict(im, &res)) {
+    //     std::cerr << "Failed to predict." << std::endl;
+    //     return;
+    // }
+    // std::cout << res.Str() << std::endl;
+    // auto start_time = std::chrono::high_resolution_clock::now();
+
+    // // Get the start timestamp
+    // auto timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(start_time.time_since_epoch()).count();
+
+    // // Print the start timestamp
+    // std::cout << "Program start timestamp: " << timestamp << " ms" << std::endl;
+    // for (auto const &image_file : images) {
+    //     std::cout << image_file << std::endl;
+    //     // auto im = cv::imread(image_file);
+
+    //     fastdeploy::vision::DetectionResult res;
+    //     auto start_time = std::chrono::high_resolution_clock::now();
+    //     if (!model->Predict(im, &res)) {
+    //         std::cerr << "Failed to predict." << std::endl;
+    //         return;
+    //     }
+    //     auto end_time = std::chrono::high_resolution_clock::now();
+    //     auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
+    //     std::cout << "&&&&& " << res.label_ids[0] << std::endl;
+    //     std::cout << "took: " << duration.count() << " ms" << std::endl;
+    //     // print res
+    //     std::cout << "Thread Id: " << thread_id << std::endl;
+    //     std::cout << res.Str() << std::endl;
+    //     auto vis_im = fastdeploy::vision::VisDetection(im, res);
+    //     cv::imwrite("vis_result.jpg", vis_im);
+    //     std::cout << "Visualized result saved in ./vis_result.jpg" << std::endl;
+    // }
+}
+
+
+// Estimates the average optical flow between two consecutive frames using FAST
+// key points and pyramidal Lucas-Kanade tracking.
+class FrameChangeDetectorV1 {
+public:
+    FrameChangeDetectorV1(int threshold = 10) {
+        detector = cv::FastFeatureDetector::create();
+        detector->setThreshold(threshold);
+    }
+
+    std::tuple<double, double> detectDiff(const cv::Mat& currGray, const cv::Mat& prevGray) {
+        std::vector<cv::KeyPoint> kpPrevPoints;
+        detector->detect(prevGray, kpPrevPoints);
+
+        std::vector<cv::Point2f> prevPoints;
+        for (const auto& kp : kpPrevPoints) {
+            prevPoints.emplace_back(kp.pt);
+        }
+
+        if (prevPoints.empty()) {
+            return std::make_tuple(0.0, 0.0);
+        }
+
+        std::vector<cv::Point2f> currPoints;
+        std::vector<uchar> status;
+        std::vector<float> err;
+        cv::calcOpticalFlowPyrLK(
+            prevGray, currGray, prevPoints, currPoints, status, err,
+            cv::Size(15, 15), 2,
+            cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 3, 0.01)
+        );
+
+        int count = 0;
+        double avgFlowX = 0.0;
+        double avgFlowY = 0.0;
+
+        for (size_t i = 0; i < currPoints.size(); ++i) {
+            if (status[i] == 1) {
+                double dx = currPoints[i].x - prevPoints[i].x;
+                double dy = currPoints[i].y - prevPoints[i].y;
+                avgFlowX += dx;
+                avgFlowY += dy;
+                count++;
+            }
+        }
+
+        // Avoid dividing by zero when no point could be tracked.
+        if (count == 0) {
+            return std::make_tuple(0.0, 0.0);
+        }
+
+        avgFlowX /= count;
+        avgFlowY /= count;
+
+        return std::make_tuple(avgFlowX, avgFlowY);
+    }
+
+private:
+    cv::Ptr<cv::FastFeatureDetector> detector;
+};
+
+// Counts, for frames [start, end), how many consecutive frame pairs show an
+// average optical flow below diffThreshold. belowThresholdCount is shared by
+// all workers, hence the atomic counter.
+void processStill(int thread_id, FrameChangeDetectorV1* frameChangeDetector, std::vector<unsigned char>* videos, size_t start, size_t end, std::atomic<int>& belowThresholdCount, double diffThreshold) {
+    for (size_t i = start; i < end; ++i) {
+        if (i == 0) {
+            continue;
+        }
+        // Decode straight to grayscale, which is what detectDiff() expects.
+        auto prevIm = cv::imdecode((videos[i - 1]), cv::IMREAD_GRAYSCALE);
+        auto currIm = cv::imdecode((videos[i]), cv::IMREAD_GRAYSCALE);
+        auto result = frameChangeDetector->detectDiff(currIm, prevIm);
+        double avgFlowX = std::get<0>(result);
+        double avgFlowY = std::get<1>(result);
+        double avgFlow = std::abs((avgFlowX + avgFlowY) / 2.0);
+        if (avgFlow < diffThreshold) {
+            belowThresholdCount++;
+        }
+    }
+}
+
+// int add(int i, int j)
+// {
+//     return i + j;
+// }
+
+// PYBIND11_MODULE(example, m)
+// {
+//     // Optional: describes what this module does.
+//     m.doc() = "pybind11 example plugin";
+//     // def("name exposed to Python", &function to call, "description", default arguments);
+//     // the description is optional.
+//     m.def("add", &add, "A function which adds two numbers", py::arg("i")=1, py::arg("j")=2);
+// }
+
+int main(int argc, char **argv) {
+    // argv[1]: input video, argv[2]: model directory prefix,
+    // argv[3]: optical-flow threshold, argv[4]: still-frame ratio threshold.
+    double value_3 = std::stod(argv[3]);
+    double value_4 = std::stod(argv[4]);
+    auto start_time = std::chrono::high_resolution_clock::now();
+    int skipFrame = 10;
+    auto videos = FastReader::read(argv[1], skipFrame);
+    bool isStill = true;
+
+    // Shared between the stillness workers, hence atomic.
+    std::atomic<int> belowThresholdCount{1};
+    std::vector<std::thread> stillThreads;
+    int stillThreadNum = 10;
+    // double diffThreshold = 0.01;
+    // double countThreshold = 0.9;
+    double diffThreshold = value_3;
+    double countThreshold = value_4;
+    FrameChangeDetectorV1 frameChangeDetector;
+    const size_t stillElementsPerThread = videos.size() / stillThreadNum;
+    for (size_t i = 0; i < stillThreadNum; ++i) {
+        size_t start = i * stillElementsPerThread;
+        size_t end = (i == stillThreadNum - 1) ? videos.size() : (i + 1) * stillElementsPerThread;
+        stillThreads.emplace_back(processStill, i, &frameChangeDetector, videos.data(), start, end, std::ref(belowThresholdCount), diffThreshold);
+    }
+
+    for (int i = 0; i < stillThreadNum; ++i) {
+        stillThreads[i].join();
+    }
+
+    double stillRatio = static_cast<double>(belowThresholdCount.load()) / static_cast<double>(videos.size());
+    std::cout << "Still-frame ratio: " << stillRatio << std::endl;
+    if (stillRatio < countThreshold) {
+        isStill = false;
+        std::vector<std::thread> threads;
+        int numThreads = 4;
+        auto option1 = fastdeploy::RuntimeOption();
+        option1.UseGpu();
+        option1.UseTrtBackend();
+        option1.SetTrtInputShape("images", {1, 3, 640, 640});
+        option1.trt_option.serialize_file = std::string(argv[2]) + "tmp2.trt";
+        option1.trt_option.enable_fp16 = true;
+        auto model1 = fastdeploy::vision::detection::YOLOv8(std::string(argv[2]) + "yolov8s.onnx", "", option1);
+        if (!model1.Initialized()) {
+            std::cerr << "Failed to initialize." << std::endl;
+            return -1;
+        }
+        std::vector<std::unique_ptr<fastdeploy::vision::detection::YOLOv8>> models;
+        for (int i = 0; i < numThreads; ++i) {
+            models.emplace_back(std::move(model1.Clone()));
+        }
+
+        const size_t elementsPerThread = videos.size() / numThreads;
+        for (size_t i = 0; i < numThreads; ++i) {
+            size_t start = i * elementsPerThread;
+            size_t end = (i == numThreads - 1) ? videos.size() : (i + 1) * elementsPerThread;
+
+            std::cerr << start << " " << end << std::endl;
+            threads.emplace_back(PredictYolo, i, models[i].get(), videos.data(), start, end);
+        }
+        for (int i = 0; i < numThreads; ++i) {
+            threads[i].join();
+        }
+    }
+    auto end_time = std::chrono::high_resolution_clock::now();
+    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
+    std::cout << "Elapsed: " << duration.count() << " ms, hasCarOrPerson: " << hasCarOrPerson << std::endl;
+
+    if (isStill) {
+        std::cout << "code108" << std::endl;
+    } else {
+        if (hasCarOrPerson) {
+            std::cout << "code200" << std::endl;
+        } else {
+            std::cout << "code107" << std::endl;
+        }
+    }
+
+    return 0;
+}
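+
+// Build/run sketch (paths and thresholds are illustrative, not mandated by this
+// patch): configure against an extracted FastDeploy SDK, then pass the video,
+// the model directory prefix, the per-frame flow threshold and the still-frame
+// ratio threshold on the command line.
+//
+//   cmake -B build -DFASTDEPLOY_INSTALL_DIR=/path/to/fastdeploy-sdk
+//   cmake --build build
+//   ./build/multi_thread_demo test.mp4 ./models/ 0.01 0.9
+//
+// argv[2] is prepended verbatim to "yolov8s.onnx" / "tmp2.trt", so it should
+// end with a path separator.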