
vision_yolov8

zhengke committed 1 year ago
parent · commit c51dcf45eb
100 changed files with 7235 additions and 273 deletions
  1. 86 0
      src/detection/camera_obs/camera_obs.pro
  2. 8 0
      src/detection/camera_obs/camera_obs.xml
  3. 21 0
      src/detection/camera_obs/include/block.h
  4. 39 0
      src/detection/camera_obs/include/calibrator.h
  5. 14 0
      src/detection/camera_obs/include/config.h
  6. 18 0
      src/detection/camera_obs/include/cuda_utils.h
  7. 82 0
      src/detection/camera_obs/include/imageBuffer.h
  8. 504 0
      src/detection/camera_obs/include/logging.h
  9. 29 0
      src/detection/camera_obs/include/macros.h
  10. 10 0
      src/detection/camera_obs/include/model.h
  11. 24 0
      src/detection/camera_obs/include/postprocess.h
  12. 16 0
      src/detection/camera_obs/include/preprocess.h
  13. 20 0
      src/detection/camera_obs/include/types.h
  14. 86 0
      src/detection/camera_obs/include/utils.h
  15. 348 0
      src/detection/camera_obs/main.cpp
  16. 52 0
      src/detection/vision_yolov8/CMakeLists.txt
  17. BIN
      src/detection/vision_yolov8/data/12801.jpg
  18. BIN
      src/detection/vision_yolov8/data/12802.jpg
  19. BIN
      src/detection/vision_yolov8/data/12803.jpg
  20. BIN
      src/detection/vision_yolov8/data/12804.jpg
  21. BIN
      src/detection/vision_yolov8/data/2.png
  22. BIN
      src/detection/vision_yolov8/data/51204.jpg
  23. BIN
      src/detection/vision_yolov8/data/6.jpg
  24. BIN
      src/detection/vision_yolov8/data/6086083.jpg
  25. BIN
      src/detection/vision_yolov8/data/6406401.jpg
  26. BIN
      src/detection/vision_yolov8/data/6406402.jpg
  27. BIN
      src/detection/vision_yolov8/data/6406403.jpg
  28. BIN
      src/detection/vision_yolov8/data/6406404.jpg
  29. BIN
      src/detection/vision_yolov8/data/6406406.jpg
  30. BIN
      src/detection/vision_yolov8/data/6406407.jpg
  31. BIN
      src/detection/vision_yolov8/data/7.jpg
  32. BIN
      src/detection/vision_yolov8/data/bus.jpg
  33. BIN
      src/detection/vision_yolov8/data/dog.jpg
  34. BIN
      src/detection/vision_yolov8/data/im_01.png
  35. BIN
      src/detection/vision_yolov8/data/image1.jpg
  36. BIN
      src/detection/vision_yolov8/data/image2.jpg
  37. BIN
      src/detection/vision_yolov8/data/image3.jpg
  38. BIN
      src/detection/vision_yolov8/data/long.jpg
  39. BIN
      src/detection/vision_yolov8/data/people.mp4
  40. BIN
      src/detection/vision_yolov8/data/rifle2.jpeg
  41. BIN
      src/detection/vision_yolov8/data/road0.png
  42. BIN
      src/detection/vision_yolov8/data/road1.jpg
  43. BIN
      src/detection/vision_yolov8/data/sailboat3.jpg
  44. BIN
      src/detection/vision_yolov8/data/test2.mp4
  45. BIN
      src/detection/vision_yolov8/data/zidane.jpg
  46. 30 0
      src/detection/vision_yolov8/gen_wts.py
  47. 21 0
      src/detection/vision_yolov8/include/block.h
  48. 39 0
      src/detection/vision_yolov8/include/calibrator.h
  49. 14 0
      src/detection/vision_yolov8/include/config.h
  50. 18 0
      src/detection/vision_yolov8/include/cuda_utils.h
  51. 82 0
      src/detection/vision_yolov8/include/imageBuffer.h
  52. 504 0
      src/detection/vision_yolov8/include/logging.h
  53. 29 0
      src/detection/vision_yolov8/include/macros.h
  54. 10 0
      src/detection/vision_yolov8/include/model.h
  55. 26 0
      src/detection/vision_yolov8/include/postprocess.h
  56. 16 0
      src/detection/vision_yolov8/include/preprocess.h
  57. 29 0
      src/detection/vision_yolov8/include/types.h
  58. 86 0
      src/detection/vision_yolov8/include/utils.h
  59. 525 0
      src/detection/vision_yolov8/main.cpp
  60. 10 0
      src/detection/vision_yolov8/pers_yolov8.yaml
  61. 242 0
      src/detection/vision_yolov8/plugin/yololayer.cu
  62. 102 0
      src/detection/vision_yolov8/plugin/yololayer.h
  63. 57 0
      src/detection/vision_yolov8/show_coordinate_video.py
  64. 193 0
      src/detection/vision_yolov8/src/block.cpp
  65. 80 0
      src/detection/vision_yolov8/src/calibrator.cpp
  66. 394 0
      src/detection/vision_yolov8/src/model.cpp
  67. 331 0
      src/detection/vision_yolov8/src/postprocess.cpp
  68. 84 0
      src/detection/vision_yolov8/src/postprocess.cu
  69. 155 0
      src/detection/vision_yolov8/src/preprocess.cu
  70. 128 0
      src/detection/vision_yolov8/vision_yolov8.pro
  71. 28 0
      src/detection/vision_yolov8/vision_yolov8_config.yaml
  72. 24 0
      src/detection/vision_yolov8/visionyolov8.proto
  73. 250 0
      src/detection/vision_yolov8/yolov8_det.cpp
  74. 455 0
      src/detection/vision_yolov8/yolov8_det_trt.py
  75. 321 0
      src/detection/vision_yolov8/yolov8_seg.cpp
  76. 570 0
      src/detection/vision_yolov8/yolov8_seg_trt.py
  77. 8 0
      src/detection/yaml/camera.yaml
  78. 12 0
      src/detection/yaml/camera147.yaml
  79. 12 0
      src/detection/yaml/camera_middle_640_360.yaml
  80. BIN
      src/detection/yaml/img_cali.jpg
  81. 16 0
      src/detection/yaml/init.yaml
  82. 2 0
      src/detection/yaml/pers.yaml
  83. 10 0
      src/detection/yaml/pers_ori .yaml
  84. 10 0
      src/detection/yaml/pers_yolov8.yaml
  85. 102 0
      src/detection/yaml/show_coordinate_new.py
  86. 57 0
      src/detection/yaml/show_coordinate_video.py
  87. 25 0
      src/detection/yaml/sign_config.yaml
  88. 27 0
      src/detection/yaml/trafficlight_config.yaml
  89. 28 0
      src/detection/yaml/vision_yolov8_config.yaml
  90. 24 0
      src/include/proto/visionyolov8.proto
  91. 8 0
      src/tool/video_record/Readme.md
  92. 98 0
      src/tool/video_record/main.cpp
  93. 82 0
      src/tool/video_record/video_record.pro
  94. 8 0
      src/tool/video_record/video_record.xml
  95. 311 0
      src/tool/video_replay/main.cpp
  96. 48 0
      src/tool/video_replay/video_replay.pro
  97. 11 0
      src/tool/video_replay/video_replay.xml
  98. 94 257
      src/ui/ui_ads_hmi/ADCIntelligentVehicle.cpp
  99. 9 12
      src/ui/ui_ads_hmi/ADCIntelligentVehicle.h
  100. 23 4
      src/ui/ui_ads_hmi/ADCIntelligentVehicle.ui

+ 86 - 0
src/detection/camera_obs/camera_obs.pro

@@ -0,0 +1,86 @@
+######################################################################
+# Automatically generated by qmake (3.1) Thu Dec 28 15:58:13 2023
+######################################################################
+
+TEMPLATE = app
+TARGET = camera_obs
+INCLUDEPATH += .
+
+# The following define makes your compiler warn you if you use any
+# feature of Qt which has been marked as deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+
+# You can also make your code fail to compile if you use deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0
+
+# Input
+HEADERS += include/imageBuffer.h \
+           ../../include/msgtype/object.pb.h \
+           ../../include/msgtype/objectarray.pb.h \
+           ../../include/msgtype/fusionobject.pb.h \
+           ../../include/msgtype/fusionobjectarray.pb.h \
+           ../../include/msgtype/visionyolov8.pb.h \
+           ../../include/msgtype/rawpic.pb.h
+SOURCES += main.cpp \
+        ../../include/msgtype/object.pb.cc \
+        ../../include/msgtype/objectarray.pb.cc \
+        ../../include/msgtype/fusionobject.pb.cc \
+        ../../include/msgtype/fusionobjectarray.pb.cc \
+        ../../include/msgtype/visionyolov8.pb.cc \
+        ../../include/msgtype/rawpic.pb.cc
+
+
+INCLUDEPATH += ./include \
+               /usr/include/opencv4 \
+               /usr/local/cuda/include \
+               /usr/local/cuda/lib64 \
+               ../../include/msgtype/
+
+#LIBS += /usr/lib/aarch64-linux-gnu/*.so
+
+#LIBS += /usr/local/cuda-10.2/lib64/*.so
+
+
+LIBS += -L/usr/local/cuda/targets/aarch64-linux/lib
+LIBS += -lcudart -lcufft -lyaml-cpp
+LIBS +=  -lnvinfer -lnvonnxparser -lnvcaffe_parser
+LIBS += -lboost_system
+unix:LIBS +=  -lpcl_common\
+        -lpcl_features\
+        -lpcl_filters\
+        -lpcl_io\
+        -lpcl_io_ply\
+        -lpcl_kdtree\
+        -lpcl_keypoints\
+        -lpcl_octree\
+        -lpcl_outofcore\
+        -lpcl_people\
+        -lpcl_recognition\
+        -lpcl_registration\
+        -lpcl_sample_consensus\
+        -lpcl_search\
+        -lpcl_segmentation\
+        -lpcl_surface\
+        -lpcl_tracking\
+        -lpcl_visualization
+
+INCLUDEPATH += /usr/include/opencv4/
+LIBS += /usr/lib/aarch64-linux-gnu/libopencv*.so
+
+
+!include(../../../include/common.pri ) {
+    error( "Couldn't find the common.pri file!" )
+}
+
+!include(../../../include/ivprotobuf.pri ) {
+    error( "Couldn't find the ivprotobuf.pri file!" )
+}
+!include(../../../include/ivopencv.pri ) {
+    error( "Couldn't find the ivopencv.pri file!" )
+}
+INCLUDEPATH += $$PWD/../../include/msgtype
+LIBS += -L/usr/lib/aarch64-linux-gnu/ -lglog

+ 8 - 0
src/detection/camera_obs/camera_obs.xml

@@ -0,0 +1,8 @@
+<xml>	
+	<node name="fusion_vision">
+		<param name="msgname" value="image00" />
+		<param name="width" value = "1280" />
+		<param name="height" value="720" />
+		<param name="show_flag" value="1" />
+	</node>
+</xml>

+ 21 - 0
src/detection/camera_obs/include/block.h

@@ -0,0 +1,21 @@
+#pragma once
+#include <map>
+#include <vector>
+#include <string>
+#include "NvInfer.h"
+
+std::map<std::string, nvinfer1::Weights> loadWeights(const std::string file);
+
+nvinfer1::IElementWiseLayer* convBnSiLU(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int ch, int k, int s, int p, std::string lname);
+
+nvinfer1::IElementWiseLayer* C2F(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int c1, int c2, int n, bool shortcut, float e, std::string lname);
+
+nvinfer1::IElementWiseLayer* SPPF(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int c1, int c2, int k, std::string lname);
+
+nvinfer1::IShuffleLayer* DFL(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int ch, int grid, int k, int s, int p, std::string lname);
+
+nvinfer1::IPluginV2Layer* addYoLoLayer(nvinfer1::INetworkDefinition *network, std::vector<nvinfer1::IConcatenationLayer*> dets, bool is_segmentation = false);
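
These declarations are the TensorRT building blocks the YOLOv8 network is assembled from (Conv+BN+SiLU, C2F, SPPF, DFL and the decode plugin); the matching implementations are added under vision_yolov8/src in this commit. As a rough, non-authoritative sketch of the pattern a convBnSiLU block follows, the SiLU part (x * sigmoid(x)) can be expressed with two TensorRT layers, assuming the convolution layer has already been added to the network:

    #include "NvInfer.h"

    // Sketch only: append SiLU (x * sigmoid(x)) after an existing convolution layer.
    static nvinfer1::IElementWiseLayer* addSiLU(nvinfer1::INetworkDefinition* network,
                                                nvinfer1::ILayer* conv) {
        nvinfer1::IActivationLayer* sig =
            network->addActivation(*conv->getOutput(0), nvinfer1::ActivationType::kSIGMOID);
        return network->addElementWise(*conv->getOutput(0), *sig->getOutput(0),
                                       nvinfer1::ElementWiseOperation::kPROD);
    }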

+ 39 - 0
src/detection/camera_obs/include/calibrator.h

@@ -0,0 +1,39 @@
+#ifndef ENTROPY_CALIBRATOR_H
+#define ENTROPY_CALIBRATOR_H
+
+#include <NvInfer.h>
+#include <string>
+#include <vector>
+#include "macros.h"
+
+//! \class Int8EntropyCalibrator2
+//!
+//! \brief Implements Entropy calibrator 2.
+//!  CalibrationAlgoType is kENTROPY_CALIBRATION_2.
+//!
+class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2
+{
+public:
+    Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
+    virtual ~Int8EntropyCalibrator2();
+    int getBatchSize() const TRT_NOEXCEPT override;
+    bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
+    const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
+    void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
+
+private:
+    int batchsize_;
+    int input_w_;
+    int input_h_;
+    int img_idx_;
+    std::string img_dir_;
+    std::vector<std::string> img_files_;
+    size_t input_count_;
+    std::string calib_table_name_;
+    const char* input_blob_name_;
+    bool read_cache_;
+    void* device_input_;
+    std::vector<char> calib_cache_;
+};
+
+#endif // ENTROPY_CALIBRATOR_H
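
Int8EntropyCalibrator2 feeds batches of calibration images to TensorRT when an INT8 engine is built. A minimal, hedged sketch of how such a calibrator is typically attached to the builder config when USE_INT8 is defined; the image directory and cache-file name below are placeholders, not values from this commit:

    #include "NvInfer.h"
    #include "calibrator.h"
    #include "config.h"

    // Sketch: enable INT8 mode and hand the entropy calibrator to the builder config.
    void enableInt8(nvinfer1::IBuilderConfig* config) {
        auto* calibrator = new Int8EntropyCalibrator2(
            kBatchSize, kInputW, kInputH,
            "./calib_images/",   // placeholder: directory of calibration images
            "int8calib.table",   // placeholder: calibration cache file
            kInputTensorName);
        config->setFlag(nvinfer1::BuilderFlag::kINT8);
        config->setInt8Calibrator(calibrator);
    }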

+ 14 - 0
src/detection/camera_obs/include/config.h

@@ -0,0 +1,14 @@
+#define USE_FP16
+//#define USE_INT8
+
+const static char *kInputTensorName = "images";
+const static char *kOutputTensorName = "output";
+const static int kNumClass = 80;
+const static int kBatchSize = 1;
+const static int kGpuId = 0;
+const static int kInputH = 640;
+const static int kInputW = 640;
+const static float kNmsThresh = 0.45f;
+const static float kConfThresh = 0.5f;
+const static int kMaxInputImageSize = 3000 * 3000;
+const static int kMaxNumOutputBbox = 1000;

+ 18 - 0
src/detection/camera_obs/include/cuda_utils.h

@@ -0,0 +1,18 @@
+#ifndef TRTX_CUDA_UTILS_H_
+#define TRTX_CUDA_UTILS_H_
+
+#include <cuda_runtime_api.h>
+
+#ifndef CUDA_CHECK
+#define CUDA_CHECK(callstr)\
+    {\
+        cudaError_t error_code = callstr;\
+        if (error_code != cudaSuccess) {\
+            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
+            assert(0);\
+        }\
+    }
+#endif  // CUDA_CHECK
+
+#endif  // TRTX_CUDA_UTILS_H_
+
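
CUDA_CHECK wraps a CUDA runtime call, prints the failing file and line, and aborts via assert. Because the macro expands to std::cerr and assert, the including translation unit also needs <iostream> and <cassert>. A small usage sketch:

    #include <cassert>
    #include <iostream>
    #include "cuda_utils.h"

    // Sketch: guard CUDA runtime calls so failures are reported with file and line.
    float* allocDeviceBuffer(size_t count) {
        float* ptr = nullptr;
        CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&ptr), count * sizeof(float)));
        CUDA_CHECK(cudaMemset(ptr, 0, count * sizeof(float)));
        return ptr;
    }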

+ 82 - 0
src/detection/camera_obs/include/imageBuffer.h

@@ -0,0 +1,82 @@
+#ifndef IMAGEBUFFER_H
+#define IMAGEBUFFER_H
+
+#include <opencv2/opencv.hpp>
+#include <mutex>
+#include <condition_variable>
+#include <queue>
+template<typename T>
+class ConsumerProducerQueue
+{
+
+public:
+    ConsumerProducerQueue(int mxsz,bool dropFrame) :
+            maxSize(mxsz),dropFrame(dropFrame)
+    { }
+
+    bool add(T request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        if(dropFrame && isFull())
+        {
+            //lock.unlock();
+            //return false;
+            cpq.pop();
+            cpq.push(request);
+            cond.notify_all();
+            return true;
+        }
+        else {
+            cond.wait(lock, [this]() { return !isFull(); });
+            cpq.push(request);
+            //lock.unlock();
+            cond.notify_all();
+            return true;
+        }
+    }
+    void consume(T &request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        cond.wait(lock, [this]()
+        { return !isEmpty(); });
+        request = cpq.front();
+        cpq.pop();
+        //lock.unlock();
+        cond.notify_all();
+
+    }
+
+    bool isFull() const
+    {
+        return cpq.size() >= maxSize;
+    }
+
+    bool isEmpty() const
+    {
+        return cpq.size() == 0;
+    }
+
+    int length() const
+    {
+        return cpq.size();
+    }
+
+    void clear()
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        while (!isEmpty())
+        {
+            cpq.pop();
+        }
+        lock.unlock();
+        cond.notify_all();
+    }
+
+private:
+    std::condition_variable cond;  // condition variable used to wake waiting producer/consumer threads
+    std::mutex mutex;     // mutex protecting the queue against concurrent access
+    std::queue<T> cpq;    // underlying FIFO container holding the buffered frames
+    int maxSize;
+    bool dropFrame;
+};
+#endif // IMAGEBUFFER_H
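
ConsumerProducerQueue is the small thread-safe buffer used to pass frames between the capture/listen threads and the processing loop; with dropFrame=true the oldest entry is discarded instead of blocking the producer. A minimal usage sketch with stand-in frames (dropFrame=false here so every frame is kept):

    #include <thread>
    #include <opencv2/opencv.hpp>
    #include "imageBuffer.h"

    int main() {
        ConsumerProducerQueue<cv::Mat> buffer(3, false);   // depth 3, block when full

        // Producer: pushes frames (the detection nodes do this from a camera/video thread).
        std::thread capture([&buffer]() {
            for (int i = 0; i < 30; ++i)
                buffer.add(cv::Mat(720, 1280, CV_8UC3, cv::Scalar(0, 0, i)));
        });

        // Consumer: blocks until a frame is available.
        cv::Mat frame;
        for (int i = 0; i < 30; ++i)
            buffer.consume(frame);

        capture.join();
        return 0;
    }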

+ 504 - 0
src/detection/camera_obs/include/logging.h

@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TENSORRT_LOGGING_H
+#define TENSORRT_LOGGING_H
+
+#include "NvInferRuntimeCommon.h"
+#include <cassert>
+#include <ctime>
+#include <iomanip>
+#include <iostream>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include "macros.h"
+
+using Severity = nvinfer1::ILogger::Severity;
+
+class LogStreamConsumerBuffer : public std::stringbuf
+{
+public:
+    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mOutput(stream)
+        , mPrefix(prefix)
+        , mShouldLog(shouldLog)
+    {
+    }
+
+    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
+        : mOutput(other.mOutput)
+    {
+    }
+
+    ~LogStreamConsumerBuffer()
+    {
+        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
+        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
+        // if the pointer to the beginning is not equal to the pointer to the current position,
+        // call putOutput() to log the output to the stream
+        if (pbase() != pptr())
+        {
+            putOutput();
+        }
+    }
+
+    // synchronizes the stream buffer and returns 0 on success
+    // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
+    // resetting the buffer and flushing the stream
+    virtual int sync()
+    {
+        putOutput();
+        return 0;
+    }
+
+    void putOutput()
+    {
+        if (mShouldLog)
+        {
+            // prepend timestamp
+            std::time_t timestamp = std::time(nullptr);
+            tm* tm_local = std::localtime(&timestamp);
+            std::cout << "[";
+            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
+            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
+            // std::stringbuf::str() gets the string contents of the buffer
+            // insert the buffer contents pre-appended by the appropriate prefix into the stream
+            mOutput << mPrefix << str();
+            // set the buffer to empty
+            str("");
+            // flush the stream
+            mOutput.flush();
+        }
+    }
+
+    void setShouldLog(bool shouldLog)
+    {
+        mShouldLog = shouldLog;
+    }
+
+private:
+    std::ostream& mOutput;
+    std::string mPrefix;
+    bool mShouldLog;
+};
+
+//!
+//! \class LogStreamConsumerBase
+//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
+//!
+class LogStreamConsumerBase
+{
+public:
+    LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mBuffer(stream, prefix, shouldLog)
+    {
+    }
+
+protected:
+    LogStreamConsumerBuffer mBuffer;
+};
+
+//!
+//! \class LogStreamConsumer
+//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
+//!  Order of base classes is LogStreamConsumerBase and then std::ostream.
+//!  This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
+//!  in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
+//!  This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
+//!  Please do not change the order of the parent classes.
+//!
+class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
+{
+public:
+    //! \brief Creates a LogStreamConsumer which logs messages with level severity.
+    //!  Reportable severity determines if the messages are severe enough to be logged.
+    LogStreamConsumer(Severity reportableSeverity, Severity severity)
+        : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(severity <= reportableSeverity)
+        , mSeverity(severity)
+    {
+    }
+
+    LogStreamConsumer(LogStreamConsumer&& other)
+        : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(other.mShouldLog)
+        , mSeverity(other.mSeverity)
+    {
+    }
+
+    void setReportableSeverity(Severity reportableSeverity)
+    {
+        mShouldLog = mSeverity <= reportableSeverity;
+        mBuffer.setShouldLog(mShouldLog);
+    }
+
+private:
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    static std::string severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    bool mShouldLog;
+    Severity mSeverity;
+};
+
+//! \class Logger
+//!
+//! \brief Class which manages logging of TensorRT tools and samples
+//!
+//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
+//! and supports logging two types of messages:
+//!
+//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
+//! - Test pass/fail messages
+//!
+//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
+//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
+//!
+//! In the future, this class could be extended to support dumping test results to a file in some standard format
+//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
+//!
+//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
+//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
+//! library and messages coming from the sample.
+//!
+//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
+//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
+//! object.
+
+class Logger : public nvinfer1::ILogger
+{
+public:
+    Logger(Severity severity = Severity::kWARNING)
+        : mReportableSeverity(severity)
+    {
+    }
+
+    //!
+    //! \enum TestResult
+    //! \brief Represents the state of a given test
+    //!
+    enum class TestResult
+    {
+        kRUNNING, //!< The test is running
+        kPASSED,  //!< The test passed
+        kFAILED,  //!< The test failed
+        kWAIVED   //!< The test was waived
+    };
+
+    //!
+    //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
+    //! \return The nvinfer1::ILogger associated with this Logger
+    //!
+    //! TODO Once all samples are updated to use this method to register the logger with TensorRT,
+    //! we can eliminate the inheritance of Logger from ILogger
+    //!
+    nvinfer1::ILogger& getTRTLogger()
+    {
+        return *this;
+    }
+
+    //!
+    //! \brief Implementation of the nvinfer1::ILogger::log() virtual method
+    //!
+    //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
+    //! inheritance from nvinfer1::ILogger
+    //!
+    void log(Severity severity, const char* msg) TRT_NOEXCEPT override 
+    {
+        LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
+    }
+
+    //!
+    //! \brief Method for controlling the verbosity of logging output
+    //!
+    //! \param severity The logger will only emit messages that have severity of this level or higher.
+    //!
+    void setReportableSeverity(Severity severity)
+    {
+        mReportableSeverity = severity;
+    }
+
+    //!
+    //! \brief Opaque handle that holds logging information for a particular test
+    //!
+    //! This object is an opaque handle to information used by the Logger to print test results.
+    //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
+    //! with Logger::reportTest{Start,End}().
+    //!
+    class TestAtom
+    {
+    public:
+        TestAtom(TestAtom&&) = default;
+
+    private:
+        friend class Logger;
+
+        TestAtom(bool started, const std::string& name, const std::string& cmdline)
+            : mStarted(started)
+            , mName(name)
+            , mCmdline(cmdline)
+        {
+        }
+
+        bool mStarted;
+        std::string mName;
+        std::string mCmdline;
+    };
+
+    //!
+    //! \brief Define a test for logging
+    //!
+    //! \param[in] name The name of the test.  This should be a string starting with
+    //!                  "TensorRT" and containing dot-separated strings containing
+    //!                  the characters [A-Za-z0-9_].
+    //!                  For example, "TensorRT.sample_googlenet"
+    //! \param[in] cmdline The command line used to reproduce the test
+    //
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    //!
+    static TestAtom defineTest(const std::string& name, const std::string& cmdline)
+    {
+        return TestAtom(false, name, cmdline);
+    }
+
+    //!
+    //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
+    //!        as input
+    //!
+    //! \param[in] name The name of the test
+    //! \param[in] argc The number of command-line arguments
+    //! \param[in] argv The array of command-line arguments (given as C strings)
+    //!
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
+    {
+        auto cmdline = genCmdlineString(argc, argv);
+        return defineTest(name, cmdline);
+    }
+
+    //!
+    //! \brief Report that a test has started.
+    //!
+    //! \pre reportTestStart() has not been called yet for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has started
+    //!
+    static void reportTestStart(TestAtom& testAtom)
+    {
+        reportTestResult(testAtom, TestResult::kRUNNING);
+        assert(!testAtom.mStarted);
+        testAtom.mStarted = true;
+    }
+
+    //!
+    //! \brief Report that a test has ended.
+    //!
+    //! \pre reportTestStart() has been called for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has ended
+    //! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
+    //!                   TestResult::kFAILED, TestResult::kWAIVED
+    //!
+    static void reportTestEnd(const TestAtom& testAtom, TestResult result)
+    {
+        assert(result != TestResult::kRUNNING);
+        assert(testAtom.mStarted);
+        reportTestResult(testAtom, result);
+    }
+
+    static int reportPass(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kPASSED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportFail(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kFAILED);
+        return EXIT_FAILURE;
+    }
+
+    static int reportWaive(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kWAIVED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportTest(const TestAtom& testAtom, bool pass)
+    {
+        return pass ? reportPass(testAtom) : reportFail(testAtom);
+    }
+
+    Severity getReportableSeverity() const
+    {
+        return mReportableSeverity;
+    }
+
+private:
+    //!
+    //! \brief returns an appropriate string for prefixing a log message with the given severity
+    //!
+    static const char* severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate string for prefixing a test result message with the given result
+    //!
+    static const char* testResultString(TestResult result)
+    {
+        switch (result)
+        {
+        case TestResult::kRUNNING: return "RUNNING";
+        case TestResult::kPASSED: return "PASSED";
+        case TestResult::kFAILED: return "FAILED";
+        case TestResult::kWAIVED: return "WAIVED";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
+    //!
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    //!
+    //! \brief method that implements logging test results
+    //!
+    static void reportTestResult(const TestAtom& testAtom, TestResult result)
+    {
+        severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
+                                         << testAtom.mCmdline << std::endl;
+    }
+
+    //!
+    //! \brief generate a command line string from the given (argc, argv) values
+    //!
+    static std::string genCmdlineString(int argc, char const* const* argv)
+    {
+        std::stringstream ss;
+        for (int i = 0; i < argc; i++)
+        {
+            if (i > 0)
+                ss << " ";
+            ss << argv[i];
+        }
+        return ss.str();
+    }
+
+    Severity mReportableSeverity;
+};
+
+namespace
+{
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
+//!
+//! Example usage:
+//!
+//!     LOG_VERBOSE(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
+//!
+//! Example usage:
+//!
+//!     LOG_INFO(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_INFO(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
+//!
+//! Example usage:
+//!
+//!     LOG_WARN(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_WARN(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
+//!
+//! Example usage:
+//!
+//!     LOG_ERROR(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_ERROR(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
+//         ("fatal" severity)
+//!
+//! Example usage:
+//!
+//!     LOG_FATAL(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_FATAL(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
+}
+
+} // anonymous namespace
+
+#endif // TENSORRT_LOGGING_H
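
logging.h provides the nvinfer1::ILogger implementation that every TensorRT entry point expects. A hedged sketch of wiring it into engine deserialization; the engine path is a placeholder and error handling is trimmed:

    #include <fstream>
    #include <iterator>
    #include <vector>
    #include "NvInfer.h"
    #include "logging.h"

    static Logger gLogger;   // file-scope logger handed to the TensorRT runtime

    int main() {
        // Read a previously serialized engine from disk.
        std::ifstream file("yolov8s.engine", std::ios::binary);   // placeholder path
        std::vector<char> blob((std::istreambuf_iterator<char>(file)),
                               std::istreambuf_iterator<char>());

        nvinfer1::IRuntime* runtime = nvinfer1::createInferRuntime(gLogger);
        nvinfer1::ICudaEngine* engine =
            runtime->deserializeCudaEngine(blob.data(), blob.size());
        nvinfer1::IExecutionContext* context = engine->createExecutionContext();

        // ... enqueue inference through the context ...
        return (runtime && engine && context) ? 0 : 1;
    }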

+ 29 - 0
src/detection/camera_obs/include/macros.h

@@ -0,0 +1,29 @@
+#ifndef __MACROS_H
+#define __MACROS_H
+
+#include "NvInfer.h"
+
+#ifdef API_EXPORTS
+#if defined(_MSC_VER)
+#define API __declspec(dllexport)
+#else
+#define API __attribute__((visibility("default")))
+#endif
+#else
+
+#if defined(_MSC_VER)
+#define API __declspec(dllimport)
+#else
+#define API
+#endif
+#endif  // API_EXPORTS
+
+#if NV_TENSORRT_MAJOR >= 8
+#define TRT_NOEXCEPT noexcept
+#define TRT_CONST_ENQUEUE const
+#else
+#define TRT_NOEXCEPT
+#define TRT_CONST_ENQUEUE
+#endif
+
+#endif  // __MACROS_H

+ 10 - 0
src/detection/camera_obs/include/model.h

@@ -0,0 +1,10 @@
+#pragma once
+#include "NvInfer.h"
+#include <string>
+#include <assert.h>
+
+nvinfer1::IHostMemory* buildEngineYolov8Det(nvinfer1::IBuilder* builder,
+nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt, const std::string& wts_path, float& gd, float& gw, int& max_channels);
+
+nvinfer1::IHostMemory* buildEngineYolov8Seg(nvinfer1::IBuilder* builder,
+nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt, const std::string& wts_path, float& gd, float& gw, int& max_channels);
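
buildEngineYolov8Det/Seg construct the network from a .wts weight file and return the serialized engine. A hedged sketch of the surrounding build-and-save flow; the gd/gw/max_channels values below are assumptions for a small model variant, not values taken from this commit:

    #include <fstream>
    #include "NvInfer.h"
    #include "logging.h"
    #include "model.h"

    static Logger gLogger;

    // Sketch: build a detection engine from a .wts file and write it to disk.
    bool buildAndSave(const std::string& wts_path, const std::string& engine_path) {
        nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gLogger);
        nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();

        float gd = 0.33f, gw = 0.50f;   // assumed depth/width multiples
        int max_channels = 1024;        // assumed channel cap

        nvinfer1::IHostMemory* serialized = buildEngineYolov8Det(
            builder, config, nvinfer1::DataType::kFLOAT, wts_path, gd, gw, max_channels);
        if (!serialized) return false;

        std::ofstream out(engine_path, std::ios::binary);
        out.write(static_cast<const char*>(serialized->data()), serialized->size());
        return out.good();
    }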

+ 24 - 0
src/detection/camera_obs/include/postprocess.h

@@ -0,0 +1,24 @@
+#pragma once
+
+#include "types.h"
+#include "NvInfer.h"
+#include <opencv2/opencv.hpp>
+#include "visionyolov8.pb.h"
+
+cv::Rect get_rect(cv::Mat& img, float bbox[4]);
+
+void nms(std::vector<Detection>& res, float *output, float conf_thresh, float nms_thresh = 0.5);
+
+void batch_nms(std::vector<std::vector<Detection>>& batch_res, float *output, int batch_size, int output_size, float conf_thresh, float nms_thresh = 0.5);
+
+iv::vision::Visionyolov8 draw_bbox(std::vector<cv::Mat> &img_batch, std::vector<std::vector<Detection>> &res_batch, cv::Mat &pers_mat_inv, cv::Point2i transed_O, float METER_PER_PIXEL_X, float METER_PER_PIXEL_Y);
+
+void batch_process(std::vector<std::vector<Detection>> &res_batch, const float* decode_ptr_host, int batch_size, int bbox_element, const std::vector<cv::Mat>& img_batch);
+
+void process_decode_ptr_host(std::vector<Detection> &res, const float* decode_ptr_host, int bbox_element, cv::Mat& img, int count);
+
+void cuda_decode(float* predict, int num_bboxes, float confidence_threshold,float* parray,int max_objects, cudaStream_t stream);
+
+void cuda_nms(float* parray, float nms_threshold, int max_objects, cudaStream_t stream);
+
+void draw_mask_bbox(cv::Mat& img, std::vector<Detection>& dets, std::vector<cv::Mat>& masks, std::unordered_map<int, std::string>& labels_map);
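
After inference, nms() turns the raw output buffer into Detection structs and get_rect() maps each box back onto the original image. A minimal sketch for a single image; host_output is assumed to point at one image's worth of copied-back network output:

    #include <opencv2/opencv.hpp>
    #include <vector>
    #include "config.h"
    #include "postprocess.h"

    // Sketch: decode one image's output into rectangles drawn on the frame.
    void decodeAndDraw(cv::Mat& frame, float* host_output) {
        std::vector<Detection> dets;
        nms(dets, host_output, kConfThresh, kNmsThresh);

        for (Detection& det : dets) {
            cv::Rect r = get_rect(frame, det.bbox);
            cv::rectangle(frame, r, cv::Scalar(0, 255, 0), 2);
            cv::putText(frame, std::to_string(static_cast<int>(det.class_id)),
                        cv::Point(r.x, r.y - 2), cv::FONT_HERSHEY_PLAIN, 1.2,
                        cv::Scalar(0, 255, 0), 2);
        }
    }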

+ 16 - 0
src/detection/camera_obs/include/preprocess.h

@@ -0,0 +1,16 @@
+#pragma once
+
+#include <opencv2/opencv.hpp>
+#include "NvInfer.h"
+#include "types.h"
+#include <map>
+
+
+void cuda_preprocess_init(int max_image_size);
+
+void cuda_preprocess_destroy();
+
+void cuda_preprocess(uint8_t *src, int src_width, int src_height, float *dst, int dst_width, int dst_height, cudaStream_t stream);
+
+void cuda_batch_preprocess(std::vector<cv::Mat> &img_batch, float *dst, int dst_width, int dst_height, cudaStream_t stream);
+
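
The cuda_preprocess_* functions manage internal staging buffers and run the preprocessing (resize/letterbox and normalization) on the GPU, writing into the buffer that will be bound as the engine input. A rough sketch of the call order; buffer sizes follow config.h, and the image path refers to one of the samples added under vision_yolov8/data in this commit:

    #include <cassert>
    #include <iostream>
    #include <opencv2/opencv.hpp>
    #include "config.h"
    #include "cuda_utils.h"
    #include "preprocess.h"

    int main() {
        cuda_preprocess_init(kMaxInputImageSize);    // allocate staging buffers once

        cudaStream_t stream;
        CUDA_CHECK(cudaStreamCreate(&stream));
        float* device_input = nullptr;
        CUDA_CHECK(cudaMalloc(reinterpret_cast<void**>(&device_input),
                              kBatchSize * 3 * kInputW * kInputH * sizeof(float)));

        cv::Mat frame = cv::imread("data/bus.jpg");
        cuda_preprocess(frame.ptr(), frame.cols, frame.rows,
                        device_input, kInputW, kInputH, stream);
        CUDA_CHECK(cudaStreamSynchronize(stream));

        // ... run inference on device_input ...

        CUDA_CHECK(cudaFree(device_input));
        CUDA_CHECK(cudaStreamDestroy(stream));
        cuda_preprocess_destroy();                   // release staging buffers
        return 0;
    }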

+ 20 - 0
src/detection/camera_obs/include/types.h

@@ -0,0 +1,20 @@
+#pragma once
+#include "config.h"
+#include "string"
+
+struct alignas(float) Detection {
+  //center_x center_y w h
+  float bbox[4];
+  float conf;  // bbox_conf * cls_conf
+  float class_id;
+  float mask[32];
+};
+
+struct AffineMatrix {
+    float value[6];
+};
+
+const int bbox_element = sizeof(AffineMatrix) / sizeof(float)+1;      // left, top, right, bottom, confidence, class, keepflag
+
+const std::string objectName[8] ={"person",
+                         "bicycle","car","motorcycle","airplane","bus","train","truck"};

+ 86 - 0
src/detection/camera_obs/include/utils.h

@@ -0,0 +1,86 @@
+#pragma once
+#include <opencv2/opencv.hpp>
+#include <dirent.h>
+#include <fstream>  
+
+static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
+    int w, h, x, y;
+    float r_w = input_w / (img.cols*1.0);
+    float r_h = input_h / (img.rows*1.0);
+    if (r_h > r_w) {
+        w = input_w;
+        h = r_w * img.rows;
+        x = 0;
+        y = (input_h - h) / 2;
+    } else {
+        w = r_h * img.cols;
+        h = input_h;
+        x = (input_w - w) / 2;
+        y = 0;
+    }
+    cv::Mat re(h, w, CV_8UC3);
+    cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR);
+    cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
+    re.copyTo(out(cv::Rect(x, y, re.cols, re.rows)));
+    return out;
+}
+
+static inline int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
+    DIR *p_dir = opendir(p_dir_name);
+    if (p_dir == nullptr) {
+        return -1;
+    }
+
+    struct dirent* p_file = nullptr;
+    while ((p_file = readdir(p_dir)) != nullptr) {
+        if (strcmp(p_file->d_name, ".") != 0 &&
+            strcmp(p_file->d_name, "..") != 0) {
+            //std::string cur_file_name(p_dir_name);
+            //cur_file_name += "/";
+            //cur_file_name += p_file->d_name;
+            std::string cur_file_name(p_file->d_name);
+            file_names.push_back(cur_file_name);
+        }
+    }
+
+    closedir(p_dir);
+    return 0;
+}
+
+// Function to trim leading and trailing whitespace from a string
+static inline std::string trim_leading_whitespace(const std::string& str) {
+    size_t first = str.find_first_not_of(' ');
+    if (std::string::npos == first) {
+        return str;
+    }
+    size_t last = str.find_last_not_of(' ');
+    return str.substr(first, (last - first + 1));
+}
+
+// Src: https://stackoverflow.com/questions/16605967
+static inline std::string to_string_with_precision(const float a_value, const int n = 2) {
+    std::ostringstream out;
+    out.precision(n);
+    out << std::fixed << a_value;
+    return out.str();
+}
+
+static inline int read_labels(const std::string labels_filename, std::unordered_map<int, std::string>& labels_map) {
+    std::ifstream file(labels_filename);
+    // Read each line of the file
+    std::string line;
+    int index = 0;
+    while (std::getline(file, line)) {
+        // Strip the line of any leading or trailing whitespace
+        line = trim_leading_whitespace(line);
+
+        // Add the stripped line to the labels_map, using the loop index as the key
+        labels_map[index] = line;
+        index++;
+    }
+    // Close the file
+    file.close();
+
+    return 0;
+}
+

+ 348 - 0
src/detection/camera_obs/main.cpp

@@ -0,0 +1,348 @@
+#include <QCoreApplication>
+#include <iostream>
+#include <fstream>
+#include <opencv2/opencv.hpp>
+#include <thread>
+
+#include "modulecomm.h"
+#include "xmlparam.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include <chrono>
+#include "rawpic.pb.h"
+#include "visionyolov8.pb.h"
+#include "imageBuffer.h"
+#include <thread>
+#include <QMutex>
+#include "objectarray.pb.h"
+#include "fusionobjectarray.pb.h"
+
+
+using namespace std;
+
+bool calibrationstate = false;
+bool cropstate = false;
+
+int show_flag = 0;
+cv::Range crop_height = cv::Range(0,720);
+cv::Range crop_width = cv::Range(280,1000);
+int gindex = 0;
+
+int gwidth = 1920;
+int gheight = 1080;
+
+
+void * gpcamera;
+string cameraname="image00";
+string gstrdevname = "video0";
+string video_path = "/home/nvidia/Videos/video.mp4";
+
+
+
+
+ConsumerProducerQueue<cv::Mat> * imageBuffer =  new ConsumerProducerQueue<cv::Mat>(3,true);
+ConsumerProducerQueue<iv::vision::rawpic> * picBuffer =  new ConsumerProducerQueue<iv::vision::rawpic>(3,true);
+ConsumerProducerQueue<iv::vision::Visionyolov8> * yolov8Buffer =  new ConsumerProducerQueue<iv::vision::Visionyolov8>(3,true);
+
+std::string PERS_FILE = "/home/nvidia/app/app/yaml/pers_ori.yaml";
+
+void *gfu = iv::modulecomm::RegisterSend("yolo_obs",1000000,1);
+iv::vision::Visionyolov8 vision_yolov8_info;
+static QMutex gMutex;
+// read frames from a video file
+void ReadFunc(int n)
+{
+    cv::VideoCapture cap(video_path);
+    if(!cap.isOpened())
+    {
+        cout<<"camera failed to open"<<endl;
+    }
+    while(1)
+    {
+        cv::Mat frame;
+        // needed when reading from a video file; remove it when reading from a live camera
+        if(imageBuffer->isFull())
+        {
+            continue;
+        }
+        if(cap.read(frame))
+        {
+//            if(calibrationstate)
+//                cv::remap(frame,frame,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+            cv::Mat crop_img;
+            cv::resize(frame, crop_img, cv::Size(1280, 720), 0, 0, cv::INTER_LINEAR);
+//            if(cropstate)
+//                crop_img = crop_img(crop_height, crop_width);
+            imageBuffer->add(crop_img);
+        }
+        else
+        {
+            std::this_thread::sleep_for(std::chrono::milliseconds(1));
+        }
+    }
+}
+
+
+
+void ListenYolov8(const char * strdata,const unsigned int nSize, const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    if(nSize<1)return;
+    iv::vision::Visionyolov8 vision_yolov8;
+    if(false == vision_yolov8.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"ListenYolov8 fail."<<std::endl;
+        return;
+    }else{
+//        std::cout<<"listen size="<<vision_yolov8.ByteSize()<<std::endl;
+    }
+    iv::vision::Visionyolov8 vision_yolov8_info;
+    gMutex.lock();
+    vision_yolov8_info.CopyFrom(vision_yolov8);
+    yolov8Buffer->add(vision_yolov8_info);
+    gMutex.unlock();
+}
+
+void Listenpic(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    if(nSize<1000)return;
+    iv::vision::rawpic pic;
+    if(false == pic.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"picview Listenpic fail."<<std::endl;
+        return;
+    }
+    cv::Mat mat(pic.height(),pic.width(),pic.mattype());
+    if(pic.type() == 1)
+        memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
+    else
+    {
+        std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
+        mat = cv::imdecode(buff,cv::IMREAD_COLOR);
+    }
+    imageBuffer->add(mat);
+    mat.release();
+}
+
+void draw_bbox(cv::Mat img, iv::vision::Visionyolov8 vision_yolov8){
+//    iv::vision::PixelObject *pixelObject = vision_yolov8.add_pixelobject();
+    std::cout<<"--------draw start--------------"<<std::endl;
+    std::cout<<"object_size:"<<vision_yolov8.pixelobject_size()<<std::endl;
+    for(int i=0; i<vision_yolov8.pixelobject_size();++i){
+        std::string type = vision_yolov8.pixelobject(i).type();
+        std::cout<<"type:"<<type<<std::endl;
+        cv::Point2f a;
+        a.x = vision_yolov8.pixelobject(i).x();
+        a.y = vision_yolov8.pixelobject(i).y();
+        int width = vision_yolov8.pixelobject(i).width();
+        int height = vision_yolov8.pixelobject(i).height();
+        cv::Rect r(a.x, a.y, width, height);
+        std::cout<<"obj.x: "<<vision_yolov8.worldobject(i).x()<<std::endl;
+        std::cout<<"obj.y: "<<vision_yolov8.worldobject(i).y()<<std::endl;
+        std::cout<<"obj.width: "<<vision_yolov8.worldobject(i).width()<<std::endl;
+        if(vision_yolov8.worldobject(i).y()<5){
+            cv::rectangle(img, r, cv::Scalar(0, 0, 255), 2);
+            cv::putText(img,
+                    " x:"+std::to_string(vision_yolov8.worldobject(i).x()).substr(0,4)+"m"+
+                    " y:"+std::to_string(vision_yolov8.worldobject(i).y()).substr(0,4)+"m"+
+                    " width:"+std::to_string(vision_yolov8.worldobject(i).width()).substr(0,4)+"m"
+                    , cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN,
+                    1, cv::Scalar(0, 0, 255), 1);
+        }else{
+            cv::rectangle(img, r, cv::Scalar(0, 255, 0), 2);
+            cv::putText(img,
+                    " x:"+std::to_string(vision_yolov8.worldobject(i).x()).substr(0,4)+"m"+
+                    " y:"+std::to_string(vision_yolov8.worldobject(i).y()).substr(0,4)+"m"+
+                    " width:"+std::to_string(vision_yolov8.worldobject(i).width()).substr(0,4)+"m"
+                    , cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_PLAIN,
+                    1, cv::Scalar(0, 255, 0), 1);
+        }
+
+    }
+    std::cout<<"---------draw end--------------"<<std::endl;
+
+}
+
+
+void ObsToNormal(iv::fusion::fusionobjectarray& lidar_radar_fusion_object_array)
+{
+    std::cout<<"ObsToNormal:"<<std::endl;
+    for(int i = 0; i < lidar_radar_fusion_object_array.obj_size(); i++)
+    {
+//        if((lidar_radar_fusion_object_array.obj(i).sensor_type() == 0) &&(lidar_radar_fusion_object_array.obj(i).centroid().y() > 20 || abs(lidar_radar_fusion_object_array.obj(i).centroid().x())>10) ) continue;
+//        if ((lidar_radar_fusion_object_array.obj(i).sensor_type() == 0)&&(lidar_radar_fusion_object_array.obj(i).centroid().y()>10 && abs(lidar_radar_fusion_object_array.obj(i).centroid().x())<1.3)) continue;
+//        if((lidar_radar_fusion_object_array.obj(i).sensor_type() == 0)&&(lidar_radar_fusion_object_array.obj(i).centroid().y() <1.0  && abs(lidar_radar_fusion_object_array.obj(i).centroid().x())<1.3)) continue;
+
+        if((lidar_radar_fusion_object_array.obj(i).dimensions().x()>0)&&
+                (lidar_radar_fusion_object_array.obj(i).dimensions().y()>0))
+        {
+            int xp = (int)((lidar_radar_fusion_object_array.obj(i).dimensions().x()/0.2)/2.0);
+            if(xp == 0)xp=1;
+            int yp = (int)((lidar_radar_fusion_object_array.obj(i).dimensions().y()/0.2)/2.0);
+            if(yp == 0)yp=1;
+            int ix,iy;
+            for(ix = 0; ix<(xp*2); ix++)
+            {
+                for(iy = 0; iy<(yp*2); iy++)
+                {
+                    iv::fusion::NomalXYZ nomal_centroid;
+                    iv::fusion::NomalXYZ *nomal_centroid_;
+                    float nomal_x = ix*0.2 - xp*0.2;
+                    float nomal_y = iy*0.2 - yp*0.2;
+                    float nomal_z = 1.0;
+                    float s = nomal_x*cos(lidar_radar_fusion_object_array.obj(i).yaw())
+                            - nomal_y*sin(lidar_radar_fusion_object_array.obj(i).yaw());
+                    float t = nomal_x*sin(lidar_radar_fusion_object_array.obj(i).yaw())
+                            + nomal_y*cos(lidar_radar_fusion_object_array.obj(i).yaw());
+                    nomal_centroid.set_nomal_x(lidar_radar_fusion_object_array.obj(i).centroid().x() + s);
+                    nomal_centroid.set_nomal_y(lidar_radar_fusion_object_array.obj(i).centroid().y() + t);
+                    if(abs(lidar_radar_fusion_object_array.obj(i).centroid().x() + s) <1.3 &&
+                            lidar_radar_fusion_object_array.obj(i).centroid().y() + t <1.0) continue;
+                    else{
+                        nomal_centroid.set_nomal_x(lidar_radar_fusion_object_array.obj(i).centroid().x() + s);
+                        nomal_centroid.set_nomal_y(lidar_radar_fusion_object_array.obj(i).centroid().y() + t);
+                        iv::fusion::fusionobject &fusion_obj = (iv::fusion::fusionobject &)lidar_radar_fusion_object_array.obj(i);
+                        nomal_centroid_ = fusion_obj.add_nomal_centroid();
+                        nomal_centroid_->CopyFrom(nomal_centroid);
+                    }
+                    //std::cout<<"nomal_centroid:"<<nomal_centroid.nomal_x()<<","<<nomal_centroid.nomal_y()<<std::endl;
+                }
+            }
+    }
+}}
+
+void datafusion(iv::vision::Visionyolov8 yolo_obj){
+    iv::fusion::fusionobjectarray out_fusion;
+    std::cout<<"---------obs start--------------"<<std::endl;
+    std::cout<<"size: "<<yolo_obj.worldobject_size()<<std::endl;
+    for(int i=0; i<yolo_obj.worldobject_size();++i){
+        cv::Point2f a;
+        a.x = yolo_obj.worldobject(i).x();
+        a.y = yolo_obj.worldobject(i).y();
+        float width = yolo_obj.worldobject(i).width();
+
+        iv::fusion::fusionobject fusion_object;
+
+        fusion_object.set_yaw(0);
+
+        iv::fusion::PointXYZ centroid;
+        iv::fusion::PointXYZ *centroid_;
+        centroid.set_x(a.x);
+        centroid.set_y(a.y);
+        centroid.set_z(width/2);
+        centroid_ = fusion_object.mutable_centroid();
+        centroid_->CopyFrom(centroid);
+        std::cout<<"centroid.x:"<<centroid.x()<<"  centroid.y:"<<centroid.y()<<std::endl;
+        iv::fusion::Dimension dimension;
+        iv::fusion::Dimension *dimension_;
+        dimension.set_x(width);
+        dimension.set_y(width);
+        dimension.set_z(width);
+        dimension_ = fusion_object.mutable_dimensions();
+        dimension_->CopyFrom(dimension);
+        std::cout<<"width:"<<dimension.x()<<std::endl;
+        iv::fusion::fusionobject *pobj = out_fusion.add_obj();
+        pobj->CopyFrom(fusion_object);
+    }
+
+    ObsToNormal(out_fusion);
+    string out;
+    if(out_fusion.obj_size() == 0)
+    {
+        //        std::cout<<"   fake   obj"<<std::endl;
+
+        iv::fusion::fusionobject fake_obj;
+        iv::fusion::fusionobject *fake_obj_;
+        iv::fusion::PointXYZ fake_cen;
+        iv::fusion::PointXYZ *fake_cen_;
+        fake_cen.set_x(10000);
+        fake_cen.set_y(10000);
+        fake_cen.set_z(10000);
+        fake_cen_ = fake_obj.mutable_centroid();
+        fake_cen_ ->CopyFrom(fake_cen);
+
+        fake_obj_ = out_fusion.add_obj();
+        fake_obj_->CopyFrom(fake_obj);
+    }
+    std::cout<<"start ser"<<std::endl;
+    out = out_fusion.SerializeAsString();
+    std::cout<<"out.size: "<<out.size()<<std::endl;
+    std::cout<<"---------obs end--------------"<<std::endl;
+    iv::modulecomm::ModuleSendMsg(gfu,out.data(),out.length());
+
+}
+
+int main(int argc, char *argv[]) {
+
+    void * gpa;
+    gpa = iv::modulecomm::RegisterRecv("vision_yolov8", ListenYolov8);
+
+    QCoreApplication a(argc, argv);
+    QString strpath = QCoreApplication::applicationDirPath();
+    if(argc < 2)
+        strpath = strpath + "/fusion_vision.xml";
+    else
+        strpath = argv[1];
+    std::cout<<strpath.toStdString()<<std::endl;
+    iv::xmlparam::Xmlparam xp(strpath.toStdString());
+
+    std::string xmlmsgname = xp.GetParam("msgname","image00");
+    std::string strwidth = xp.GetParam("width","1280");
+    std::string strheight = xp.GetParam("height","720");
+    std::string showFlag = xp.GetParam("show_flag","0");
+    cameraname = xmlmsgname;
+    gwidth = atoi(strwidth.data());
+    gheight = atoi(strheight.data());
+    show_flag = atoi(showFlag.data());
+
+    if(show_flag)
+        gpcamera= iv::modulecomm::RegisterRecv(&cameraname[0],Listenpic);
+
+    double waittime = (double)cv::getTickCount();
+    while (1)
+    {
+        if(yolov8Buffer->isEmpty())
+        {
+            double waittotal = (double)cv::getTickCount() - waittime;
+            double totaltime = waittotal/cv::getTickFrequency();
+//-------- exit the program if no frame arrives for too long ----------
+//            if(totaltime>2.0)
+//            {
+//                cout<<"Cant't get frame and quit"<<endl;
+//                cv::destroyAllWindows();
+//                std::cout<<"------end program------"<<std::endl;
+//                break;
+//            }
+            cout<<"Wait for frame and object res "<<totaltime<<"s"<<endl;
+            std::this_thread::sleep_for(std::chrono::milliseconds(10));
+            continue;
+        }
+
+        iv::vision::Visionyolov8 res;
+        yolov8Buffer->consume(res);
+        datafusion(res);
+
+        if(show_flag){
+            cv::Mat mat;
+            imageBuffer->consume(mat);
+
+            // Draw bounding boxes
+            draw_bbox(mat, res);
+            cv::namedWindow("camera_obs",cv::WINDOW_NORMAL);
+            cv::resizeWindow("camera_obs", 640, 480);
+            cv::imshow("camera_obs",mat);
+            mat.release();
+        }
+
+        if (cv::waitKey(10) == 'q')
+        {
+            break;
+        }
+        waittime = (double)cv::getTickCount();
+    }
+    cv::destroyAllWindows();
+
+    return 0;
+}
+

+ 52 - 0
src/detection/vision_yolov8/CMakeLists.txt

@@ -0,0 +1,52 @@
+cmake_minimum_required(VERSION 3.10)
+
+project(vision_yolov8)
+
+add_definitions(-std=c++11)
+add_definitions(-DAPI_EXPORTS)
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_BUILD_TYPE Debug)
+
+set(CMAKE_CUDA_COMPILER /usr/local/cuda/bin/nvcc)
+enable_language(CUDA)
+
+include_directories(${PROJECT_SOURCE_DIR}/include)
+include_directories(${PROJECT_SOURCE_DIR}/plugin)
+
+# include and link dirs of cuda and tensorrt, you need adapt them if yours are different
+if (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
+  message("embed_platform on")
+  include_directories(/usr/local/cuda/targets/aarch64-linux/include)
+  link_directories(/usr/local/cuda/targets/aarch64-linux/lib)
+else()
+  message("embed_platform off")
+  # cuda
+  include_directories(/usr/local/cuda/include)
+  link_directories(/usr/local/cuda/lib64)
+
+  # tensorrt
+  include_directories(/home/lindsay/TensorRT-8.4.1.5/include)
+  link_directories(/home/lindsay/TensorRT-8.4.1.5/lib)
+#  include_directories(/home/lindsay/TensorRT-7.2.3.4/include)
+#  link_directories(/home/lindsay/TensorRT-7.2.3.4/lib)
+
+
+endif()
+
+add_library(myplugins SHARED ${PROJECT_SOURCE_DIR}/plugin/yololayer.cu)
+target_link_libraries(myplugins nvinfer cudart)
+
+find_package(OpenCV)
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+
+file(GLOB_RECURSE SRCS ${PROJECT_SOURCE_DIR}/src/*.cpp ${PROJECT_SOURCE_DIR}/src/*.cu)
+add_executable(yolov8_det ${PROJECT_SOURCE_DIR}/yolov8_det.cpp ${SRCS})
+
+target_link_libraries(yolov8_det nvinfer)
+target_link_libraries(yolov8_det cudart)
+target_link_libraries(yolov8_det myplugins)
+target_link_libraries(yolov8_det ${OpenCV_LIBS})
+
+add_executable(yolov8_seg ${PROJECT_SOURCE_DIR}/yolov8_seg.cpp ${SRCS})
+target_link_libraries(yolov8_seg nvinfer cudart myplugins ${OpenCV_LIBS})

BIN
src/detection/vision_yolov8/data/12801.jpg


BIN
src/detection/vision_yolov8/data/12802.jpg


BIN
src/detection/vision_yolov8/data/12803.jpg


BIN
src/detection/vision_yolov8/data/12804.jpg


BIN
src/detection/vision_yolov8/data/2.png


BIN
src/detection/vision_yolov8/data/51204.jpg


BIN
src/detection/vision_yolov8/data/6.jpg


BIN
src/detection/vision_yolov8/data/6086083.jpg


BIN
src/detection/vision_yolov8/data/6406401.jpg


BIN
src/detection/vision_yolov8/data/6406402.jpg


BIN
src/detection/vision_yolov8/data/6406403.jpg


BIN
src/detection/vision_yolov8/data/6406404.jpg


BIN
src/detection/vision_yolov8/data/6406406.jpg


BIN
src/detection/vision_yolov8/data/6406407.jpg


BIN
src/detection/vision_yolov8/data/7.jpg


BIN
src/detection/vision_yolov8/data/bus.jpg


BIN
src/detection/vision_yolov8/data/dog.jpg


BIN
src/detection/vision_yolov8/data/im_01.png


BIN
src/detection/vision_yolov8/data/image1.jpg


BIN
src/detection/vision_yolov8/data/image2.jpg


BIN
src/detection/vision_yolov8/data/image3.jpg


BIN
src/detection/vision_yolov8/data/long.jpg


BIN
src/detection/vision_yolov8/data/people.mp4


BIN
src/detection/vision_yolov8/data/rifle2.jpeg


BIN
src/detection/vision_yolov8/data/road0.png


BIN
src/detection/vision_yolov8/data/road1.jpg


BIN
src/detection/vision_yolov8/data/sailboat3.jpg


BIN
src/detection/vision_yolov8/data/test2.mp4


BIN
src/detection/vision_yolov8/data/zidane.jpg


+ 30 - 0
src/detection/vision_yolov8/gen_wts.py

@@ -0,0 +1,30 @@
+import sys
+import argparse
+import os
+import struct
+import torch
+
+pt_file = "./weights/yolov8s.pt"
+wts_file = "./weights/yolov8s.wts"
+
+# Initialize
+device = 'cpu'
+
+# Load model
+model = torch.load(pt_file, map_location=device)['model'].float()  # load to FP32
+
+anchor_grid = model.model[-1].anchors * model.model[-1].stride[..., None, None]
+
+delattr(model.model[-1], 'anchors')
+
+model.to(device).eval()
+
+with open(wts_file, 'w') as f:
+    f.write('{}\n'.format(len(model.state_dict().keys())))
+    for k, v in model.state_dict().items():
+        vr = v.reshape(-1).cpu().numpy()
+        f.write('{} {} '.format(k, len(vr)))
+        for vv in vr:
+            f.write(' ')
+            f.write(struct.pack('>f', float(vv)).hex())
+        f.write('\n')

+ 21 - 0
src/detection/vision_yolov8/include/block.h

@@ -0,0 +1,21 @@
+#pragma once
+#include <map>
+#include <vector>
+#include <string>
+#include "NvInfer.h"
+
+std::map<std::string, nvinfer1::Weights> loadWeights(const std::string file);
+
+nvinfer1::IElementWiseLayer* convBnSiLU(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int ch, int k, int s, int p, std::string lname);
+
+nvinfer1::IElementWiseLayer* C2F(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int c1, int c2, int n, bool shortcut, float e, std::string lname);
+
+nvinfer1::IElementWiseLayer* SPPF(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int c1, int c2, int k, std::string lname);
+
+nvinfer1::IShuffleLayer* DFL(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int ch, int grid, int k, int s, int p, std::string lname);
+
+nvinfer1::IPluginV2Layer* addYoLoLayer(nvinfer1::INetworkDefinition *network, std::vector<nvinfer1::IConcatenationLayer*> dets, bool is_segmentation = false);
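
Note (not part of the commit): a minimal sketch of how the builder helpers declared in block.h are meant to be composed when hand-constructing the network. The channel counts and layer names ("model.0", "model.2", "model.9") are illustrative assumptions following the usual yolov8*.wts naming, not values taken from this repository.

#include "block.h"

nvinfer1::ITensor* buildStemSketch(nvinfer1::INetworkDefinition* network,
                                   std::map<std::string, nvinfer1::Weights>& weightMap,
                                   nvinfer1::ITensor& input) {
    // Conv + BN + SiLU stem, 3x3 kernel, stride 2
    nvinfer1::IElementWiseLayer* conv0 = convBnSiLU(network, weightMap, input, 32, 3, 2, 1, "model.0");
    // C2f block with one bottleneck and a shortcut connection
    nvinfer1::IElementWiseLayer* c2f = C2F(network, weightMap, *conv0->getOutput(0), 32, 32, 1, true, 0.5f, "model.2");
    // SPPF tail with a 5x5 pooling kernel
    nvinfer1::IElementWiseLayer* sppf = SPPF(network, weightMap, *c2f->getOutput(0), 32, 32, 5, "model.9");
    return sppf->getOutput(0);
}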

+ 39 - 0
src/detection/vision_yolov8/include/calibrator.h

@@ -0,0 +1,39 @@
+#ifndef ENTROPY_CALIBRATOR_H
+#define ENTROPY_CALIBRATOR_H
+
+#include <NvInfer.h>
+#include <string>
+#include <vector>
+#include "macros.h"
+
+//! \class Int8EntropyCalibrator2
+//!
+//! \brief Implements Entropy calibrator 2.
+//!  CalibrationAlgoType is kENTROPY_CALIBRATION_2.
+//!
+class Int8EntropyCalibrator2 : public nvinfer1::IInt8EntropyCalibrator2
+{
+public:
+    Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name, const char* input_blob_name, bool read_cache = true);
+    virtual ~Int8EntropyCalibrator2();
+    int getBatchSize() const TRT_NOEXCEPT override;
+    bool getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT override;
+    const void* readCalibrationCache(size_t& length) TRT_NOEXCEPT override;
+    void writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT override;
+
+private:
+    int batchsize_;
+    int input_w_;
+    int input_h_;
+    int img_idx_;
+    std::string img_dir_;
+    std::vector<std::string> img_files_;
+    size_t input_count_;
+    std::string calib_table_name_;
+    const char* input_blob_name_;
+    bool read_cache_;
+    void* device_input_;
+    std::vector<char> calib_cache_;
+};
+
+#endif // ENTROPY_CALIBRATOR_H
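
Note (not part of the commit): a sketch of how this calibrator is typically attached to an IBuilderConfig when USE_INT8 is enabled in config.h. The calibration image directory and cache-table file name are placeholders; the calibrator object must stay alive until the engine build finishes.

#include "calibrator.h"
#include "config.h"

void enableInt8Sketch(nvinfer1::IBuilderConfig* config) {
    auto* calibrator = new Int8EntropyCalibrator2(
        1, kInputW, kInputH,
        "./coco_calib/",      // directory of calibration images (placeholder)
        "int8calib.table",    // calibration cache file (placeholder)
        kInputTensorName);
    config->setFlag(nvinfer1::BuilderFlag::kINT8);
    config->setInt8Calibrator(calibrator);
}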

+ 14 - 0
src/detection/vision_yolov8/include/config.h

@@ -0,0 +1,14 @@
+#define USE_FP16
+//#define USE_INT8
+
+const static char *kInputTensorName = "images";
+const static char *kOutputTensorName = "output";
+const static int kNumClass = 80;
+const static int kBatchSize = 1;
+const static int kGpuId = 0;
+const static int kInputH = 640;
+const static int kInputW = 640;
+const static float kNmsThresh = 0.45f;
+const static float kConfThresh = 0.5f;
+const static int kMaxInputImageSize = 3000 * 3000;
+const static int kMaxNumOutputBbox = 1000;

+ 18 - 0
src/detection/vision_yolov8/include/cuda_utils.h

@@ -0,0 +1,18 @@
+#ifndef TRTX_CUDA_UTILS_H_
+#define TRTX_CUDA_UTILS_H_
+
+#include <cuda_runtime_api.h>
+#include <iostream>
+#include <cassert>
+
+#ifndef CUDA_CHECK
+#define CUDA_CHECK(callstr)\
+    {\
+        cudaError_t error_code = callstr;\
+        if (error_code != cudaSuccess) {\
+            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__;\
+            assert(0);\
+        }\
+    }
+#endif  // CUDA_CHECK
+
+#endif  // TRTX_CUDA_UTILS_H_
+
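
Note (not part of the commit): a trivial usage sketch of the CUDA_CHECK macro; it prints the failing file and line to std::cerr and aborts via assert when a CUDA runtime call does not return cudaSuccess.

#include "cuda_utils.h"
#include <iostream>
#include <cassert>

float* allocateDeviceBufferSketch(size_t count) {
    float* ptr = nullptr;
    // Aborts with file/line information if the allocation fails
    CUDA_CHECK(cudaMalloc((void**)&ptr, count * sizeof(float)));
    return ptr;
}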

+ 82 - 0
src/detection/vision_yolov8/include/imageBuffer.h

@@ -0,0 +1,82 @@
+#ifndef IMAGEBUFFER_H
+#define IMAGEBUFFER_H
+
+#include <opencv2/opencv.hpp>
+#include <mutex>
+#include <condition_variable>
+#include <queue>
+template<typename T>
+class ConsumerProducerQueue
+{
+
+public:
+    ConsumerProducerQueue(int mxsz,bool dropFrame) :
+            maxSize(mxsz),dropFrame(dropFrame)
+    { }
+
+    bool add(T request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        if(dropFrame && isFull())
+        {
+            //lock.unlock();
+            //return false;
+            cpq.pop();
+            cpq.push(request);
+            cond.notify_all();
+            return true;
+        }
+        else {
+            cond.wait(lock, [this]() { return !isFull(); });
+            cpq.push(request);
+            //lock.unlock();
+            cond.notify_all();
+            return true;
+        }
+    }
+    void consume(T &request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        cond.wait(lock, [this]()
+        { return !isEmpty(); });
+        request = cpq.front();
+        cpq.pop();
+        //lock.unlock();
+        cond.notify_all();
+
+    }
+
+    bool isFull() const
+    {
+        return cpq.size() >= maxSize;
+    }
+
+    bool isEmpty() const
+    {
+        return cpq.size() == 0;
+    }
+
+    int length() const
+    {
+        return cpq.size();
+    }
+
+    void clear()
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        while (!isEmpty())
+        {
+            cpq.pop();
+        }
+        lock.unlock();
+        cond.notify_all();
+    }
+
+private:
+    std::condition_variable cond;  // condition variable used to notify waiting threads for synchronization
+    std::mutex mutex;     // mutex protecting the queue against data races
+    std::queue<T> cpq;    // underlying FIFO container adapter
+    int maxSize;
+    bool dropFrame;
+};
+#endif // IMAGEBUFFER_H
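
Note (not part of the commit): a small self-contained sketch of the intended producer/consumer use of ConsumerProducerQueue, one thread pushing frames and another consuming them. With dropFrame set to false, add() blocks when the queue is full instead of discarding the oldest frame.

#include "imageBuffer.h"
#include <thread>

void queueSketch() {
    ConsumerProducerQueue<cv::Mat> buffer(3, false);  // capacity 3, blocking add()

    std::thread producer([&buffer]() {
        for (int i = 0; i < 5; ++i) {
            cv::Mat frame(360, 640, CV_8UC3, cv::Scalar(0, 0, 0));
            buffer.add(frame);                         // blocks while the queue is full
        }
    });

    std::thread consumer([&buffer]() {
        for (int i = 0; i < 5; ++i) {
            cv::Mat frame;
            buffer.consume(frame);                     // blocks until a frame is available
        }
    });

    producer.join();
    consumer.join();
}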

+ 504 - 0
src/detection/vision_yolov8/include/logging.h

@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TENSORRT_LOGGING_H
+#define TENSORRT_LOGGING_H
+
+#include "NvInferRuntimeCommon.h"
+#include <cassert>
+#include <ctime>
+#include <iomanip>
+#include <iostream>
+#include <ostream>
+#include <sstream>
+#include <string>
+#include "macros.h"
+
+using Severity = nvinfer1::ILogger::Severity;
+
+class LogStreamConsumerBuffer : public std::stringbuf
+{
+public:
+    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mOutput(stream)
+        , mPrefix(prefix)
+        , mShouldLog(shouldLog)
+    {
+    }
+
+    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
+        : mOutput(other.mOutput)
+    {
+    }
+
+    ~LogStreamConsumerBuffer()
+    {
+        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
+        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
+        // if the pointer to the beginning is not equal to the pointer to the current position,
+        // call putOutput() to log the output to the stream
+        if (pbase() != pptr())
+        {
+            putOutput();
+        }
+    }
+
+    // synchronizes the stream buffer and returns 0 on success
+    // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
+    // resetting the buffer and flushing the stream
+    virtual int sync()
+    {
+        putOutput();
+        return 0;
+    }
+
+    void putOutput()
+    {
+        if (mShouldLog)
+        {
+            // prepend timestamp
+            std::time_t timestamp = std::time(nullptr);
+            tm* tm_local = std::localtime(&timestamp);
+            std::cout << "[";
+            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
+            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
+            // std::stringbuf::str() gets the string contents of the buffer
+            // insert the buffer contents pre-appended by the appropriate prefix into the stream
+            mOutput << mPrefix << str();
+            // set the buffer to empty
+            str("");
+            // flush the stream
+            mOutput.flush();
+        }
+    }
+
+    void setShouldLog(bool shouldLog)
+    {
+        mShouldLog = shouldLog;
+    }
+
+private:
+    std::ostream& mOutput;
+    std::string mPrefix;
+    bool mShouldLog;
+};
+
+//!
+//! \class LogStreamConsumerBase
+//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
+//!
+class LogStreamConsumerBase
+{
+public:
+    LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mBuffer(stream, prefix, shouldLog)
+    {
+    }
+
+protected:
+    LogStreamConsumerBuffer mBuffer;
+};
+
+//!
+//! \class LogStreamConsumer
+//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
+//!  Order of base classes is LogStreamConsumerBase and then std::ostream.
+//!  This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
+//!  in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
+//!  This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
+//!  Please do not change the order of the parent classes.
+//!
+class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
+{
+public:
+    //! \brief Creates a LogStreamConsumer which logs messages with level severity.
+    //!  Reportable severity determines if the messages are severe enough to be logged.
+    LogStreamConsumer(Severity reportableSeverity, Severity severity)
+        : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(severity <= reportableSeverity)
+        , mSeverity(severity)
+    {
+    }
+
+    LogStreamConsumer(LogStreamConsumer&& other)
+        : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(other.mShouldLog)
+        , mSeverity(other.mSeverity)
+    {
+    }
+
+    void setReportableSeverity(Severity reportableSeverity)
+    {
+        mShouldLog = mSeverity <= reportableSeverity;
+        mBuffer.setShouldLog(mShouldLog);
+    }
+
+private:
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    static std::string severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    bool mShouldLog;
+    Severity mSeverity;
+};
+
+//! \class Logger
+//!
+//! \brief Class which manages logging of TensorRT tools and samples
+//!
+//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
+//! and supports logging two types of messages:
+//!
+//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
+//! - Test pass/fail messages
+//!
+//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
+//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
+//!
+//! In the future, this class could be extended to support dumping test results to a file in some standard format
+//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
+//!
+//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
+//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
+//! library and messages coming from the sample.
+//!
+//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
+//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
+//! object.
+
+class Logger : public nvinfer1::ILogger
+{
+public:
+    Logger(Severity severity = Severity::kWARNING)
+        : mReportableSeverity(severity)
+    {
+    }
+
+    //!
+    //! \enum TestResult
+    //! \brief Represents the state of a given test
+    //!
+    enum class TestResult
+    {
+        kRUNNING, //!< The test is running
+        kPASSED,  //!< The test passed
+        kFAILED,  //!< The test failed
+        kWAIVED   //!< The test was waived
+    };
+
+    //!
+    //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
+    //! \return The nvinfer1::ILogger associated with this Logger
+    //!
+    //! TODO Once all samples are updated to use this method to register the logger with TensorRT,
+    //! we can eliminate the inheritance of Logger from ILogger
+    //!
+    nvinfer1::ILogger& getTRTLogger()
+    {
+        return *this;
+    }
+
+    //!
+    //! \brief Implementation of the nvinfer1::ILogger::log() virtual method
+    //!
+    //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
+    //! inheritance from nvinfer1::ILogger
+    //!
+    void log(Severity severity, const char* msg) TRT_NOEXCEPT override 
+    {
+        LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
+    }
+
+    //!
+    //! \brief Method for controlling the verbosity of logging output
+    //!
+    //! \param severity The logger will only emit messages that have severity of this level or higher.
+    //!
+    void setReportableSeverity(Severity severity)
+    {
+        mReportableSeverity = severity;
+    }
+
+    //!
+    //! \brief Opaque handle that holds logging information for a particular test
+    //!
+    //! This object is an opaque handle to information used by the Logger to print test results.
+    //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
+    //! with Logger::reportTest{Start,End}().
+    //!
+    class TestAtom
+    {
+    public:
+        TestAtom(TestAtom&&) = default;
+
+    private:
+        friend class Logger;
+
+        TestAtom(bool started, const std::string& name, const std::string& cmdline)
+            : mStarted(started)
+            , mName(name)
+            , mCmdline(cmdline)
+        {
+        }
+
+        bool mStarted;
+        std::string mName;
+        std::string mCmdline;
+    };
+
+    //!
+    //! \brief Define a test for logging
+    //!
+    //! \param[in] name The name of the test.  This should be a string starting with
+    //!                  "TensorRT" and containing dot-separated strings containing
+    //!                  the characters [A-Za-z0-9_].
+    //!                  For example, "TensorRT.sample_googlenet"
+    //! \param[in] cmdline The command line used to reproduce the test
+    //
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    //!
+    static TestAtom defineTest(const std::string& name, const std::string& cmdline)
+    {
+        return TestAtom(false, name, cmdline);
+    }
+
+    //!
+    //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
+    //!        as input
+    //!
+    //! \param[in] name The name of the test
+    //! \param[in] argc The number of command-line arguments
+    //! \param[in] argv The array of command-line arguments (given as C strings)
+    //!
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
+    {
+        auto cmdline = genCmdlineString(argc, argv);
+        return defineTest(name, cmdline);
+    }
+
+    //!
+    //! \brief Report that a test has started.
+    //!
+    //! \pre reportTestStart() has not been called yet for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has started
+    //!
+    static void reportTestStart(TestAtom& testAtom)
+    {
+        reportTestResult(testAtom, TestResult::kRUNNING);
+        assert(!testAtom.mStarted);
+        testAtom.mStarted = true;
+    }
+
+    //!
+    //! \brief Report that a test has ended.
+    //!
+    //! \pre reportTestStart() has been called for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has ended
+    //! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
+    //!                   TestResult::kFAILED, TestResult::kWAIVED
+    //!
+    static void reportTestEnd(const TestAtom& testAtom, TestResult result)
+    {
+        assert(result != TestResult::kRUNNING);
+        assert(testAtom.mStarted);
+        reportTestResult(testAtom, result);
+    }
+
+    static int reportPass(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kPASSED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportFail(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kFAILED);
+        return EXIT_FAILURE;
+    }
+
+    static int reportWaive(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kWAIVED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportTest(const TestAtom& testAtom, bool pass)
+    {
+        return pass ? reportPass(testAtom) : reportFail(testAtom);
+    }
+
+    Severity getReportableSeverity() const
+    {
+        return mReportableSeverity;
+    }
+
+private:
+    //!
+    //! \brief returns an appropriate string for prefixing a log message with the given severity
+    //!
+    static const char* severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate string for prefixing a test result message with the given result
+    //!
+    static const char* testResultString(TestResult result)
+    {
+        switch (result)
+        {
+        case TestResult::kRUNNING: return "RUNNING";
+        case TestResult::kPASSED: return "PASSED";
+        case TestResult::kFAILED: return "FAILED";
+        case TestResult::kWAIVED: return "WAIVED";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
+    //!
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    //!
+    //! \brief method that implements logging test results
+    //!
+    static void reportTestResult(const TestAtom& testAtom, TestResult result)
+    {
+        severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
+                                         << testAtom.mCmdline << std::endl;
+    }
+
+    //!
+    //! \brief generate a command line string from the given (argc, argv) values
+    //!
+    static std::string genCmdlineString(int argc, char const* const* argv)
+    {
+        std::stringstream ss;
+        for (int i = 0; i < argc; i++)
+        {
+            if (i > 0)
+                ss << " ";
+            ss << argv[i];
+        }
+        return ss.str();
+    }
+
+    Severity mReportableSeverity;
+};
+
+namespace
+{
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
+//!
+//! Example usage:
+//!
+//!     LOG_VERBOSE(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
+//!
+//! Example usage:
+//!
+//!     LOG_INFO(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_INFO(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
+//!
+//! Example usage:
+//!
+//!     LOG_WARN(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_WARN(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
+//!
+//! Example usage:
+//!
+//!     LOG_ERROR(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_ERROR(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
+//         ("fatal" severity)
+//!
+//! Example usage:
+//!
+//!     LOG_FATAL(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_FATAL(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
+}
+
+} // anonymous namespace
+
+#endif // TENSORRT_LOGGING_H
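
Note (not part of the commit): a minimal sketch of handing this logger to TensorRT and emitting a sample-side message; the logger instance name is illustrative.

#include "logging.h"
#include "NvInfer.h"

static Logger exampleLogger(Severity::kINFO);

nvinfer1::IBuilder* createBuilderSketch() {
    LOG_INFO(exampleLogger) << "creating TensorRT builder" << std::endl;
    return nvinfer1::createInferBuilder(exampleLogger.getTRTLogger());
}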

+ 29 - 0
src/detection/vision_yolov8/include/macros.h

@@ -0,0 +1,29 @@
+#ifndef __MACROS_H
+#define __MACROS_H
+
+#include "NvInfer.h"
+
+#ifdef API_EXPORTS
+#if defined(_MSC_VER)
+#define API __declspec(dllexport)
+#else
+#define API __attribute__((visibility("default")))
+#endif
+#else
+
+#if defined(_MSC_VER)
+#define API __declspec(dllimport)
+#else
+#define API
+#endif
+#endif  // API_EXPORTS
+
+#if NV_TENSORRT_MAJOR >= 8
+#define TRT_NOEXCEPT noexcept
+#define TRT_CONST_ENQUEUE const
+#else
+#define TRT_NOEXCEPT
+#define TRT_CONST_ENQUEUE
+#endif
+
+#endif  // __MACROS_H

+ 10 - 0
src/detection/vision_yolov8/include/model.h

@@ -0,0 +1,10 @@
+#pragma once
+#include "NvInfer.h"
+#include <string>
+#include <assert.h>
+
+nvinfer1::IHostMemory* buildEngineYolov8Det(nvinfer1::IBuilder* builder,
+nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt, const std::string& wts_path, float& gd, float& gw, int& max_channels);
+
+nvinfer1::IHostMemory* buildEngineYolov8Seg(nvinfer1::IBuilder* builder,
+nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt, const std::string& wts_path, float& gd, float& gw, int& max_channels);
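
Note (not part of the commit): a sketch of building and serializing a detection engine with these helpers. It mirrors the serialize_engine() code kept commented out in main.cpp; the gd/gw/max_channels values are the ones parse_args() assigns for sub_type "n" (yolov8n).

#include "model.h"
#include "logging.h"
#include <fstream>

extern Logger gLogger;  // defined in main.cpp

void buildDetEngineSketch(const std::string& wts_path, const std::string& engine_path) {
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(gLogger);
    nvinfer1::IBuilderConfig* config = builder->createBuilderConfig();
    float gd = 0.33f, gw = 0.25f;   // yolov8n multipliers, per parse_args()
    int max_channels = 1024;
    nvinfer1::IHostMemory* serialized =
        buildEngineYolov8Det(builder, config, nvinfer1::DataType::kFLOAT, wts_path, gd, gw, max_channels);
    std::ofstream out(engine_path, std::ios::binary);
    out.write(reinterpret_cast<const char*>(serialized->data()), serialized->size());
    delete serialized;
    delete config;
    delete builder;
}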

+ 26 - 0
src/detection/vision_yolov8/include/postprocess.h

@@ -0,0 +1,26 @@
+#pragma once
+
+#include "types.h"
+#include "NvInfer.h"
+#include <opencv2/opencv.hpp>
+#include "visionyolov8.pb.h"
+
+cv::Rect get_rect(cv::Mat& img, float bbox[4]);
+
+void nms(std::vector<Detection>& res, float *output, float conf_thresh, float nms_thresh = 0.5);
+
+void batch_nms(std::vector<std::vector<Detection>>& batch_res, float *output, int batch_size, int output_size, float conf_thresh, float nms_thresh = 0.5);
+
+iv::vision::Visionyolov8 draw_bbox(std::vector<cv::Mat> &img_batch, std::vector<std::vector<Detection>> &res_batch, cv::Mat &pers_mat_inv, cv::Point2i transed_O, float METER_PER_PIXEL_X, float METER_PER_PIXEL_Y, float X, float Y, bool show_distance);
+
+void batch_process(std::vector<std::vector<Detection>> &res_batch, const float* decode_ptr_host, int batch_size, int bbox_element, const std::vector<cv::Mat>& img_batch);
+
+void process_decode_ptr_host(std::vector<Detection> &res, const float* decode_ptr_host, int bbox_element, cv::Mat& img, int count);
+
+void cuda_decode(float* predict, int num_bboxes, float confidence_threshold,float* parray,int max_objects, cudaStream_t stream);
+
+void cuda_nms(float* parray, float nms_threshold, int max_objects, cudaStream_t stream);
+
+void draw_mask_bbox(cv::Mat& img, std::vector<Detection>& dets, std::vector<cv::Mat>& masks, std::unordered_map<int, std::string>& labels_map);
+
+float distanceXY(float a, float b);
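
Note (not part of the commit): a sketch of the CPU ("c") post-processing path for a single image: run nms() on the raw engine output, then map each detection back to pixel coordinates with get_rect(). Thresholds and class names come from config.h and types.h, which postprocess.h already pulls in.

#include "postprocess.h"

void cpuPostprocessSketch(cv::Mat& img, float* output_buffer_host) {
    std::vector<Detection> res;
    nms(res, output_buffer_host, kConfThresh, kNmsThresh);
    for (auto& det : res) {
        cv::Rect r = get_rect(img, det.bbox);  // map network-input coords back onto the image
        cv::rectangle(img, r, cv::Scalar(0, 255, 0), 2);
        cv::putText(img, objectName[(int)det.class_id], r.tl(),
                    cv::FONT_HERSHEY_SIMPLEX, 0.6, cv::Scalar(0, 255, 0), 1);
    }
}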

+ 16 - 0
src/detection/vision_yolov8/include/preprocess.h

@@ -0,0 +1,16 @@
+#pragma once
+
+#include <opencv2/opencv.hpp>
+#include "NvInfer.h"
+#include "types.h"
+#include <map>
+
+
+void cuda_preprocess_init(int max_image_size);
+
+void cuda_preprocess_destroy();
+
+void cuda_preprocess(uint8_t *src, int src_width, int src_height, float *dst, int dst_width, int dst_height, cudaStream_t stream);
+
+void cuda_batch_preprocess(std::vector<cv::Mat> &img_batch, float *dst, int dst_width, int dst_height, cudaStream_t stream);
+

+ 29 - 0
src/detection/vision_yolov8/include/types.h

@@ -0,0 +1,29 @@
+#pragma once
+#include "config.h"
+#include "string"
+
+struct alignas(float) Detection {
+  //center_x center_y w h
+  float bbox[4];
+  float conf;  // bbox_conf * cls_conf
+  float class_id;
+  float mask[32];
+};
+
+struct AffineMatrix {
+    float value[6];
+};
+
+const int bbox_element = sizeof(AffineMatrix) / sizeof(float)+1;      // left, top, right, bottom, confidence, class, keepflag
+
+const std::string objectName[80] ={"Person", "Bicycle", "Car", "Motorcycle", "Airplane", "Bus", "Train", "Truck",
+                                  "Boat", "Traffic light", "Fire hydrant", "Stop sign", "Parking meter", "Bench",
+                                  "Bird", "Cat", "Dog", "Horse", "Sheep", "Cow", "Elephant", "Bear", "Zebra",
+                                  "Giraffe", "Backpack", "Umbrella", "Handbag", "Tie", "Suitcase", "Frisbee", "Skis",
+                                  "Snowboard", "Sports ball", "Kite", "Baseball bat", "Baseball glove", "Skateboard",
+                                  "Surfboard", "Tennis racket", "Bottle", "Wine glass", "Cup", "Fork", "Knife", "Spoon",
+                                  "Bowl", "Banana", "Apple", "Sandwich", "Orange", "Broccoli", "Carrot", "Hot dog", "Pizza",
+                                  "Donut", "Cake", "Chair", "Couch", "Potted plant", "Bed", "Dining table", "Toilet", "TV",
+                                  "Laptop", "Mouse", "Remote", "Keyboard", "Cell phone", "Microwave", "Oven", "Toaster",
+                                  "Sink", "Refrigerator", "Book", "Clock", "Vase", "Scissors", "Teddy bear", "Hair drier",
+                                  "Toothbrush"};

+ 86 - 0
src/detection/vision_yolov8/include/utils.h

@@ -0,0 +1,86 @@
+#pragma once
+#include <opencv2/opencv.hpp>
+#include <dirent.h>
+#include <fstream>
+#include <sstream>
+#include <cstring>
+#include <unordered_map>
+
+static inline cv::Mat preprocess_img(cv::Mat& img, int input_w, int input_h) {
+    int w, h, x, y;
+    float r_w = input_w / (img.cols*1.0);
+    float r_h = input_h / (img.rows*1.0);
+    if (r_h > r_w) {
+        w = input_w;
+        h = r_w * img.rows;
+        x = 0;
+        y = (input_h - h) / 2;
+    } else {
+        w = r_h * img.cols;
+        h = input_h;
+        x = (input_w - w) / 2;
+        y = 0;
+    }
+    cv::Mat re(h, w, CV_8UC3);
+    cv::resize(img, re, re.size(), 0, 0, cv::INTER_LINEAR);
+    cv::Mat out(input_h, input_w, CV_8UC3, cv::Scalar(128, 128, 128));
+    re.copyTo(out(cv::Rect(x, y, re.cols, re.rows)));
+    return out;
+}
+
+static inline int read_files_in_dir(const char *p_dir_name, std::vector<std::string> &file_names) {
+    DIR *p_dir = opendir(p_dir_name);
+    if (p_dir == nullptr) {
+        return -1;
+    }
+
+    struct dirent* p_file = nullptr;
+    while ((p_file = readdir(p_dir)) != nullptr) {
+        if (strcmp(p_file->d_name, ".") != 0 &&
+            strcmp(p_file->d_name, "..") != 0) {
+            //std::string cur_file_name(p_dir_name);
+            //cur_file_name += "/";
+            //cur_file_name += p_file->d_name;
+            std::string cur_file_name(p_file->d_name);
+            file_names.push_back(cur_file_name);
+        }
+    }
+
+    closedir(p_dir);
+    return 0;
+}
+
+// Function to trim leading and trailing whitespace from a string
+static inline std::string trim_leading_whitespace(const std::string& str) {
+    size_t first = str.find_first_not_of(' ');
+    if (std::string::npos == first) {
+        return str;
+    }
+    size_t last = str.find_last_not_of(' ');
+    return str.substr(first, (last - first + 1));
+}
+
+// Src: https://stackoverflow.com/questions/16605967
+static inline std::string to_string_with_precision(const float a_value, const int n = 2) {
+    std::ostringstream out;
+    out.precision(n);
+    out << std::fixed << a_value;
+    return out.str();
+}
+
+static inline int read_labels(const std::string labels_filename, std::unordered_map<int, std::string>& labels_map) {
+    std::ifstream file(labels_filename);
+    // Read each line of the file
+    std::string line;
+    int index = 0;
+    while (std::getline(file, line)) {
+        // Strip the line of any leading or trailing whitespace
+        line = trim_leading_whitespace(line);
+
+        // Add the stripped line to the labels_map, using the loop index as the key
+        labels_map[index] = line;
+        index++;
+    }
+    // Close the file
+    file.close();
+
+    return 0;
+}
+
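
Note (not part of the commit): a tiny sketch of the letterbox helper, resizing an arbitrary image to the 640x640 network input while preserving aspect ratio with grey padding (640 matches kInputW/kInputH in config.h).

#include "utils.h"

cv::Mat letterboxSketch(const std::string& image_path) {
    cv::Mat img = cv::imread(image_path);
    if (img.empty()) return img;   // nothing to do if the read failed
    return preprocess_img(img, 640, 640);
}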

+ 525 - 0
src/detection/vision_yolov8/main.cpp

@@ -0,0 +1,525 @@
+
+#include <iostream>
+#include <fstream>
+#include <opencv2/opencv.hpp>
+#include "model.h"
+#include "utils.h"
+#include "preprocess.h"
+#include "postprocess.h"
+#include "cuda_utils.h"
+#include "logging.h"
+#include "imageBuffer.h"
+#include <thread>
+
+#include "modulecomm.h"
+#include "xmlparam.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include <chrono>
+#include "rawpic.pb.h"
+#include "visionyolov8.pb.h"
+
+
+Logger gLogger;
+using namespace nvinfer1;
+using namespace std;
+const int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
+
+string config_file = "/home/nvidia/app/app/yaml/vision_yolov8_config.yaml";
+string calibration_yamlpath = "/home/nvidia/app/app/yaml/camera_middle_640_360.yaml";
+string engine_path = "/home/nvidia/app/app/model/yolov8n.engine";
+string PERS_FILE = "/home/nvidia/app/app/yaml/pers_yolov8.yaml";
+
+bool calibrationstate = false;
+bool test_video = false;
+bool cropstate = true;
+bool imshow_flag = true;
+bool show_distance = false;
+
+cv::Range crop_height = cv::Range(0,600);
+cv::Range crop_width = cv::Range(0,1280);
+cv::Mat camera_matrix,dist_coe,map1,map2;
+
+float METER_PER_PIXEL_X,METER_PER_PIXEL_Y;
+float BOARD_SIDE_LENGTH_X = 7.5;
+float BOARD_SIDE_LENGTH_Y = 50.0;
+float X = 0.0;
+float Y = 4.0;
+
+// string video_path = "v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink";
+string video_path = "/home/nvidia/Videos/video.mp4";
+
+// camera input
+void * gpcamera;
+string cameraname="image00";
+void * gpdetect;
+const std::string resultname="vision_yolov8";
+//std::string obj_det[] = {"Person", "Bicycle", "Car", "Motorcycle", "Bus", "Truck"};
+
+// buffer
+ConsumerProducerQueue<cv::Mat> * imageBuffer =  new ConsumerProducerQueue<cv::Mat>(3,true);
+ConsumerProducerQueue<iv::vision::rawpic> * picBuffer =  new ConsumerProducerQueue<iv::vision::rawpic>(3,true);
+
+cv::Point2i transed_O;
+// read video frames (test mode)
+void ReadFunc(int n)
+{
+    cv::VideoCapture cap(video_path);
+    if(!cap.isOpened())
+    {
+        cout<<"camera failed to open"<<endl;
+    }
+    while(1)
+    {
+        cv::Mat frame;
+        // keep this check when reading from a video file; remove it when reading from a live camera
+        if(imageBuffer->isFull())
+        {
+            continue;
+        }
+        if(cap.read(frame))
+        {
+            if(calibrationstate)
+                cv::remap(frame,frame,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+            cv::Mat crop_img;
+            cv::resize(frame, crop_img, cv::Size(1280, 720), 0, 0, cv::INTER_LINEAR);
+            if(cropstate)
+                crop_img = crop_img(crop_height, crop_width);
+            imageBuffer->add(crop_img);
+        }
+        else
+        {
+            std::this_thread::sleep_for(std::chrono::milliseconds(1));
+        }
+    }
+}
+cv::Mat get_pers_mat(std::string pers_file)
+{
+    cv::Point2f src[4];
+    cv::Point2f dst[4];
+    cv::FileStorage pf(pers_file, cv::FileStorage::READ);
+    pf["board_left_top"]>>src[0];
+    pf["board_left_bottom"]>>src[1];
+    pf["board_right_top"]>>src[2];
+    pf["board_right_bottom"]>>src[3];
+
+    dst[1].x = 130*2;
+    dst[1].y = 330*2;
+
+    dst[3].x = 510*2;
+    dst[3].y = 330*2;
+
+    int side_length_x = dst[3].x - dst[1].x; //760
+
+    METER_PER_PIXEL_X = BOARD_SIDE_LENGTH_X / float(side_length_x); // e.g. 7.5 m / 760 px with the default board size
+
+    dst[0].x = dst[1].x;
+    dst[0].y = 30*2;
+
+    dst[2].x = dst[3].x;
+    dst[2].y = 30*2;
+
+    int side_length_y = dst[1].y - dst[0].y; //600
+
+    METER_PER_PIXEL_Y = BOARD_SIDE_LENGTH_Y / float(side_length_y); // e.g. 50 m / 600 px with the default board size
+
+    transed_O.x = (dst[1].x + dst[3].x)/2; // 640
+    transed_O.y = (dst[1].y + dst[3].y)/2; // 660
+
+    cv::Mat pers_mat = getPerspectiveTransform(dst, src);
+    cv::Mat pers_mat_inv = pers_mat.inv();
+
+    return pers_mat_inv;
+}
+
+//void serialize_engine(std::string &wts_name, std::string &engine_name, std::string &sub_type, float &gd, float &gw, int &max_channels) {
+//    IBuilder *builder = createInferBuilder(gLogger);
+//    IBuilderConfig *config = builder->createBuilderConfig();
+//    IHostMemory *serialized_engine = nullptr;
+
+//    serialized_engine = buildEngineYolov8Det(builder, config, DataType::kFLOAT, wts_name, gd, gw, max_channels);
+
+//    assert(serialized_engine);
+//    std::ofstream p(engine_name, std::ios::binary);
+//    if (!p) {
+//        std::cout << "could not open plan output file" << std::endl;
+//        assert(false);
+//    }
+//    p.write(reinterpret_cast<const char *>(serialized_engine->data()), serialized_engine->size());
+
+//    delete builder;
+//    delete config;
+//    delete serialized_engine;
+//}
+
+
+void deserialize_engine(std::string &engine_name, IRuntime **runtime, ICudaEngine **engine, IExecutionContext **context) {
+    std::ifstream file(engine_name, std::ios::binary);
+    if (!file.good()) {
+        std::cerr << "read " << engine_name << " error!" << std::endl;
+        assert(false);
+    }
+    size_t size = 0;
+    file.seekg(0, file.end);
+    size = file.tellg();
+    file.seekg(0, file.beg);
+    char *serialized_engine = new char[size];
+    assert(serialized_engine);
+    file.read(serialized_engine, size);
+    file.close();
+    *runtime = createInferRuntime(gLogger);
+    assert(*runtime);
+    *engine = (*runtime)->deserializeCudaEngine(serialized_engine, size);
+    assert(*engine);
+    *context = (*engine)->createExecutionContext();
+    assert(*context);
+    delete[] serialized_engine;
+}
+
+void prepare_buffer(ICudaEngine *engine, float **input_buffer_device, float **output_buffer_device,
+                    float **output_buffer_host, float **decode_ptr_host, float **decode_ptr_device, std::string cuda_post_process) {
+    assert(engine->getNbBindings() == 2);
+    // In order to bind the buffers, we need to know the names of the input and output tensors.
+    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+    const int inputIndex = engine->getBindingIndex(kInputTensorName);
+    const int outputIndex = engine->getBindingIndex(kOutputTensorName);
+    assert(inputIndex == 0);
+    assert(outputIndex == 1);
+    // Create GPU buffers on device
+    CUDA_CHECK(cudaMalloc((void **) input_buffer_device, kBatchSize * 3 * kInputH * kInputW * sizeof(float)));
+    CUDA_CHECK(cudaMalloc((void **) output_buffer_device, kBatchSize * kOutputSize * sizeof(float)));
+    if (cuda_post_process == "c") {
+        *output_buffer_host = new float[kBatchSize * kOutputSize];
+    } else if (cuda_post_process == "g") {
+        if (kBatchSize > 1) {
+            std::cerr << "Do not yet support GPU post processing for multiple batches" << std::endl;
+            exit(0);
+        }
+        // Allocate memory for decode_ptr_host and copy to device
+        *decode_ptr_host = new float[1 + kMaxNumOutputBbox * bbox_element];
+        CUDA_CHECK(cudaMalloc((void **)decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element)));
+    }
+}
+
+void infer(IExecutionContext &context, cudaStream_t &stream, void **buffers, float *output, int batchsize, float* decode_ptr_host, float* decode_ptr_device, int model_bboxes, std::string cuda_post_process) {
+    // infer on the batch asynchronously, and DMA output back to host
+    auto start = std::chrono::system_clock::now();
+    context.enqueue(batchsize, buffers, stream, nullptr);
+    if (cuda_post_process == "c") {
+        CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchsize * kOutputSize * sizeof(float), cudaMemcpyDeviceToHost,stream));
+        auto end = std::chrono::system_clock::now();
+        std::cout << "inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    } else if (cuda_post_process == "g") {
+        CUDA_CHECK(cudaMemsetAsync(decode_ptr_device, 0, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), stream));
+        cuda_decode((float *)buffers[1], model_bboxes, kConfThresh, decode_ptr_device, kMaxNumOutputBbox, stream);
+        cuda_nms(decode_ptr_device, kNmsThresh, kMaxNumOutputBbox, stream);//cuda nms
+        CUDA_CHECK(cudaMemcpyAsync(decode_ptr_host, decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), cudaMemcpyDeviceToHost, stream));
+        auto end = std::chrono::system_clock::now();
+        std::cout << "inference and gpu postprocess time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    }
+
+    CUDA_CHECK(cudaStreamSynchronize(stream));
+}
+
+
+bool parse_args(int argc, char **argv, std::string &wts, std::string &engine, std::string &img_dir, std::string &sub_type, 
+                std::string &cuda_post_process, float &gd, float &gw, int &max_channels) {
+    if (argc < 4) return false;
+    if (std::string(argv[1]) == "-s" && argc == 5) {
+        wts = std::string(argv[2]);
+        engine = std::string(argv[3]);
+        sub_type = std::string(argv[4]);
+        if (sub_type == "n") {
+          gd = 0.33;
+          gw = 0.25;
+          max_channels = 1024;
+        } else if (sub_type == "s"){
+          gd = 0.33;
+          gw = 0.50;
+          max_channels = 1024;
+        } else if (sub_type == "m") {
+          gd = 0.67;
+          gw = 0.75;
+          max_channels = 576; 
+        } else if (sub_type == "l") {
+          gd = 1.0;
+          gw = 1.0;
+          max_channels = 512;
+        } else if (sub_type == "x") {
+          gd = 1.0;
+          gw = 1.25;
+          max_channels = 640;
+        } else {
+          return false;
+        }
+    } else if (std::string(argv[1]) == "-d" && argc == 5) {
+        engine = std::string(argv[2]);
+        img_dir = std::string(argv[3]);
+        cuda_post_process = std::string(argv[4]);
+    } else {
+        return false;
+    }
+    return true;
+}
+
+void Listenpic(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    if(nSize<1000)return;
+    iv::vision::rawpic pic;
+    if(false == pic.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"picview Listenpic fail."<<std::endl;
+        return;
+    }
+
+    //--
+//    cv::Mat mat(pic.height(),pic.width(),pic.mattype());
+//    if(pic.type() == 1)
+//        memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
+//    else
+//    {
+//        //     mat.release();
+//        std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
+//        mat = cv::imdecode(buff,cv::IMREAD_COLOR);
+//    }
+//    if(calibrationstate)
+//        cv::remap(mat,mat,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+//    if(cropstate)
+//        mat = mat(crop_height,crop_width);
+//    imageBuffer->add(mat);
+//    mat.release();
+
+    picBuffer->add(pic);
+    pic.release_picdata();
+
+
+}
+
+int main(int argc, char** argv) {
+    cudaSetDevice(kGpuId);
+//    std::string engine_name = "/home/nvidia/app/app/model/yolov8n.engine";
+    std::string img_dir = "/home/nvidia/modularization/src/detection/vision_yolov8/images";
+    std::string cuda_post_process="g"; //gpu
+    int model_bboxes;
+
+    cv::FileStorage config(config_file, cv::FileStorage::READ);
+    bool config_isOpened = config.isOpened();
+    if(config_isOpened)
+    {
+
+        engine_path = string(config["model"]["engine_path"]);
+        calibrationstate = (string(config["camera"]["calibrationstate"]) == "true");
+        calibration_yamlpath = string(config["camera"]["calibration_yamlpath"]);
+        cropstate = (string(config["camera"]["cropstate"]) == "true");
+        BOARD_SIDE_LENGTH_X = float(config["warpPerspective"]["BOARD_SIDE_LENGTH_X"]);
+        BOARD_SIDE_LENGTH_Y = float(config["warpPerspective"]["BOARD_SIDE_LENGTH_Y"]);
+        X = float(config["warpPerspective"]["x"]);
+        Y = float(config["warpPerspective"]["y"]);
+
+        crop_height = cv::Range(int(config["camera"]["crop_height"]["start"]),
+                                int(config["camera"]["crop_height"]["end"]));
+
+        crop_width = cv::Range(int(config["camera"]["crop_width"]["start"]),
+                               int(config["camera"]["crop_width"]["end"])); 
+        imshow_flag = (string(config["show"]["imshow_flag"]) == "true");
+        show_distance = (string(config["show"]["show_distance"]) == "true");
+//        std::cout<<"flag:"<<(string(config["show"]["imshow_flag"]))<<std::endl;
+//        std::cout<<"flag:"<<imshow_flag<<std::endl;
+    }
+    config.release();
+
+    if (calibrationstate)
+    {
+        cv::FileStorage calib_file(calibration_yamlpath, cv::FileStorage::READ);
+        calib_file["cameraMatrix"]>>camera_matrix;
+        calib_file["distCoeffs"]>>dist_coe;
+        cv::Mat R = cv::Mat::eye(3, 3, CV_64F);
+        cv::Size imgsize=cv::Size(1280,720);
+        cv::initUndistortRectifyMap(camera_matrix, dist_coe, R, camera_matrix,imgsize,CV_16SC2,map1,map2);
+    }
+
+    iv::vision::Visionyolov8 vision_yolov8;
+    gpdetect = iv::modulecomm::RegisterSend(&resultname[0],10000,1);
+
+    // Deserialize the engine from file
+    IRuntime *runtime = nullptr;
+    ICudaEngine *engine = nullptr;
+    IExecutionContext *context = nullptr;
+    deserialize_engine(engine_path, &runtime, &engine, &context);
+    cudaStream_t stream;
+    CUDA_CHECK(cudaStreamCreate(&stream));
+    cuda_preprocess_init(kMaxInputImageSize);
+    auto out_dims = engine->getBindingDimensions(1);
+    model_bboxes = out_dims.d[0];
+    // Prepare cpu and gpu buffers
+    float *device_buffers[2];
+    float *output_buffer_host = nullptr;
+    float *decode_ptr_host=nullptr;
+    float *decode_ptr_device=nullptr;
+
+    // Read images from directory
+//    std::vector<std::string> file_names;
+//    if (read_files_in_dir(img_dir.c_str(), file_names) < 0) {
+//        std::cerr << "read_files_in_dir failed." << std::endl;
+//        return -1;
+//    }
+
+    prepare_buffer(engine, &device_buffers[0], &device_buffers[1], &output_buffer_host, &decode_ptr_host, &decode_ptr_device, cuda_post_process);
+    cv::Mat pers_mat_inv = get_pers_mat(PERS_FILE);  // obtain the perspective transform (image -> bird's-eye view)
+    if(test_video)
+        std::thread * readthread = new std::thread(ReadFunc,1);
+    else
+        gpcamera= iv::modulecomm::RegisterRecv(&cameraname[0],Listenpic);
+    double waittime = (double)cv::getTickCount();
+    while (1)
+    {
+        if(picBuffer->isEmpty())
+        {
+            double waittotal = (double)cv::getTickCount() - waittime;
+            double totaltime = waittotal/cv::getTickFrequency();
+//-------- exit the program if no frame arrives for too long ----------
+//            if(totaltime>1.5)
+//            {
+//                cout<<"Cant't get frame and quit"<<endl;
+//                cv::destroyAllWindows();
+//                std::cout<<"------end program------"<<std::endl;
+//                break;
+//            }
+            cout<<"Wait for frame "<<totaltime<<"s"<<endl;
+            std::this_thread::sleep_for(std::chrono::milliseconds(10));
+            continue;
+        }
+
+//        cv::Mat raw_image;
+//        imageBuffer->consume(raw_image);
+//        std::vector<cv::Mat> img_batch;
+//        img_batch.push_back(raw_image);
+
+        // picBuffer
+        iv::vision::rawpic pic;
+        picBuffer->consume(pic);
+        cv::Mat mat(pic.height(),pic.width(),pic.mattype());
+        if(pic.type() == 1)
+            memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
+        else
+        {
+            std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
+            mat = cv::imdecode(buff,cv::IMREAD_COLOR);
+        }
+        if(calibrationstate)
+            cv::remap(mat,mat,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+        std::cout<<"mat.size="<<mat.size()<<std::endl;
+        if(cropstate)
+            mat = mat(crop_height,crop_width);
+        std::cout<<"crop_mat.size="<<mat.size()<<std::endl;
+        int time_pic = pic.time();
+        std::vector<cv::Mat> img_batch;
+        img_batch.push_back(mat);
+        mat.release();
+
+        // Preprocess
+        cuda_batch_preprocess(img_batch, device_buffers[0], kInputW, kInputH, stream);
+        // Run inference
+        infer(*context, stream, (void **)device_buffers, output_buffer_host, kBatchSize, decode_ptr_host, decode_ptr_device, model_bboxes, cuda_post_process);
+        std::vector<std::vector<Detection>> res_batch;
+        if (cuda_post_process == "c") {
+            // NMS
+            batch_nms(res_batch, output_buffer_host, img_batch.size(), kOutputSize, kConfThresh, kNmsThresh);
+        } else if (cuda_post_process == "g") {
+            //Process gpu decode and nms results
+            batch_process(res_batch, decode_ptr_host, img_batch.size(), bbox_element, img_batch);
+        }
+        // Draw bounding boxes
+        vision_yolov8 = draw_bbox(img_batch, res_batch, pers_mat_inv, transed_O, METER_PER_PIXEL_X, METER_PER_PIXEL_Y, X, Y, show_distance);
+        if(imshow_flag)
+        {
+            cv::namedWindow("vision_yolov8",cv::WINDOW_NORMAL);
+            cv::resizeWindow("vision_yolov8", 640, 480);
+//            cv::moveWindow("vision_yolov8", 500, 200);
+            cv::imshow("vision_yolov8",img_batch[0]);
+        }
+
+
+        // send Msg
+//        vision_yolov8.set_time(time_pic);
+        int size = vision_yolov8.ByteSize();
+//        std::cout<<"size:"<<size<<std::endl;
+        char * strdata = new char[vision_yolov8.ByteSize()];
+        if(vision_yolov8.SerializeToArray(strdata, size))
+        {
+            iv::modulecomm::ModuleSendMsg(gpdetect, strdata, size);
+            std::cout<<"vision_yolov8 serialize success."<<std::endl;
+        }
+        else
+        {
+            std::cout<<"vision_yolov8 serialize error."<<std::endl;
+        }
+        vision_yolov8.Clear();
+        delete[] strdata;
+
+        if (cv::waitKey(10) == 'q')
+        {
+            break;
+        }
+        waittime = (double)cv::getTickCount();
+    }
+    cv::destroyAllWindows();
+
+
+    // batch predict
+//    for (size_t i = 0; i < file_names.size(); i += kBatchSize) {
+//        // Get a batch of images
+//        std::vector<cv::Mat> img_batch;
+//        std::vector<std::string> img_name_batch;
+//        for (size_t j = i; j < i + kBatchSize && j < file_names.size(); j++) {
+//            cv::Mat img = cv::imread(img_dir + "/" + file_names[j]);
+//            img_batch.push_back(img);
+//            img_name_batch.push_back(file_names[j]);
+//        }
+//        // Preprocess
+//        cuda_batch_preprocess(img_batch, device_buffers[0], kInputW, kInputH, stream);
+//        // Run inference
+//        infer(*context, stream, (void **)device_buffers, output_buffer_host, kBatchSize, decode_ptr_host, decode_ptr_device, model_bboxes, cuda_post_process);
+//        std::vector<std::vector<Detection>> res_batch;
+//        if (cuda_post_process == "c") {
+//            // NMS
+//            batch_nms(res_batch, output_buffer_host, img_batch.size(), kOutputSize, kConfThresh, kNmsThresh);
+//        } else if (cuda_post_process == "g") {
+//            //Process gpu decode and nms results
+//            batch_process(res_batch, decode_ptr_host, img_batch.size(), bbox_element, img_batch);
+//        }
+//        // Draw bounding boxes
+//        draw_bbox(img_batch, res_batch);
+//        // Save images
+//        for (size_t j = 0; j < img_batch.size(); j++) {
+//            cv::imwrite("_" + img_name_batch[j], img_batch[j]);
+//        }
+//    }
+
+    // Release stream and buffers
+    cudaStreamDestroy(stream);
+    CUDA_CHECK(cudaFree(device_buffers[0]));
+    CUDA_CHECK(cudaFree(device_buffers[1]));
+    CUDA_CHECK(cudaFree(decode_ptr_device));
+    delete[] decode_ptr_host;
+    delete[] output_buffer_host;
+    cuda_preprocess_destroy();
+    // Destroy the engine
+    delete context;
+    delete engine;
+    delete runtime;
+
+    // Print histogram of the output distribution
+    //std::cout << "\nOutput:\n\n";
+    //for (unsigned int i = 0; i < kOutputSize; i++)
+    //{
+    //    std::cout << prob[i] << ", ";
+    //    if (i % 10 == 0) std::cout << std::endl;
+    //}
+    //std::cout << std::endl;
+
+    return 0;
+}
+

+ 10 - 0
src/detection/vision_yolov8/pers_yolov8.yaml

@@ -0,0 +1,10 @@
+%YAML:1.0
+
+
+board_left_top: [639,371]
+
+board_left_bottom: [93,564]
+
+board_right_top: [762,372]
+
+board_right_bottom: [1273,537]

+ 242 - 0
src/detection/vision_yolov8/plugin/yololayer.cu

@@ -0,0 +1,242 @@
+#include "yololayer.h"
+#include "types.h"
+#include <assert.h>
+#include <math.h>
+#include "cuda_utils.h"
+#include <vector>
+#include <iostream>
+
+namespace Tn {
+    template<typename T>
+    void write(char*& buffer, const T& val) {
+        *reinterpret_cast<T*>(buffer) = val;
+        buffer += sizeof(T);
+    }
+
+    template<typename T>
+    void read(const char*& buffer, T& val) {
+        val = *reinterpret_cast<const T*>(buffer);
+        buffer += sizeof(T);
+    }
+}  // namespace Tn
+
+
+namespace nvinfer1 {
+YoloLayerPlugin::YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, bool is_segmentation) {
+    mClassCount = classCount;
+    mYoloV8NetWidth = netWidth;
+    mYoloV8netHeight = netHeight;
+    mMaxOutObject = maxOut;
+    is_segmentation_ = is_segmentation;
+}
+
+YoloLayerPlugin::~YoloLayerPlugin() {}
+
+YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length) {
+    using namespace Tn;
+    const char* d = reinterpret_cast<const char*>(data), * a = d;
+    read(d, mClassCount);
+    read(d, mThreadCount);
+    read(d, mYoloV8NetWidth);
+    read(d, mYoloV8netHeight);
+    read(d, mMaxOutObject);
+    read(d, is_segmentation_);
+
+    assert(d == a + length);
+}
+
+void YoloLayerPlugin::serialize(void* buffer) const TRT_NOEXCEPT {
+
+    using namespace Tn;
+    char* d = static_cast<char*>(buffer), * a = d;
+    write(d, mClassCount);
+    write(d, mThreadCount);
+    write(d, mYoloV8NetWidth);
+    write(d, mYoloV8netHeight);
+    write(d, mMaxOutObject);
+    write(d, is_segmentation_);
+
+    assert(d == a + getSerializationSize());
+}
+
+size_t YoloLayerPlugin::getSerializationSize() const TRT_NOEXCEPT {
+    return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mYoloV8netHeight) + sizeof(mYoloV8NetWidth) + sizeof(mMaxOutObject) + sizeof(is_segmentation_);
+}
+
+int YoloLayerPlugin::initialize() TRT_NOEXCEPT {
+    return 0;
+}
+
+nvinfer1::Dims YoloLayerPlugin::getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) TRT_NOEXCEPT {
+    int total_size = mMaxOutObject * sizeof(Detection) / sizeof(float);
+    return nvinfer1::Dims3(total_size + 1, 1, 1);
+}
+
+void YoloLayerPlugin::setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT {
+    mPluginNamespace = pluginNamespace;
+}
+
+const char* YoloLayerPlugin::getPluginNamespace() const TRT_NOEXCEPT {
+    return mPluginNamespace;
+}
+
+nvinfer1::DataType YoloLayerPlugin::getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const TRT_NOEXCEPT {
+    return nvinfer1::DataType::kFLOAT;
+}
+
+bool YoloLayerPlugin::isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT {
+
+    return false;
+}
+
+bool YoloLayerPlugin::canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT {
+
+    return false;
+}
+
+void YoloLayerPlugin::configurePlugin(nvinfer1::PluginTensorDesc const* in, int nbInput, nvinfer1::PluginTensorDesc const* out, int nbOutput) TRT_NOEXCEPT {};
+
+void YoloLayerPlugin::attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT {};
+
+void YoloLayerPlugin::detachFromContext() TRT_NOEXCEPT {}
+
+const char* YoloLayerPlugin::getPluginType() const TRT_NOEXCEPT {
+
+    return "YoloLayer_TRT";
+}
+
+const char* YoloLayerPlugin::getPluginVersion() const TRT_NOEXCEPT {
+    return "1";
+}
+
+void YoloLayerPlugin::destroy() TRT_NOEXCEPT {
+
+    delete this;
+}
+
+nvinfer1::IPluginV2IOExt* YoloLayerPlugin::clone() const TRT_NOEXCEPT {
+
+    YoloLayerPlugin* p = new YoloLayerPlugin(mClassCount, mYoloV8NetWidth, mYoloV8netHeight, mMaxOutObject, is_segmentation_);
+    p->setPluginNamespace(mPluginNamespace);
+    return p;
+}
+
+int YoloLayerPlugin::enqueue(int batchSize, const void* TRT_CONST_ENQUEUE* inputs, void* const* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT {
+
+    forwardGpu((const float* const*)inputs, (float*)outputs[0], stream, mYoloV8netHeight, mYoloV8NetWidth, batchSize);
+    return 0;
+}
+
+
+__device__ float Logist(float data) { return 1.0f / (1.0f + expf(-data)); };
+
+__global__ void CalDetection(const float* input, float* output, int numElements, int maxoutobject,
+                             const int grid_h, int grid_w, const int stride, int classes, int outputElem, bool is_segmentation) {
+    int idx = threadIdx.x + blockDim.x * blockIdx.x;
+    if (idx >= numElements) return;
+
+    int total_grid = grid_h * grid_w;
+    int info_len = 4 + classes;
+    if (is_segmentation) info_len += 32;
+    int batchIdx = idx / total_grid;
+    int elemIdx = idx % total_grid;
+    const float* curInput = input + batchIdx * total_grid * info_len;
+    int outputIdx = batchIdx * outputElem;
+
+    int class_id = 0;
+    float max_cls_prob = 0.0;
+    for (int i = 4; i < 4 + classes; i++) {
+        float p = Logist(curInput[elemIdx + i * total_grid]);
+        if (p > max_cls_prob) {
+            max_cls_prob = p;
+            class_id = i - 4;
+        }
+    }
+
+    if (max_cls_prob < 0.1) return;
+
+    int count = (int)atomicAdd(output + outputIdx, 1);
+    if (count >= maxoutobject) return;
+    char* data = (char*)(output + outputIdx) + sizeof(float) + count * sizeof(Detection);
+    Detection* det = (Detection*)(data);
+
+    int row = elemIdx / grid_w;
+    int col = elemIdx % grid_w;
+
+    det->conf = max_cls_prob;
+    det->class_id = class_id;
+    det->bbox[0] = (col + 0.5f - curInput[elemIdx + 0 * total_grid]) * stride;
+    det->bbox[1] = (row + 0.5f - curInput[elemIdx + 1 * total_grid]) * stride;
+    det->bbox[2] = (col + 0.5f + curInput[elemIdx + 2 * total_grid]) * stride;
+    det->bbox[3] = (row + 0.5f + curInput[elemIdx + 3 * total_grid]) * stride;
+
+    for (int k = 0; is_segmentation && k < 32; k++) {
+        det->mask[k] = curInput[elemIdx + (k + 4 + classes) * total_grid];
+    }
+}
+
+void YoloLayerPlugin::forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int mYoloV8netHeight,int mYoloV8NetWidth, int batchSize) {
+    int outputElem = 1 + mMaxOutObject * sizeof(Detection) / sizeof(float);
+    cudaMemsetAsync(output, 0, sizeof(float), stream);
+    for (int idx = 0; idx < batchSize; ++idx) {
+        CUDA_CHECK(cudaMemsetAsync(output + idx * outputElem, 0, sizeof(float), stream));
+    }
+    int numElem = 0;
+    int grids[3][2] = { {mYoloV8netHeight / 8, mYoloV8NetWidth / 8}, {mYoloV8netHeight / 16, mYoloV8NetWidth / 16}, {mYoloV8netHeight / 32, mYoloV8NetWidth / 32} };
+    int strides[] = { 8, 16, 32 };
+    for (unsigned int i = 0; i < 3; i++) {
+        int grid_h = grids[i][0];
+        int grid_w = grids[i][1];
+        int stride = strides[i];
+        numElem = grid_h * grid_w * batchSize;
+        if (numElem < mThreadCount) mThreadCount = numElem;
+
+        CalDetection << <(numElem + mThreadCount - 1) / mThreadCount, mThreadCount, 0, stream >> >
+            (inputs[i], output, numElem, mMaxOutObject, grid_h, grid_w, stride, mClassCount, outputElem, is_segmentation_);
+    }
+}
+
+PluginFieldCollection YoloPluginCreator::mFC{};
+std::vector<PluginField> YoloPluginCreator::mPluginAttributes;
+
+YoloPluginCreator::YoloPluginCreator() {
+    mPluginAttributes.clear();
+    mFC.nbFields = mPluginAttributes.size();
+    mFC.fields = mPluginAttributes.data();
+}
+
+const char* YoloPluginCreator::getPluginName() const TRT_NOEXCEPT {
+    return "YoloLayer_TRT";
+}
+
+const char* YoloPluginCreator::getPluginVersion() const TRT_NOEXCEPT {
+    return "1";
+}
+
+const PluginFieldCollection* YoloPluginCreator::getFieldNames() TRT_NOEXCEPT {
+    return &mFC;
+}
+
+IPluginV2IOExt* YoloPluginCreator::createPlugin(const char* name, const PluginFieldCollection* fc) TRT_NOEXCEPT {
+    assert(fc->nbFields == 1);
+    assert(strcmp(fc->fields[0].name, "netinfo") == 0);
+    int* p_netinfo = (int*)(fc->fields[0].data);
+    int class_count = p_netinfo[0];
+    int input_w = p_netinfo[1];
+    int input_h = p_netinfo[2];
+    int max_output_object_count = p_netinfo[3];
+    bool is_segmentation = p_netinfo[4];
+    YoloLayerPlugin* obj = new YoloLayerPlugin(class_count, input_w, input_h, max_output_object_count, is_segmentation);
+    obj->setPluginNamespace(mNamespace.c_str());
+    return obj;
+}
+
+IPluginV2IOExt* YoloPluginCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT {
+    // This object will be deleted when the network is destroyed, which will
+    // call YoloLayerPlugin::destroy()
+    YoloLayerPlugin* obj = new YoloLayerPlugin(serialData, serialLength);
+    obj->setPluginNamespace(mNamespace.c_str());
+    return obj;
+}
+
+} // namespace nvinfer1

+ 102 - 0
src/detection/vision_yolov8/plugin/yololayer.h

@@ -0,0 +1,102 @@
+#pragma once
+#include "macros.h"
+#include "NvInfer.h"
+#include <string>
+#include <vector>
+#include "macros.h"
+namespace nvinfer1 {
+class API YoloLayerPlugin : public IPluginV2IOExt {
+public:
+        YoloLayerPlugin(int classCount, int netWidth, int netHeight, int maxOut, bool is_segmentation);
+        YoloLayerPlugin(const void* data, size_t length);
+        ~YoloLayerPlugin();
+
+        int getNbOutputs() const TRT_NOEXCEPT override {
+            return 1;
+        }
+
+        nvinfer1::Dims getOutputDimensions(int index, const nvinfer1::Dims* inputs, int nbInputDims) TRT_NOEXCEPT override;
+
+        int initialize() TRT_NOEXCEPT override;
+
+        virtual void terminate() TRT_NOEXCEPT override {}
+
+        virtual size_t getWorkspaceSize(int maxBatchSize) const TRT_NOEXCEPT override { return 0; }
+
+        virtual int enqueue(int batchSize, const void* const* inputs, void* TRT_CONST_ENQUEUE* outputs, void* workspace, cudaStream_t stream) TRT_NOEXCEPT override;
+
+        virtual size_t getSerializationSize() const TRT_NOEXCEPT override;
+
+        virtual void serialize(void* buffer) const TRT_NOEXCEPT override;
+
+        bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const TRT_NOEXCEPT override {
+            return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
+        }
+
+
+        const char* getPluginType() const TRT_NOEXCEPT override;
+
+        const char* getPluginVersion() const TRT_NOEXCEPT override;
+
+        void destroy() TRT_NOEXCEPT override;
+
+        IPluginV2IOExt* clone() const TRT_NOEXCEPT override;
+
+        void setPluginNamespace(const char* pluginNamespace) TRT_NOEXCEPT override;
+
+        const char* getPluginNamespace() const TRT_NOEXCEPT override;
+
+        nvinfer1::DataType getOutputDataType(int32_t index, nvinfer1::DataType const* inputTypes, int32_t nbInputs) const TRT_NOEXCEPT override;
+
+        bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const TRT_NOEXCEPT override;
+
+        bool canBroadcastInputAcrossBatch(int inputIndex) const TRT_NOEXCEPT override;
+
+        void attachToContext(cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) TRT_NOEXCEPT override;
+
+        void configurePlugin(PluginTensorDesc const* in, int32_t nbInput, PluginTensorDesc const* out, int32_t nbOutput) TRT_NOEXCEPT override;
+
+        void detachFromContext() TRT_NOEXCEPT override;
+
+    private:
+        void forwardGpu(const float* const* inputs, float* output, cudaStream_t stream, int mYoloV8netHeight, int mYoloV8NetWidth, int batchSize);
+        int mThreadCount = 256;
+        const char* mPluginNamespace;
+        int mClassCount;
+        int mYoloV8NetWidth;
+        int mYoloV8netHeight;
+        int mMaxOutObject;
+        bool is_segmentation_;
+    };
+
+class API YoloPluginCreator : public IPluginCreator {
+public:
+        YoloPluginCreator();
+        ~YoloPluginCreator() override = default;
+
+        const char* getPluginName() const TRT_NOEXCEPT override;
+
+        const char* getPluginVersion() const TRT_NOEXCEPT override;
+
+        const nvinfer1::PluginFieldCollection* getFieldNames() TRT_NOEXCEPT override;
+
+        nvinfer1::IPluginV2IOExt* createPlugin(const char* name, const nvinfer1::PluginFieldCollection* fc) TRT_NOEXCEPT override;
+
+        nvinfer1::IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) TRT_NOEXCEPT override;
+
+        void setPluginNamespace(const char* libNamespace) TRT_NOEXCEPT override {
+            mNamespace = libNamespace;
+        }
+
+        const char* getPluginNamespace() const TRT_NOEXCEPT override {
+            return mNamespace.c_str();
+        }
+
+    private:
+        std::string mNamespace;
+        static PluginFieldCollection mFC;
+        static std::vector<PluginField> mPluginAttributes;
+    };
+    REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
+} // namespace nvinfer1
+

+ 57 - 0
src/detection/vision_yolov8/show_coordinate_video.py

@@ -0,0 +1,57 @@
+import cv2
+
+
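+# Calibration helper: grab a frame from a video, save it, then print and annotate the
+# pixel coordinates of every left-click so they can be used as reference points.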
+def test():
+
+    cap = cv2.VideoCapture("/home/nvidia/Videos/video.mp4")
+    while True:
+        ret, frame = cap.read()
+        if not ret:
+            break
+        #frame = frame[410-90:590+90, 380+35:1020+35]
+        #frame = cv2.resize(frame,(W,H))
+        #frame = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
+        cv2.imshow("capture", frame)
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            img = frame
+            cv2.imwrite('img_cali.jpg', frame)
+            break
+    cap.release()
+    cv2.destroyAllWindows()
+    #img = cv2.imread('img.jpg')
+
+
+    def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
+        if event == cv2.EVENT_LBUTTONDOWN:
+            xy = "%d,%d" % (x, y)
+            cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)
+            print((x,y))
+            cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
+                        1.5, (255, 255, 255), thickness=2)
+            cv2.imshow("image", img)
+
+    cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
+    cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
+    cv2.imshow("image", img)
+
+    while True:
+        try:
+            if cv2.waitKey(10) & 0xFF == ord('q'):
+                break
+        except Exception:
+            cv2.destroyWindow("image")
+            break
+
+
+
+if __name__ == '__main__':
+    test()
+
+
+
+
+
+
+

+ 193 - 0
src/detection/vision_yolov8/src/block.cpp

@@ -0,0 +1,193 @@
+#include "block.h"
+#include "yololayer.h"
+#include "config.h"
+#include <iostream>
+#include <assert.h>
+#include <fstream>
+#include <math.h>
+
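+// Parse a .wts weight file: the first line holds the number of entries, and each
+// following line is "<name> <count> <hex word> ... <hex word>" with every word the
+// raw 32-bit pattern of an fp32 value.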
+std::map<std::string, nvinfer1::Weights> loadWeights(const std::string file){
+    std::cout << "Loading weights: " << file << std::endl;
+    std::map<std::string, nvinfer1::Weights> WeightMap;
+
+    std::ifstream input(file);
+    assert(input.is_open() && "Unable to load weight file. please check if the .wts file path is right!!!!!!");
+
+    int32_t count;
+    input >> count;
+    assert(count > 0 && "Invalid weight map file.");
+
+    while(count--){
+        nvinfer1::Weights wt{nvinfer1::DataType::kFLOAT, nullptr, 0};
+        uint32_t size;
+
+        std::string name;
+        input >> name >> std::dec >> size;
+        wt.type = nvinfer1::DataType::kFLOAT;
+
+        uint32_t* val = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t) * size));
+        for(uint32_t x = 0, y = size; x < y; x++){
+            input >> std::hex >> val[x];
+        }
+        wt.values = val;
+        wt.count = size;
+        WeightMap[name] = wt;
+    }
+    return WeightMap;
+}
+
+
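+// Fold BatchNorm into a TensorRT IScaleLayer: scale = gamma / sqrt(var + eps),
+// shift = beta - mean * scale, power = 1. Note that weightMap is taken by value,
+// so the buffers inserted below live only in this local copy and are not released
+// by the caller's free loop.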
+static nvinfer1::IScaleLayer* addBatchNorm2d(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap,
+nvinfer1::ITensor& input, std::string lname, float eps){
+    float* gamma = (float*)weightMap[lname + ".weight"].values;
+    float* beta = (float*)weightMap[lname + ".bias"].values;
+    float* mean = (float*)weightMap[lname + ".running_mean"].values;
+    float* var = (float*)weightMap[lname + ".running_var"].values;
+    int len = weightMap[lname + ".running_var"].count;
+
+    float* scval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
+    for(int i = 0; i < len; i++){
+        scval[i] = gamma[i] / sqrt(var[i] + eps);
+    }
+    nvinfer1::Weights scale{nvinfer1::DataType::kFLOAT, scval, len};
+
+    float* shval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
+    for(int i = 0; i < len; i++){
+        shval[i] = beta[i] - mean[i] * gamma[i] / sqrt(var[i] + eps);
+    }
+    nvinfer1::Weights shift{nvinfer1::DataType::kFLOAT, shval, len};
+
+    float* pval = reinterpret_cast<float*>(malloc(sizeof(float) * len));
+    for (int i = 0; i < len; i++) {
+        pval[i] = 1.0;
+    }
+    nvinfer1::Weights power{ nvinfer1::DataType::kFLOAT, pval, len };
+    weightMap[lname + ".scale"] = scale;
+    weightMap[lname + ".shift"] = shift;
+    weightMap[lname + ".power"] = power;
+    nvinfer1::IScaleLayer* output = network->addScale(input, nvinfer1::ScaleMode::kCHANNEL, shift, scale, power);
+    assert(output);
+    return output;
+}
+
+
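+// Conv2d + BatchNorm + SiLU. SiLU(x) = x * sigmoid(x), built here as a sigmoid
+// activation followed by an element-wise product with the BN output.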
+nvinfer1::IElementWiseLayer* convBnSiLU(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int ch, int k, int s, int p, std::string lname){
+    nvinfer1::Weights bias_empty{nvinfer1::DataType::kFLOAT, nullptr, 0};
+    nvinfer1::IConvolutionLayer* conv = network->addConvolutionNd(input, ch, nvinfer1::DimsHW{k, k}, weightMap[lname+".conv.weight"], bias_empty);
+    assert(conv);
+    conv->setStrideNd(nvinfer1::DimsHW{s, s});
+    conv->setPaddingNd(nvinfer1::DimsHW{p, p});
+
+    nvinfer1::IScaleLayer* bn = addBatchNorm2d(network, weightMap, *conv->getOutput(0), lname+".bn", 1e-5);
+
+    nvinfer1::IActivationLayer* sigmoid = network->addActivation(*bn->getOutput(0), nvinfer1::ActivationType::kSIGMOID);
+    nvinfer1::IElementWiseLayer* ew = network->addElementWise(*bn->getOutput(0), *sigmoid->getOutput(0), nvinfer1::ElementWiseOperation::kPROD);
+    assert(ew);
+    return ew;
+}
+
+
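+// Standard bottleneck: two 3x3 convBnSiLU blocks, with a residual add only when
+// shortcut is requested and the input/output channel counts match.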
+nvinfer1::ILayer* bottleneck(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int c1, int c2, bool shortcut, float e, std::string lname){
+    nvinfer1::IElementWiseLayer* conv1 = convBnSiLU(network, weightMap, input, c2, 3, 1, 1, lname+".cv1");
+    nvinfer1::IElementWiseLayer* conv2 = convBnSiLU(network, weightMap, *conv1->getOutput(0), c2, 3, 1, 1, lname+".cv2");
+    
+    if(shortcut && c1 == c2){
+        nvinfer1::IElementWiseLayer* ew = network->addElementWise(input, *conv2->getOutput(0), nvinfer1::ElementWiseOperation::kSUM);
+        return ew;
+    }
+    return conv2;
+}
+
+
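+// C2f block: 1x1 conv to 2*c_, split the channels in half, run n bottlenecks on the
+// second half, concatenate every intermediate output, then fuse with a final 1x1 conv.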
+nvinfer1::IElementWiseLayer* C2F(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int c1, int c2, int n, bool shortcut, float e, std::string lname){
+    int c_ = (float)c2 * e;
+    
+    nvinfer1::IElementWiseLayer* conv1 = convBnSiLU(network, weightMap, input, 2* c_, 1, 1, 0, lname+".cv1");
+    nvinfer1::Dims d = conv1->getOutput(0)->getDimensions();
+    
+    nvinfer1::ISliceLayer* split1 = network->addSlice(*conv1->getOutput(0), nvinfer1::Dims3{0,0,0}, nvinfer1::Dims3{d.d[0]/2, d.d[1], d.d[2]}, nvinfer1::Dims3{1,1,1});
+    nvinfer1::ISliceLayer* split2 = network->addSlice(*conv1->getOutput(0), nvinfer1::Dims3{d.d[0]/2,0,0}, nvinfer1::Dims3{d.d[0]/2, d.d[1], d.d[2]}, nvinfer1::Dims3{1,1,1});
+    nvinfer1::ITensor* inputTensor0[] = {split1->getOutput(0), split2->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat = network->addConcatenation(inputTensor0, 2);
+    nvinfer1::ITensor* y1 = split2->getOutput(0);
+    for(int i = 0; i < n; i++){
+        auto* b = bottleneck(network, weightMap, *y1, c_, c_, shortcut, 1.0, lname+".m." + std::to_string(i));
+        y1 = b->getOutput(0);
+
+        nvinfer1::ITensor* inputTensors[] = {cat->getOutput(0), b->getOutput(0)};
+        cat = network->addConcatenation(inputTensors, 2);
+    }
+    
+    nvinfer1::IElementWiseLayer* conv2 = convBnSiLU(network, weightMap, *cat->getOutput(0), c2, 1, 1, 0, lname+".cv2");
+    
+    return conv2;
+}
+
+
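+// SPPF: three serial k x k max-pools (stride 1, same padding) concatenated with the
+// input branch; equivalent to SPP with kernel sizes k, 2k-1 and 3k-2.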
+nvinfer1::IElementWiseLayer* SPPF(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int c1, int c2, int k, std::string lname){
+    int c_ = c1 / 2;
+    
+    nvinfer1::IElementWiseLayer* conv1 = convBnSiLU(network, weightMap, input, c_, 1, 1, 0, lname+".cv1");
+    
+    nvinfer1::IPoolingLayer* pool1 = network->addPoolingNd(*conv1->getOutput(0), nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{k,k});
+    pool1->setStrideNd(nvinfer1::DimsHW{1, 1});
+    pool1->setPaddingNd(nvinfer1::DimsHW{ k / 2, k / 2 });
+    nvinfer1::IPoolingLayer* pool2 = network->addPoolingNd(*pool1->getOutput(0), nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{k,k});
+    pool2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    pool2->setPaddingNd(nvinfer1::DimsHW{ k / 2, k / 2 });
+    nvinfer1::IPoolingLayer* pool3 = network->addPoolingNd(*pool2->getOutput(0), nvinfer1::PoolingType::kMAX, nvinfer1::DimsHW{k,k});
+    pool3->setStrideNd(nvinfer1::DimsHW{1, 1});
+    pool3->setPaddingNd(nvinfer1::DimsHW{ k / 2, k / 2 });
+    nvinfer1::ITensor* inputTensors[] = {conv1->getOutput(0), pool1->getOutput(0), pool2->getOutput(0), pool3->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat = network->addConcatenation(inputTensors, 4);
+    nvinfer1::IElementWiseLayer* conv2 = convBnSiLU(network, weightMap, *cat->getOutput(0), c2, 1, 1, 0, lname+".cv2");
+    return conv2;
+}
+
+
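+// DFL decode: reshape to (4, 16, grid), softmax over the 16 distance bins, then a
+// fixed 1x1 conv (the model.22.dfl weights) projects each distribution to its
+// expected value, giving 4 box distances per grid cell.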
+nvinfer1::IShuffleLayer* DFL(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights> weightMap, 
+nvinfer1::ITensor& input, int ch, int grid, int k, int s, int p, std::string lname){
+
+    nvinfer1::IShuffleLayer* shuffle1 = network->addShuffle(input);
+    shuffle1->setReshapeDimensions(nvinfer1::Dims3{4, 16, grid});
+    shuffle1->setSecondTranspose(nvinfer1::Permutation{1, 0, 2});
+    nvinfer1::ISoftMaxLayer* softmax = network->addSoftMax(*shuffle1->getOutput(0));
+
+    nvinfer1::Weights bias_empty{nvinfer1::DataType::kFLOAT, nullptr, 0};
+    nvinfer1::IConvolutionLayer* conv = network->addConvolutionNd(*softmax->getOutput(0), 1, nvinfer1::DimsHW{1, 1}, weightMap[lname], bias_empty);
+    conv->setStrideNd(nvinfer1::DimsHW{s, s});
+    conv->setPaddingNd(nvinfer1::DimsHW{p, p});
+
+    nvinfer1::IShuffleLayer* shuffle2 = network->addShuffle(*conv->getOutput(0));
+    shuffle2->setReshapeDimensions(nvinfer1::Dims2{4, grid});
+
+    return shuffle2;
+}
+
+
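+// Wrap the three per-scale head outputs in the custom YoloLayer_TRT plugin. The
+// netinfo field packs {num classes, input W, input H, max output boxes, segmentation
+// flag} and must stay in the order that YoloPluginCreator::createPlugin unpacks it.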
+nvinfer1::IPluginV2Layer* addYoLoLayer(nvinfer1::INetworkDefinition *network, std::vector<nvinfer1::IConcatenationLayer*> dets, bool is_segmentation) {
+    auto creator = getPluginRegistry()->getPluginCreator("YoloLayer_TRT", "1");
+
+    nvinfer1::PluginField plugin_fields[1];
+    int netinfo[5] = {kNumClass, kInputW, kInputH, kMaxNumOutputBbox, is_segmentation};
+    plugin_fields[0].data = netinfo;
+    plugin_fields[0].length = 5;
+    plugin_fields[0].name = "netinfo";
+    plugin_fields[0].type = nvinfer1::PluginFieldType::kINT32;  // netinfo carries int values
+
+
+    nvinfer1::PluginFieldCollection plugin_data;
+    plugin_data.nbFields = 1;
+    plugin_data.fields = plugin_fields;
+    nvinfer1::IPluginV2 *plugin_obj = creator->createPlugin("yololayer", &plugin_data);
+    std::vector<nvinfer1::ITensor*> input_tensors;
+    for (auto det: dets) {
+        input_tensors.push_back(det->getOutput(0));
+    }
+    auto yolo = network->addPluginV2(&input_tensors[0], input_tensors.size(), *plugin_obj);
+    return yolo;
+}

+ 80 - 0
src/detection/vision_yolov8/src/calibrator.cpp

@@ -0,0 +1,80 @@
+#include <iostream>
+#include <iterator>
+#include <fstream>
+#include <opencv2/dnn/dnn.hpp>
+#include "calibrator.h"
+#include "cuda_utils.h"
+#include "utils.h"
+
+Int8EntropyCalibrator2::Int8EntropyCalibrator2(int batchsize, int input_w, int input_h, const char* img_dir, const char* calib_table_name,
+                                               const char* input_blob_name, bool read_cache)
+    : batchsize_(batchsize)
+    , input_w_(input_w)
+    , input_h_(input_h)
+    , img_idx_(0)
+    , img_dir_(img_dir)
+    , calib_table_name_(calib_table_name)
+    , input_blob_name_(input_blob_name)
+    , read_cache_(read_cache)
+{
+    input_count_ = 3 * input_w * input_h * batchsize;
+    CUDA_CHECK(cudaMalloc(&device_input_, input_count_ * sizeof(float)));
+    read_files_in_dir(img_dir, img_files_);
+}
+
+Int8EntropyCalibrator2::~Int8EntropyCalibrator2()
+{
+    CUDA_CHECK(cudaFree(device_input_));
+}
+
+int Int8EntropyCalibrator2::getBatchSize() const TRT_NOEXCEPT
+{
+    return batchsize_;
+}
+
+bool Int8EntropyCalibrator2::getBatch(void* bindings[], const char* names[], int nbBindings) TRT_NOEXCEPT
+{
+    if (img_idx_ + batchsize_ > (int)img_files_.size()) {
+        return false;
+    }
+
+    std::vector<cv::Mat> input_imgs_;
+    for (int i = img_idx_; i < img_idx_ + batchsize_; i++) {
+        std::cout << img_files_[i] << "  " << i << std::endl;
+        cv::Mat temp = cv::imread(img_dir_ + img_files_[i]);
+        if (temp.empty()){
+            std::cerr << "Fatal error: cannot open image!" << std::endl;
+            return false;
+        }
+        cv::Mat pr_img = preprocess_img(temp, input_w_, input_h_);
+        input_imgs_.push_back(pr_img);
+    }
+    img_idx_ += batchsize_;
+    cv::Mat blob = cv::dnn::blobFromImages(input_imgs_, 1.0 / 255.0, cv::Size(input_w_, input_h_), cv::Scalar(0, 0, 0), true, false);
+    CUDA_CHECK(cudaMemcpy(device_input_, blob.ptr<float>(0), input_count_ * sizeof(float), cudaMemcpyHostToDevice));
+    assert(!strcmp(names[0], input_blob_name_));
+    bindings[0] = device_input_;
+    return true;
+}
+
+const void* Int8EntropyCalibrator2::readCalibrationCache(size_t& length) TRT_NOEXCEPT
+{
+    std::cout << "reading calib cache: " << calib_table_name_ << std::endl;
+    calib_cache_.clear();
+    std::ifstream input(calib_table_name_, std::ios::binary);
+    input >> std::noskipws;
+    if (read_cache_ && input.good())
+    {
+        std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(), std::back_inserter(calib_cache_));
+    }
+    length = calib_cache_.size();
+    return length ? calib_cache_.data() : nullptr;
+}
+
+void Int8EntropyCalibrator2::writeCalibrationCache(const void* cache, size_t length) TRT_NOEXCEPT
+{
+    std::cout << "writing calib cache: " << calib_table_name_ << " size: " << length << std::endl;
+    std::ofstream output(calib_table_name_, std::ios::binary);
+    output.write(reinterpret_cast<const char*>(cache), length);
+}
+

+ 394 - 0
src/detection/vision_yolov8/src/model.cpp

@@ -0,0 +1,394 @@
+#include <math.h>
+#include <iostream>
+
+#include "model.h"
+#include "block.h"
+#include "calibrator.h"
+#include "config.h"
+
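+// get_width / get_depth scale the nominal channel and repeat counts by the YOLOv8
+// width (gw) and depth (gd) multipliers; channels are rounded up to a multiple of
+// divisor and clamped at max_channels.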
+static int get_width(int x, float gw, int max_channels, int divisor = 8) {
+    auto channel = int(ceil((x * gw) / divisor)) * divisor;
+    return channel >= max_channels ? max_channels : channel;
+}
+
+static int get_depth(int x, float gd) {
+    if (x == 1) return 1;
+    int r = round(x * gd);
+    if (x * gd - int(x * gd) == 0.5 && (int(x * gd) % 2) == 0) --r;
+    return std::max<int>(r, 1);
+}
+
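+// Segmentation prototype branch: convBnSiLU, 2x transposed-conv upsample, another
+// convBnSiLU, then a final convBnSiLU down to the 32 mask prototype channels.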
+static nvinfer1::IElementWiseLayer* Proto(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights>& weightMap,
+                                          nvinfer1::ITensor& input, std::string lname, float gw, int max_channels) {
+    int mid_channel = get_width(256, gw, max_channels);
+    auto cv1 = convBnSiLU(network, weightMap, input, mid_channel, 3, 1, 1, "model.22.proto.cv1");
+    float* convTranspose_bias = (float*)weightMap["model.22.proto.upsample.bias"].values;
+    int convTranspose_bias_len = weightMap["model.22.proto.upsample.bias"].count;
+    nvinfer1::Weights bias{nvinfer1::DataType::kFLOAT, convTranspose_bias, convTranspose_bias_len};
+    auto convTranspose = network->addDeconvolutionNd(*cv1->getOutput(0), mid_channel, nvinfer1::DimsHW{2, 2}, weightMap["model.22.proto.upsample.weight"], bias);
+    assert(convTranspose);
+    convTranspose->setStrideNd(nvinfer1::DimsHW{2, 2});
+    auto cv2 = convBnSiLU(network, weightMap, *convTranspose->getOutput(0), mid_channel, 3, 1, 1, "model.22.proto.cv2");
+    auto cv3 = convBnSiLU(network, weightMap, *cv2->getOutput(0), 32, 1, 1, 0, "model.22.proto.cv3");
+    assert(cv3);
+    return cv3;
+}
+
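+// Per-scale mask-coefficient head: two convBnSiLU blocks (mid channels chosen by the
+// width multiplier), then a 1x1 conv to 32 coefficients, reshaped to (32, grid cells).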
+static nvinfer1::IShuffleLayer* ProtoCoef(nvinfer1::INetworkDefinition* network, std::map<std::string, nvinfer1::Weights>& weightMap,
+                                          nvinfer1::ITensor& input, std::string lname, int grid_shape, float gw) {
+
+    int mid_channel = 0;
+    if (gw == 0.25 || gw == 0.5) {
+        mid_channel = 32;
+    } else if (gw == 0.75) {
+        mid_channel = 48;
+    } else if (gw == 1.00) {
+        mid_channel = 64;
+    } else if (gw == 1.25) {
+        mid_channel = 80;
+    }
+    auto cv0 = convBnSiLU(network, weightMap, input, mid_channel, 3, 1, 1, lname + ".0");
+    auto cv1 = convBnSiLU(network, weightMap, *cv0->getOutput(0), mid_channel, 3, 1, 1, lname + ".1");
+    float* cv2_bias_value = (float*)weightMap[lname + ".2" + ".bias"].values;
+    int cv2_bias_len = weightMap[lname + ".2" + ".bias"].count;
+    nvinfer1::Weights cv2_bias{nvinfer1::DataType::kFLOAT, cv2_bias_value, cv2_bias_len};
+    auto cv2 = network->addConvolutionNd(*cv1->getOutput(0), 32, nvinfer1::DimsHW{1, 1}, weightMap[lname + ".2" + ".weight"], cv2_bias);
+    cv2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    nvinfer1::IShuffleLayer* cv2_shuffle = network->addShuffle(*cv2->getOutput(0));
+    cv2_shuffle->setReshapeDimensions(nvinfer1::Dims2{ 32, grid_shape});
+    return cv2_shuffle;
+}
+
+nvinfer1::IHostMemory* buildEngineYolov8Det(nvinfer1::IBuilder* builder,
+                                            nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt,
+                                            const std::string& wts_path, float& gd, float& gw, int& max_channels) {
+    std::map<std::string, nvinfer1::Weights> weightMap = loadWeights(wts_path);
+    nvinfer1::INetworkDefinition* network = builder->createNetworkV2(0U);
+
+    /*******************************************************************************************************
+    ******************************************  YOLOV8 INPUT  **********************************************
+    *******************************************************************************************************/
+    nvinfer1::ITensor* data = network->addInput(kInputTensorName, dt, nvinfer1::Dims3{3, kInputH, kInputW});
+    assert(data);
+
+    /*******************************************************************************************************
+    *****************************************  YOLOV8 BACKBONE  ********************************************
+    *******************************************************************************************************/
+    nvinfer1::IElementWiseLayer* conv0 = convBnSiLU(network, weightMap, *data, get_width(64, gw, max_channels), 3, 2, 1, "model.0");
+    nvinfer1::IElementWiseLayer* conv1 = convBnSiLU(network, weightMap, *conv0->getOutput(0), get_width(128, gw, max_channels), 3, 2, 1, "model.1");
+    // 11233
+    nvinfer1::IElementWiseLayer* conv2 = C2F(network, weightMap, *conv1->getOutput(0), get_width(128, gw, max_channels), get_width(128, gw, max_channels), get_depth(3, gd), true, 0.5, "model.2");
+    nvinfer1::IElementWiseLayer* conv3 = convBnSiLU(network, weightMap, *conv2->getOutput(0), get_width(256, gw, max_channels), 3, 2, 1, "model.3");
+    // 22466
+    nvinfer1::IElementWiseLayer* conv4 = C2F(network, weightMap, *conv3->getOutput(0), get_width(256, gw, max_channels), get_width(256, gw, max_channels), get_depth(6, gd), true, 0.5, "model.4");
+    nvinfer1::IElementWiseLayer* conv5 = convBnSiLU(network, weightMap, *conv4->getOutput(0), get_width(512, gw, max_channels), 3, 2, 1, "model.5");
+    // 22466
+    nvinfer1::IElementWiseLayer* conv6 = C2F(network, weightMap, *conv5->getOutput(0), get_width(512, gw, max_channels), get_width(512, gw, max_channels), get_depth(6, gd), true, 0.5, "model.6");
+    nvinfer1::IElementWiseLayer* conv7 = convBnSiLU(network, weightMap, *conv6->getOutput(0), get_width(1024, gw, max_channels), 3, 2, 1, "model.7");
+    // 11233
+    nvinfer1::IElementWiseLayer* conv8 = C2F(network, weightMap, *conv7->getOutput(0), get_width(1024, gw, max_channels), get_width(1024, gw, max_channels), get_depth(3, gd), true, 0.5, "model.8");
+    nvinfer1::IElementWiseLayer* conv9 = SPPF(network, weightMap, *conv8->getOutput(0), get_width(1024, gw, max_channels), get_width(1024, gw, max_channels), 5, "model.9");
+
+    /*******************************************************************************************************
+    *********************************************  YOLOV8 HEAD  ********************************************
+    *******************************************************************************************************/
+    float scale[] = {1.0, 2.0, 2.0};
+    nvinfer1::IResizeLayer* upsample10 = network->addResize(*conv9->getOutput(0));
+    assert(upsample10);
+    upsample10->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+    upsample10->setScales(scale, 3);
+
+    nvinfer1::ITensor* inputTensor11[] = {upsample10->getOutput(0), conv6->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat11 = network->addConcatenation(inputTensor11, 2);
+    nvinfer1::IElementWiseLayer* conv12 = C2F(network, weightMap, *cat11->getOutput(0), get_width(512, gw, max_channels), get_width(512, gw, max_channels), get_depth(3, gd), false, 0.5, "model.12");
+
+    nvinfer1::IResizeLayer* upsample13 = network->addResize(*conv12->getOutput(0));
+    assert(upsample13);
+    upsample13->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+    upsample13->setScales(scale, 3);
+
+    nvinfer1::ITensor* inputTensor14[] = {upsample13->getOutput(0), conv4->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat14 = network->addConcatenation(inputTensor14, 2);
+    nvinfer1::IElementWiseLayer* conv15 = C2F(network, weightMap, *cat14->getOutput(0), get_width(256, gw, max_channels), get_width(256, gw, max_channels), get_depth(3, gd), false, 0.5, "model.15");
+    nvinfer1::IElementWiseLayer* conv16 = convBnSiLU(network, weightMap, *conv15->getOutput(0), get_width(256, gw, max_channels), 3, 2, 1, "model.16");
+    nvinfer1::ITensor* inputTensor17[] = {conv16->getOutput(0), conv12->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat17 = network->addConcatenation(inputTensor17, 2);
+    nvinfer1::IElementWiseLayer* conv18 = C2F(network, weightMap, *cat17->getOutput(0), get_width(512, gw, max_channels), get_width(512, gw, max_channels), get_depth(3, gd), false, 0.5, "model.18");
+    nvinfer1::IElementWiseLayer* conv19 = convBnSiLU(network, weightMap, *conv18->getOutput(0), get_width(512, gw, max_channels), 3, 2, 1, "model.19");
+    nvinfer1::ITensor* inputTensor20[] = {conv19->getOutput(0), conv9->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat20 = network->addConcatenation(inputTensor20, 2);
+    nvinfer1::IElementWiseLayer* conv21 = C2F(network, weightMap, *cat20->getOutput(0), get_width(1024, gw, max_channels), get_width(1024, gw, max_channels), get_depth(3, gd), false, 0.5, "model.21");
+
+    /*******************************************************************************************************
+    *********************************************  YOLOV8 OUTPUT  ******************************************
+    *******************************************************************************************************/
+    int base_in_channel = (gw == 1.25) ? 80 : 64;
+    int base_out_channel = (gw == 0.25) ? std::max(64, std::min(kNumClass, 100)) : get_width(256, gw, max_channels);
+
+    // output0
+    nvinfer1::IElementWiseLayer* conv22_cv2_0_0 = convBnSiLU(network, weightMap, *conv15->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.0.0");
+    nvinfer1::IElementWiseLayer* conv22_cv2_0_1 = convBnSiLU(network, weightMap, *conv22_cv2_0_0->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.0.1");
+    nvinfer1::IConvolutionLayer* conv22_cv2_0_2 = network->addConvolutionNd(*conv22_cv2_0_1->getOutput(0), 64, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv2.0.2.weight"], weightMap["model.22.cv2.0.2.bias"]);
+    conv22_cv2_0_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv2_0_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::IElementWiseLayer* conv22_cv3_0_0 = convBnSiLU(network, weightMap, *conv15->getOutput(0),base_out_channel, 3, 1, 1, "model.22.cv3.0.0");
+    nvinfer1::IElementWiseLayer* conv22_cv3_0_1 = convBnSiLU(network, weightMap, *conv22_cv3_0_0->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.0.1");
+    nvinfer1::IConvolutionLayer* conv22_cv3_0_2 = network->addConvolutionNd(*conv22_cv3_0_1->getOutput(0), kNumClass, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv3.0.2.weight"], weightMap["model.22.cv3.0.2.bias"]);
+    conv22_cv3_0_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv3_0_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::ITensor* inputTensor22_0[] = {conv22_cv2_0_2->getOutput(0), conv22_cv3_0_2->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_0 = network->addConcatenation(inputTensor22_0, 2);
+
+    // output1
+    nvinfer1::IElementWiseLayer* conv22_cv2_1_0 = convBnSiLU(network, weightMap, *conv18->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.1.0");
+    nvinfer1::IElementWiseLayer* conv22_cv2_1_1 = convBnSiLU(network, weightMap, *conv22_cv2_1_0->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.1.1");
+    nvinfer1::IConvolutionLayer* conv22_cv2_1_2 = network->addConvolutionNd(*conv22_cv2_1_1->getOutput(0), 64, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv2.1.2.weight"], weightMap["model.22.cv2.1.2.bias"]);
+    conv22_cv2_1_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv2_1_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::IElementWiseLayer* conv22_cv3_1_0 = convBnSiLU(network, weightMap, *conv18->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.1.0");
+    nvinfer1::IElementWiseLayer* conv22_cv3_1_1 = convBnSiLU(network, weightMap, *conv22_cv3_1_0->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.1.1");
+    nvinfer1::IConvolutionLayer* conv22_cv3_1_2 = network->addConvolutionNd(*conv22_cv3_1_1->getOutput(0), kNumClass, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv3.1.2.weight"], weightMap["model.22.cv3.1.2.bias"]);
+    conv22_cv3_1_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv3_1_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::ITensor* inputTensor22_1[] = {conv22_cv2_1_2->getOutput(0), conv22_cv3_1_2->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_1 = network->addConcatenation(inputTensor22_1, 2);
+
+    // output2
+    nvinfer1::IElementWiseLayer* conv22_cv2_2_0 = convBnSiLU(network, weightMap, *conv21->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.2.0");
+    nvinfer1::IElementWiseLayer* conv22_cv2_2_1 = convBnSiLU(network, weightMap, *conv22_cv2_2_0->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.2.1");
+    nvinfer1::IConvolutionLayer* conv22_cv2_2_2 = network->addConvolution(*conv22_cv2_2_1->getOutput(0), 64, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv2.2.2.weight"], weightMap["model.22.cv2.2.2.bias"]);
+    nvinfer1::IElementWiseLayer* conv22_cv3_2_0 = convBnSiLU(network, weightMap, *conv21->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.2.0");
+    nvinfer1::IElementWiseLayer* conv22_cv3_2_1 = convBnSiLU(network, weightMap, *conv22_cv3_2_0->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.2.1");
+    nvinfer1::IConvolutionLayer* conv22_cv3_2_2 = network->addConvolution(*conv22_cv3_2_1->getOutput(0), kNumClass, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv3.2.2.weight"], weightMap["model.22.cv3.2.2.bias"]);
+    nvinfer1::ITensor* inputTensor22_2[] = {conv22_cv2_2_2->getOutput(0), conv22_cv3_2_2->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_2 = network->addConcatenation(inputTensor22_2, 2);
+
+    /*******************************************************************************************************
+    *********************************************  YOLOV8 DETECT  ******************************************
+    *******************************************************************************************************/
+
+    nvinfer1::IShuffleLayer* shuffle22_0 = network->addShuffle(*cat22_0->getOutput(0));
+    shuffle22_0->setReshapeDimensions(nvinfer1::Dims2{64 + kNumClass, (kInputH / 8) * (kInputW / 8)});
+
+    nvinfer1::ISliceLayer* split22_0_0 = network->addSlice(*shuffle22_0->getOutput(0), nvinfer1::Dims2{0, 0}, nvinfer1::Dims2{64, (kInputH / 8) * (kInputW / 8)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::ISliceLayer* split22_0_1 = network->addSlice(*shuffle22_0->getOutput(0), nvinfer1::Dims2{64, 0}, nvinfer1::Dims2{kNumClass, (kInputH / 8) * (kInputW / 8)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::IShuffleLayer* dfl22_0 = DFL(network, weightMap, *split22_0_0->getOutput(0), 4, (kInputH / 8) * (kInputW / 8), 1, 1, 0, "model.22.dfl.conv.weight");
+    nvinfer1::ITensor* inputTensor22_dfl_0[] = {dfl22_0->getOutput(0), split22_0_1->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_dfl_0 = network->addConcatenation(inputTensor22_dfl_0, 2);
+
+    nvinfer1::IShuffleLayer* shuffle22_1 = network->addShuffle(*cat22_1->getOutput(0));
+    shuffle22_1->setReshapeDimensions(nvinfer1::Dims2{64 + kNumClass, (kInputH / 16) * (kInputW / 16)});
+    nvinfer1::ISliceLayer* split22_1_0 = network->addSlice(*shuffle22_1->getOutput(0), nvinfer1::Dims2{0, 0}, nvinfer1::Dims2{64, (kInputH / 16) * (kInputW / 16)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::ISliceLayer* split22_1_1 = network->addSlice(*shuffle22_1->getOutput(0), nvinfer1::Dims2{64, 0}, nvinfer1::Dims2{kNumClass, (kInputH / 16) * (kInputW / 16)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::IShuffleLayer* dfl22_1 = DFL(network, weightMap, *split22_1_0->getOutput(0), 4, (kInputH / 16) * (kInputW / 16), 1, 1, 0, "model.22.dfl.conv.weight");
+    nvinfer1::ITensor* inputTensor22_dfl_1[] = {dfl22_1->getOutput(0), split22_1_1->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_dfl_1 = network->addConcatenation(inputTensor22_dfl_1, 2);
+
+    nvinfer1::IShuffleLayer* shuffle22_2 = network->addShuffle(*cat22_2->getOutput(0));
+    shuffle22_2->setReshapeDimensions(nvinfer1::Dims2{64 + kNumClass, (kInputH / 32) * (kInputW / 32)});
+    nvinfer1::ISliceLayer* split22_2_0 = network->addSlice(*shuffle22_2->getOutput(0), nvinfer1::Dims2{0, 0}, nvinfer1::Dims2{64, (kInputH / 32) * (kInputW / 32)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::ISliceLayer* split22_2_1 = network->addSlice(*shuffle22_2->getOutput(0), nvinfer1::Dims2{64, 0}, nvinfer1::Dims2{kNumClass, (kInputH / 32) * (kInputW / 32)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::IShuffleLayer* dfl22_2 = DFL(network, weightMap, *split22_2_0->getOutput(0), 4, (kInputH / 32) * (kInputW / 32), 1, 1, 0, "model.22.dfl.conv.weight");
+    nvinfer1::ITensor* inputTensor22_dfl_2[] = {dfl22_2->getOutput(0), split22_2_1->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_dfl_2 = network->addConcatenation(inputTensor22_dfl_2, 2);
+
+    nvinfer1::IPluginV2Layer* yolo = addYoLoLayer(network, std::vector<nvinfer1::IConcatenationLayer *>{cat22_dfl_0, cat22_dfl_1, cat22_dfl_2});
+    yolo->getOutput(0)->setName(kOutputTensorName);
+    network->markOutput(*yolo->getOutput(0));
+
+    builder->setMaxBatchSize(kBatchSize);
+    config->setMaxWorkspaceSize(16 * (1 << 20));
+
+#if defined(USE_FP16)
+    config->setFlag(nvinfer1::BuilderFlag::kFP16);
+#elif defined(USE_INT8)
+    std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
+    assert(builder->platformHasFastInt8());
+    config->setFlag(nvinfer1::BuilderFlag::kINT8);
+    auto* calibrator = new Int8EntropyCalibrator2(1, kInputW, kInputH, "../coco_calib/", "int8calib.table", kInputTensorName);
+    config->setInt8Calibrator(calibrator);
+#endif
+
+    std::cout << "Building engine, please wait for a while..." << std::endl;
+    nvinfer1::IHostMemory* serialized_model = builder->buildSerializedNetwork(*network, *config);
+    std::cout << "Build engine successfully!" << std::endl;
+
+    delete network;
+
+    for (auto &mem : weightMap){
+        free((void *)(mem.second.values));
+    }
+    return serialized_model;
+}
+
+nvinfer1::IHostMemory* buildEngineYolov8Seg(nvinfer1::IBuilder* builder,
+                                            nvinfer1::IBuilderConfig* config, nvinfer1::DataType dt,
+                                            const std::string& wts_path, float& gd, float& gw, int& max_channels) {
+    std::map<std::string, nvinfer1::Weights> weightMap = loadWeights(wts_path);
+    nvinfer1::INetworkDefinition* network = builder->createNetworkV2(0U);
+
+    /*******************************************************************************************************
+    ******************************************  YOLOV8 INPUT  **********************************************
+    *******************************************************************************************************/
+    nvinfer1::ITensor* data = network->addInput(kInputTensorName, dt, nvinfer1::Dims3{3, kInputH, kInputW});
+    assert(data);
+
+    /*******************************************************************************************************
+    *****************************************  YOLOV8 BACKBONE  ********************************************
+    *******************************************************************************************************/
+    nvinfer1::IElementWiseLayer* conv0 = convBnSiLU(network, weightMap, *data, get_width(64, gw, max_channels), 3, 2, 1, "model.0");
+    nvinfer1::IElementWiseLayer* conv1 = convBnSiLU(network, weightMap, *conv0->getOutput(0), get_width(128, gw, max_channels), 3, 2, 1, "model.1");
+    nvinfer1::IElementWiseLayer* conv2 = C2F(network, weightMap, *conv1->getOutput(0), get_width(128, gw, max_channels), get_width(128, gw, max_channels), get_depth(3, gd), true, 0.5, "model.2");
+    nvinfer1::IElementWiseLayer* conv3 = convBnSiLU(network, weightMap, *conv2->getOutput(0), get_width(256, gw, max_channels), 3, 2, 1, "model.3");
+    nvinfer1::IElementWiseLayer* conv4 = C2F(network, weightMap, *conv3->getOutput(0), get_width(256, gw, max_channels), get_width(256, gw, max_channels), get_depth(6, gd), true, 0.5, "model.4");
+    nvinfer1::IElementWiseLayer* conv5 = convBnSiLU(network, weightMap, *conv4->getOutput(0), get_width(512, gw, max_channels), 3, 2, 1, "model.5");
+    nvinfer1::IElementWiseLayer* conv6 = C2F(network, weightMap, *conv5->getOutput(0), get_width(512, gw, max_channels), get_width(512, gw, max_channels), get_depth(6, gd), true, 0.5, "model.6");
+    nvinfer1::IElementWiseLayer* conv7 = convBnSiLU(network, weightMap, *conv6->getOutput(0), get_width(1024, gw, max_channels), 3, 2, 1, "model.7");
+    nvinfer1::IElementWiseLayer* conv8 = C2F(network, weightMap, *conv7->getOutput(0), get_width(1024, gw, max_channels), get_width(1024, gw, max_channels), get_depth(3, gd), true, 0.5, "model.8");
+    nvinfer1::IElementWiseLayer* conv9 = SPPF(network, weightMap, *conv8->getOutput(0), get_width(1024, gw, max_channels), get_width(1024, gw, max_channels), 5, "model.9");
+
+    /*******************************************************************************************************
+    *********************************************  YOLOV8 HEAD  ********************************************
+    *******************************************************************************************************/
+    float scale[] = {1.0, 2.0, 2.0};
+    nvinfer1::IResizeLayer* upsample10 = network->addResize(*conv9->getOutput(0));
+    assert(upsample10);
+    upsample10->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+    upsample10->setScales(scale, 3);
+
+    nvinfer1::ITensor* inputTensor11[] = {upsample10->getOutput(0), conv6->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat11 = network->addConcatenation(inputTensor11, 2);
+    nvinfer1::IElementWiseLayer* conv12 = C2F(network, weightMap, *cat11->getOutput(0), get_width(512, gw, max_channels), get_width(512, gw, max_channels), get_depth(3, gd), false, 0.5, "model.12");
+
+    nvinfer1::IResizeLayer* upsample13 = network->addResize(*conv12->getOutput(0));
+    assert(upsample13);
+    upsample13->setResizeMode(nvinfer1::ResizeMode::kNEAREST);
+    upsample13->setScales(scale, 3);
+
+    nvinfer1::ITensor* inputTensor14[] = {upsample13->getOutput(0), conv4->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat14 = network->addConcatenation(inputTensor14, 2);
+    nvinfer1::IElementWiseLayer* conv15 = C2F(network, weightMap, *cat14->getOutput(0), get_width(256, gw, max_channels), get_width(256, gw, max_channels), get_depth(3, gd), false, 0.5, "model.15");
+    nvinfer1::IElementWiseLayer* conv16 = convBnSiLU(network, weightMap, *conv15->getOutput(0), get_width(256, gw, max_channels), 3, 2, 1, "model.16");
+    nvinfer1::ITensor* inputTensor17[] = {conv16->getOutput(0), conv12->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat17 = network->addConcatenation(inputTensor17, 2);
+    nvinfer1::IElementWiseLayer* conv18 = C2F(network, weightMap, *cat17->getOutput(0), get_width(512, gw, max_channels), get_width(512, gw, max_channels), get_depth(3, gd), false, 0.5, "model.18");
+    nvinfer1::IElementWiseLayer* conv19 = convBnSiLU(network, weightMap, *conv18->getOutput(0), get_width(512, gw, max_channels), 3, 2, 1, "model.19");
+    nvinfer1::ITensor* inputTensor20[] = {conv19->getOutput(0), conv9->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat20 = network->addConcatenation(inputTensor20, 2);
+    nvinfer1::IElementWiseLayer* conv21 = C2F(network, weightMap, *cat20->getOutput(0), get_width(1024, gw, max_channels), get_width(1024, gw, max_channels), get_depth(3, gd), false, 0.5, "model.21");
+
+    /*******************************************************************************************************
+    *********************************************  YOLOV8 OUTPUT  ******************************************
+    *******************************************************************************************************/
+    int base_in_channel = (gw == 1.25) ? 80 : 64;
+    int base_out_channel = (gw == 0.25) ? std::max(64, std::min(kNumClass, 100)) : get_width(256, gw, max_channels);
+
+    // output0
+    nvinfer1::IElementWiseLayer* conv22_cv2_0_0 = convBnSiLU(network, weightMap, *conv15->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.0.0");
+    nvinfer1::IElementWiseLayer* conv22_cv2_0_1 = convBnSiLU(network, weightMap, *conv22_cv2_0_0->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.0.1");
+    nvinfer1::IConvolutionLayer* conv22_cv2_0_2 = network->addConvolutionNd(*conv22_cv2_0_1->getOutput(0), 64, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv2.0.2.weight"], weightMap["model.22.cv2.0.2.bias"]);
+    conv22_cv2_0_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv2_0_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::IElementWiseLayer *conv22_cv3_0_0 = convBnSiLU(network, weightMap, *conv15->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.0.0");
+    nvinfer1::IElementWiseLayer *conv22_cv3_0_1 = convBnSiLU(network, weightMap, *conv22_cv3_0_0->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.0.1");
+    nvinfer1::IConvolutionLayer *conv22_cv3_0_2 = network->addConvolutionNd(*conv22_cv3_0_1->getOutput(0), kNumClass, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv3.0.2.weight"], weightMap["model.22.cv3.0.2.bias"]);
+    conv22_cv3_0_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv3_0_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::ITensor* inputTensor22_0[] = {conv22_cv2_0_2->getOutput(0), conv22_cv3_0_2->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_0 = network->addConcatenation(inputTensor22_0, 2);
+
+    // output1
+    nvinfer1::IElementWiseLayer* conv22_cv2_1_0 = convBnSiLU(network, weightMap, *conv18->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.1.0");
+    nvinfer1::IElementWiseLayer* conv22_cv2_1_1 = convBnSiLU(network, weightMap, *conv22_cv2_1_0->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.1.1");
+    nvinfer1::IConvolutionLayer* conv22_cv2_1_2 = network->addConvolutionNd(*conv22_cv2_1_1->getOutput(0), 64, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv2.1.2.weight"], weightMap["model.22.cv2.1.2.bias"]);
+    conv22_cv2_1_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv2_1_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::IElementWiseLayer* conv22_cv3_1_0 = convBnSiLU(network, weightMap, *conv18->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.1.0");
+    nvinfer1::IElementWiseLayer* conv22_cv3_1_1 = convBnSiLU(network, weightMap, *conv22_cv3_1_0->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.1.1");
+    nvinfer1::IConvolutionLayer* conv22_cv3_1_2 = network->addConvolutionNd(*conv22_cv3_1_1->getOutput(0), kNumClass, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv3.1.2.weight"], weightMap["model.22.cv3.1.2.bias"]);
+    conv22_cv3_1_2->setStrideNd(nvinfer1::DimsHW{1, 1});
+    conv22_cv3_1_2->setPaddingNd(nvinfer1::DimsHW{0, 0});
+    nvinfer1::ITensor* inputTensor22_1[] = {conv22_cv2_1_2->getOutput(0), conv22_cv3_1_2->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_1 = network->addConcatenation(inputTensor22_1, 2);
+
+    // output2
+    nvinfer1::IElementWiseLayer* conv22_cv2_2_0 = convBnSiLU(network, weightMap, *conv21->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.2.0");
+    nvinfer1::IElementWiseLayer* conv22_cv2_2_1 = convBnSiLU(network, weightMap, *conv22_cv2_2_0->getOutput(0), base_in_channel, 3, 1, 1, "model.22.cv2.2.1");
+    nvinfer1::IConvolutionLayer* conv22_cv2_2_2 = network->addConvolution(*conv22_cv2_2_1->getOutput(0), 64, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv2.2.2.weight"], weightMap["model.22.cv2.2.2.bias"]);
+    nvinfer1::IElementWiseLayer* conv22_cv3_2_0 = convBnSiLU(network, weightMap, *conv21->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.2.0");
+    nvinfer1::IElementWiseLayer* conv22_cv3_2_1 = convBnSiLU(network, weightMap, *conv22_cv3_2_0->getOutput(0), base_out_channel, 3, 1, 1, "model.22.cv3.2.1");
+    nvinfer1::IConvolutionLayer* conv22_cv3_2_2 = network->addConvolution(*conv22_cv3_2_1->getOutput(0), kNumClass, nvinfer1::DimsHW{1, 1}, weightMap["model.22.cv3.2.2.weight"], weightMap["model.22.cv3.2.2.bias"]);
+    nvinfer1::ITensor* inputTensor22_2[] = {conv22_cv2_2_2->getOutput(0), conv22_cv3_2_2->getOutput(0)};
+    nvinfer1::IConcatenationLayer* cat22_2 = network->addConcatenation(inputTensor22_2, 2);
+
+    /*******************************************************************************************************
+    *********************************************  YOLOV8 DETECT  ******************************************
+    *******************************************************************************************************/
+
+    nvinfer1::IShuffleLayer* shuffle22_0 = network->addShuffle(*cat22_0->getOutput(0));
+    shuffle22_0->setReshapeDimensions(nvinfer1::Dims2{64 + kNumClass, (kInputH / 8) * (kInputW / 8)});
+
+    nvinfer1::ISliceLayer* split22_0_0 = network->addSlice(*shuffle22_0->getOutput(0), nvinfer1::Dims2{0, 0}, nvinfer1::Dims2{64, (kInputH / 8) * (kInputW / 8)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::ISliceLayer* split22_0_1 = network->addSlice(*shuffle22_0->getOutput(0), nvinfer1::Dims2{64, 0}, nvinfer1::Dims2{kNumClass, (kInputH / 8) * (kInputW / 8)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::IShuffleLayer* dfl22_0 = DFL(network, weightMap, *split22_0_0->getOutput(0), 4, (kInputH / 8) * (kInputW / 8), 1, 1, 0, "model.22.dfl.conv.weight");
+
+    nvinfer1::IShuffleLayer* shuffle22_1 = network->addShuffle(*cat22_1->getOutput(0));
+    shuffle22_1->setReshapeDimensions(nvinfer1::Dims2{64 + kNumClass, (kInputH / 16) * (kInputW / 16)});
+    nvinfer1::ISliceLayer* split22_1_0 = network->addSlice(*shuffle22_1->getOutput(0), nvinfer1::Dims2{0, 0}, nvinfer1::Dims2{64, (kInputH / 16) * (kInputW / 16)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::ISliceLayer* split22_1_1 = network->addSlice(*shuffle22_1->getOutput(0), nvinfer1::Dims2{64, 0}, nvinfer1::Dims2{kNumClass, (kInputH / 16) * (kInputW / 16)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::IShuffleLayer* dfl22_1 = DFL(network, weightMap, *split22_1_0->getOutput(0), 4, (kInputH / 16) * (kInputW / 16), 1, 1, 0, "model.22.dfl.conv.weight");
+
+    nvinfer1::IShuffleLayer* shuffle22_2 = network->addShuffle(*cat22_2->getOutput(0));
+    shuffle22_2->setReshapeDimensions(nvinfer1::Dims2{64 + kNumClass, (kInputH / 32) * (kInputW / 32)});
+    nvinfer1::ISliceLayer* split22_2_0 = network->addSlice(*shuffle22_2->getOutput(0), nvinfer1::Dims2{0, 0}, nvinfer1::Dims2{64, (kInputH / 32) * (kInputW / 32)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::ISliceLayer* split22_2_1 = network->addSlice(*shuffle22_2->getOutput(0), nvinfer1::Dims2{64, 0}, nvinfer1::Dims2{kNumClass, (kInputH / 32) * (kInputW / 32)}, nvinfer1::Dims2{1, 1});
+    nvinfer1::IShuffleLayer* dfl22_2 = DFL(network, weightMap, *split22_2_0->getOutput(0), 4, (kInputH / 32) * (kInputW / 32), 1, 1, 0, "model.22.dfl.conv.weight");
+
+    // det0
+    auto proto_coef_0 = ProtoCoef(network, weightMap, *conv15->getOutput(0), "model.22.cv4.0", 6400, gw);
+    nvinfer1::ITensor* inputTensor22_dfl_0[] = { dfl22_0->getOutput(0), split22_0_1->getOutput(0),proto_coef_0->getOutput(0)};
+    nvinfer1::IConcatenationLayer *cat22_dfl_0 = network->addConcatenation(inputTensor22_dfl_0, 3);
+
+    // det1
+    auto proto_coef_1 = ProtoCoef(network, weightMap, *conv18->getOutput(0), "model.22.cv4.1", 1600, gw);
+    nvinfer1::ITensor* inputTensor22_dfl_1[] = { dfl22_1->getOutput(0), split22_1_1->getOutput(0),proto_coef_1->getOutput(0)};
+    nvinfer1::IConcatenationLayer *cat22_dfl_1 = network->addConcatenation(inputTensor22_dfl_1, 3);
+
+    // det2
+    auto proto_coef_2 = ProtoCoef(network, weightMap, *conv21->getOutput(0), "model.22.cv4.2", 400, gw);
+    nvinfer1::ITensor* inputTensor22_dfl_2[] = { dfl22_2->getOutput(0), split22_2_1->getOutput(0) ,proto_coef_2->getOutput(0)};
+    nvinfer1::IConcatenationLayer *cat22_dfl_2 = network->addConcatenation(inputTensor22_dfl_2, 3);
+
+
+    nvinfer1::IPluginV2Layer* yolo = addYoLoLayer(network, std::vector<nvinfer1::IConcatenationLayer *>{cat22_dfl_0, cat22_dfl_1, cat22_dfl_2}, true);
+    yolo->getOutput(0)->setName(kOutputTensorName);
+    network->markOutput(*yolo->getOutput(0));
+
+    auto proto = Proto(network, weightMap, *conv15->getOutput(0), "model.22.proto", gw, max_channels);
+    proto->getOutput(0)->setName("proto");
+    network->markOutput(*proto->getOutput(0));
+
+    builder->setMaxBatchSize(kBatchSize);
+    config->setMaxWorkspaceSize(16 * (1 << 20));
+
+#if defined(USE_FP16)
+    config->setFlag(nvinfer1::BuilderFlag::kFP16);
+#elif defined(USE_INT8)
+    std::cout << "Your platform support int8: " << (builder->platformHasFastInt8() ? "true" : "false") << std::endl;
+    assert(builder->platformHasFastInt8());
+    config->setFlag(nvinfer1::BuilderFlag::kINT8);
+    auto* calibrator = new Int8EntropyCalibrator2(1, kInputW, kInputH, "../coco_calib/", "int8calib.table", kInputTensorName);
+    config->setInt8Calibrator(calibrator);
+#endif
+
+    std::cout << "Building engine, please wait for a while..." << std::endl;
+    nvinfer1::IHostMemory* serialized_model = builder->buildSerializedNetwork(*network, *config);
+    std::cout << "Build engine successfully!" << std::endl;
+
+    delete network;
+
+    for (auto& mem : weightMap) {
+        free((void*)(mem.second.values));
+    }
+    return serialized_model;
+}

+ 331 - 0
src/detection/vision_yolov8/src/postprocess.cpp

@@ -0,0 +1,331 @@
+#include "postprocess.h"
+#include "utils.h"
+
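+// Map a box from the letterboxed network input back to original image coordinates,
+// undoing the symmetric padding and the resize scale.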
+cv::Rect get_rect(cv::Mat &img, float bbox[4]) {
+    float l, r, t, b;
+    float r_w = kInputW / (img.cols * 1.0);
+    float r_h = kInputH / (img.rows * 1.0);
+
+    if (r_h > r_w) {
+        l = bbox[0];
+        r = bbox[2];
+        t = bbox[1] - (kInputH - r_w * img.rows) / 2;
+        b = bbox[3] - (kInputH - r_w * img.rows) / 2;
+        l = l / r_w;
+        r = r / r_w;
+        t = t / r_w;
+        b = b / r_w;
+    } else {
+        l = bbox[0] - (kInputW - r_h * img.cols) / 2;
+        r = bbox[2] - (kInputW - r_h * img.cols) / 2;
+        t = bbox[1];
+        b = bbox[3];
+        l = l / r_h;
+        r = r / r_h;
+        t = t / r_h;
+        b = b / r_h;
+    }
+    return cv::Rect(round(l), round(t), round(r - l), round(b - t));
+}
+
+static float iou(float lbox[4], float rbox[4]) {
+    float interBox[] = {
+            (std::max)(lbox[0], rbox[0]), //left
+            (std::min)(lbox[2], rbox[2]), //right
+            (std::max)(lbox[1], rbox[1]), //top
+            (std::min)(lbox[3], rbox[3]), //bottom
+    };
+
+    if (interBox[2] > interBox[3] || interBox[0] > interBox[1])
+        return 0.0f;
+
+    float interBoxS = (interBox[1] - interBox[0]) * (interBox[3] - interBox[2]);
+    float unionBoxS = (lbox[2] - lbox[0]) * (lbox[3] - lbox[1]) + (rbox[2] - rbox[0]) * (rbox[3] - rbox[1]) - interBoxS;
+    return interBoxS / unionBoxS;
+}
+
+static bool cmp(const Detection &a, const Detection &b) {
+    return a.conf > b.conf;
+}
+
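+// Greedy per-class NMS on the raw plugin output: output[0] is the detection count,
+// followed by Detection structs of det_size floats each.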
+void nms(std::vector<Detection> &res, float *output, float conf_thresh, float nms_thresh) {
+    int det_size = sizeof(Detection) / sizeof(float);
+    std::map<float, std::vector<Detection>> m;
+
+    for (int i = 0; i < output[0]; i++) {
+        if (output[1 + det_size * i + 4] <= conf_thresh) continue;
+        Detection det;
+        memcpy(&det, &output[1 + det_size * i], det_size * sizeof(float));
+        if (m.count(det.class_id) == 0) m.emplace(det.class_id, std::vector<Detection>());
+        m[det.class_id].push_back(det);
+    }
+    for (auto it = m.begin(); it != m.end(); it++) {
+        auto &dets = it->second;
+        std::sort(dets.begin(), dets.end(), cmp);
+        for (size_t k = 0; k < dets.size(); ++k) {
+            auto &item = dets[k];
+            res.push_back(item);
+            for (size_t n = k + 1; n < dets.size(); ++n) {
+                if (iou(item.bbox, dets[n].bbox) > nms_thresh) {
+                    dets.erase(dets.begin() + n);
+                    --n;
+                }
+            }
+        }
+    }
+}
+
+void batch_nms(std::vector<std::vector<Detection>> &res_batch, float *output, int batch_size, int output_size,
+               float conf_thresh, float nms_thresh) {
+    res_batch.resize(batch_size);
+    for (int i = 0; i < batch_size; i++) {
+        nms(res_batch[i], &output[i * output_size], conf_thresh, nms_thresh);
+    }
+}
+
+void process_decode_ptr_host(std::vector<Detection> &res, const float* decode_ptr_host, int bbox_element, cv::Mat& img, int count) {
+    Detection det;
+    for (int i = 0; i < count; i++) {
+        int basic_pos = 1 + i * bbox_element;
+        int keep_flag = decode_ptr_host[basic_pos + 6];
+        if (keep_flag == 1) {
+            det.bbox[0] = decode_ptr_host[basic_pos + 0];
+            det.bbox[1] = decode_ptr_host[basic_pos + 1];
+            det.bbox[2] = decode_ptr_host[basic_pos + 2];
+            det.bbox[3] = decode_ptr_host[basic_pos + 3];
+            det.conf = decode_ptr_host[basic_pos + 4];
+            det.class_id = decode_ptr_host[basic_pos + 5];
+            res.push_back(det);
+        }
+    }
+}
+
+void batch_process(std::vector<std::vector<Detection>> &res_batch, const float* decode_ptr_host, int batch_size, int bbox_element, const std::vector<cv::Mat>& img_batch) {
+    res_batch.resize(batch_size);
+    int count = static_cast<int>(*decode_ptr_host);
+    count = std::min(count, kMaxNumOutputBbox);
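+    // Note: the per-image offset below is only valid for a single image per batch; GPU post-processing is limited to batch size 1 elsewhere in this code.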
+    for (int i = 0; i < batch_size; i++) {
+        auto& img = const_cast<cv::Mat&>(img_batch[i]);
+        process_decode_ptr_host(res_batch[i], &decode_ptr_host[i * count], bbox_element, img, count);
+    }
+}
+
+float distanceXY(float a, float b){
+    return std::sqrt(std::pow(a,2)+std::pow(b,2));
+}
+
+iv::vision::Visionyolov8 draw_bbox(std::vector<cv::Mat> &img_batch, std::vector<std::vector<Detection>> &res_batch, cv::Mat &pers_mat_inv, cv::Point2i transed_O, float METER_PER_PIXEL_X, float METER_PER_PIXEL_Y, float X, float Y, bool show_distance) {
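+    // Draw detections on the image and, using the perspective transform pers_mat_inv, estimate each object's world-frame position and width in meters; the results are also packed into a Visionyolov8 protobuf message.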
+    for (size_t i = 0; i < img_batch.size(); i++) {
+        auto &res = res_batch[i];
+        cv::Mat img = img_batch[i];
+        std::cout<<"res.size="<<res.size()<<std::endl;
+        // vertical view
+//        cv::Mat img_batch_vertical;
+//        cv::warpPerspective(img, img_batch_vertical, pers_mat_inv, img.size());
+//        cv::Mat img_batch_vertical(720, 1280, CV_8UC4, cv::Scalar(0,0,0,0));
+//        cv::rectangle(img_batch_vertical, cv::Point(624,655), cv::Point(656,719), cv::Scalar(0,255,0,255));
+//        std::vector<cv::Point> pts;
+//        pts.push_back(cv::Point(584,655));
+//        pts.push_back(cv::Point(584,719));
+//        pts.push_back(cv::Point(696,719));
+//        pts.push_back(cv::Point(696,655));
+//        cv::fillConvexPoly(img_batch_vertical, pts, cv::Scalar(0,255,0,255));
+
+        iv::vision::Visionyolov8 vision_yolov8;
+
+        for (size_t j = 0; j < res.size(); j++) {
+            cv::Rect r = get_rect(img, res[j].bbox);
+            std::vector<cv::Point2f> src_point, warped_point;
+            src_point.push_back(cv::Point2f(r.x+r.width/2, r.y+r.height));
+            src_point.push_back(cv::Point2f(r.x, r.y+r.height));
+            src_point.push_back(cv::Point2f(r.x+r.width, r.y+r.height));
+
+            // cv::circle(img, cv::Point2f(r.x+r.width/2, r.y+r.height), 3, cv::Scalar(0, 255, 0), -1);
+            cv::perspectiveTransform(src_point, warped_point, pers_mat_inv);
+
+//            for(int i=0;i<warped_point.size();i++){
+//                warped_point[i].x/=2;
+//                warped_point[i].x+=300;
+//                warped_point[i].y/=2;
+//                warped_point[i].y+=300;
+//            }
+
+            cv::Point2f position_world;
+            position_world.x = float((warped_point[0].x - transed_O.x) * METER_PER_PIXEL_X);
+            position_world.y = float((transed_O.y - warped_point[0].y) * METER_PER_PIXEL_Y);
+            position_world.y += Y;
+            position_world.x += X;
+            float width_obj = (warped_point[2].x-warped_point[1].x) * METER_PER_PIXEL_X;
+
+
+            if(res[j].class_id <= 7 && res[j].class_id != 4 && res[j].class_id != 6){
+                int baseline = 0;
+                cv::Size labelSize1 = cv::getTextSize(objectName[(int) res[j].class_id]+
+                        " x:"+(std::to_string(position_world.x)).substr(0,4)+"m"+
+                        " y:"+(std::to_string(position_world.y)).substr(0,4)+"m"+"0"
+                        , cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseline);
+                cv::Size labelSize2 = cv::getTextSize("width:"
+                        +(std::to_string(width_obj)).substr(0,4)+"m"+"0"
+                        , cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseline);
+                if(position_world.y < 3){
+                    cv::rectangle(img, cv::Point(r.x, r.y - labelSize1.height),
+                                  cv::Point(r.x + labelSize1.width, r.y), cv::Scalar(0, 0, 255), -1);
+                    cv::rectangle(img, r, cv::Scalar(0, 0, 255), 1);
+                    cv::putText(img, objectName[(int) res[j].class_id]+
+                            " x:"+(std::to_string(position_world.x)).substr(0,4)+"m"+
+                            " y:"+(std::to_string(position_world.y)).substr(0,4)+"m"
+                            , cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_SIMPLEX,
+                            0.5, cv::Scalar(255, 255, 255), 2);
+
+                    cv::rectangle(img, cv::Point(r.x, r.y + r.height),
+                                  cv::Point(r.x + labelSize2.width, r.y + r.height + labelSize2.height + 2), cv::Scalar(0, 0, 255), -1);
+                    cv::putText(img, "width:"
+                            +(std::to_string(width_obj)).substr(0,4)+"m",
+                                cv::Point(r.x, r.y+r.height+labelSize2.height), cv::FONT_HERSHEY_SIMPLEX,
+                            0.5, cv::Scalar(255, 255, 255), 2);
+
+                    if(show_distance){
+                        cv::line(img, cv::Point(r.x+r.width/2, r.y+r.height), cv::Point(img.size().width/2, img.size().height),
+                                 cv::Scalar(0, 0, 255), 1);
+                        cv::putText(img, "distance:"
+                                +(std::to_string(distanceXY(position_world.x, position_world.y))).substr(0,4)+"m",
+                                    cv::Point((r.x+r.width/2+img.size().width/2)/2, (r.y+r.height+img.size().height)/2), cv::FONT_HERSHEY_SIMPLEX,
+                                0.5, cv::Scalar(0, 0, 255), 2);
+                    }
+
+                    // vertical
+//                    cv::circle(img_batch_vertical, warped_point[0], 5, cv::Scalar(0, 0, 255, 255), -1);
+//                    cv::rectangle(img_batch_vertical, cv::Rect(warped_point[1].x,
+//                                  warped_point[1].y-(warped_point[2].x-warped_point[1].x),
+//                                  warped_point[2].x-warped_point[1].x,
+//                                  warped_point[2].x-warped_point[1].x),
+//                                  cv::Scalar(0, 0, 255, 255), 2);
+//                    cv::putText(img_batch_vertical, objectName[(int) res[j].class_id]+
+//                            " x:"+(std::to_string(position_world.x)).substr(0,4)+"m"+
+//                            " y:"+(std::to_string(position_world.y)).substr(0,4)+"m",
+//                            warped_point[0], cv::FONT_HERSHEY_PLAIN,
+//                            1, cv::Scalar(0, 0, 255, 255), 1);
+
+                }else{
+                    cv::rectangle(img, cv::Point(r.x, r.y - labelSize1.height),
+                                  cv::Point(r.x + labelSize1.width, r.y), cv::Scalar(0, 255, 0), -1);
+                    cv::rectangle(img, r, cv::Scalar(0, 255, 0), 1);
+                    cv::putText(img, objectName[(int) res[j].class_id]+
+                            " x:"+(std::to_string(position_world.x)).substr(0,4)+"m"+
+                            " y:"+(std::to_string(position_world.y)).substr(0,4)+"m"
+                            , cv::Point(r.x, r.y - 1), cv::FONT_HERSHEY_SIMPLEX,
+                            0.5, cv::Scalar(255, 255, 255), 2);
+
+                    cv::rectangle(img, cv::Point(r.x, r.y + r.height),
+                                  cv::Point(r.x + labelSize2.width, r.y + r.height + labelSize2.height + 2), cv::Scalar(0, 255, 0), -1);
+                    cv::putText(img, "width:"
+                            +(std::to_string(width_obj)).substr(0,4)+"m",
+                                cv::Point(r.x, r.y+r.height+labelSize2.height), cv::FONT_HERSHEY_SIMPLEX,
+                            0.5, cv::Scalar(255, 255, 255), 2);
+                    if(show_distance){
+                        cv::line(img, cv::Point(r.x+r.width/2, r.y+r.height), cv::Point(img.size().width/2, img.size().height),
+                                 cv::Scalar(0, 255, 0), 1);
+                        cv::putText(img, "distance:"
+                                +(std::to_string(distanceXY(position_world.x, position_world.y))).substr(0,4)+"m",
+                                    cv::Point((r.x+r.width/2+img.size().width/2)/2, (r.y+r.height+img.size().height)/2), cv::FONT_HERSHEY_SIMPLEX,
+                                0.5, cv::Scalar(0, 255, 0), 2);
+                    }
+
+                    // vertical
+//                    cv::circle(img_batch_vertical, warped_point[0], 5, cv::Scalar(0, 255, 0, 255), -1);
+//                    cv::rectangle(img_batch_vertical, cv::Rect(warped_point[1].x,
+//                                  warped_point[1].y-(warped_point[2].x-warped_point[1].x),
+//                                  warped_point[2].x-warped_point[1].x, warped_point[2].x-warped_point[1].x),
+//                                  cv::Scalar(0, 255, 0, 255), 2);
+//                    cv::putText(img_batch_vertical, objectName[(int) res[j].class_id]+
+//                            " x:"+(std::to_string(position_world.x)).substr(0,4)+"m"+
+//                            " y:"+(std::to_string(position_world.y)).substr(0,4)+"m",
+//                            warped_point[0], cv::FONT_HERSHEY_PLAIN,
+//                            1, cv::Scalar(0, 255, 0, 255), 1);
+                }
+
+                // protobuf message
+                iv::vision::PixelObject *pixelObject = vision_yolov8.add_pixelobject();
+                iv::vision::WorldObject *worldObject = vision_yolov8.add_worldobject();
+
+                pixelObject->set_type(objectName[(int) res[j].class_id]);
+                pixelObject->set_x(r.x);
+                pixelObject->set_y(r.y);
+                pixelObject->set_width(r.width);
+                pixelObject->set_height(r.height);
+
+                worldObject->set_type(objectName[(int) res[j].class_id]);
+                worldObject->set_x(position_world.x);
+                worldObject->set_y(position_world.y);
+                worldObject->set_width(width_obj);
+            }
+        }
+        // vertical
+//        cv::imshow("vertical view", img_batch_vertical);
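+        // Returning here, inside the loop over img_batch, serializes only the first image's detections (batch size 1 is assumed).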
+        return vision_yolov8;
+    }
+}
+
+
+cv::Mat scale_mask(cv::Mat mask, cv::Mat img) {
+  int x, y, w, h;
+  float r_w = kInputW / (img.cols * 1.0);
+  float r_h = kInputH / (img.rows * 1.0);
+  if (r_h > r_w) {
+    w = kInputW;
+    h = r_w * img.rows;
+    x = 0;
+    y = (kInputH - h) / 2;
+  } else {
+    w = r_h * img.cols;
+    h = kInputH;
+    x = (kInputW - w) / 2;
+    y = 0;
+  }
+  cv::Rect r(x, y, w, h);
+  cv::Mat res;
+  cv::resize(mask(r), res, img.size());
+  return res;
+}
+
+void draw_mask_bbox(cv::Mat& img, std::vector<Detection>& dets, std::vector<cv::Mat>& masks, std::unordered_map<int, std::string>& labels_map) {
+  static std::vector<uint32_t> colors = {0xFF3838, 0xFF9D97, 0xFF701F, 0xFFB21D, 0xCFD231, 0x48F90A,
+                                         0x92CC17, 0x3DDB86, 0x1A9334, 0x00D4BB, 0x2C99A8, 0x00C2FF,
+                                         0x344593, 0x6473FF, 0x0018EC, 0x8438FF, 0x520085, 0xCB38FF,
+                                         0xFF95C8, 0xFF37C7};
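+  // Colors are 0xRRGGBB values; they are unpacked into OpenCV's BGR order below.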
+  for (size_t i = 0; i < dets.size(); i++) {
+    cv::Mat img_mask = scale_mask(masks[i], img);
+    auto color = colors[(int)dets[i].class_id % colors.size()];
+    auto bgr = cv::Scalar(color & 0xFF, color >> 8 & 0xFF, color >> 16 & 0xFF);
+
+    cv::Rect r = get_rect(img, dets[i].bbox);
+    for (int x = r.x; x < r.x + r.width; x++) {
+      for (int y = r.y; y < r.y + r.height; y++) {
+        float val = img_mask.at<float>(y, x);
+        if (val <= 0.5) continue;
+        img.at<cv::Vec3b>(y, x)[0] = img.at<cv::Vec3b>(y, x)[0] / 2 + bgr[0] / 2;
+        img.at<cv::Vec3b>(y, x)[1] = img.at<cv::Vec3b>(y, x)[1] / 2 + bgr[1] / 2;
+        img.at<cv::Vec3b>(y, x)[2] = img.at<cv::Vec3b>(y, x)[2] / 2 + bgr[2] / 2;
+      }
+    }
+
+    cv::rectangle(img, r, bgr, 2);
+    
+    // Get the size of the text
+    cv::Size textSize = cv::getTextSize(labels_map[(int)dets[i].class_id] + " " + to_string_with_precision(dets[i].conf), cv::FONT_HERSHEY_PLAIN, 1.2, 2, NULL);
+    // Set the top left corner of the rectangle
+    cv::Point topLeft(r.x, r.y - textSize.height);
+
+    // Set the bottom right corner of the rectangle
+    cv::Point bottomRight(r.x + textSize.width, r.y + textSize.height);
+
+    // Set the thickness of the rectangle lines
+    int lineThickness = 2;
+
+    // Draw the rectangle on the image
+    cv::rectangle(img, topLeft, bottomRight, bgr, -1);
+
+    cv::putText(img, labels_map[(int)dets[i].class_id] + " " + to_string_with_precision(dets[i].conf), cv::Point(r.x, r.y + 4), cv::FONT_HERSHEY_PLAIN, 1.2, cv::Scalar::all(0xFF), 2);
+
+  }
+}

+ 84 - 0
src/detection/vision_yolov8/src/postprocess.cu

@@ -0,0 +1,84 @@
+//
+// Created by lindsay on 23-7-17.
+//
+#include "types.h"
+#include "postprocess.h"
+
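+// Decode raw predictions into parray: parray[0] holds a running count incremented with atomicAdd, and each kept box writes bbox_element floats: left, top, right, bottom, confidence, class label and a keep flag (1 = keep).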
+static __global__ void
+decode_kernel(float *predict, int num_bboxes, float confidence_threshold, float *parray, int max_objects) {
+    float count = predict[0];
+    int position = (blockDim.x * blockIdx.x + threadIdx.x);
+    if (position >= count) return;
+
+    float *pitem = predict + 1 + position * (sizeof(Detection) / sizeof(float));
+    int index = atomicAdd(parray, 1);
+    if (index >= max_objects) return;
+
+    float confidence = pitem[4];
+    if (confidence < confidence_threshold) return;
+
+    float left = pitem[0];
+    float top = pitem[1];
+    float right = pitem[2];
+    float bottom = pitem[3];
+    float label = pitem[5];
+
+    float *pout_item = parray + 1 + index * bbox_element;
+    *pout_item++ = left;
+    *pout_item++ = top;
+    *pout_item++ = right;
+    *pout_item++ = bottom;
+    *pout_item++ = confidence;
+    *pout_item++ = label;
+    *pout_item++ = 1;  // 1 = keep, 0 = ignore
+}
+
+static __device__ float
+box_iou(float aleft, float atop, float aright, float abottom, float bleft, float btop, float bright, float bbottom) {
+    float cleft = max(aleft, bleft);
+    float ctop = max(atop, btop);
+    float cright = min(aright, bright);
+    float cbottom = min(abottom, bbottom);
+    float c_area = max(cright - cleft, 0.0f) * max(cbottom - ctop, 0.0f);
+    if (c_area == 0.0f) return 0.0f;
+
+    float a_area = max(0.0f, aright - aleft) * max(0.0f, abottom - atop);
+    float b_area = max(0.0f, bright - bleft) * max(0.0f, bbottom - btop);
+    return c_area / (a_area + b_area - c_area);
+}
+
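+// One thread per candidate box: clear the keep flag of pcurrent if a higher-confidence box of the same class overlaps it by more than the IoU threshold.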
+static __global__ void nms_kernel(float *bboxes, int max_objects, float threshold) {
+    int position = (blockDim.x * blockIdx.x + threadIdx.x);
+    int count = bboxes[0];
+    if (position >= count) return;
+
+    float *pcurrent = bboxes + 1 + position * bbox_element;
+    for (int i = 0; i < count; ++i) {
+        float *pitem = bboxes + 1 + i * bbox_element;
+        if (i == position || pcurrent[5] != pitem[5]) continue;
+        if (pitem[4] >= pcurrent[4]) {
+            if (pitem[4] == pcurrent[4] && i < position) continue;
+            float iou = box_iou(
+                pcurrent[0], pcurrent[1], pcurrent[2], pcurrent[3],
+                pitem[0], pitem[1], pitem[2], pitem[3]
+            );
+            if (iou > threshold) {
+                pcurrent[6] = 0;
+                return;
+            }
+        }
+    }
+}
+
+void cuda_decode(float *predict, int num_bboxes, float confidence_threshold, float *parray, int max_objects,
+                 cudaStream_t stream) {
+    int block = 256;
+    int grid = ceil(num_bboxes / (float)block);
+    decode_kernel<<<grid, block, 0, stream>>>((float*)predict, num_bboxes, confidence_threshold, parray, max_objects);
+}
+
+void cuda_nms(float *parray, float nms_threshold, int max_objects, cudaStream_t stream) {
+    int block = max_objects < 256 ? max_objects : 256;
+    int grid = ceil(max_objects / (float)block);
+    nms_kernel<<<grid, block, 0, stream>>>(parray, max_objects, nms_threshold);
+}

+ 155 - 0
src/detection/vision_yolov8/src/preprocess.cu

@@ -0,0 +1,155 @@
+#include "preprocess.h"
+#include "cuda_utils.h"
+
+static uint8_t *img_buffer_host = nullptr;
+static uint8_t *img_buffer_device = nullptr;
+
+
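+// Each thread produces one destination pixel: bilinearly sample the source image through the dst->src affine map, convert BGR to RGB, normalize to [0,1], and write planar (CHW) output.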
+__global__ void
+warpaffine_kernel(uint8_t *src, int src_line_size, int src_width, int src_height, float *dst, int dst_width,
+                  int dst_height, uint8_t const_value_st, AffineMatrix d2s, int edge) {
+    int position = blockDim.x * blockIdx.x + threadIdx.x;
+    if (position >= edge) return;
+
+    float m_x1 = d2s.value[0];
+    float m_y1 = d2s.value[1];
+    float m_z1 = d2s.value[2];
+    float m_x2 = d2s.value[3];
+    float m_y2 = d2s.value[4];
+    float m_z2 = d2s.value[5];
+
+    int dx = position % dst_width;
+    int dy = position / dst_width;
+    float src_x = m_x1 * dx + m_y1 * dy + m_z1 + 0.5f;
+    float src_y = m_x2 * dx + m_y2 * dy + m_z2 + 0.5f;
+    float c0, c1, c2;
+
+    if (src_x <= -1 || src_x >= src_width || src_y <= -1 || src_y >= src_height) {
+        // out of range
+        c0 = const_value_st;
+        c1 = const_value_st;
+        c2 = const_value_st;
+    } else {
+        int y_low = floorf(src_y);
+        int x_low = floorf(src_x);
+        int y_high = y_low + 1;
+        int x_high = x_low + 1;
+
+        uint8_t const_value[] = {const_value_st, const_value_st, const_value_st};
+        float ly = src_y - y_low;
+        float lx = src_x - x_low;
+        float hy = 1 - ly;
+        float hx = 1 - lx;
+        float w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+        uint8_t *v1 = const_value;
+        uint8_t *v2 = const_value;
+        uint8_t *v3 = const_value;
+        uint8_t *v4 = const_value;
+
+        if (y_low >= 0) {
+            if (x_low >= 0)
+                v1 = src + y_low * src_line_size + x_low * 3;
+
+            if (x_high < src_width)
+                v2 = src + y_low * src_line_size + x_high * 3;
+        }
+
+        if (y_high < src_height) {
+            if (x_low >= 0)
+                v3 = src + y_high * src_line_size + x_low * 3;
+
+            if (x_high < src_width)
+                v4 = src + y_high * src_line_size + x_high * 3;
+        }
+
+        c0 = w1 * v1[0] + w2 * v2[0] + w3 * v3[0] + w4 * v4[0];
+        c1 = w1 * v1[1] + w2 * v2[1] + w3 * v3[1] + w4 * v4[1];
+        c2 = w1 * v1[2] + w2 * v2[2] + w3 * v3[2] + w4 * v4[2];
+    }
+
+    // bgr to rgb
+    float t = c2;
+    c2 = c0;
+    c0 = t;
+
+    // normalization
+    c0 = c0 / 255.0f;
+    c1 = c1 / 255.0f;
+    c2 = c2 / 255.0f;
+
+    // rgbrgbrgb to rrrgggbbb
+    int area = dst_width * dst_height;
+    float *pdst_c0 = dst + dy * dst_width + dx;
+    float *pdst_c1 = pdst_c0 + area;
+    float *pdst_c2 = pdst_c1 + area;
+    *pdst_c0 = c0;
+    *pdst_c1 = c1;
+    *pdst_c2 = c2;
+}
+
+
+
+
+void cuda_preprocess(uint8_t *src, int src_width, int src_height, float *dst, int dst_width, int dst_height,
+                     cudaStream_t stream) {
+    int img_size = src_width * src_height * 3;
+    // copy data to pinned memory
+    memcpy(img_buffer_host, src, img_size);
+    // copy data to device memory
+    CUDA_CHECK(cudaMemcpyAsync(img_buffer_device, img_buffer_host, img_size, cudaMemcpyHostToDevice, stream));
+
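+    // Build the source->destination letterbox affine (uniform scale plus centering), then invert it so the kernel maps destination pixels back to source pixels.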
+    AffineMatrix s2d, d2s;
+    float scale = std::min(dst_height / (float) src_height, dst_width / (float) src_width);
+
+    s2d.value[0] = scale;
+    s2d.value[1] = 0;
+    s2d.value[2] = -scale * src_width * 0.5 + dst_width * 0.5;
+    s2d.value[3] = 0;
+    s2d.value[4] = scale;
+    s2d.value[5] = -scale * src_height * 0.5 + dst_height * 0.5;
+    cv::Mat m2x3_s2d(2, 3, CV_32F, s2d.value);
+    cv::Mat m2x3_d2s(2, 3, CV_32F, d2s.value);
+    cv::invertAffineTransform(m2x3_s2d, m2x3_d2s);
+
+    memcpy(d2s.value, m2x3_d2s.ptr<float>(0), sizeof(d2s.value));
+
+    int jobs = dst_height * dst_width;
+    int threads = 256;
+    int blocks = ceil(jobs / (float) threads);
+    warpaffine_kernel<<<blocks, threads, 0, stream>>>(
+            img_buffer_device, src_width * 3, src_width,
+            src_height, dst, dst_width,
+            dst_height, 128, d2s, jobs);
+}
+
+
+void cuda_batch_preprocess(std::vector<cv::Mat> &img_batch,
+                           float *dst, int dst_width, int dst_height,
+                           cudaStream_t stream) {
+    int dst_size = dst_width * dst_height * 3;
+    for (size_t i = 0; i < img_batch.size(); i++) {
+        cuda_preprocess(img_batch[i].ptr(), img_batch[i].cols, img_batch[i].rows, &dst[dst_size * i], dst_width,
+                        dst_height, stream);
+        CUDA_CHECK(cudaStreamSynchronize(stream));
+    }
+}
+
+
+
+
+
+void cuda_preprocess_init(int max_image_size) {
+    // prepare input data in pinned memory
+    CUDA_CHECK(cudaMallocHost((void **) &img_buffer_host, max_image_size * 3));
+    // prepare input data in device memory
+    CUDA_CHECK(cudaMalloc((void **) &img_buffer_device, max_image_size * 3));
+}
+
+void cuda_preprocess_destroy() {
+    CUDA_CHECK(cudaFree(img_buffer_device));
+    CUDA_CHECK(cudaFreeHost(img_buffer_host));
+}
+
+
+
+

+ 128 - 0
src/detection/vision_yolov8/vision_yolov8.pro

@@ -0,0 +1,128 @@
+######################################################################
+# Automatically generated by qmake (3.1) Thu Dec 28 15:58:13 2023
+######################################################################
+QT -= gui
+CONFIG += c++11
+CONFIG -= app_bundle
+
+
+# The following define makes your compiler warn you if you use any
+# feature of Qt which has been marked as deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+
+# You can also make your code fail to compile if you use deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0
+
+# Input
+HEADERS += include/block.h \
+           include/calibrator.h \
+           include/config.h \
+           include/cuda_utils.h \
+           include/logging.h \
+           include/macros.h \
+           include/model.h \
+           include/postprocess.h \
+           include/preprocess.h \
+           include/types.h \
+           include/utils.h \
+           include/imageBuffer.h \
+           plugin/yololayer.h \
+    ../../include/msgtype/visionyolov8.pb.h \
+    ../../include/msgtype/rawpic.pb.h
+SOURCES += main.cpp \
+           src/block.cpp \
+           src/calibrator.cpp \
+           src/model.cpp \
+           src/postprocess.cpp \
+    ../../include/msgtype/visionyolov8.pb.cc \
+    ../../include/msgtype/rawpic.pb.cc
+DISTFILES += plugin/yololayer.cu \
+           src/postprocess.cu \
+           src/preprocess.cu
+
+INCLUDEPATH += /home/nvidia/modularization/src/detection/vision_yolov8/include \
+           /home/nvidia/modularization/src/detection/vision_yolov8/plugin \
+           /usr/include/opencv4 \
+           /usr/local/cuda/include \
+           /usr/local/cuda/lib64 \
+           ../../include/msgtype/
+
+#LIBS += /usr/lib/aarch64-linux-gnu/*.so
+
+#LIBS += /usr/local/cuda-10.2/lib64/*.so
+
+
+LIBS += -L/usr/local/cuda/targets/aarch64-linux/lib
+LIBS += -lcudart -lcufft -lyaml-cpp
+LIBS +=  -lnvinfer -lnvonnxparser -lnvcaffe_parser
+LIBS += -lboost_system
+unix:LIBS +=  -lpcl_common\
+        -lpcl_features\
+        -lpcl_filters\
+        -lpcl_io\
+        -lpcl_io_ply\
+        -lpcl_kdtree\
+        -lpcl_keypoints\
+        -lpcl_octree\
+        -lpcl_outofcore\
+        -lpcl_people\
+        -lpcl_recognition\
+        -lpcl_registration\
+        -lpcl_sample_consensus\
+        -lpcl_search\
+        -lpcl_segmentation\
+        -lpcl_surface\
+        -lpcl_tracking\
+        -lpcl_visualization
+
+INCLUDEPATH += /usr/include/opencv4/
+LIBS += /usr/lib/aarch64-linux-gnu/libopencv*.so
+
+
+CUDA_SOURCES += plugin/yololayer.cu \
+           src/postprocess.cu \
+           src/preprocess.cu
+
+#QMAKE_LIBDIR += $$CUDA_DIR/lib/
+CUDA_SDK = "/usr/local/cuda/"
+CUDA_DIR = "/usr/local/cuda/"
+CUDA_OBJECTS_DIR = ./
+NVCC_OPTIONS = --use_fast_math --compiler-options "-fPIC"
+CUDA_INC = $$join(INCLUDEPATH,'" -I"','-I"','"')
+NVCC_LIBS = $$join(CUDA_LIBS,' -l','-l', '')
+SYSTEM_TYPE = 64
+CUDA_ARCH = sm_72
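+# sm_72 targets the Jetson AGX Xavier (Volta) GPU; adjust CUDA_ARCH for other devices.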
+CONFIG(debug, debug|release) {
+    # Debug mode
+    cuda_d.input = CUDA_SOURCES
+    cuda_d.output = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.o
+    cuda_d.commands = $$CUDA_DIR/bin/nvcc -D_DEBUG $$NVCC_OPTIONS $$CUDA_INC $$NVCC_LIBS --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
+    cuda_d.dependency_type = TYPE_C
+    QMAKE_EXTRA_COMPILERS += cuda_d
+}
+
+else {
+    # Release mode
+    cuda.input = CUDA_SOURCES
+    cuda.output = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.o
+    cuda.commands = $$CUDA_DIR/bin/nvcc $$NVCC_OPTIONS $$CUDA_INC $$NVCC_LIBS --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH -O3 -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
+    cuda.dependency_type = TYPE_C
+    QMAKE_EXTRA_COMPILERS += cuda
+}
+
+!include(../../../include/common.pri ) {
+    error( "Couldn't find the common.pri file!" )
+}
+
+!include(../../../include/ivprotobuf.pri ) {
+    error( "Couldn't find the ivprotobuf.pri file!" )
+}
+!include(../../../include/ivopencv.pri ) {
+    error( "Couldn't find the ivopencv.pri file!" )
+}
+INCLUDEPATH += $$PWD/../../include/msgtype
+LIBS += -L/usr/lib/aarch64-linux-gnu/ -lglog

+ 28 - 0
src/detection/vision_yolov8/vision_yolov8_config.yaml

@@ -0,0 +1,28 @@
+%YAML:1.0
+---
+show:
+  imshow_flag: true
+  show_distance: false
+test_video:
+  open: false
+  video_path: /home/nvidia/Videos/video.mp4 
+warpPerspective:
+  BOARD_SIDE_LENGTH_X: 7.5
+  BOARD_SIDE_LENGTH_Y: 50
+  y: 3.0
+  x: 0.0
+model:
+  engine_path: /home/nvidia/app/app/model/yolov8n.engine
+camera:
+  calibrationstate : false
+  calibration_yamlpath : /home/nvidia/yaml/camera_middle_640_360.yaml
+  cropstate: true
+  crop_height: 
+    start: 0
+    end: 600
+  crop_width:
+    start: 0
+    end: 1280
+
+
+

+ 24 - 0
src/detection/vision_yolov8/visionyolov8.proto

@@ -0,0 +1,24 @@
+syntax = "proto2";
+package iv.vision;
+
+message PixelObject{
+  optional string type = 1;
+  optional int32 x = 2;
+  optional int32 y = 3;
+  optional int32 width = 4;
+  optional int32 height = 5;
+};
+
+message WorldObject{
+  optional string type = 1;
+  optional float x = 2;
+  optional float y = 3;
+  optional float width = 4;
+};
+
+message Visionyolov8{
+  optional int32 index = 1;
+  repeated PixelObject pixelObject = 2;
+  repeated WorldObject worldObject = 3;
+  optional int64 time = 4;
+};

+ 250 - 0
src/detection/vision_yolov8/yolov8_det.cpp

@@ -0,0 +1,250 @@
+
+#include <iostream>
+#include <fstream>
+#include <opencv2/opencv.hpp>
+#include "model.h"
+#include "utils.h"
+#include "preprocess.h"
+#include "postprocess.h"
+#include "cuda_utils.h"
+#include "logging.h"
+
+Logger gLogger;
+using namespace nvinfer1;
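+// Output buffer size per image: one leading detection count plus kMaxNumOutputBbox detections, each sizeof(Detection)/sizeof(float) floats wide.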
+const int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
+
+void serialize_engine(std::string &wts_name, std::string &engine_name, std::string &sub_type, float &gd, float &gw, int &max_channels) {
+    IBuilder *builder = createInferBuilder(gLogger);
+    IBuilderConfig *config = builder->createBuilderConfig();
+    IHostMemory *serialized_engine = nullptr;
+
+    serialized_engine = buildEngineYolov8Det(builder, config, DataType::kFLOAT, wts_name, gd, gw, max_channels);
+
+    assert(serialized_engine);
+    std::ofstream p(engine_name, std::ios::binary);
+    if (!p) {
+        std::cout << "could not open plan output file" << std::endl;
+        assert(false);
+    }
+    p.write(reinterpret_cast<const char *>(serialized_engine->data()), serialized_engine->size());
+
+    delete builder;
+    delete config;
+    delete serialized_engine;
+}
+
+
+void deserialize_engine(std::string &engine_name, IRuntime **runtime, ICudaEngine **engine, IExecutionContext **context) {
+    std::ifstream file(engine_name, std::ios::binary);
+    if (!file.good()) {
+        std::cerr << "read " << engine_name << " error!" << std::endl;
+        assert(false);
+    }
+    size_t size = 0;
+    file.seekg(0, file.end);
+    size = file.tellg();
+    file.seekg(0, file.beg);
+    char *serialized_engine = new char[size];
+    assert(serialized_engine);
+    file.read(serialized_engine, size);
+    file.close();
+
+    *runtime = createInferRuntime(gLogger);
+    assert(*runtime);
+    *engine = (*runtime)->deserializeCudaEngine(serialized_engine, size);
+    assert(*engine);
+    *context = (*engine)->createExecutionContext();
+    assert(*context);
+    delete[] serialized_engine;
+}
+
+void prepare_buffer(ICudaEngine *engine, float **input_buffer_device, float **output_buffer_device,
+                    float **output_buffer_host, float **decode_ptr_host, float **decode_ptr_device, std::string cuda_post_process) {
+    assert(engine->getNbBindings() == 2);
+    // In order to bind the buffers, we need to know the names of the input and output tensors.
+    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+    const int inputIndex = engine->getBindingIndex(kInputTensorName);
+    const int outputIndex = engine->getBindingIndex(kOutputTensorName);
+    assert(inputIndex == 0);
+    assert(outputIndex == 1);
+    // Create GPU buffers on device
+    CUDA_CHECK(cudaMalloc((void **) input_buffer_device, kBatchSize * 3 * kInputH * kInputW * sizeof(float)));
+    CUDA_CHECK(cudaMalloc((void **) output_buffer_device, kBatchSize * kOutputSize * sizeof(float)));
+    if (cuda_post_process == "c") {
+        *output_buffer_host = new float[kBatchSize * kOutputSize];
+    } else if (cuda_post_process == "g") {
+        if (kBatchSize > 1) {
+            std::cerr << "Do not yet support GPU post processing for multiple batches" << std::endl;
+            exit(0);
+        }
+        // Allocate memory for decode_ptr_host and copy to device
+        *decode_ptr_host = new float[1 + kMaxNumOutputBbox * bbox_element];
+        CUDA_CHECK(cudaMalloc((void **)decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element)));
+    }
+}
+
+void infer(IExecutionContext &context, cudaStream_t &stream, void **buffers, float *output, int batchsize, float* decode_ptr_host, float* decode_ptr_device, int model_bboxes, std::string cuda_post_process) {
+    // infer on the batch asynchronously, and DMA output back to host
+    auto start = std::chrono::system_clock::now();
+    context.enqueue(batchsize, buffers, stream, nullptr);
+    if (cuda_post_process == "c") {
+        CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchsize * kOutputSize * sizeof(float), cudaMemcpyDeviceToHost,stream));
+        auto end = std::chrono::system_clock::now();
+        std::cout << "inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    } else if (cuda_post_process == "g") {
+        CUDA_CHECK(cudaMemsetAsync(decode_ptr_device, 0, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), stream));
+        cuda_decode((float *)buffers[1], model_bboxes, kConfThresh, decode_ptr_device, kMaxNumOutputBbox, stream);
+        cuda_nms(decode_ptr_device, kNmsThresh, kMaxNumOutputBbox, stream);//cuda nms
+        CUDA_CHECK(cudaMemcpyAsync(decode_ptr_host, decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), cudaMemcpyDeviceToHost, stream));
+        auto end = std::chrono::system_clock::now();
+        std::cout << "inference and gpu postprocess time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    }
+
+    CUDA_CHECK(cudaStreamSynchronize(stream));
+}
+
+
+bool parse_args(int argc, char **argv, std::string &wts, std::string &engine, std::string &img_dir, std::string &sub_type, 
+                std::string &cuda_post_process, float &gd, float &gw, int &max_channels) {
+    if (argc < 4) return false;
+    if (std::string(argv[1]) == "-s" && argc == 5) {
+        wts = std::string(argv[2]);
+        engine = std::string(argv[3]);
+        sub_type = std::string(argv[4]);
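+        // Map the YOLOv8 model size suffix (n/s/m/l/x) to its depth multiplier (gd), width multiplier (gw) and maximum channel count.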
+        if (sub_type == "n") {
+          gd = 0.33;
+          gw = 0.25;
+          max_channels = 1024;
+        } else if (sub_type == "s"){
+          gd = 0.33;
+          gw = 0.50;
+          max_channels = 1024;
+        } else if (sub_type == "m") {
+          gd = 0.67;
+          gw = 0.75;
+          max_channels = 576; 
+        } else if (sub_type == "l") {
+          gd = 1.0;
+          gw = 1.0;
+          max_channels = 512;
+        } else if (sub_type == "x") {
+          gd = 1.0;
+          gw = 1.25;
+          max_channels = 640;
+        } else {
+          return false;
+        }
+    } else if (std::string(argv[1]) == "-d" && argc == 5) {
+        engine = std::string(argv[2]);
+        img_dir = std::string(argv[3]);
+        cuda_post_process = std::string(argv[4]);
+    } else {
+        return false;
+    }
+    return true;
+}
+
+int main(int argc, char **argv) {
+    cudaSetDevice(kGpuId);
+    std::string wts_name = "";
+    std::string engine_name = "";
+    std::string img_dir;
+    std::string sub_type = "";
+    std::string cuda_post_process="";
+    int model_bboxes;
+    float gd = 0.0f, gw = 0.0f;
+    int max_channels = 0;
+
+    if (!parse_args(argc, argv, wts_name, engine_name, img_dir, sub_type, cuda_post_process, gd, gw, max_channels)) {
+        std::cerr << "Arguments not right!" << std::endl;
+        std::cerr << "./yolov8 -s [.wts] [.engine] [n/s/m/l/x]  // serialize model to plan file" << std::endl;
+        std::cerr << "./yolov8 -d [.engine] ../samples [c/g]  // deserialize plan file and run inference" << std::endl;
+        return -1;
+    }
+
+    // Create a model using the API directly and serialize it to a file
+    if (!wts_name.empty()) {
+        serialize_engine(wts_name, engine_name, sub_type, gd, gw, max_channels);
+        return 0;
+    }
+
+    // Deserialize the engine from file
+    IRuntime *runtime = nullptr;
+    ICudaEngine *engine = nullptr;
+    IExecutionContext *context = nullptr;
+    deserialize_engine(engine_name, &runtime, &engine, &context);
+    cudaStream_t stream;
+    CUDA_CHECK(cudaStreamCreate(&stream));
+    cuda_preprocess_init(kMaxInputImageSize);
+    auto out_dims = engine->getBindingDimensions(1);
+    model_bboxes = out_dims.d[0];
+    // Prepare cpu and gpu buffers
+    float *device_buffers[2];
+    float *output_buffer_host = nullptr;
+    float *decode_ptr_host=nullptr;
+    float *decode_ptr_device=nullptr;
+
+    // Read images from directory
+    std::vector<std::string> file_names;
+    if (read_files_in_dir(img_dir.c_str(), file_names) < 0) {
+        std::cerr << "read_files_in_dir failed." << std::endl;
+        return -1;
+    }
+
+    prepare_buffer(engine, &device_buffers[0], &device_buffers[1], &output_buffer_host, &decode_ptr_host, &decode_ptr_device, cuda_post_process);
+
+    // batch predict
+    for (size_t i = 0; i < file_names.size(); i += kBatchSize) {
+        // Get a batch of images
+        std::vector<cv::Mat> img_batch;
+        std::vector<std::string> img_name_batch;
+        for (size_t j = i; j < i + kBatchSize && j < file_names.size(); j++) {
+            cv::Mat img = cv::imread(img_dir + "/" + file_names[j]);
+            img_batch.push_back(img);
+            img_name_batch.push_back(file_names[j]);
+        }
+        // Preprocess
+        cuda_batch_preprocess(img_batch, device_buffers[0], kInputW, kInputH, stream);
+        // Run inference
+        infer(*context, stream, (void **)device_buffers, output_buffer_host, kBatchSize, decode_ptr_host, decode_ptr_device, model_bboxes, cuda_post_process);
+        std::vector<std::vector<Detection>> res_batch;
+        if (cuda_post_process == "c") {
+            // NMS
+            batch_nms(res_batch, output_buffer_host, img_batch.size(), kOutputSize, kConfThresh, kNmsThresh);
+        } else if (cuda_post_process == "g") {
+            //Process gpu decode and nms results
+            batch_process(res_batch, decode_ptr_host, img_batch.size(), bbox_element, img_batch);
+        }
+        // Draw bounding boxes
+        draw_bbox(img_batch, res_batch);
+        // Save images
+        for (size_t j = 0; j < img_batch.size(); j++) {
+            cv::imwrite("_" + img_name_batch[j], img_batch[j]);
+        }
+    }
+
+    // Release stream and buffers
+    cudaStreamDestroy(stream);
+    CUDA_CHECK(cudaFree(device_buffers[0]));
+    CUDA_CHECK(cudaFree(device_buffers[1]));
+    CUDA_CHECK(cudaFree(decode_ptr_device));
+    delete[] decode_ptr_host;
+    delete[] output_buffer_host;
+    cuda_preprocess_destroy();
+    // Destroy the engine
+    delete context;
+    delete engine;
+    delete runtime;
+
+    // Print histogram of the output distribution
+    //std::cout << "\nOutput:\n\n";
+    //for (unsigned int i = 0; i < kOutputSize; i++)
+    //{
+    //    std::cout << prob[i] << ", ";
+    //    if (i % 10 == 0) std::cout << std::endl;
+    //}
+    //std::cout << std::endl;
+
+    return 0;
+}
+

+ 455 - 0
src/detection/vision_yolov8/yolov8_det_trt.py

@@ -0,0 +1,455 @@
+"""
+An example that uses TensorRT's Python API to run inference.
+"""
+import ctypes
+import os
+import shutil
+import random
+import sys
+import threading
+import time
+import cv2
+import numpy as np
+import pycuda.autoinit
+import pycuda.driver as cuda
+import tensorrt as trt
+
+CONF_THRESH = 0.5
+IOU_THRESHOLD = 0.4
+
+
+def get_img_path_batches(batch_size, img_dir):
+    ret = []
+    batch = []
+    for root, dirs, files in os.walk(img_dir):
+        for name in files:
+            if len(batch) == batch_size:
+                ret.append(batch)
+                batch = []
+            batch.append(os.path.join(root, name))
+    if len(batch) > 0:
+        ret.append(batch)
+    return ret
+
+
+def plot_one_box(x, img, color=None, label=None, line_thickness=None):
+    """
+    description: Plots one bounding box on image img;
+                 this function comes from the YOLOv8 project.
+    param:
+        x:      a box like [x1,y1,x2,y2]
+        img:    an OpenCV image object
+        color:  color to draw rectangle, such as (0,255,0)
+        label:  str
+        line_thickness: int
+    return:
+        no return
+
+    """
+    tl = (
+            line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
+    )  # line/font thickness
+    color = color or [random.randint(0, 255) for _ in range(3)]
+    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+    if label:
+        tf = max(tl - 1, 1)  # font thickness
+        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
+        cv2.putText(
+            img,
+            label,
+            (c1[0], c1[1] - 2),
+            0,
+            tl / 3,
+            [225, 255, 255],
+            thickness=tf,
+            lineType=cv2.LINE_AA,
+        )
+
+
+class YoLov8TRT(object):
+    """
+    description: A YOLOv8 class that wraps TensorRT ops, preprocessing and postprocessing ops.
+    """
+
+    def __init__(self, engine_file_path):
+        # Create a Context on this device,
+        self.ctx = cuda.Device(0).make_context()
+        stream = cuda.Stream()
+        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
+        runtime = trt.Runtime(TRT_LOGGER)
+
+        # Deserialize the engine from file
+        with open(engine_file_path, "rb") as f:
+            engine = runtime.deserialize_cuda_engine(f.read())
+        context = engine.create_execution_context()
+
+        host_inputs = []
+        cuda_inputs = []
+        host_outputs = []
+        cuda_outputs = []
+        bindings = []
+
+        for binding in engine:
+            print('binding:', binding, engine.get_binding_shape(binding))
+            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
+            dtype = trt.nptype(engine.get_binding_dtype(binding))
+            # Allocate host and device buffers
+            host_mem = cuda.pagelocked_empty(size, dtype)
+            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
+            # Append the device buffer to device bindings.
+            bindings.append(int(cuda_mem))
+            # Append to the appropriate list.
+            if engine.binding_is_input(binding):
+                self.input_w = engine.get_binding_shape(binding)[-1]
+                self.input_h = engine.get_binding_shape(binding)[-2]
+                host_inputs.append(host_mem)
+                cuda_inputs.append(cuda_mem)
+            else:
+                host_outputs.append(host_mem)
+                cuda_outputs.append(cuda_mem)
+
+        # Store
+        self.stream = stream
+        self.context = context
+        self.engine = engine
+        self.host_inputs = host_inputs
+        self.cuda_inputs = cuda_inputs
+        self.host_outputs = host_outputs
+        self.cuda_outputs = cuda_outputs
+        self.bindings = bindings
+        self.batch_size = engine.max_batch_size
+
+    def infer(self, raw_image_generator):
+        threading.Thread.__init__(self)
+        # Make self the active context, pushing it on top of the context stack.
+        self.ctx.push()
+        # Restore
+        stream = self.stream
+        context = self.context
+        engine = self.engine
+        host_inputs = self.host_inputs
+        cuda_inputs = self.cuda_inputs
+        host_outputs = self.host_outputs
+        cuda_outputs = self.cuda_outputs
+        bindings = self.bindings
+        # Do image preprocess
+        batch_image_raw = []
+        batch_origin_h = []
+        batch_origin_w = []
+        batch_input_image = np.empty(shape=[self.batch_size, 3, self.input_h, self.input_w])
+        for i, image_raw in enumerate(raw_image_generator):
+            input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)
+            batch_image_raw.append(image_raw)
+            batch_origin_h.append(origin_h)
+            batch_origin_w.append(origin_w)
+            np.copyto(batch_input_image[i], input_image)
+        batch_input_image = np.ascontiguousarray(batch_input_image)
+
+        # Copy input image to host buffer
+        np.copyto(host_inputs[0], batch_input_image.ravel())
+        start = time.time()
+        # Transfer input data  to the GPU.
+        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
+        # Run inference.
+        context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
+        # Transfer predictions back from the GPU.
+        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
+        # Synchronize the stream
+        stream.synchronize()
+        end = time.time()
+        # Remove any context from the top of the context stack, deactivating it.
+        self.ctx.pop()
+        # Use the first (and only) output binding; each image's result is a fixed-size slice of it
+        output = host_outputs[0]
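+        # Each image's slice is 38001 floats: 1 detection count + 1000 boxes x 38 floats (cx, cy, w, h, conf, cls_id plus 32 mask coefficients)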
+        # Do postprocess
+        for i in range(self.batch_size):
+            result_boxes, result_scores, result_classid = self.post_process(
+                output[i * 38001: (i + 1) * 38001], batch_origin_h[i], batch_origin_w[i]
+            )
+            # Draw rectangles and labels on the original image
+            for j in range(len(result_boxes)):
+                box = result_boxes[j]
+                plot_one_box(
+                    box,
+                    batch_image_raw[i],
+                    label="{}:{:.2f}".format(
+                        categories[int(result_classid[j])], result_scores[j]
+                    ),
+                )
+        return batch_image_raw, end - start
+
+    def destroy(self):
+        # Remove any context from the top of the context stack, deactivating it.
+        self.ctx.pop()
+
+    def get_raw_image(self, image_path_batch):
+        """
+        description: Read an image from image path
+        """
+        for img_path in image_path_batch:
+            yield cv2.imread(img_path)
+
+    def get_raw_image_zeros(self, image_path_batch=None):
+        """
+        description: Ready data for warmup
+        """
+        for _ in range(self.batch_size):
+            yield np.zeros([self.input_h, self.input_w, 3], dtype=np.uint8)
+
+    def preprocess_image(self, raw_bgr_image):
+        """
+        description: Convert BGR image to RGB,
+                     resize and pad it to target size, normalize to [0,1],
+                     transform to NCHW format.
+        param:
+            raw_bgr_image: numpy array, the raw BGR image
+        return:
+            image:  the processed image
+            image_raw: the original image
+            h: original height
+            w: original width
+        """
+        image_raw = raw_bgr_image
+        h, w, c = image_raw.shape
+        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
+        # Calculate width, height and paddings
+        r_w = self.input_w / w
+        r_h = self.input_h / h
+        if r_h > r_w:
+            tw = self.input_w
+            th = int(r_w * h)
+            tx1 = tx2 = 0
+            ty1 = int((self.input_h - th) / 2)
+            ty2 = self.input_h - th - ty1
+        else:
+            tw = int(r_h * w)
+            th = self.input_h
+            tx1 = int((self.input_w - tw) / 2)
+            tx2 = self.input_w - tw - tx1
+            ty1 = ty2 = 0
+        # Resize the image along the long side while maintaining the aspect ratio
+        image = cv2.resize(image, (tw, th))
+        # Pad the short side with (128,128,128)
+        image = cv2.copyMakeBorder(
+            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
+        )
+        image = image.astype(np.float32)
+        # Normalize to [0,1]
+        image /= 255.0
+        # HWC to CHW format:
+        image = np.transpose(image, [2, 0, 1])
+        # CHW to NCHW format
+        image = np.expand_dims(image, axis=0)
+        # Convert the image to row-major order, also known as "C order":
+        image = np.ascontiguousarray(image)
+        return image, image_raw, h, w
+
+    def xywh2xyxy(self, origin_h, origin_w, x):
+        """
+        description:    Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+        param:
+            origin_h:   height of original image
+            origin_w:   width of original image
+            x:          a numpy array of boxes, each row is a box [center_x, center_y, w, h]
+        return:
+            y:          a numpy array of boxes, each row is a box [x1, y1, x2, y2]
+        """
+        y = np.zeros_like(x)
+        r_w = self.input_w / origin_w
+        r_h = self.input_h / origin_h
+        if r_h > r_w:
+            y[:, 0] = x[:, 0]
+            y[:, 2] = x[:, 2]
+            y[:, 1] = x[:, 1] - (self.input_h - r_w * origin_h) / 2
+            y[:, 3] = x[:, 3] - (self.input_h - r_w * origin_h) / 2
+            y /= r_w
+        else:
+            y[:, 0] = x[:, 0] - (self.input_w - r_h * origin_w) / 2
+            y[:, 2] = x[:, 2] - (self.input_w - r_h * origin_w) / 2
+            y[:, 1] = x[:, 1]
+            y[:, 3] = x[:, 3]
+            y /= r_h
+
+        return y
+
+    def post_process(self, output, origin_h, origin_w):
+        """
+        description: postprocess the prediction
+        param:
+            output:     a numpy array like [num_boxes, cx, cy, w, h, conf, cls_id, cx, cy, w, h, conf, cls_id, ...]
+            origin_h:   height of original image
+            origin_w:   width of original image
+        return:
+            result_boxes: final boxes, a numpy array of boxes, each row is a box [x1, y1, x2, y2]
+            result_scores: final scores, a numpy array, each element is the score corresponding to a box
+            result_classid: final class ids, a numpy array, each element is the class id corresponding to a box
+        """
+        # Get the num of boxes detected
+        num = int(output[0])
+        # Reshape to a two-dimensional ndarray
+        pred = np.reshape(output[1:], (-1, 38))[:num, :]
+        # Do nms
+        boxes = self.non_max_suppression(pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)
+        result_boxes = boxes[:, :4] if len(boxes) else np.array([])
+        result_scores = boxes[:, 4] if len(boxes) else np.array([])
+        result_classid = boxes[:, 5] if len(boxes) else np.array([])
+        return result_boxes, result_scores, result_classid
+
+    def bbox_iou(self, box1, box2, x1y1x2y2=True):
+        """
+        description: compute the IoU of two bounding boxes
+        param:
+            box1: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
+            box2: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))            
+            x1y1x2y2: select the coordinate format
+        return:
+            iou: computed iou
+        """
+        if not x1y1x2y2:
+            # Transform from center and width to exact coordinates
+            b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
+            b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
+            b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
+            b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
+        else:
+            # Get the coordinates of bounding boxes
+            b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
+            b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
+
+        # Get the coordinates of the intersection rectangle
+        inter_rect_x1 = np.maximum(b1_x1, b2_x1)
+        inter_rect_y1 = np.maximum(b1_y1, b2_y1)
+        inter_rect_x2 = np.minimum(b1_x2, b2_x2)
+        inter_rect_y2 = np.minimum(b1_y2, b2_y2)
+        # Intersection area
+        inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \
+                     np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
+        # Union Area
+        b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
+        b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
+
+        iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
+
+        return iou
+
+    def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nms_thres=0.4):
+        """
+        description: Removes detections with lower object confidence score than 'conf_thres' and performs
+        Non-Maximum Suppression to further filter detections.
+        param:
+            prediction: detections, (x1, y1, x2, y2, conf, cls_id)
+            origin_h: original image height
+            origin_w: original image width
+            conf_thres: a confidence threshold to filter detections
+            nms_thres: an IoU threshold to filter detections
+        return:
+            boxes: output after nms with the shape (x1, y1, x2, y2, conf, cls_id)
+        """
+        # Get the boxes that score > CONF_THRESH
+        boxes = prediction[prediction[:, 4] >= conf_thres]
+        # Transform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2]
+        boxes[:, :4] = self.xywh2xyxy(origin_h, origin_w, boxes[:, :4])
+        # clip the coordinates
+        boxes[:, 0] = np.clip(boxes[:, 0], 0, origin_w - 1)
+        boxes[:, 2] = np.clip(boxes[:, 2], 0, origin_w - 1)
+        boxes[:, 1] = np.clip(boxes[:, 1], 0, origin_h - 1)
+        boxes[:, 3] = np.clip(boxes[:, 3], 0, origin_h - 1)
+        # Object confidence
+        confs = boxes[:, 4]
+        # Sort by the confs
+        boxes = boxes[np.argsort(-confs)]
+        # Perform non-maximum suppression
+        keep_boxes = []
+        while boxes.shape[0]:
+            large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_thres
+            label_match = boxes[0, -1] == boxes[:, -1]
+            # Indices of boxes with lower confidence scores, large IOUs and matching labels
+            invalid = large_overlap & label_match
+            keep_boxes += [boxes[0]]
+            boxes = boxes[~invalid]
+        boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])
+        return boxes
+
+
+class inferThread(threading.Thread):
+    def __init__(self, yolov8_wrapper, image_path_batch):
+        threading.Thread.__init__(self)
+        self.yolov8_wrapper = yolov8_wrapper
+        self.image_path_batch = image_path_batch
+
+    def run(self):
+        batch_image_raw, use_time = self.yolov8_wrapper.infer(self.yolov8_wrapper.get_raw_image(self.image_path_batch))
+        for i, img_path in enumerate(self.image_path_batch):
+            parent, filename = os.path.split(img_path)
+            save_name = os.path.join('output', filename)
+            # Save image
+            cv2.imwrite(save_name, batch_image_raw[i])
+        print('input->{}, time->{:.2f}ms, saving into output/'.format(self.image_path_batch, use_time * 1000))
+
+
+class warmUpThread(threading.Thread):
+    def __init__(self, yolov8_wrapper):
+        threading.Thread.__init__(self)
+        self.yolov8_wrapper = yolov8_wrapper
+
+    def run(self):
+        batch_image_raw, use_time = self.yolov8_wrapper.infer(self.yolov8_wrapper.get_raw_image_zeros())
+        print('warm_up->{}, time->{:.2f}ms'.format(batch_image_raw[0].shape, use_time * 1000))
+
+
+if __name__ == "__main__":
+    # load custom plugin and engine
+    PLUGIN_LIBRARY = "build/libmyplugins.so"
+    engine_file_path = "yolov8n.engine"
+
+    if len(sys.argv) > 1:
+        engine_file_path = sys.argv[1]
+    if len(sys.argv) > 2:
+        PLUGIN_LIBRARY = sys.argv[2]
+
+    ctypes.CDLL(PLUGIN_LIBRARY)
+
+    # load coco labels
+
+    categories = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
+                  "traffic light",
+                  "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
+                  "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase",
+                  "frisbee",
+                  "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
+                  "surfboard",
+                  "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
+                  "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
+                  "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
+                  "cell phone",
+                  "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
+                  "teddy bear",
+                  "hair drier", "toothbrush"]
+
+    if os.path.exists('output/'):
+        shutil.rmtree('output/')
+    os.makedirs('output/')
+    # a YoLov8TRT instance
+    yolov8_wrapper = YoLov8TRT(engine_file_path)
+    try:
+        print('batch size is', yolov8_wrapper.batch_size)
+
+        image_dir = "samples/"
+        image_path_batches = get_img_path_batches(yolov8_wrapper.batch_size, image_dir)
+
+        for i in range(10):
+            # create a new thread to do warm_up
+            thread1 = warmUpThread(yolov8_wrapper)
+            thread1.start()
+            thread1.join()
+        for batch in image_path_batches:
+            # create a new thread to do inference
+            thread1 = inferThread(yolov8_wrapper, batch)
+            thread1.start()
+            thread1.join()
+    finally:
+        # destroy the instance
+        yolov8_wrapper.destroy()

+ 321 - 0
src/detection/vision_yolov8/yolov8_seg.cpp

@@ -0,0 +1,321 @@
+
+#include <iostream>
+#include <fstream>
+#include <opencv2/opencv.hpp>
+#include "model.h"
+#include "utils.h"
+#include "preprocess.h"
+#include "postprocess.h"
+#include "cuda_utils.h"
+#include "logging.h"
+
+Logger gLogger;
+using namespace nvinfer1;
+const int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
+const static int kOutputSegSize = 32 * (kInputH / 4) * (kInputW / 4);
+
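+// Clip a bbox (x, y, w, h) to the network input (hard-coded 640x640 here) and scale it down to the prototype-mask resolution.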
+static cv::Rect get_downscale_rect(float bbox[4], float scale) {
+
+  float left = bbox[0];
+  float top  = bbox[1];
+  float right  = bbox[0] + bbox[2];
+  float bottom = bbox[1] + bbox[3];
+
+  left    = left < 0 ? 0 : left;
+  top     = top < 0 ? 0: top;
+  right   = right > 640 ? 640 : right;
+  bottom  = bottom > 640 ? 640: bottom;
+
+  left   /= scale;
+  top    /= scale;
+  right  /= scale;
+  bottom /= scale;
+  return cv::Rect(int(left), int(top), int(right - left), int(bottom - top));
+}
+
+std::vector<cv::Mat> process_mask(const float* proto, int proto_size, std::vector<Detection>& dets) {
+
+  std::vector<cv::Mat> masks;
+  for (size_t i = 0; i < dets.size(); i++) {
+
+    cv::Mat mask_mat = cv::Mat::zeros(kInputH / 4, kInputW / 4, CV_32FC1);
+    auto r = get_downscale_rect(dets[i].bbox, 4);
+
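+    // For each pixel inside the downscaled box, take the dot product of the detection's
+    // 32 mask coefficients with the prototype masks, then pass it through a sigmoid.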
+    for (int x = r.x; x < r.x + r.width; x++) {
+      for (int y = r.y; y < r.y + r.height; y++) {
+        float e = 0.0f;
+        for (int j = 0; j < 32; j++) {
+            e += dets[i].mask[j] * proto[j * proto_size / 32 + y * mask_mat.cols + x];
+        }
+        e = 1.0f / (1.0f + expf(-e));
+        mask_mat.at<float>(y, x) = e;
+      }
+    }
+    cv::resize(mask_mat, mask_mat, cv::Size(kInputW, kInputH));
+    masks.push_back(mask_mat);
+  }
+  return masks;
+}
+
+
+void serialize_engine(std::string &wts_name, std::string &engine_name, std::string &sub_type, float &gd, float &gw, int &max_channels)
+{
+    IBuilder *builder = createInferBuilder(gLogger);
+    IBuilderConfig *config = builder->createBuilderConfig();
+    IHostMemory *serialized_engine = nullptr;
+
+    serialized_engine = buildEngineYolov8Seg(builder, config, DataType::kFLOAT, wts_name, gd, gw, max_channels);
+
+    assert(serialized_engine);
+    std::ofstream p(engine_name, std::ios::binary);
+    if (!p)
+    {
+        std::cout << "could not open plan output file" << std::endl;
+        assert(false);
+    }
+    p.write(reinterpret_cast<const char *>(serialized_engine->data()), serialized_engine->size());
+
+    delete builder;
+    delete config;
+    delete serialized_engine;
+}
+
+void deserialize_engine(std::string &engine_name, IRuntime **runtime, ICudaEngine **engine, IExecutionContext **context)
+{
+    std::ifstream file(engine_name, std::ios::binary);
+    if (!file.good())
+    {
+        std::cerr << "read " << engine_name << " error!" << std::endl;
+        assert(false);
+    }
+    size_t size = 0;
+    file.seekg(0, file.end);
+    size = file.tellg();
+    file.seekg(0, file.beg);
+    char *serialized_engine = new char[size];
+    assert(serialized_engine);
+    file.read(serialized_engine, size);
+    file.close();
+
+    *runtime = createInferRuntime(gLogger);
+    assert(*runtime);
+    *engine = (*runtime)->deserializeCudaEngine(serialized_engine, size);
+    assert(*engine);
+    *context = (*engine)->createExecutionContext();
+    assert(*context);
+    delete[] serialized_engine;
+}
+
+void prepare_buffer(ICudaEngine *engine, float **input_buffer_device, float **output_buffer_device, float **output_seg_buffer_device,
+                    float **output_buffer_host,float **output_seg_buffer_host ,float **decode_ptr_host, float **decode_ptr_device, std::string cuda_post_process) {
+    assert(engine->getNbBindings() == 3);
+    // In order to bind the buffers, we need to know the names of the input and output tensors.
+    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
+    const int inputIndex = engine->getBindingIndex(kInputTensorName);
+    const int outputIndex = engine->getBindingIndex(kOutputTensorName);
+    const int outputIndex_seg = engine->getBindingIndex("proto");
+
+    assert(inputIndex == 0);
+    assert(outputIndex == 1);
+    assert(outputIndex_seg == 2);
+    // Create GPU buffers on device
+    CUDA_CHECK(cudaMalloc((void **) input_buffer_device, kBatchSize * 3 * kInputH * kInputW * sizeof(float)));
+    CUDA_CHECK(cudaMalloc((void **) output_buffer_device, kBatchSize * kOutputSize * sizeof(float)));
+    CUDA_CHECK(cudaMalloc((void **) output_seg_buffer_device, kBatchSize * kOutputSegSize * sizeof(float)));
+    
+    if (cuda_post_process == "c") {
+        *output_buffer_host = new float[kBatchSize * kOutputSize];
+        *output_seg_buffer_host = new float[kBatchSize * kOutputSegSize];
+    } else if (cuda_post_process == "g") {
+        if (kBatchSize > 1) {
+            std::cerr << "Do not yet support GPU post processing for multiple batches" << std::endl;
+            exit(0);
+        }
+        // Allocate memory for decode_ptr_host and copy to device
+        *decode_ptr_host = new float[1 + kMaxNumOutputBbox * bbox_element];
+        CUDA_CHECK(cudaMalloc((void **)decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element)));
+    }
+}
+
+void infer(IExecutionContext &context, cudaStream_t &stream, void **buffers, float *output, float *output_seg,int batchsize, float* decode_ptr_host, float* decode_ptr_device, int model_bboxes, std::string cuda_post_process) {
+    // infer on the batch asynchronously, and DMA output back to host
+    auto start = std::chrono::system_clock::now();
+    context.enqueue(batchsize, buffers, stream, nullptr);
+    if (cuda_post_process == "c") {
+
+        std::cout << "kOutputSize:" << kOutputSize <<std::endl;
+        CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchsize * kOutputSize * sizeof(float), cudaMemcpyDeviceToHost,stream));
+        std::cout << "kOutputSegSize:" << kOutputSegSize <<std::endl;
+        CUDA_CHECK(cudaMemcpyAsync(output_seg, buffers[2], batchsize * kOutputSegSize * sizeof(float), cudaMemcpyDeviceToHost, stream));
+
+        auto end = std::chrono::system_clock::now();
+        std::cout << "inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    } else if (cuda_post_process == "g") {
+        CUDA_CHECK(cudaMemsetAsync(decode_ptr_device, 0, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), stream));
+        cuda_decode((float *)buffers[1], model_bboxes, kConfThresh, decode_ptr_device, kMaxNumOutputBbox, stream);
+        cuda_nms(decode_ptr_device, kNmsThresh, kMaxNumOutputBbox, stream);//cuda nms
+        CUDA_CHECK(cudaMemcpyAsync(decode_ptr_host, decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), cudaMemcpyDeviceToHost, stream));
+        auto end = std::chrono::system_clock::now();
+        std::cout << "inference and gpu postprocess time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
+    }
+
+    CUDA_CHECK(cudaStreamSynchronize(stream));
+}
+
+bool parse_args(int argc, char **argv, std::string &wts, std::string &engine, std::string &img_dir, std::string &sub_type,
+                std::string &cuda_post_process, std::string& labels_filename, float &gd, float &gw, int &max_channels)
+{
+    if (argc < 4)
+        return false;
+    if (std::string(argv[1]) == "-s" && argc == 5) {
+        wts = std::string(argv[2]);
+        engine = std::string(argv[3]);
+        sub_type = std::string(argv[4]);
+        if (sub_type == "n") {
+          gd = 0.33;
+          gw = 0.25;
+          max_channels = 1024;
+        } else if (sub_type == "s") {
+          gd = 0.33;
+          gw = 0.50;
+          max_channels = 1024;
+        } else if (sub_type == "m") {
+          gd = 0.67;
+          gw = 0.75;
+          max_channels = 576;
+        } else if (sub_type == "l") {
+          gd = 1.0;
+          gw = 1.0;
+          max_channels = 512;
+        } else if (sub_type == "x") {
+          gd = 1.0;
+          gw = 1.25;
+          max_channels = 640;
+        } else{
+          return false;
+        }
+    } else if (std::string(argv[1]) == "-d" && argc == 6) {
+      engine = std::string(argv[2]);
+      img_dir = std::string(argv[3]);
+      cuda_post_process = std::string(argv[4]);
+      labels_filename = std::string(argv[5]);
+    } else {
+      return false;
+    }
+    return true;
+}
+
+int main(int argc, char **argv) {
+    cudaSetDevice(kGpuId);
+    std::string wts_name = "";
+    std::string engine_name = "";
+    std::string img_dir;
+    std::string sub_type = "";
+    std::string cuda_post_process = "";
+    std::string labels_filename = "../coco.txt";
+    int model_bboxes;
+    float gd = 0.0f, gw = 0.0f;
+    int max_channels = 0;
+
+    if (!parse_args(argc, argv, wts_name, engine_name, img_dir, sub_type, cuda_post_process, labels_filename, gd, gw, max_channels)) {
+        std::cerr << "Arguments not right!" << std::endl;
+        std::cerr << "./yolov8 -s [.wts] [.engine] [n/s/m/l/x]  // serialize model to plan file" << std::endl;
+        std::cerr << "./yolov8 -d [.engine] ../samples  [c/g] coco_file// deserialize plan file and run inference" << std::endl;
+        return -1;
+    }
+
+    // Create a model using the API directly and serialize it to a file
+    if (!wts_name.empty()) {
+        serialize_engine(wts_name, engine_name, sub_type, gd, gw, max_channels);
+        return 0;
+    }
+
+    // Deserialize the engine from file
+    IRuntime *runtime = nullptr;
+    ICudaEngine *engine = nullptr;
+    IExecutionContext *context = nullptr;
+    deserialize_engine(engine_name, &runtime, &engine, &context);
+    cudaStream_t stream;
+    CUDA_CHECK(cudaStreamCreate(&stream));
+    cuda_preprocess_init(kMaxInputImageSize);
+    auto out_dims = engine->getBindingDimensions(1);
+    model_bboxes = out_dims.d[0];
+    // Prepare cpu and gpu buffers
+    float *device_buffers[3];
+    float *output_buffer_host = nullptr;
+    float *output_seg_buffer_host = nullptr;
+    float *decode_ptr_host=nullptr;
+    float *decode_ptr_device=nullptr;
+
+    // Read images from directory
+    std::vector<std::string> file_names;
+    if (read_files_in_dir(img_dir.c_str(), file_names) < 0) {
+        std::cerr << "read_files_in_dir failed." << std::endl;
+        return -1;
+    }
+
+    std::unordered_map<int, std::string> labels_map;
+    read_labels(labels_filename, labels_map);
+    assert(kNumClass == labels_map.size());
+
+    prepare_buffer(engine, &device_buffers[0], &device_buffers[1], &device_buffers[2], &output_buffer_host, &output_seg_buffer_host,&decode_ptr_host, &decode_ptr_device, cuda_post_process);
+
+    // batch predict
+    for (size_t i = 0; i < file_names.size(); i += kBatchSize) {
+        // Get a batch of images
+        std::vector<cv::Mat> img_batch;
+        std::vector<std::string> img_name_batch;
+        for (size_t j = i; j < i + kBatchSize && j < file_names.size(); j++) {
+            cv::Mat img = cv::imread(img_dir + "/" + file_names[j]);
+            img_batch.push_back(img);
+            img_name_batch.push_back(file_names[j]);
+        }
+        // Preprocess
+        cuda_batch_preprocess(img_batch, device_buffers[0], kInputW, kInputH, stream);
+        // Run inference
+        infer(*context, stream, (void **)device_buffers, output_buffer_host, output_seg_buffer_host,kBatchSize, decode_ptr_host, decode_ptr_device, model_bboxes, cuda_post_process);
+        std::vector<std::vector<Detection>> res_batch;
+        if (cuda_post_process == "c") {
+            // NMS
+            batch_nms(res_batch, output_buffer_host, img_batch.size(), kOutputSize, kConfThresh, kNmsThresh);
+            for (size_t b = 0; b < img_batch.size(); b++) {
+                auto& res = res_batch[b];
+                cv::Mat img = img_batch[b];
+                auto masks = process_mask(&output_seg_buffer_host[b * kOutputSegSize], kOutputSegSize, res);
+                draw_mask_bbox(img, res, masks, labels_map);
+                cv::imwrite("_" + img_name_batch[b], img);
+            }
+        } else if (cuda_post_process == "g") {
+            // Process gpu decode and nms results
+            // batch_process(res_batch, decode_ptr_host, img_batch.size(), bbox_element, img_batch);
+            // todo seg in gpu
+            std::cerr << "seg_postprocess is not support in gpu right now" << std::endl;
+        }
+    }
+
+    // Release stream and buffers
+    cudaStreamDestroy(stream);
+    CUDA_CHECK(cudaFree(device_buffers[0]));
+    CUDA_CHECK(cudaFree(device_buffers[1]));
+    CUDA_CHECK(cudaFree(device_buffers[2]));
+    CUDA_CHECK(cudaFree(decode_ptr_device));
+    delete[] decode_ptr_host;
+    delete[] output_buffer_host;
+    delete[] output_seg_buffer_host;
+    cuda_preprocess_destroy();
+    // Destroy the engine
+    delete context;
+    delete engine;
+    delete runtime;
+
+    // Print histogram of the output distribution
+    // std::cout << "\nOutput:\n\n";
+    // for (unsigned int i = 0; i < kOutputSize; i++)
+    //{
+    //    std::cout << prob[i] << ", ";
+    //    if (i % 10 == 0) std::cout << std::endl;
+    //}
+    // std::cout << std::endl;
+
+    return 0;
+}

+ 570 - 0
src/detection/vision_yolov8/yolov8_seg_trt.py

@@ -0,0 +1,570 @@
+"""
+An example that uses TensorRT's Python api to make inferences.
+"""
+import ctypes
+import os
+import shutil
+import random
+import sys
+import threading
+import time
+import cv2
+import numpy as np
+import pycuda.autoinit
+import pycuda.driver as cuda
+import tensorrt as trt
+
+CONF_THRESH = 0.5
+IOU_THRESHOLD = 0.4
+
+
+def get_img_path_batches(batch_size, img_dir):
+    ret = []
+    batch = []
+    for root, dirs, files in os.walk(img_dir):
+        for name in files:
+            if len(batch) == batch_size:
+                ret.append(batch)
+                batch = []
+            batch.append(os.path.join(root, name))
+    if len(batch) > 0:
+        ret.append(batch)
+    return ret
+
+
+def plot_one_box(x, img, color=None, label=None, line_thickness=None):
+    """
+    description: Plots one bounding box on image img;
+                 this function comes from the YOLOv8 project.
+    param:
+        x:      a box like [x1, y1, x2, y2]
+        img:    an OpenCV image object
+        color:  color to draw the rectangle, such as (0, 255, 0)
+        label:  str
+        line_thickness: int
+    return:
+        no return
+
+    """
+    tl = (
+            line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
+    )  # line/font thickness
+    color = color or [random.randint(0, 255) for _ in range(3)]
+    c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3]))
+    cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
+    if label:
+        tf = max(tl - 1, 1)  # font thickness
+        t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
+        c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3
+        cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA)  # filled
+        cv2.putText(
+            img,
+            label,
+            (c1[0], c1[1] - 2),
+            0,
+            tl / 3,
+            [225, 255, 255],
+            thickness=tf,
+            lineType=cv2.LINE_AA,
+        )
+
+
+class YoLov8TRT(object):
+    """
+    description: A YOLOv8 class that wraps TensorRT ops, preprocess and postprocess ops.
+    """
+
+    def __init__(self, engine_file_path):
+        # Create a Context on this device,
+        self.ctx = cuda.Device(0).make_context()
+        stream = cuda.Stream()
+        TRT_LOGGER = trt.Logger(trt.Logger.INFO)
+        runtime = trt.Runtime(TRT_LOGGER)
+
+        # Deserialize the engine from file
+        with open(engine_file_path, "rb") as f:
+            engine = runtime.deserialize_cuda_engine(f.read())
+        context = engine.create_execution_context()
+
+        host_inputs = []
+        cuda_inputs = []
+        host_outputs = []
+        cuda_outputs = []
+        bindings = []
+
+        for binding in engine:
+            print('binding:', binding, engine.get_binding_shape(binding))
+            size = trt.volume(engine.get_binding_shape(binding)) * engine.max_batch_size
+            dtype = trt.nptype(engine.get_binding_dtype(binding))
+            # Allocate host and device buffers
+            host_mem = cuda.pagelocked_empty(size, dtype)
+            cuda_mem = cuda.mem_alloc(host_mem.nbytes)
+            # Append the device buffer to device bindings.
+            bindings.append(int(cuda_mem))
+            # Append to the appropriate list.
+            if engine.binding_is_input(binding):
+                self.input_w = engine.get_binding_shape(binding)[-1]
+                self.input_h = engine.get_binding_shape(binding)[-2]
+                host_inputs.append(host_mem)
+                cuda_inputs.append(cuda_mem)
+            else:
+                host_outputs.append(host_mem)
+                cuda_outputs.append(cuda_mem)
+
+        # Store
+        self.stream = stream
+        self.context = context
+        self.engine = engine
+        self.host_inputs = host_inputs
+        self.cuda_inputs = cuda_inputs
+        self.host_outputs = host_outputs
+        self.cuda_outputs = cuda_outputs
+        self.bindings = bindings
+        self.batch_size = engine.max_batch_size
+
+        #Data length
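+        # host_outputs[0] holds the flattened detections, host_outputs[1] the
+        # prototype masks of shape (seg_c, input_h / 4, input_w / 4).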
+        self.det_output_length  = host_outputs[0].shape[0]
+        self.seg_output_length = host_outputs[1].shape[0]
+        self.seg_w = int(self.input_w / 4)
+        self.seg_h = int(self.input_h / 4)
+        self.seg_c = int(self.seg_output_length / (self.seg_w * self.seg_h))
+        self.det_row_output_length = self.seg_c + 6
+
+        # Draw mask
+        self.colors_obj = Colors()
+
+
+    def infer(self, raw_image_generator):
+        threading.Thread.__init__(self)
+        # Make self the active context, pushing it on top of the context stack.
+        self.ctx.push()
+        # Restore
+        stream = self.stream
+        context = self.context
+        engine = self.engine
+        host_inputs = self.host_inputs
+        cuda_inputs = self.cuda_inputs
+        host_outputs = self.host_outputs
+        cuda_outputs = self.cuda_outputs
+        bindings = self.bindings
+        # Do image preprocess
+        batch_image_raw = []
+        batch_origin_h = []
+        batch_origin_w = []
+        batch_input_image = np.empty(shape=[self.batch_size, 3, self.input_h, self.input_w])
+        for i, image_raw in enumerate(raw_image_generator):
+            input_image, image_raw, origin_h, origin_w = self.preprocess_image(image_raw)
+            batch_image_raw.append(image_raw)
+            batch_origin_h.append(origin_h)
+            batch_origin_w.append(origin_w)
+            np.copyto(batch_input_image[i], input_image)
+        batch_input_image = np.ascontiguousarray(batch_input_image)
+
+        # Copy input image to host buffer
+        np.copyto(host_inputs[0], batch_input_image.ravel())
+        start = time.time()
+        # Transfer input data  to the GPU.
+        cuda.memcpy_htod_async(cuda_inputs[0], host_inputs[0], stream)
+        # Run inference.
+        context.execute_async(batch_size=self.batch_size, bindings=bindings, stream_handle=stream.handle)
+        # Transfer predictions back from the GPU.
+        cuda.memcpy_dtoh_async(host_outputs[0], cuda_outputs[0], stream)
+        cuda.memcpy_dtoh_async(host_outputs[1], cuda_outputs[1], stream)
+
+        # Synchronize the stream
+        stream.synchronize()
+        end = time.time()
+        # Remove any context from the top of the context stack, deactivating it.
+        self.ctx.pop()
+        # Here we use the first row of output in that batch_size = 1
+        output = host_outputs[0]
+        output_proto_mask = host_outputs[1]
+        # Do postprocess
+        for i in range(self.batch_size):
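+            # Each image's slice is 38001 floats: 1 detection count followed by
+            # 1000 rows of 38 values (cx, cy, w, h, conf, cls_id, 32 mask coefficients).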
+            result_boxes, result_scores, result_classid,result_proto_coef = self.post_process(
+                output[i * 38001: (i + 1) * 38001], batch_origin_h[i], batch_origin_w[i]
+            )
+
+            if result_proto_coef.shape[0] == 0:
+                continue
+            result_masks = self.process_mask(output_proto_mask, result_proto_coef, result_boxes, batch_origin_h[i], batch_origin_w[i])
+            
+            self.draw_mask(result_masks, colors_=[self.colors_obj(x, True) for x in result_classid],im_src=batch_image_raw[i])
+
+            # Draw rectangles and labels on the original image
+            for j in range(len(result_boxes)):
+                box = result_boxes[j]
+                plot_one_box(
+                    box,
+                    batch_image_raw[i],
+                    label="{}:{:.2f}".format(
+                        categories[int(result_classid[j])], result_scores[j]
+                    ),
+                )
+        return batch_image_raw, end - start
+
+    def destroy(self):
+        # Remove any context from the top of the context stack, deactivating it.
+        self.ctx.pop()
+
+    def get_raw_image(self, image_path_batch):
+        """
+        description: Read an image from image path
+        """
+        for img_path in image_path_batch:
+            yield cv2.imread(img_path)
+
+    def get_raw_image_zeros(self, image_path_batch=None):
+        """
+        description: Prepare dummy data for warm-up
+        """
+        for _ in range(self.batch_size):
+            yield np.zeros([self.input_h, self.input_w, 3], dtype=np.uint8)
+
+    def preprocess_image(self, raw_bgr_image):
+        """
+        description: Convert BGR image to RGB,
+                     resize and pad it to target size, normalize to [0,1],
+                     transform to NCHW format.
+        param:
+            raw_bgr_image: np.ndarray, the original BGR image
+        return:
+            image:  the processed image
+            image_raw: the original image
+            h: original height
+            w: original width
+        """
+        image_raw = raw_bgr_image
+        h, w, c = image_raw.shape
+        image = cv2.cvtColor(image_raw, cv2.COLOR_BGR2RGB)
+        # Calculate width, height and paddings
+        r_w = self.input_w / w
+        r_h = self.input_h / h
+        if r_h > r_w:
+            tw = self.input_w
+            th = int(r_w * h)
+            tx1 = tx2 = 0
+            ty1 = int((self.input_h - th) / 2)
+            ty2 = self.input_h - th - ty1
+        else:
+            tw = int(r_h * w)
+            th = self.input_h
+            tx1 = int((self.input_w - tw) / 2)
+            tx2 = self.input_w - tw - tx1
+            ty1 = ty2 = 0
+        # Resize the image with long side while maintaining ratio
+        image = cv2.resize(image, (tw, th))
+        # Pad the short side with (128,128,128)
+        image = cv2.copyMakeBorder(
+            image, ty1, ty2, tx1, tx2, cv2.BORDER_CONSTANT, None, (128, 128, 128)
+        )
+        image = image.astype(np.float32)
+        # Normalize to [0,1]
+        image /= 255.0
+        # HWC to CHW format:
+        image = np.transpose(image, [2, 0, 1])
+        # CHW to NCHW format
+        image = np.expand_dims(image, axis=0)
+        # Convert the image to row-major order, also known as "C order":
+        image = np.ascontiguousarray(image)
+        return image, image_raw, h, w
+
+    def xywh2xyxy(self, origin_h, origin_w, x):
+        """
+        description:    Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
+        param:
+            origin_h:   height of original image
+            origin_w:   width of original image
+            x:          A boxes numpy, each row is a box [center_x, center_y, w, h]
+        return:
+            y:          A boxes numpy, each row is a box [x1, y1, x2, y2]
+        """
+        y = np.zeros_like(x)
+        r_w = self.input_w / origin_w
+        r_h = self.input_h / origin_h
+        if r_h > r_w:
+            y[:, 0] = x[:, 0]
+            y[:, 2] = x[:, 2]
+            y[:, 1] = x[:, 1] - (self.input_h - r_w * origin_h) / 2
+            y[:, 3] = x[:, 3] - (self.input_h - r_w * origin_h) / 2
+            y /= r_w
+        else:
+            y[:, 0] = x[:, 0] - (self.input_w - r_h * origin_w) / 2
+            y[:, 2] = x[:, 2] - (self.input_w - r_h * origin_w) / 2
+            y[:, 1] = x[:, 1]
+            y[:, 3] = x[:, 3]
+            y /= r_h
+
+        return y
+
+    def post_process(self, output, origin_h, origin_w):
+        """
+        description: postprocess the prediction
+        param:
+            output:     A numpy array like [num_boxes, cx, cy, w, h, conf, cls_id, cx, cy, w, h, conf, cls_id, ...]
+            origin_h:   height of original image
+            origin_w:   width of original image
+        return:
+            result_boxes: final boxes, a numpy array, each row is a box [x1, y1, x2, y2]
+            result_scores: final scores, a numpy array, each element is the score corresponding to a box
+            result_classid: final class ids, a numpy array, each element is the class id corresponding to a box
+            result_proto_coef: mask prototype coefficients, a numpy array of shape (n, 32)
+        """
+        # Get the num of boxes detected
+        num = int(output[0])
+        # Reshape to a two-dimensional ndarray
+        pred = np.reshape(output[1:], (-1, 38))[:num, :]
+
+        # Do nms
+        boxes = self.non_max_suppression(pred, origin_h, origin_w, conf_thres=CONF_THRESH, nms_thres=IOU_THRESHOLD)
+        result_boxes = boxes[:, :4] if len(boxes) else np.array([])
+        result_scores = boxes[:, 4] if len(boxes) else np.array([])
+        result_classid = boxes[:, 5] if len(boxes) else np.array([])
+        result_proto_coef = boxes[:, 6:] if len(boxes) else np.array([])
+        return result_boxes, result_scores, result_classid,result_proto_coef
+
+    def bbox_iou(self, box1, box2, x1y1x2y2=True):
+        """
+        description: compute the IoU of two bounding boxes
+        param:
+            box1: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))
+            box2: A box coordinate (can be (x1, y1, x2, y2) or (x, y, w, h))            
+            x1y1x2y2: select the coordinate format
+        return:
+            iou: computed iou
+        """
+        if not x1y1x2y2:
+            # Transform from center and width to exact coordinates
+            b1_x1, b1_x2 = box1[:, 0] - box1[:, 2] / 2, box1[:, 0] + box1[:, 2] / 2
+            b1_y1, b1_y2 = box1[:, 1] - box1[:, 3] / 2, box1[:, 1] + box1[:, 3] / 2
+            b2_x1, b2_x2 = box2[:, 0] - box2[:, 2] / 2, box2[:, 0] + box2[:, 2] / 2
+            b2_y1, b2_y2 = box2[:, 1] - box2[:, 3] / 2, box2[:, 1] + box2[:, 3] / 2
+        else:
+            # Get the coordinates of bounding boxes
+            b1_x1, b1_y1, b1_x2, b1_y2 = box1[:, 0], box1[:, 1], box1[:, 2], box1[:, 3]
+            b2_x1, b2_y1, b2_x2, b2_y2 = box2[:, 0], box2[:, 1], box2[:, 2], box2[:, 3]
+
+        # Get the coordinates of the intersection rectangle
+        inter_rect_x1 = np.maximum(b1_x1, b2_x1)
+        inter_rect_y1 = np.maximum(b1_y1, b2_y1)
+        inter_rect_x2 = np.minimum(b1_x2, b2_x2)
+        inter_rect_y2 = np.minimum(b1_y2, b2_y2)
+        # Intersection area
+        inter_area = np.clip(inter_rect_x2 - inter_rect_x1 + 1, 0, None) * \
+                     np.clip(inter_rect_y2 - inter_rect_y1 + 1, 0, None)
+        # Union Area
+        b1_area = (b1_x2 - b1_x1 + 1) * (b1_y2 - b1_y1 + 1)
+        b2_area = (b2_x2 - b2_x1 + 1) * (b2_y2 - b2_y1 + 1)
+
+        iou = inter_area / (b1_area + b2_area - inter_area + 1e-16)
+
+        return iou
+
+    def non_max_suppression(self, prediction, origin_h, origin_w, conf_thres=0.5, nms_thres=0.4):
+        """
+        description: Removes detections with lower object confidence score than 'conf_thres' and performs
+        Non-Maximum Suppression to further filter detections.
+        param:
+            prediction: detections, each row is [cx, cy, w, h, conf, cls_id, 32 mask coefficients]
+            origin_h: original image height
+            origin_w: original image width
+            conf_thres: a confidence threshold to filter detections
+            nms_thres: an IoU threshold to filter detections
+        return:
+            boxes: output after nms with the shape (x1, y1, x2, y2, conf, cls_id)
+        """
+        # Get the boxes that score > CONF_THRESH
+        boxes = prediction[prediction[:, 4] >= conf_thres]
+        # Transform bbox from [center_x, center_y, w, h] to [x1, y1, x2, y2]
+        boxes[:, :4] = self.xywh2xyxy(origin_h, origin_w, boxes[:, :4])
+        # clip the coordinates
+        boxes[:, 0] = np.clip(boxes[:, 0], 0, origin_w - 1)
+        boxes[:, 2] = np.clip(boxes[:, 2], 0, origin_w - 1)
+        boxes[:, 1] = np.clip(boxes[:, 1], 0, origin_h - 1)
+        boxes[:, 3] = np.clip(boxes[:, 3], 0, origin_h - 1)
+        # Object confidence
+        confs = boxes[:, 4]
+        # Sort by the confs
+        boxes = boxes[np.argsort(-confs)]
+        # Perform non-maximum suppression
+        keep_boxes = []
+        while boxes.shape[0]:
+            large_overlap = self.bbox_iou(np.expand_dims(boxes[0, :4], 0), boxes[:, :4]) > nms_thres
+            label_match = boxes[0, 5] == boxes[:, 5]
+            # Indices of boxes with lower confidence scores, large IOUs and matching labels
+            invalid = large_overlap & label_match
+            keep_boxes += [boxes[0]]
+            boxes = boxes[~invalid]
+        boxes = np.stack(keep_boxes, 0) if len(keep_boxes) else np.array([])
+        return boxes
+
+    def sigmoid(self, x):
+        return 1 / (1 + np.exp(-x))
+
+    def scale_mask(self, mask, ih, iw):
+        mask = cv2.resize(mask, (self.input_w, self.input_h))
+        r_w = self.input_w / (iw * 1.0)
+        r_h = self.input_h / (ih * 1.0)
+        if r_h > r_w:
+            w = self.input_w
+            h = int(r_w * ih)
+            x = 0
+            y = int((self.input_h - h) / 2)
+        else:
+            w = int(r_h * iw)
+            h = self.input_h
+            x = int((self.input_w - w) / 2)
+            y = 0
+        crop = mask[y:y+h, x:x+w]
+        crop = cv2.resize(crop, (iw, ih))
+        return crop 
+    
+    def process_mask(self, output_proto_mask, result_proto_coef, result_boxes, ih, iw):
+        """
+        description: Compute instance masks from the YOLOv8 segmentation output.
+        param:
+            output_proto_mask: prototype masks, e.g. (32, 160, 160) for a 640x640 input
+            result_proto_coef: prototype mask coefficients (n, 32), where n is the number of detections
+            result_boxes     : detected boxes (n, 4) in original-image coordinates, used to crop the masks
+            ih: rows of original image
+            iw: cols of original image
+        return:
+            mask_result: (n, ih, iw)
+        """
+        result_proto_masks = output_proto_mask.reshape(self.seg_c, self.seg_h, self.seg_w)
+        c, mh, mw = result_proto_masks.shape
+        masks = self.sigmoid((result_proto_coef @ result_proto_masks.astype(np.float32).reshape(c, -1))).reshape(-1, mh, mw)
+        
+
+        mask_result = []
+        for mask, box in zip(masks, result_boxes):
+            mask_s = np.zeros((ih, iw))
+            crop_mask = self.scale_mask(mask, ih, iw)    
+            x1 = int(box[0])
+            y1 = int(box[1])
+            x2 = int(box[2])
+            y2 = int(box[3])
+            crop = crop_mask[y1:y2, x1:x2]
+            crop = np.where(crop >= 0.5, 1, 0)
+            crop = crop.astype(np.uint8)
+            mask_s[y1:y2, x1:x2] = crop
+
+            mask_result.append(mask_s)
+        mask_result = np.array(mask_result)
+        return mask_result
+
+    def draw_mask(self, masks, colors_, im_src, alpha=0.5):
+        """
+        description: Draw masks on the image.
+        param:
+            masks  : result masks, shape (n, h, w)
+            colors_: colors used to draw the masks
+            im_src : original image (modified in place)
+            alpha  : blending factor between the original image and the masks
+        return:
+            no return
+        """
+        if len(masks) == 0:
+            return
+        masks = np.asarray(masks, dtype=np.uint8)
+        masks = np.ascontiguousarray(masks.transpose(1, 2, 0))
+        masks = np.asarray(masks, dtype=np.float32)
+        colors_ = np.asarray(colors_, dtype=np.float32)
+        s = masks.sum(2, keepdims=True).clip(0, 1)
+        masks = (masks @ colors_).clip(0, 255)
+        im_src[:] = masks * alpha + im_src * (1 - s * alpha)
+
+class inferThread(threading.Thread):
+    def __init__(self, yolov8_wrapper, image_path_batch):
+        threading.Thread.__init__(self)
+        self.yolov8_wrapper = yolov8_wrapper
+        self.image_path_batch = image_path_batch
+
+    def run(self):
+        batch_image_raw, use_time = self.yolov8_wrapper.infer(self.yolov8_wrapper.get_raw_image(self.image_path_batch))
+        for i, img_path in enumerate(self.image_path_batch):
+            parent, filename = os.path.split(img_path)
+            save_name = os.path.join('output', filename)
+            # Save image
+            cv2.imwrite(save_name, batch_image_raw[i])
+        print('input->{}, time->{:.2f}ms, saving into output/'.format(self.image_path_batch, use_time * 1000))
+
+
+class warmUpThread(threading.Thread):
+    def __init__(self, yolov8_wrapper):
+        threading.Thread.__init__(self)
+        self.yolov8_wrapper = yolov8_wrapper
+
+    def run(self):
+        batch_image_raw, use_time = self.yolov8_wrapper.infer(self.yolov8_wrapper.get_raw_image_zeros())
+        print('warm_up->{}, time->{:.2f}ms'.format(batch_image_raw[0].shape, use_time * 1000))
+
+class Colors:
+    def __init__(self):
+        hexs = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A',
+                '92CC17', '3DDB86', '1A9334', '00D4BB', '2C99A8', '00C2FF',
+                '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF',
+                'FF95C8', 'FF37C7')
+        self.palette = [self.hex2rgb(f'#{c}') for c in hexs]
+        self.n = len(self.palette)
+
+    def __call__(self, i, bgr=False):
+        c = self.palette[int(i) % self.n]
+        return (c[2], c[1], c[0]) if bgr else c
+
+    @staticmethod
+    def hex2rgb(h):  # rgb order (PIL)
+        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))
+
+if __name__ == "__main__":
+    # load custom plugin and engine
+    PLUGIN_LIBRARY = "build/libmyplugins.so"
+    engine_file_path = "yolov8s-seg.engine"
+
+    if len(sys.argv) > 1:
+        engine_file_path = sys.argv[1]
+    if len(sys.argv) > 2:
+        PLUGIN_LIBRARY = sys.argv[2]
+
+    ctypes.CDLL(PLUGIN_LIBRARY)
+
+    # load coco labels
+
+    categories = ["person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat",
+                  "traffic light",
+                  "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
+                  "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase",
+                  "frisbee",
+                  "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard",
+                  "surfboard",
+                  "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
+                  "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
+                  "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard",
+                  "cell phone",
+                  "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors",
+                  "teddy bear",
+                  "hair drier", "toothbrush"]
+
+    if os.path.exists('output/'):
+        shutil.rmtree('output/')
+    os.makedirs('output/')
+    # a YoLov8TRT instance
+    yolov8_wrapper = YoLov8TRT(engine_file_path)
+    try:
+        print('batch size is', yolov8_wrapper.batch_size)
+
+        image_dir = "images/"
+        image_path_batches = get_img_path_batches(yolov8_wrapper.batch_size, image_dir)
+
+        for i in range(10):
+            # create a new thread to do warm_up
+            thread1 = warmUpThread(yolov8_wrapper)
+            thread1.start()
+            thread1.join()
+        for batch in image_path_batches:
+            # create a new thread to do inference
+            thread1 = inferThread(yolov8_wrapper, batch)
+            thread1.start()
+            thread1.join()
+    finally:
+        # destroy the instance
+        yolov8_wrapper.destroy()
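
For quick experiments, the wrapper above can also be driven directly without the thread classes. A minimal sketch, assuming a batch-1 engine, that the plugin library has already been loaded with ctypes.CDLL, and that `categories` is defined as in the __main__ block (infer() references it); the image path is a placeholder:

    wrapper = YoLov8TRT("yolov8s-seg.engine")
    try:
        imgs, seconds = wrapper.infer(wrapper.get_raw_image(["some_image.jpg"]))
        cv2.imwrite("result.jpg", imgs[0])
        print("inference took {:.2f}ms".format(seconds * 1000))
    finally:
        wrapper.destroy()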

+ 8 - 0
src/detection/yaml/camera.yaml

@@ -0,0 +1,8 @@
+D:
+- [-23.425988880936135, -21.80458632893629, -0.5387917768358769, 0.4939328755112909,
+  0.8515868920493874]
+K:
+- [6628.560676825491, 0.0, 176.13848220061746]
+- [0.0, 6611.257267601054, 321.4390302745282]
+- [0.0, 0.0, 1.0]
+dim: [360, 640]

+ 12 - 0
src/detection/yaml/camera147.yaml

@@ -0,0 +1,12 @@
+%YAML:1.0
+
+cameraMatrix:
+    rows: 3
+    cols: 3
+    dt: f
+    data: [329.8960137714725, 0.0, 306.98655702722937, 0.0, 329.8476481330821, 147.92102146061933, 0.0, 0.0, 1.0]
+distCoeffs: 
+    rows: 1
+    cols: 4
+    dt: f
+    data: [0.06540082256039308, 0.003679070986049775, 0.02795206257493852, 0.019310323874426313]

+ 12 - 0
src/detection/yaml/camera_middle_640_360.yaml

@@ -0,0 +1,12 @@
+%YAML:1.0
+
+cameraMatrix:
+    rows: 3
+    cols: 3
+    dt: f
+    data: [6628.560676825491, 0.0, 176.13848220061746,0.0, 6611.257267601054, 321.4390302745282, 0.0, 0.0, 1.0]
+distCoeffs: 
+    rows: 1
+    cols: 5
+    dt: f
+    data: [-23.425988880936135, -21.80458632893629, -0.5387917768358769, 0.4939328755112909,0.8515868920493874]

BIN
src/detection/yaml/img_cali.jpg


+ 16 - 0
src/detection/yaml/init.yaml

@@ -0,0 +1,16 @@
+source:
+- 0
+
+src:
+- [140, 370]
+- [220, 298]
+- [410, 292]
+- [478, 360]
+dst:
+- [125, 371]
+- [125, 218]
+- [484, 218]
+- [484, 370]
+
+meters: 2.0
+pixels: 640
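
The src/dst point pairs above look like the four correspondences for a ground-plane perspective warp (compare the warpPerspective section in vision_yolov8_config.yaml below). How the node actually consumes them is not shown in this diff; a hedged OpenCV sketch of the usual pattern:

    import cv2
    import numpy as np
    import yaml

    with open("init.yaml") as f:
        cfg = yaml.safe_load(f)
    src = np.float32(cfg["src"])
    dst = np.float32(cfg["dst"])
    M = cv2.getPerspectiveTransform(src, dst)  # 3x3 homography
    # warped = cv2.warpPerspective(frame, M, (cfg["pixels"], cfg["pixels"]))
    # `meters` / `pixels` would then give the scale of the warped view, if that is its intent.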

+ 2 - 0
src/detection/yaml/pers.yaml

@@ -0,0 +1,2 @@
+%YAML:1.0
+---

+ 10 - 0
src/detection/yaml/pers_ori .yaml

@@ -0,0 +1,10 @@
+%YAML:1.0
+
+
+board_left_top: [539,423]
+
+board_left_bottom: [362,532]
+
+board_right_top: [719,429]
+
+board_right_bottom: [875,550]

+ 10 - 0
src/detection/yaml/pers_yolov8.yaml

@@ -0,0 +1,10 @@
+%YAML:1.0
+
+
+board_left_top: [639,371]
+
+board_left_bottom: [93,564]
+
+board_right_top: [762,372]
+
+board_right_bottom: [1273,537]

+ 102 - 0
src/detection/yaml/show_coordinate_new.py

@@ -0,0 +1,102 @@
+import cv2
+import yaml
+import numpy as np
+import sys
+import os
+
+
+def test():
+
+    """
+    camera_files = "./yaml/camera_middle_640_360.yaml"
+    print("files",camera_files)
+    W, H = 640, 360
+    #W, H = 1280, 720
+    init_files=(os.path.realpath(sys.path[0]) + "/yaml/init.yaml")
+    
+    with open(init_files, "r") as f:
+        init_data = yaml.load(f)
+   
+    source = init_data["source"]
+    print("source=",source)
+
+
+
+
+    cap = cv2.VideoCapture("v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink")
+   # cap = cv2.VideoCapture(1)
+
+
+
+
+
+    #with open(camera_files, "r") as f:
+    #    data = yaml.load(f,Loader=yaml.FullLoader)
+    #K = np.array(data["K"])
+    #D = np.array(data["D"])    
+    
+    data = cv2.FileStorage(camera_files, cv2.FILE_STORAGE_READ)
+    K = data.getNode("cameraMatrix").mat()
+    D = data.getNode("distCoeffs").mat()
+    data.release()
+    print("K=",K)
+    print("D=",D)
+
+    map1, map2 = cv2.initUndistortRectifyMap(
+            K,
+            D,
+            np.eye(3),
+            K,
+            (W, H),
+            cv2.CV_16SC2
+    )
+    """
+
+    cap = cv2.VideoCapture("v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink")
+    while True:
+        ret, frame = cap.read()
+        #frame = frame[410-90:590+90, 380+35:1020+35]
+        #frame = cv2.resize(frame,(W,H))
+        #frame = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
+        cv2.imshow("capture", frame)
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            img = frame
+            cv2.imwrite('img_cali.jpg', frame)  # save as an image

+            break
+    cap.release()
+    cv2.destroyAllWindows()
+    #img = cv2.imread('img.jpg')
+
+
+    def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
+        if event == cv2.EVENT_LBUTTONDOWN:
+            xy = "%d,%d" % (x, y)
+            cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)
+            print((x,y))
+            cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
+                        1.5, (255, 255, 255), thickness=2)
+            cv2.imshow("image", img)
+
+    cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
+    cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
+    cv2.imshow("image", img)
+
+    while (True):
+        try:
+            if cv2.waitKey(10) & 0xFF == ord('q'):
+                break
+        except Exception:
+            cv2.destroyWindow("image")
+            break
+
+
+
+if __name__=='__main__':
+    test()
+
+
+
+
+
+
+

+ 57 - 0
src/detection/yaml/show_coordinate_video.py

@@ -0,0 +1,57 @@
+import cv2
+import yaml
+import numpy as np
+import sys
+import os
+
+
+def test():
+
+    cap = cv2.VideoCapture("/home/nvidia/Videos/video.mp4")
+    while True:
+        ret, frame = cap.read()
+        #frame = frame[410-90:590+90, 380+35:1020+35]
+        #frame = cv2.resize(frame,(W,H))
+        #frame = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
+        cv2.imshow("capture", frame)
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            img = frame
+            cv2.imwrite('img_cali.jpg', frame)
+            break
+    cap.release()
+    cv2.destroyAllWindows()
+    #img = cv2.imread('img.jpg')
+
+
+    def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
+        if event == cv2.EVENT_LBUTTONDOWN:
+            xy = "%d,%d" % (x, y)
+            cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)
+            print((x,y))
+            cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
+                        1.5, (255, 255, 255), thickness=2)
+            cv2.imshow("image", img)
+
+    cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
+    cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
+    cv2.imshow("image", img)
+
+    while (True):
+        try:
+            if cv2.waitKey(10) & 0xFF == ord('q'):
+                break
+        except Exception:
+            cv2.destroyWindow("image")
+            break
+
+
+
+if __name__=='__main__':
+    test()
+
+
+
+
+
+
+

+ 25 - 0
src/detection/yaml/sign_config.yaml

@@ -0,0 +1,25 @@
+%YAML:1.0
+---
+imshow_flag: true
+ivlog_flag: true
+test_video:
+  open: false
+  video_path: /media/nvidia/Elements-lqx/saveVideoPy/1685957551.1323013.avi
+trackstate: true
+conf_thr: 0.5
+nms_thr : 0.5
+model:
+  engine_path: ./model/sign_yolov8m_new_fp16.trt
+camera:
+  calibrationstate : false
+  calibration_yamlpath : ./yaml/camera_middle_640_360.yaml
+  cropstate: true
+  crop_height: 
+    start: 0
+    end: 720
+  crop_width:
+    start: 280
+    end: 1000
+
+
+

+ 27 - 0
src/detection/yaml/trafficlight_config.yaml

@@ -0,0 +1,27 @@
+%YAML:1.0
+---
+imshow_flag: true
+ivlog_flag: true
+lightstart: true
+test_video:
+  open: false
+  video_path: /home/nvidia/code/modularization/src/detection/trafficlight_detection/data/camera_test2.mp4 
+trackstate: true
+conf_thr: 0.5
+nms_thr : 0.4
+model:
+  onnx_path: ./model/yolov5s_640_old+new.onnx
+  engine_path: ./model/yolov5s_640_old+new.engine
+camera:
+  calibrationstate : false
+  calibration_yamlpath : ./yaml/camera_middle_640_360.yaml
+  cropstate: true
+  crop_height: 
+    start: 40
+    end: 680
+  crop_width:
+    start: 320
+    end: 960
+
+
+

+ 28 - 0
src/detection/yaml/vision_yolov8_config.yaml

@@ -0,0 +1,28 @@
+%YAML:1.0
+---
+show:
+  imshow_flag: true
+  show_distance: false
+test_video:
+  open: false
+  video_path: /home/nvidia/Videos/video.mp4 
+warpPerspective:
+  BOARD_SIDE_LENGTH_X: 7.5
+  BOARD_SIDE_LENGTH_Y: 50
+  y: 3.0
+  x: 0.0
+model:
+  engine_path: /home/nvidia/app/app/model/yolov8n.engine
+camera:
+  calibrationstate : false
+  calibration_yamlpath : /home/nvidia/yaml/camera_middle_640_360.yaml
+  cropstate: true
+  crop_height: 
+    start: 0
+    end: 600
+  crop_width:
+    start: 0
+    end: 1280
+
+
+
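
The %YAML:1.0 header marks these config files as OpenCV FileStorage documents (the same reader appears in show_coordinate_new.py for the camera YAML). A minimal sketch, assuming the file path, of reading a few fields from Python:

    import cv2

    fs = cv2.FileStorage("vision_yolov8_config.yaml", cv2.FILE_STORAGE_READ)
    engine_path = fs.getNode("model").getNode("engine_path").string()
    crop_h_start = int(fs.getNode("camera").getNode("crop_height").getNode("start").real())
    crop_h_end = int(fs.getNode("camera").getNode("crop_height").getNode("end").real())
    fs.release()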

+ 24 - 0
src/include/proto/visionyolov8.proto

@@ -0,0 +1,24 @@
+syntax = "proto2";
+package iv.vision;
+
+message PixelObject{
+  optional string type = 1;
+  optional int32 x = 2;
+  optional int32 y = 3;
+  optional int32 width = 4;
+  optional int32 height = 5;
+};
+
+message WorldObject{
+  optional string type = 1;
+  optional float x = 2;
+  optional float y = 3;
+  optional float width = 4;
+};
+
+message Visionyolov8{
+  optional int32 index = 1;
+  repeated PixelObject pixelObject = 2;
+  repeated WorldObject worldObject = 3;
+  optional int64 time = 4;
+};
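
A hedged sketch of filling this message from Python, assuming the .proto is compiled with protoc --python_out into a visionyolov8_pb2 module (module name assumed from the file name):

    import time
    import visionyolov8_pb2

    msg = visionyolov8_pb2.Visionyolov8()
    msg.index = 1
    msg.time = int(time.time() * 1000)
    obj = msg.pixelObject.add()
    obj.type = "car"
    obj.x, obj.y, obj.width, obj.height = 100, 200, 50, 40
    payload = msg.SerializeToString()  # bytes to publish, e.g. over modulecomm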

+ 8 - 0
src/tool/video_record/Readme.md

@@ -0,0 +1,8 @@
+## adciv_save 
+
+2024/1/17 by zk
+
+This project is used to record and save front camera data.
+
+
+You can edit *savePosition.xml* to change the directory of the video file.

+ 98 - 0
src/tool/video_record/main.cpp

@@ -0,0 +1,98 @@
+#include <QCoreApplication>
+#include <pcl/io/pcd_io.h>
+
+#include "object.pb.h"
+#include "objectarray.pb.h"
+
+#include "modulecomm.h"
+#include "xmlparam.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include "rawpic.pb.cc"
+
+#include <string>
+#include <thread>
+#include <time.h>
+#include <opencv2/opencv.hpp>
+
+// save camera data
+void * gpcamera;
+
+const std::string cameraname="image00";
+
+iv::Ivfault *gfault = nullptr;
+iv::Ivlog *givlog = nullptr;
+
+std::string strpath = "./video_record.xml";
+
+iv::xmlparam::Xmlparam xp(strpath);
+
+std::string strDir_frontcamera = xp.GetParam("save_position","/home/nvidia/Videos/video.avi");
+int cam_index = xp.GetParam("cam_index", 0);
+int gwidth = xp.GetParam("width", 1280);
+int gheight = xp.GetParam("height", 720);
+int pic_num = 0;
+
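+// GStreamer pipeline string for the V4L2 camera selected by cam_index at the configured resolution.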
+std::string gstr_f = "v4l2src device=/dev/video"+std::to_string(cam_index)+" ! video/x-raw, width=(int)"+std::to_string(gwidth)+", height=(int)"+std::to_string(gheight)+" ! videoconvert ! appsink";
+
+std::string gst = "/home/nvidia/modularization/src/detection/vision_yolov8/data/test2.mp4";
+
+
+int main(int argc, char *argv[])
+{
+    gfault = new iv::Ivfault("video_save");
+    givlog = new iv::Ivlog("video_save");
+    gfault->SetFaultState(0,0,"video_save initialize. ");
+    showversion("save_video_data");
+    QCoreApplication a(argc, argv);
+
+    std::cout<<"frontcamera Dir="<<strDir_frontcamera<<std::endl;
+
+    cv::VideoCapture cap(gstr_f);
+    cv::Size sizeReturn = cv::Size(cap.get(cv::CAP_PROP_FRAME_WIDTH), cap.get(cv::CAP_PROP_FRAME_HEIGHT));
+    cv::VideoWriter wri(strDir_frontcamera, cv::VideoWriter::fourcc('M','J','P','G'), cap.get(cv::CAP_PROP_FPS), sizeReturn);
+    cv::namedWindow("video_record",cv::WINDOW_NORMAL);
+    if(!cap.isOpened())
+    {
+        std::cout<<"camera failed to open"<<std::endl;
+    }
+    double waittime = (double)cv::getTickCount();
+    while(1)
+    {
+        cv::Mat frame;
+        if(cap.read(frame))
+        {
+            cv::Mat image;
+            frame.copyTo(image);
+            wri<<image;
+            cv::imshow("video_record", image);
+            std::cout<<"count: "<<pic_num<<std::endl;
+            pic_num++;
+        }
+        else
+        {
+            double waittotal = (double)cv::getTickCount() - waittime;
+            double totaltime = waittotal/cv::getTickFrequency();
+            std::this_thread::sleep_for(std::chrono::milliseconds(1));
+            if(totaltime>1)
+            {
+                std::cout<<"Cant't get frame and quit"<<std::endl;
+                cv::destroyAllWindows();
+                std::cout<<"------end program------"<<std::endl;
+                break;
+            }
+            continue;
+        }
+        if (cv::waitKey(10) == 'q')
+        {
+            break;
+        }
+        waittime = (double)cv::getTickCount();
+    }
+    wri.release();
+    cap.release();
+    cv::destroyAllWindows();
+    return 0;
+}

+ 82 - 0
src/tool/video_record/video_record.pro

@@ -0,0 +1,82 @@
+QT -= gui
+
+QMAKE_LFLAGS += -no-pie
+greaterThan(QT_MAJOR_VERSION, 4): QT += widgets
+
+TARGET = video_record
+TEMPLATE = app
+
+CONFIG += c++11 #console
+CONFIG -= app_bundle
+
+# The following define makes your compiler emit warnings if you use
+# any feature of Qt which has been marked as deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+
+#DEFINES += USE_PLUS_MODULECOMM
+
+
+# You can also make your code fail to compile if you use deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0
+
+
+SOURCES += main.cpp \
+    ../../include/msgtype/rawpic.pb.cc
+
+
+INCLUDEPATH += /usr/include/pcl-1.7
+INCLUDEPATH += /usr/include/pcl-1.8
+INCLUDEPATH += /usr/include/eigen3
+INCLUDEPATH += /usr/include/opencv4
+
+INCLUDEPATH += /usr/include/pcl-1.10 \
+               ../../include/msgtype/rawpic.pb.h
+
+
+INCLUDEPATH += /usr/local/cuda-11.4/targets/aarch64-linux/include
+
+LIBS += -L/usr/local/cuda-11.4/targets/aarch64-linux/lib  # -lcublas
+
+
+INCLUDEPATH += /usr/local/cuda-10.2/targets/aarch64-linux/include
+
+LIBS += -lpcl_io -lpcl_common
+
+
+LIBS += -lboost_system  -lavutil  -lprotobuf -lcudnn
+
+
+LIBS += -lcudnn
+
+LIBS += /usr/lib/aarch64-linux-gnu/libopencv_highgui.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_core.so    \
+        /usr/lib/aarch64-linux-gnu/libopencv_imgproc.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_imgcodecs.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_videoio.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_video.so \
+        /usr/lib/aarch64-linux-gnu/libopencv_calib3d.so \
+
+#unix:!macx: LIBS += -L$$PWD/../../../thirdpartylib/caffe/arm64 -lcaffe -lcudnn
+
+HEADERS += \
+    ../../include/msgtype/object.pb.h \
+    ../../include/msgtype/objectarray.pb.h
+
+
+!include(../../../include/common.pri ) {
+    error( "Couldn't find the common.pri file!" )
+}
+
+!include(../../../include/ivprotobuf.pri ) {
+    error( "Couldn't find the ivprotobuf.pri file!" )
+}
+
+INCLUDEPATH += $$PWD/../../include/msgtype
+LIBS += -L/usr/lib/aarch64-linux-gnu/ -lglog
+
+
+

+ 8 - 0
src/tool/video_record/video_record.xml

@@ -0,0 +1,8 @@
+<xml>	
+	<node name="video_save">
+		<param name="save_position" value="./video.mp4" />
+		<param name="cam_index" value="0" />
+		<param name="width" value="1280" />
+		<param name="height" value="720" />
+	</node>
+</xml>

+ 311 - 0
src/tool/video_replay/main.cpp

@@ -0,0 +1,311 @@
+#include <QCoreApplication>
+#include <thread>
+
+#include <opencv2/opencv.hpp>
+#include <opencv2/core.hpp>
+
+extern "C"
+{
+#include <libavcodec/avcodec.h>
+#include <libavutil/opt.h>
+#include <libavutil/imgutils.h>
+#include <libavutil/common.h>
+}
+
+#ifdef USE_OPENCV4
+#include "opencv2/imgcodecs/legacy/constants_c.h"   //OpenCV4 use this line
+#include <opencv2/imgproc/types_c.h>   //OpenCV4 use this line
+#endif
+
+#include <opencv2/videoio.hpp>
+
+#include <QTime>
+#include <QFile>
+
+#define TESTYUV 0
+
+#include "modulecomm.h"
+#include "rawpic.pb.h"
+#include "xmlparam.h"
+
+#include "ivversion.h"
+
+void * gpa;
+
+int gindex = 0;
+
+int gwidth = 1920;
+int gheight = 1080;
+int gcamindex = 0;
+int gcompress = 1;
+int gcompquality = 95;
+
+
+std::string gmsgname = "image00";
+
+std::string gstrdevname = "video0";
+std::string video_path = "/home/nvidia/Videos/video.mp4";
+int time_sleep = 0;
+
+#ifndef CV_CAP_PROP_FRAME_WIDTH
+#define CV_CAP_PROP_FRAME_WIDTH cv::CAP_PROP_FRAME_WIDTH
+#define CV_CAP_PROP_FRAME_HEIGHT cv::CAP_PROP_FRAME_HEIGHT
+#define CV_CAP_PROP_FPS cv::CAP_PROP_FPS
+#define CV_CAP_PROP_FOURCC cv::CAP_PROP_FOURCC
+#define CV_BGR2GRAY cv::COLOR_BGR2GRAY
+#endif
+
+
+static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
+                   FILE *outfile)
+{
+    int ret;
+
+    /* send the frame to the encoder */
+    if (frame)
+        printf("Send frame %3"PRId64"\n", frame->pts);
+
+    ret = avcodec_send_frame(enc_ctx, frame);
+    if (ret < 0) {
+        fprintf(stderr, "Error sending a frame for encoding\n");
+        exit(1);
+    }
+
+    while (ret >= 0) {
+        ret = avcodec_receive_packet(enc_ctx, pkt);
+        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+            return;
+        else if (ret < 0) {
+            fprintf(stderr, "Error during encoding\n");
+            exit(1);
+        }
+
+        printf("Write packet %3"PRId64" (size=%5d)\n", pkt->pts, pkt->size);
+        fwrite(pkt->data, 1, pkt->size, outfile);
+        av_packet_unref(pkt);
+    }
+}
+
+
+void VideoThread(int n)
+{
+
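+    // Read frames from video_path, resize them to 1280x720, JPEG-encode them (or send raw
+    // pixels when bcompress is 0) into an iv::vision::rawpic message, and publish it on the
+    // shared-memory channel named gmsgname.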
+
+
+    QTime xT;
+    xT.start();
+    int ncount = 0;
+    int nserbufsize = 20000000;
+
+    char * strser = new char[nserbufsize];
+
+    cv::VideoCapture capture;
+
+#ifdef TESTYUV
+    QFile xFileYUV;
+    xFileYUV.setFileName("/home/nvidia/testYUV.yuv");
+    bool bTestYUV = false;
+    if(xFileYUV.open(QIODevice::ReadWrite))
+    {
+        bTestYUV = true;
+    }
+    else
+    {
+        std::cout<<" Open YUV File Fail."<<std::endl;
+    }
+#endif
+
+////If OpenCV4 use this
+
+//    //std::string gstr_f = "v4l2src device=/dev/video"+std::to_string(gcamindex)+" ! video/x-raw, width=(int)"+std::to_string(gwidth)+", height=(int)"+std::to_string(gheight)+" ! videoconvert ! appsink";
+//    std::string gstr_f = "/home/nvidia/Videos/video.mp4";
+//#ifdef USE_OPENCV4
+//    if(gstrcamera.length() > 2)
+//    {
+//        //gstr_f = "v4l2src device=/dev/"+gstrcamera+" ! video/x-raw, width=(int)"+std::to_string(gwidth)+", height=(int)"+std::to_string(gheight)+" ! videoconvert ! appsink";
+//        gstr_f = "/home/nvidia/Videos/video.mp4";
+//        std::cout<<gstr_f<<std::endl;
+//        std::cout<<gstr_f<<std::endl;
+//        capture.open(gstr_f);
+//    }
+//    else
+//        capture.open(gcamindex);
+
+
+//#else
+
+//        capture.open(gcamindex);
+//#endif
+//    capture.set(CV_CAP_PROP_FRAME_WIDTH,gwidth);
+//    capture.set(CV_CAP_PROP_FRAME_HEIGHT,gheight);
+
+    std::cout<<video_path<<std::endl;
+    capture.open(video_path);
+
+    while(true)
+    {
+        cv::Mat mat, mat1;
+        capture>>mat;
+
+        cv::resize(mat, mat1, cv::Size(1280, 720), 0, 0, cv::INTER_LINEAR);
+
+        cv::imshow("video_replay",mat1);
+        if(!mat1.empty())
+        {
+
+#ifdef TESTYUV
+        if(bTestYUV &&(ncount<61))
+        {
+            cv::Mat dstYuvImage;
+            QTime xTime;
+            xTime.start();
+            cv::cvtColor(mat1, dstYuvImage, CV_BGR2YUV_I420);
+            std::cout<<" cvt time: "<<xTime.elapsed()<<std::endl;
+            xFileYUV.write((char *)dstYuvImage.data,gwidth*gheight*3/2);
+            xFileYUV.flush();
+        }
+        else
+        {
+            xFileYUV.close();
+        }
+#endif
+
+        ncount++;
+            std::cout<<" count "<<ncount<<std::endl;
+            QTime timex;
+            timex.start();
+
+            iv::vision::rawpic pic;
+
+            qint64 time = QDateTime::currentMSecsSinceEpoch();
+            QTime xg;
+            xg.start();
+            pic.set_time(time);
+            pic.set_index(gindex);gindex++;
+
+
+            pic.set_elemsize(mat1.elemSize());
+            pic.set_width(mat1.cols);
+            pic.set_height(mat1.rows);
+            pic.set_mattype(mat1.type());
+
+//            cv::Mat matcompress;
+//            cv::resize(mat1,matcompress,cv::Size(640,360));
+
+
+            std::vector<int> param = std::vector<int>(2);
+            param[0] = CV_IMWRITE_JPEG_QUALITY;
+            param[1] = gcompquality; // default(95) 0-100
+            std::vector<unsigned char> buff;
+            if(gcompress == 1)
+            {
+                cv::imencode(".jpg", mat1, buff, param);
+//                cv::imencode(".jpg", matcompress, buff, param);
+                pic.set_picdata(buff.data(),buff.size());
+                buff.clear();
+                pic.set_type(2);
+            }
+            else
+            {
+                pic.set_picdata(mat1.data,mat1.rows*mat1.cols*mat1.elemSize());
+//                pic.set_picdata(matcompress.data,matcompress.rows*matcompress.cols*matcompress.elemSize());
+                pic.set_type(1);
+            }
+
+
+//            qDebug("buff size is %d",buff.size());
+
+
+            int nSize = pic.ByteSize();
+
+  //          char * strser = new char[nSize];
+            bool bser = pic.SerializeToArray(strser,nSize);
+            qDebug("time is %ld pac time is %d size is %d",QDateTime::currentMSecsSinceEpoch(),xg.elapsed(),nSize);
+
+    //        iv::modulecomm::ModuleSendMsg(gpa,str.data(),str.length());
+            if(bser)iv::modulecomm::ModuleSendMsg(gpa,strser,nSize);
+            else
+            {
+                std::cout<<"usbcamera "<< "serialize error. "<<std::endl;
+            }
+ //           delete strser;
+ //           delete strout;
+
+
+
+   //         qDebug("%d have a pic. len is %d",xT.elapsed(),str.length());
+//            qDebug("now is %d",xT.elapsed());
+//            gw->mMutex.lock();
+//            mat1.copyTo(gw->msrcImage);
+//            gw->mMutex.unlock();
+
+//            gMutexInput.lock();
+//            mat1.copyTo(gsrcImage);
+//            g_CapIndex++;
+//            gMutexInput.unlock();
+        }
+        else
+        {
+            qDebug("no data.");
+            std::this_thread::sleep_for(std::chrono::milliseconds(1));
+        }
+        std::this_thread::sleep_for(std::chrono::milliseconds(time_sleep));
+    }
+}
+
+
+#include <thread>
+
+void testthread()
+{
+    void * mpx = iv::modulecomm::RegisterSend("test121",10000000,1);
+    char * str = new char[10000000];
+    while(1)
+    {
+        iv::modulecomm::ModuleSendMsg(mpx,str,9000000);
+    }
+}
+
+
+int main(int argc, char *argv[])
+{
+
+    cv::namedWindow("video_replay",cv::WINDOW_NORMAL);
+
+    showversion("video_replay");
+    QCoreApplication a(argc, argv);
+
+//    std::thread * pthread = new std::thread(testthread);
+//    return a.exec();
+
+    QString strpath = QCoreApplication::applicationDirPath();
+//    QString apppath = strpath;
+    if(argc < 2)
+        strpath = strpath + "/video_replay.xml";
+    else
+        strpath = argv[1];
+    std::cout<<strpath.toStdString()<<std::endl;
+    iv::xmlparam::Xmlparam xp(strpath.toStdString());
+
+    std::string xmlmsgname = xp.GetParam("msgname","image00");
+    gmsgname = xmlmsgname;
+    std::string strwidth = xp.GetParam("width","1280");
+    std::string strheight = xp.GetParam("height","720");
+    std::string strcompress = xp.GetParam("bcompress","1");
+    std::string strcompquality = xp.GetParam("compquality","95");
+    std::string videoPath = xp.GetParam("video_path","/home/nvidia/Videos/video.mp4");
+    std::string timeSleep = xp.GetParam("time_sleep","0");
+    gcompquality = atoi(strcompquality.data());
+    gwidth = atoi(strwidth.data());
+    gheight = atoi(strheight.data());
+    gcompress = atoi(strcompress.data());
+    time_sleep = atoi(timeSleep.data());
+    video_path = videoPath;
+
+    std::cout<<"video_path:"<<video_path<<std::endl;
+
+    gpa = iv::modulecomm::RegisterSend(gmsgname.data(),20000000,1);
+    std::thread * mthread = new std::thread(VideoThread,0);
+
+    return a.exec();
+}
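
For reference, a minimal sketch of how a subscriber to the image00 channel might decode the iv::vision::rawpic messages published above; this is not part of the commit. The callback signature mirrors the RegisterRecvPlus handlers used elsewhere in this diff, the getter names are inferred from the set_* calls in VideoThread, and the JPEG-versus-raw handling is an assumption based on the type field (2 = JPEG, 1 = raw).

// Hypothetical consumer sketch (not part of this commit): decode a rawpic
// message published by video_replay.  type()==2 carries a JPEG buffer,
// type()==1 carries raw pixels whose geometry travels in the message.
#include <QDateTime>
#include <opencv2/opencv.hpp>
#include "rawpic.pb.h"

void onRawPic(const char * strdata, const unsigned int nSize,
              const unsigned int index, const QDateTime * dt, const char * strmemname)
{
    iv::vision::rawpic pic;
    if(!pic.ParseFromArray(strdata, nSize))
        return;                                  // malformed message

    cv::Mat frame;
    if(pic.type() == 2)                          // gcompress == 1 path: JPEG
    {
        std::vector<unsigned char> buff(pic.picdata().begin(), pic.picdata().end());
        frame = cv::imdecode(buff, cv::IMREAD_COLOR);
    }
    else                                         // raw path: wrap and copy the pixel buffer
    {
        frame = cv::Mat(pic.height(), pic.width(), pic.mattype(),
                        (void *)pic.picdata().data()).clone();
    }
    // ... hand `frame` to detection / display ...
}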

+ 48 - 0
src/tool/video_replay/video_replay.pro

@@ -0,0 +1,48 @@
+
+QT -= gui
+
+CONFIG += c++11
+CONFIG -= app_bundle
+
+# The following define makes your compiler emit warnings if you use
+# any feature of Qt which has been marked as deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+
+# You can also make your code fail to compile if you use deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0
+
+SOURCES += main.cpp \
+    ../../include/msgtype/rawpic.pb.cc
+
+
+!include(../../../include/common.pri ) {
+    error( "Couldn't find the common.pri file!" )
+}
+
+!include(../../../include/ivopencv.pri ) {
+    error( "Couldn't find the ivopencv.pri file!" )
+}
+
+!include(../../../include/ivprotobuf.pri ) {
+    error( "Couldn't find the ivprotobuf.pri file!" )
+}
+
+
+INCLUDEPATH += /home/linaro/opencv3
+
+
+contains(QMAKE_HOST.arch, aarch64){
+    DEFINES += USE_OPENCV4
+}
+
+
+HEADERS += \
+    ../../include/msgtype/rawpic.pb.h
+
+LIBS += -lavcodec -lavformat -lavutil
+
+

+ 11 - 0
src/tool/video_replay/video_replay.xml

@@ -0,0 +1,11 @@
+<xml>
+	<node name="video_replay">
+		<param name="msgname" value="image00" />
+		<param name="width" value="1280" />
+		<param name="height" value="720" />
+		<param name="bcompress" value="1" />
+		<param name="compquality" value="60" />
+		<param name="video_path" value="/home/nvidia/Videos/video.mp4" />
+		<param name="time_sleep" value="0" />
+	</node>
+</xml>
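
The time_sleep parameter above is the extra per-frame delay, in milliseconds, that VideoThread sleeps after publishing each frame (0 replays as fast as decoding allows). A small illustrative helper, not part of this commit, for choosing the value from a target replay rate:

// Hypothetical helper (not in this commit): pick a time_sleep value for
// video_replay.xml from a desired replay frame rate.
int sleepMsForFps(double target_fps)
{
    if(target_fps <= 0.0)
        return 0;                                 // 0 keeps "as fast as possible" replay
    return static_cast<int>(1000.0 / target_fps); // e.g. 30 fps -> 33 ms
}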

+ 94 - 257
src/ui/ui_ads_hmi/ADCIntelligentVehicle.cpp

@@ -4,19 +4,7 @@
 #include "xmlparam.h"
 
 
-#ifdef OS_UNIX
-#include "sys/statfs.h"
-#endif
-
-
-#ifdef  OS_WIN
-#include  <windows.h>
-
-#endif
-
 #include <thread>
-#include <QMessageBox>
-#include <QFile>
 
 #define qtcout qDebug() << "[ " << __FILE__ << ":" << __LINE__<< " ]";
 
@@ -370,6 +358,9 @@ ADCIntelligentVehicle::ADCIntelligentVehicle(QWidget *parent)
     ModuleFun funfusion = std::bind(&ADCIntelligentVehicle::UpdateFusion,this,std::placeholders::_1,std::placeholders::_2,std::placeholders::_3,std::placeholders::_4,std::placeholders::_5);
     mpafusion = iv::modulecomm::RegisterRecvPlus("li_ra_fusion",funfusion);
 
+    ModuleFun funYolo = std::bind(&ADCIntelligentVehicle::UpdateYolo,this,std::placeholders::_1,std::placeholders::_2,std::placeholders::_3,std::placeholders::_4,std::placeholders::_5);
+    mpaYolo = iv::modulecomm::RegisterRecvPlus("yolo_obs",funYolo);
+
 
     ModuleFun funmap = std::bind(&ADCIntelligentVehicle::UpdateMap,this,std::placeholders::_1,std::placeholders::_2,std::placeholders::_3,std::placeholders::_4,std::placeholders::_5);
     mpa = iv::modulecomm::RegisterRecvPlus("tracemap",funmap);
@@ -514,51 +505,10 @@ ADCIntelligentVehicle::ADCIntelligentVehicle(QWidget *parent)
         ui->groupBox_11->setVisible(false);
     }
 
-    mnHDDSpaceMB = get_path_availspace("./");
-    std::cout<<" HDD Space: "<<mnHDDSpaceMB<<"MB"<<std::endl;
-
-    if(mnHDDSpaceMB<2000)
-    {
-        QMessageBox::warning(this,tr("Warning"),tr("没有足够硬盘空间。请删除回收站文件,log文件夹文件等等,释放足够空间。"),QMessageBox::YesAll);
-    }
-
-    connect(this,SIGNAL(otaversion(const char*)),this,SLOT(onotaversion(const char*)));
-
-    mpthreadota = new std::thread(&ADCIntelligentVehicle::threadotacheck,this);
-
-
-
-    QFile xFile;
-    xFile.setFileName("version");
-    if(xFile.open(QIODevice::ReadOnly))
-    {
-        char strversion[256];
-        int nread = 0;
-        if((nread =xFile.readLine(strversion,255)) > 0)
-        {
-            if(strversion[nread-1] == '\n')
-            {
-                strversion[nread-1] = 0;
-                nread = nread - 1;
-            }
-            if(nread > 0)
-            {
-                QString strversioncode = QString("version: ") + QString(strversion);
-                ui->statusBar->addPermanentWidget(new QLabel(strversioncode));
-            }
-        }
-        xFile.close();
-
-
-    }
-
-
 }
 
 ADCIntelligentVehicle::~ADCIntelligentVehicle()
 {
-    mbotacheckclose = true;
-    mpthreadota->join();
     gIvlog->warn("ADCIntelligentVehchicle Exit.");
     iv::modulecomm::Unregister(mpaplantrace);
     iv::modulecomm::Unregister(mpamapreq);
@@ -686,14 +636,34 @@ void ADCIntelligentVehicle::timeoutslot()
             ServiceCarStatus.mLidarS = -1;
         }
     }
-
+    //------------------YOLO----------------------------
+    is_ok = (ServiceCarStatus.mYoloS>0)?QStringLiteral("ok"):QStringLiteral("error");
+    int nOldYoloState = 0;
+    if(ServiceCarStatus.mYoloS>0)
+    {
+        if(nOldYoloState != 1)
+        {
+            nOldYoloState = 1;
+            ui->Camera_Yolo->setStyleSheet("background-color: green");
+        }
+    }
+    else
+    {
+        if(nOldYoloState != -1)
+        {
+            nOldYoloState = -1;
+            ui->Camera_Yolo->setStyleSheet("background-color:red");
+            ServiceCarStatus.mYoloS = -1;
+        }
+    }
+    //-------------------------------------------------
     int nMState = 0;
     if(mimobileyeState>0)
     {
         if(nMState != 1)
         {
             nMState = 1;
-            ui->Mobileye_st->setStyleSheet("background-color: green");
+//            ui->Mobileye_st->setStyleSheet("background-color: green");
         }
     }
     else
@@ -701,7 +671,7 @@ void ADCIntelligentVehicle::timeoutslot()
         if(nMState != -1)
         {
             nMState =-1;
-            ui->Mobileye_st->setStyleSheet("background-color: red");
+//            ui->Mobileye_st->setStyleSheet("background-color: red");
         }
         mimobileyeState = -1;
     }
@@ -838,8 +808,16 @@ void ADCIntelligentVehicle::timeoutslot()
     //        if(ui->checkBox_4->isChecked()==true)ServiceControlStatus.special_signle = 1;//是否特殊信号输入标志
     //        else ServiceControlStatus.special_signle = 0;
     //        if(ui->checkBox_5->isChecked()==true)ServiceControlStatus.car_pullover = 1;//是否靠边停车标志位
-    //        else ServiceControlStatus.car_pullover = 0;
 
+    //        else ServiceControlStatus.car_pullover = 0;
+    //zk add speed scale
+    //ui->lb_current_speed_big->setText(QString::number(ServiceCarStatus.speed,'f',1));
+    QString strpath = QCoreApplication::applicationDirPath();
+    strpath = strpath + "/speed.xml";
+    iv::xmlparam::Xmlparam xp1(strpath.toStdString());
+    float speed_scale = stof(xp1.GetParam("speed_scale","1"));
+//    std::cout<<speed_scale<<std::endl;
+    ui->lb_current_speed_big->setText(QString::number(ServiceCarStatus.speed*speed_scale,'f',1));
-    ui->lb_current_speed_big->setText(QString::number(ServiceCarStatus.speed,'f',1));
 
 
@@ -927,7 +905,12 @@ void ADCIntelligentVehicle::onStateTimer()
         mDataToUI.mInfo.lidarStatus = 0;
     else
         mDataToUI.mInfo.lidarStatus = 1;
-
+    //----------Yolo-----------
+    if(ServiceCarStatus.mYoloS<0)
+        mDataToUI.mInfo.YoloStatus = 0;
+    else
+        mDataToUI.mInfo.YoloStatus = 1;
+    //---------------------------
     if(ServiceCarStatus.mRadarS<0)
         mDataToUI.mInfo.radarStatus = 0;
     else
@@ -935,6 +918,7 @@ void ADCIntelligentVehicle::onStateTimer()
 
     ServiceCarStatus.mLidarS--;  //如果从10减到0,表示没有接收到数据。
     ServiceCarStatus.mRadarS--;
+    ServiceCarStatus.mYoloS--;
     mimobileyeState--;
 
     mDataToUI.mInfo.RTKstatus = ServiceCarStatus.mRTKStatus;
@@ -1026,7 +1010,16 @@ void ADCIntelligentVehicle::on_pb_start_all_clicked() {
  * @brief ADCIntelligentVehicle::on_timer_car_status_time_out
  */
 void ADCIntelligentVehicle::on_timer_car_status_time_out() {
-    ui->lb_current_speed_big->setText(QString::number(ServiceCarStatus.speed));
+
+    // zk add speed scale
+    //ui->lb_current_speed_big->setText(QString::number(ServiceCarStatus.speed));
+    // speed scale
+    QString strpath = QCoreApplication::applicationDirPath();
+    strpath = strpath + "/speed.xml";
+    iv::xmlparam::Xmlparam xp1(strpath.toStdString());
+    float speed_scale = stof(xp1.GetParam("speed_scale","1"));
+//    std::cout<<speed_scale<<std::endl;
+    ui->lb_current_speed_big->setText(QString::number(ServiceCarStatus.speed*speed_scale));
     //ui->sb_accelerate_percent->setValue(ServiceControlStatus.accelerate_percent);
     //ui->sb_wheel_percent->setValue(ServiceControlStatus.wheel_angle);
 
@@ -1461,7 +1454,29 @@ void ADCIntelligentVehicle::paintEvent(QPaintEvent *)
             }
         }
 
-
+        //----------------------Yolo-------------------------
+        if(ui->checkBox_11->isChecked())
+        {
+            painter->setPen(QColor(255,0,0));
+            iv::fusion::fusionobjectarray xYoloarray;
+            if(mbYoloUpdate)
+            {
+                mMutexYolo.lock();
+                xYoloarray.CopyFrom(mYoloarray);
+                mMutexYolo.unlock();
+                std::cout<<"Yolo_array.size(): "<<xYoloarray.obj_size()<<std::endl;
+                for(int a = 0; a < xYoloarray.obj_size(); a++)
+                {
+                    for(int b = 0; b < xYoloarray.obj(a).nomal_centroid_size(); b++)
+                    {
+                        std::cout<<"x:"<<xYoloarray.obj(a).nomal_centroid(b).nomal_x()<<std::endl;
+                        std::cout<<"y:"<<xYoloarray.obj(a).nomal_centroid(b).nomal_y()<<std::endl;
+                        painter->drawPoint((xYoloarray.obj(a).nomal_centroid(b).nomal_x())*10 + 450, -(xYoloarray.obj(a).nomal_centroid(b).nomal_y())*10 + 700);
+                    }
+                }
+            }
+        }
+        //----------------------------------------------------
         //////////////////////////////////////
         //                                  //
         //	          显示融合的信息          //
@@ -1477,10 +1492,13 @@ void ADCIntelligentVehicle::paintEvent(QPaintEvent *)
                 mMutexFusion.lock();
                 xfusionarray.CopyFrom(mfusionarray);
                 mMutexFusion.unlock();
+                std::cout<<"fusion_array.size(): "<<xfusionarray.obj_size()<<std::endl;
                 for(int a = 0; a < xfusionarray.obj_size(); a++)
                 {
                     for(int b = 0; b < xfusionarray.obj(a).nomal_centroid_size(); b++)
                     {
+                        std::cout<<"x:"<<xfusionarray.obj(a).nomal_centroid(b).nomal_x()<<std::endl;
+                        std::cout<<"y:"<<xfusionarray.obj(a).nomal_centroid(b).nomal_y()<<std::endl;
                         painter->drawPoint((xfusionarray.obj(a).nomal_centroid(b).nomal_x())*10 + 450, -(xfusionarray.obj(a).nomal_centroid(b).nomal_y() + ServiceCarStatus.lidar_y_offset)*10 + 700);
                     }
                 }
@@ -1753,11 +1771,27 @@ void ADCIntelligentVehicle::UpdateFusion(const char * strdata,const unsigned int
     }
     mMutexFusion.lock();
     mfusionarray.CopyFrom(xfusionarray);
+    std::cout<<"Fusion update_flag: "<<mbfusionUpdate<<std::endl;
     mbfusionUpdate = true;
     mMutexFusion.unlock();
     ServiceCarStatus.mLidarS = 10;
 }
 
+void ADCIntelligentVehicle::UpdateYolo(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    iv::fusion::fusionobjectarray xYoloarray;
+    if(!xYoloarray.ParseFromArray(strdata,nSize))
+    {
+        gIvlog->warn("ADCIntelligentVehicle::UpdateYolo Parse Error.");
+        return;
+    }
+    mMutexYolo.lock();
+    mYoloarray.CopyFrom(xYoloarray);
+    std::cout<<"Yolo update_flag: "<<mbYoloUpdate<<std::endl;
+    mbYoloUpdate = true;
+    mMutexYolo.unlock();
+    ServiceCarStatus.mYoloS = 10;
+}
 
 /**
  * @brief ADCIntelligentVehicle::UpdateMap
@@ -2758,200 +2792,3 @@ void ADCIntelligentVehicle::UpdateMapDomain(std::vector<iv::GPSData> &xvectorMap
     }
 
 }
-
-
-int ADCIntelligentVehicle::get_path_availspace(const QString & path)
-{
-
-#ifdef OS_UNIX
-    struct statfs diskInfo;
-    std::cout<<" run hea1r. "<<std::endl;
-
-    statfs(path.toLatin1().data(), &diskInfo);
-    std::cout<<" run hear. "<<std::endl;
-
-    qDebug("%s 总大小:%.0lfMB 可用大小:%.0lfMB",path.toLatin1().data(),(diskInfo.f_blocks * diskInfo.f_bsize)/1024.0/1024.0,(diskInfo.f_bavail * diskInfo.f_bsize)/1024.0/1024.0);
-    return (diskInfo.f_bavail * diskInfo.f_bsize)/1024.0/1024.0;
-#endif
-
-#ifdef OS_WIN
-    LPCWSTR lpcwstrDriver=(LPCWSTR)path.utf16();
-
-        ULARGE_INTEGER liFreeBytesAvailable, liTotalBytes, liTotalFreeBytes;
-
-        if( !GetDiskFreeSpaceEx( lpcwstrDriver, &liFreeBytesAvailable, &liTotalBytes, &liTotalFreeBytes) )
-        {
-            qDebug() << "ERROR: Call to GetDiskFreeSpaceEx() failed.";
-            return 0;
-        }
-        return (quint64) liTotalFreeBytes.QuadPart/1024/1024;
-
-#endif
-}
-
-void ADCIntelligentVehicle::threadotacheck()
-{
-    int npresleep = 0;
-    while((mbotacheckclose == false)&&(npresleep<3000))
-    {
-        npresleep++;
-        std::this_thread::sleep_for(std::chrono::milliseconds(1));
-    }
-    if(mbotacheckclose)
-    {
-        return;
-    }
-
-    QString strignorefilepath = "./otaignore";
-    QString strupdatesigpath = "./otaupdate.sig";
-    QString strupdateconfirm = "./otaconfirm";
-
-    QFile xFileConfirm;
-    xFileConfirm.setFileName(strupdateconfirm);
-    if(xFileConfirm.exists())
-    {
-        std::cout<<" confirm file is exist. not need check"<<std::endl;
-        return;
-    }
-
-    std::string strversion = "";
-    bool bNeedUpdate = false;
-    QFile xFilesig;
-    xFilesig.setFileName(strupdatesigpath);
-    if(xFilesig.open(QIODevice::ReadOnly))
-    {
-        char strline[256];
-        int nread = xFilesig.readLine(strline,256);
-        xFilesig.close();
-        if(nread >0)
-        {
-            if(strline[nread-1] == '\n')
-            {
-                nread = nread -1;
-            }
-            if(nread >0)
-            {
-                strline[nread] = 0;
-                strversion = strline;
-                bNeedUpdate = true;
-            }
-        }
-    }
-    else
-    {
-        std::cout<<" no update ."<<std::endl;
-        return;
-    }
-
-    if(bNeedUpdate == false)
-    {
-        std::cout<<" have update sig. but no version."<<std::endl;
-    }
-
-    QFile xFileignore;
-    xFileignore.setFileName(strignorefilepath);
-    if(xFileignore.open(QIODevice::ReadOnly))
-    {
-        char strline[256];
-        int nread = xFileignore.readLine(strline,256);
-        xFileignore.close();
-        if(nread >0)
-        {
-            if(strline[nread-1] == '\n')
-            {
-                nread = nread -1;
-            }
-            if(nread >0)
-            {
-                strline[nread] = 0;
-                std::string strignoreversion = strline;
-                if(strignoreversion == strversion)
-                {
-                    bNeedUpdate = false;
-                    std::cout<<" update sig equal ignore. so not need update"<<std::endl;
-                }
-            }
-        }
-    }
-
-    if(bNeedUpdate)
-    {
-        emit otaversion(strversion.data());
-    }
-
-    npresleep = 0;
-    while((mbotacheckclose == false)&&(npresleep<10000))
-    {
-        npresleep++;
-        std::this_thread::sleep_for(std::chrono::milliseconds(1));
-    }
-
-
-}
-
-void ADCIntelligentVehicle::onotaversion(const char * strversion)
-{
-    QString strignorefilepath = "./otaignore";
-    QString strupdatesigpath = "./otaupdate.sig";
-    QString strupdateconfirm = "./otaconfirm";
-    QMessageBox::StandardButton button;
-    char strquery[1000];
-    snprintf(strquery,1000,"Have new firmware,version %s. Do you want update?",strversion);
-    bool bignore = false;
-    bool bupdateconfirm = false;
-    button=QMessageBox::question(this,tr("OTA"),QString(strquery),QMessageBox::Yes|QMessageBox::No);
-    if(button==QMessageBox::Yes)
-    {
-        bupdateconfirm  = true;
-    }
-    else
-    {
-        button=QMessageBox::question(this,tr("OTA"),QString("Do you ignore this version?"),QMessageBox::Yes|QMessageBox::No);
-        if(button == QMessageBox::Yes)
-        {
-            bignore = true;
-        }
-    }
-
-    if(bupdateconfirm)
-    {
-        if(mnHDDSpaceMB > 2000)
-        {
-
-
-            QFile xFileconfirm;
-            xFileconfirm.setFileName(strupdateconfirm);
-            if(!xFileconfirm.exists())
-            {
-                if(xFileconfirm.open(QIODevice::ReadWrite))
-                {
-                    xFileconfirm.close();
-                    QMessageBox::information(this,tr("OTA"),tr("OTA in background."),QMessageBox::YesAll);
-                }
-                else
-                {
-                    QMessageBox::information(this,tr("OTA"),tr("Write Confirm File Fail."),QMessageBox::YesAll);
-                }
-            }
-        }
-        else
-        {
-            QMessageBox::warning(this,tr("OTA"),tr("No Enough HDD Space for ota."),QMessageBox::YesAll);
-        }
-    }
-
-    if(bignore)
-    {
-        QFile xFileignore;
-        xFileignore.setFileName(strignorefilepath);
-        if(xFileignore.open(QIODevice::ReadWrite))
-        {
-            xFileignore.write(strversion);
-            xFileignore.close();
-        }
-        else
-        {
-            QMessageBox::warning(this,tr("OTA"),tr("Write ignore fail."),QMessageBox::YesAll);
-        }
-    }
-}
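
For context, the yolo_obs channel registered above is parsed as an iv::fusion::fusionobjectarray, the same message type as li_ra_fusion. A minimal publisher sketch under that assumption follows; it is not part of this commit, the add_*/set_* accessors are inferred from the field names used in paintEvent, and the proto header name is a guess.

// Hypothetical publisher sketch (not part of this commit): send detected
// obstacle centroids on the "yolo_obs" channel that ADCIntelligentVehicle reads.
#include <string>
#include <utility>
#include <vector>
#include "modulecomm.h"
#include "fusionobjectarray.pb.h"   // assumed header for iv::fusion::fusionobjectarray

void publishYoloObstacles(void * handle, const std::vector<std::pair<float,float>> & centroids)
{
    iv::fusion::fusionobjectarray arr;
    auto * obj = arr.add_obj();                  // one object holding all centroids, for illustration
    for(const auto & c : centroids)
    {
        auto * p = obj->add_nomal_centroid();    // field names follow paintEvent's accessors
        p->set_nomal_x(c.first);
        p->set_nomal_y(c.second);
    }
    int nSize = arr.ByteSize();
    std::vector<char> buf(nSize);
    if(arr.SerializeToArray(buf.data(), nSize))
        iv::modulecomm::ModuleSendMsg(handle, buf.data(), nSize);
}
// handle would come from iv::modulecomm::RegisterSend("yolo_obs", <bufsize>, 1);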

+ 9 - 12
src/ui/ui_ads_hmi/ADCIntelligentVehicle.h

@@ -55,7 +55,6 @@
 #include "modulecomm.h"
 
 #include <QMutex>
-#include <thread>
 
 //#include <platform/platform.h>
 
@@ -138,6 +137,7 @@ struct dataInfo    //与PAD通信结构体
     unsigned char RTKstatus;
     unsigned char radarStatus;
     unsigned char lidarStatus;
+    unsigned char YoloStatus;
     unsigned char boche_status;
 
 };
@@ -171,9 +171,6 @@ public:
     explicit ADCIntelligentVehicle(QWidget *parent = 0);
     ~ADCIntelligentVehicle();
 
-signals:
-    void otaversion(const char * strversion);
-
 public slots:
 
     void savestabuyEditinfo(const QString &txt);
@@ -204,9 +201,6 @@ public slots:
     void onStateTimer();
     void onStateTimerMap();
     void onStateTimer1();
-
-    void onotaversion(const char * strversion);
-
 protected:
 
     void keyPressEvent(QKeyEvent *event) Q_DECL_OVERRIDE;// 加/减键进行缩放
@@ -388,6 +382,14 @@ private:
     bool mbfusionUpdate = false;
     void * mpafusion;
 /*****************************************/
+public:
+    void UpdateYolo(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname);
+    QMutex mMutexYolo;
+private:
+    iv::fusion::fusionobjectarray mYoloarray;
+    bool mbYoloUpdate = false;
+    void * mpaYolo;
+/*****************************************/
 
 private:
     void * mpaLidar;
@@ -422,12 +424,7 @@ private:
     std::vector<double> mDraw_x0,mDraw_y0,mDraw_x1,mDraw_y1,mDraw_x2,mDraw_y2;  //Save Draw Point
     QMutex mMutexDraw;
 
-    int get_path_availspace(const QString & path);
-    int mnHDDSpaceMB;
 
-    void threadotacheck();
-    bool mbotacheckclose = false;
-    std::thread * mpthreadota;
 
 };
 

+ 23 - 4
src/ui/ui_ads_hmi/ADCIntelligentVehicle.ui

@@ -827,7 +827,7 @@
       <widget class="QPushButton" name="pushButton_26">
        <property name="geometry">
         <rect>
-         <x>309</x>
+         <x>307</x>
          <y>60</y>
          <width>81</width>
          <height>21</height>
@@ -860,10 +860,10 @@
         <string>决策周期</string>
        </property>
       </widget>
-      <widget class="QPushButton" name="Mobileye_st">
+      <widget class="QPushButton" name="Camera_Yolo">
        <property name="geometry">
         <rect>
-         <x>412</x>
+         <x>410</x>
          <y>30</y>
          <width>81</width>
          <height>21</height>
@@ -875,7 +875,7 @@
         </font>
        </property>
        <property name="text">
-        <string>Mobileye</string>
+        <string>Camera</string>
        </property>
       </widget>
      </widget>
@@ -2518,6 +2518,25 @@ background-color: rgb(85, 87, 83);</string>
      <bool>true</bool>
     </property>
    </widget>
+   <widget class="QCheckBox" name="checkBox_11">
+    <property name="geometry">
+     <rect>
+      <x>1620</x>
+      <y>880</y>
+      <width>121</width>
+      <height>23</height>
+     </rect>
+    </property>
+    <property name="styleSheet">
+     <string notr="true">color: rgb(238, 238, 236);</string>
+    </property>
+    <property name="text">
+     <string>Camera show</string>
+    </property>
+    <property name="checked">
+     <bool>true</bool>
+    </property>
+   </widget>
   </widget>
   <widget class="QMenuBar" name="menuBar">
    <property name="geometry">

Some files were not shown because too many files changed in this diff