fujiankuan 2 years ago
parent
commit
806d27e714

+ 82 - 0
src/detection/laneATT_trt/imageBuffer.h

@@ -0,0 +1,82 @@
+#ifndef IMAGEBUFFER_H
+#define IMAGEBUFFER_H
+
+#include <opencv2/opencv.hpp>
+#include <mutex>
+#include <condition_variable>
+#include <queue>
+template<typename T>
+class ConsumerProducerQueue
+{
+
+public:
+    ConsumerProducerQueue(int mxsz, bool dropFrame) :
+            maxSize(mxsz), dropFrame(dropFrame)
+    { }
+
+    // Add an item; if dropFrame is set and the queue is full, drop the oldest
+    // frame instead of blocking the producer.
+    bool add(T request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        if(dropFrame && isFull())
+        {
+            cpq.pop();            // discard the oldest frame
+            cpq.push(request);
+            cond.notify_all();
+            return true;
+        }
+        else {
+            cond.wait(lock, [this]() { return !isFull(); });
+            cpq.push(request);
+            cond.notify_all();
+            return true;
+        }
+    }
+    // Blocking pop: waits until at least one item is available.
+    void consume(T &request)
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        cond.wait(lock, [this]() { return !isEmpty(); });
+        request = cpq.front();
+        cpq.pop();
+        cond.notify_all();
+    }
+
+    bool isFull() const
+    {
+        return cpq.size() >= static_cast<size_t>(maxSize);
+    }
+
+    bool isEmpty() const
+    {
+        return cpq.empty();
+    }
+
+    int length() const
+    {
+        return static_cast<int>(cpq.size());
+    }
+
+    void clear()
+    {
+        std::unique_lock<std::mutex> lock(mutex);
+        while (!isEmpty())
+        {
+            cpq.pop();
+        }
+        lock.unlock();
+        cond.notify_all();
+    }
+
+private:
+    std::condition_variable cond;  // condition variable used to synchronize producer and consumer threads
+    std::mutex mutex;              // protects the queue against data races
+    std::queue<T> cpq;             // underlying FIFO container
+    int maxSize;
+    bool dropFrame;
+};
+#endif // IMAGEBUFFER_H
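
A minimal usage sketch of the queue above (not part of this commit; the capture source, frame count, and stop flag are illustrative): one producer thread pushes frames while a consumer blocks until a frame is available. With dropFrame == true the producer never blocks; a full queue simply discards its oldest frame.

#include "imageBuffer.h"
#include <atomic>
#include <thread>

int main()
{
    ConsumerProducerQueue<cv::Mat> buffer(3, true);   // keep at most 3 frames, drop the oldest when full
    std::atomic<bool> stop{false};

    std::thread producer([&]() {
        cv::VideoCapture cap(0);                      // illustrative source: first local camera
        cv::Mat frame;
        while (!stop && cap.read(frame))
            buffer.add(frame.clone());                // clone so the queued Mat owns its pixels
    });

    cv::Mat frame;
    for (int i = 0; i < 100; ++i)
    {
        buffer.consume(frame);                        // blocks until a frame is available
        // ... process frame ...
    }
    stop = true;
    producer.join();
    return 0;
}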

+ 504 - 0
src/detection/laneATT_trt/main.cpp

@@ -0,0 +1,504 @@
+#include "modulecomm.h"
+#include "xmlparam.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include "rawpic.pb.cc"
+#include <QCoreApplication>
+#include <thread>
+#include "ivversion.h"
+#include <chrono>
+#include "opencv2/imgcodecs/legacy/constants_c.h"
+#include "qmutex.h"
+#include "condition_variable"
+#include "laneatt.hh"
+#include <opencv2/opencv.hpp>
+#include "lanearray.pb.h"
+#include "imageBuffer.h"
+
+#define INPUT_H 360
+#define INPUT_W 640
+#define N_OFFSETS 72
+#define N_STRIPS (N_OFFSETS - 1)
+#define MAX_COL_BLOCKS 1000
+
+using namespace std;
+
+const std::string trt_path = "./LaneATT_test.trt8"; // path to the serialized TensorRT engine
+
+const bool calibrationstate = false;
+const std::string calibration_yamlpath = "./yaml/camera_middle_640_360.yaml";
+cv::Mat camera_matrix,dist_coe,map1,map2;
+
+
+// source points for the perspective transform
+std::string PERS_FILE = "./yaml/pers_ori.yaml";
+
+bool test_video = true;
+//string video_path = "v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink";
+std::string video_path = "/home/nvidia/code/modularization/src/detection/laneATT_trt_old/test/camera_test1.mp4";
+bool stop_read = false;
+
+// image queue fed from shared memory or the video-reader thread
+void * gpcamera;
+const std::string cameraname="image00";  // name of the shared-memory channel
+ConsumerProducerQueue<cv::Mat> * imageBuffer =  new ConsumerProducerQueue<cv::Mat>(3,true);
+
+const bool cropstate = false;
+cv::Range crop_height = cv::Range(240,600);
+cv::Range crop_width = cv::Range(320,960);
+
+std::vector<cv::Point2f> warped_point, origin_point;
+cv::Point2i transed_O;
+
+float METER_PER_PIXEL_X,METER_PER_PIXEL_Y;
+
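+// real-world size (in metres) of the calibration rectangle used to derive the metre-per-pixel scales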
+float BOARD_SIDE_LENGTH_X = 3.7;
+float BOARD_SIDE_LENGTH_Y = 10.0;
+
+
+void * gpdetect;
+const std::string resultname="linearray";  // name of the shared-memory channel
+
+
+iv::Ivfault *gfault = nullptr;
+iv::Ivlog *givlog = nullptr;
+
+
+
+cv::Mat img_warp,raw_image;
+
+cv::VideoWriter outputVideo;
+
+
+// Read frames from a video file and push them into the image buffer
+void ReadFunc(int n)
+{
+    cv::VideoCapture cap(video_path);
+    if(!cap.isOpened())
+    {
+        cout<<"video source failed to open"<<endl;
+        return;
+    }
+    while(!stop_read)
+    {
+        cv::Mat frame;
+        // keep this check when reading from a video file; remove it when reading from a live camera
+        if(imageBuffer->isFull())
+        {
+            continue;
+        }
+        if(cap.read(frame))
+        {
+            if(calibrationstate)
+                cv::remap(frame,frame,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+            if(cropstate)
+                frame = frame(crop_height,crop_width);
+            imageBuffer->add(frame);
+        }
+        else
+        {
+            std::this_thread::sleep_for(std::chrono::milliseconds(1));
+        }
+    }
+    return;
+}
+void Listenpic(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    if(nSize<1000)return;
+    iv::vision::rawpic pic;
+    if(false == pic.ParseFromArray(strdata,nSize))
+    {
+        std::cout<<"picview Listenpic fail."<<std::endl;
+        return;
+    }
+    cv::Mat mat(pic.height(),pic.width(),pic.mattype());
+    if(pic.type() == 1)
+        memcpy(mat.data,pic.picdata().data(),mat.rows*mat.cols*mat.elemSize());
+    else
+    {
+        //     mat.release();
+        std::vector<unsigned char> buff(pic.picdata().data(),pic.picdata().data() + pic.picdata().size());
+        mat = cv::imdecode(buff,cv::IMREAD_COLOR);
+    }
+    if(calibrationstate)
+        cv::remap(mat,mat,map1,map2,cv::INTER_LINEAR,cv::BORDER_CONSTANT);
+    if(cropstate)
+        mat = mat(crop_height,crop_width);
+    imageBuffer->add(mat);
+    mat.release();
+}
+
+
+
+void exitfunc()
+{
+//    gbstate = false;
+//    gpthread->join();
+//    std::cout<<"state thread closed."<<std::endl;
+    return;
+}
+
+cv::Mat get_pers_mat(std::string pers_file)
+{
+    cv::Point2f src[4];
+    cv::Point2f dst[4];
+    cv::FileStorage pf(pers_file, cv::FileStorage::READ);
+    pf["board_left_top"]>>src[0];
+    pf["board_left_bottom"]>>src[1];
+    pf["board_right_top"]>>src[2];
+    pf["board_right_bottom"]>>src[3];
+
+//    pf["board_left_top_dst"]>>dst[0];
+//    pf["board_left_bottom_dst"]>>dst[1];
+//    pf["board_right_top_dst"]>>dst[2];
+//    pf["board_right_bottom_dst"]>>dst[3];
+
+/* -------------------------------------zhengke
+    dst[1].x = src[0].x;
+    dst[1].y = src[1].y;
+
+    dst[3].x = src[2].x;
+    dst[3].y = src[3].y;
+
+    int side_length = dst[3].x - dst[1].x;
+
+    METER_PER_PIXEL = BOARD_SIDE_LENGTH / side_length;
+
+    dst[0].x = src[0].x;
+    dst[0].y = dst[1].y - side_length;
+
+    dst[2].x = src[2].x;
+    dst[2].y = dst[3].y - side_length;
+
+    cv::Mat pers_mat = getPerspectiveTransform(dst, src);
+    cv::Mat pers_mat_inv = pers_mat.inv();
+
+    return pers_mat_inv;
+*///-------------------------------------zhengke
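+    // The destination points define an axis-aligned rectangle in the bird's-eye view
+    // (warped-image pixel coordinates); the metre-per-pixel scales are derived from
+    // the known real-world dimensions BOARD_SIDE_LENGTH_X/Y.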
+    dst[1].x = 130*2;
+    dst[1].y = 330*2;
+
+    dst[3].x = 510*2;
+    dst[3].y = 330*2;
+
+    int side_length_x = dst[3].x - dst[1].x;
+
+    METER_PER_PIXEL_X = BOARD_SIDE_LENGTH_X / float(side_length_x);
+
+    dst[0].x = dst[1].x;
+    dst[0].y = 10*2;
+
+    dst[2].x = dst[3].x;
+    dst[2].y = 10*2;
+
+    int side_length_y = dst[1].y - dst[0].y;
+
+    METER_PER_PIXEL_Y = BOARD_SIDE_LENGTH_Y / float(side_length_y);
+
+    transed_O.x = (dst[1].x + dst[3].x)/2;
+    transed_O.y = (dst[1].y + dst[3].y)/2;
+
+    cv::Mat pers_mat = getPerspectiveTransform(dst, src);
+    cv::Mat pers_mat_inv = pers_mat.inv();
+
+    return pers_mat_inv;
+}
+
+void transform (std::vector<std::vector<cv::Point2f>> &lanes,cv::Mat &pers_mat_inv,
+                cv::Mat &raw_image,cv::Mat &img_warp)
+{
+    // PerspectiveTransform
+    std::vector<cv::Point2f> leftLane;
+    std::vector<cv::Point2f> rightLane;
+    std::vector<std::vector<cv::Point2f>> warped_lanes;
+
+    for (int i = 0; i < lanes.size(); i++) {
+        cv::perspectiveTransform(lanes[i], warped_point , pers_mat_inv); //  坐标变换(输入点,输出点,变换矩阵)
+        warped_lanes.push_back(warped_point);
+        double sum = 0.0;
+        for (int i = 0 ; i < 5 ; i++){
+            sum += warped_point[i].x;
+        }
+        double mean = sum / 5;
+        if (mean < raw_image.size().width / 2){
+            leftLane.push_back(cv::Point2f(mean, i));
+        }else{
+            rightLane.push_back(cv::Point2f(mean, i));
+        }
+    }
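+    // Pick the innermost left lane (largest mean x among left candidates) and the
+    // innermost right lane (smallest mean x among right candidates); any remaining
+    // left candidate is remembered as the next lane to the left.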
+    float left_min = 0;
+    float right_max = INPUT_W;
+    int left_num = -1;
+    int right_num = -1;
+    int left_left = -1;
+    int right_right = -1;
+    for (const auto& points : leftLane){
+        if(points.x > left_min){
+            left_num = (int) round(points.y);
+            left_min = points.x;
+        }
+    }
+    for (const auto& points : leftLane){
+        if((int)round(points.y) != left_num){
+            left_left = points.y;
+        }
+    }
+//    if (left_num == -1){
+//        std::cout<<"left lane failed"<<std::endl;
+//    }
+    for (const auto& points : rightLane){
+        if(points.x < right_max){
+            right_num = (int) round(points.y);
+            right_max = points.x;
+        }
+    }
+    for (const auto& points : rightLane){
+        if((int)round(points.y) != right_num){
+            right_right = points.y;
+        }
+    }
+//    if (right_num == -1){
+//        std::cout<<"right lane failed"<<std::endl;
+//    }
+
+    // Visualize
+
+    /*-----------------warp image and points ----------------------*/
+    cv::Point2f pp;
+    cv::Point2f ppp;
+    int flag = 0;
+    float DX = 0.055, DY = 0.26; // pixel-to-metre ratios (currently unused)
+    std::vector<std::vector<cv::Point2f>> world_lanes;
+    cv::warpPerspective(raw_image, img_warp, pers_mat_inv, raw_image.size());        // warp the image (input, output, transform matrix)
+    for (const auto& lane_points : warped_lanes) {
+        if(flag == left_num || flag == right_num){
+            std::vector<cv::Point2f> world_points;
+            for (const auto& point : lane_points) {
+                pp.x =float(point.x);
+                pp.y =float(point.y);
+                cv::circle(img_warp, pp, 3, cv::Scalar(0, 255, 0), -1);
+                //ppp.x = float((point.x - img_warp.size().width / 2) * METER_PER_PIXEL_X);
+                //ppp.y = float((img_warp.size().height - point.y) * METER_PER_PIXEL_Y);
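+                // convert warped-image pixels to metres in the vehicle frame, with transed_O as the origin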
+                ppp.x = float((point.x - transed_O.x) * METER_PER_PIXEL_X);
+                ppp.y = float((transed_O.y - point.y) * METER_PER_PIXEL_Y);
+                world_points.push_back(ppp);
+            }
+            world_lanes.push_back(world_points);
+        }else{
+            std::vector<cv::Point2f> world_points;
+            for (const auto& point : lane_points) {
+                pp.x =float(point.x);
+                pp.y =float(point.y);
+                //std::cout<<pp.x<<" "<<pp.y<<std::endl;
+                cv::circle(img_warp, pp, 3, cv::Scalar(0, 0, 255), -1);
+                ppp.x = float((point.x - transed_O.x) * METER_PER_PIXEL_X);
+                ppp.y = float((transed_O.y - point.y) * METER_PER_PIXEL_Y);
+                world_points.push_back(ppp);
+                //std::cout<<ppp.x<<" "<<ppp.y<<std::endl;
+            }
+            world_lanes.push_back(world_points);
+        }
+        flag++;
+    }
+    //cv::imshow("warp",img_warp);
+
+
+   /*------------------------show circle--------------------------*/
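+    // Green: ego-lane boundaries; red: the next lane to the left; blue: all other lanes.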
+    flag = 0;
+    for (const auto& lane_points : lanes) {
+        if(flag == left_num || flag == right_num){
+            for (int i=0; i<lane_points.size(); i++)  {
+                pp.x =float(lane_points[i].x);
+                pp.y =float(lane_points[i].y);
+                cv::circle(raw_image, pp, 3, cv::Scalar(0, 255, 0), -1);
+
+            }
+        }else if(flag == left_left){
+            for (int i=0; i<lane_points.size(); i++) {
+                pp.x =float(lane_points[i].x);
+                pp.y =float(lane_points[i].y);
+                cv::circle(raw_image, pp, 3, cv::Scalar(0, 0, 255), -1);
+            }
+        }else {
+            for (int i=0; i<lane_points.size(); i++) {
+                pp.x =float(lane_points[i].x);
+                pp.y =float(lane_points[i].y);
+                cv::circle(raw_image, pp, 3, cv::Scalar(255, 0, 0), -1);
+            }
+        }
+        flag++;
+    }
+/*------------------------show lanes_line------------------------------*/
+//    cv::Point2f p1, p2;
+//    flag = 0;
+//    for (const auto& lane_points : lanes) {
+//        if(flag == left_num || flag == right_num){
+//            for (int i=0; i<lane_points.size()-1; i++) {
+//                p1.x =float(lane_points[i].x) *  x_ratio;
+//                p1.y =float(lane_points[i].y) * y_ratio;
+//                p2.x =float(lane_points[i+1].x) *  x_ratio;
+//                p2.y =float(lane_points[i+1].y) *  y_ratio;
+//                cv::line(raw_image, p1, p2, cv::Scalar(0, 255, 0), 1);
+
+//            }
+//        }else{
+//            for (int i=0; i<lane_points.size()-1; i++) {
+//                p1.x =float(lane_points[i].x) *  x_ratio;
+//                p1.y =float(lane_points[i].y) * y_ratio;
+//                p2.x =float(lane_points[i+1].x) *  x_ratio;
+//                p2.y =float(lane_points[i+1].y) *  y_ratio;
+//                cv::line(raw_image, p1, p2, cv::Scalar(0, 0, 255), 1);
+//            }
+//        }
+//        flag++;
+//    }
+
+    //cv::imshow("raw",raw_image);
+
+    /*---------------------- protobuf ------------------------*/
+
+    iv::vision::Linearray line_array;
+
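+    // Lane indices in the protobuf message: 1 = next lane to the left,
+    // 2 = ego left boundary, 3 = ego right boundary, 4 = any other lane.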
+    flag = 0;
+    for (const auto& lane_points : world_lanes) {
+        iv::vision::Line *line = line_array.add_line();
+
+        if(flag == left_left){
+            line->set_index(1);
+            for (int i=0; i<lane_points.size(); i++)  {
+                iv::vision::Point2f *points = line->add_linepoint();
+                points->set_x(lane_points[i].x);
+                points->set_y(lane_points[i].y);
+
+            }
+        }else if(flag == left_num){
+            line->set_index(2);
+            for (int i=0; i<lane_points.size(); i++) {
+                iv::vision::Point2f *points = line->add_linepoint();
+                points->set_x(lane_points[i].x);
+                points->set_y(lane_points[i].y);
+            }
+        }else if(flag == right_num){
+            line->set_index(3);
+            for (int i=0; i<lane_points.size(); i++) {
+                iv::vision::Point2f *points = line->add_linepoint();
+                points->set_x(lane_points[i].x);
+                points->set_y(lane_points[i].y);
+            }
+        }else{
+            line->set_index(4);
+            for (int i=0; i<lane_points.size(); i++) {
+                iv::vision::Point2f *points = line->add_linepoint();
+                points->set_x(lane_points[i].x);
+                points->set_y(lane_points[i].y);
+            }
+        }
+        flag++;
+    }
+    /*-----------------------shareMsg-------------------------*/
+
+    int size = line_array.ByteSize();
+    char * strdata = new char[size];
+    if(line_array.SerializeToArray(strdata, size))
+    {
+        iv::modulecomm::ModuleSendMsg(gpdetect, strdata, size);
+        std::cout<<"lane serialize success."<<std::endl;
+    }
+    else
+    {
+        std::cout<<"lane serialize error."<<std::endl;
+    }
+    delete[] strdata;
+
+}
+
+int main(int argc, char *argv[]) {
+
+// ==============================================================================
+    showversion("laneatt");
+    QCoreApplication a(argc, argv);
+
+    gfault = new iv::Ivfault("laneATT");
+    givlog = new iv::Ivlog("laneATT");
+
+    gfault->SetFaultState(0,0,"laneATT initialize.");
+
+
+    if(argc==3)
+    {
+        test_video = (strcmp(argv[1], "true") == 0);
+        video_path = argv[2];
+    }
+    if(argc==2)
+    {
+        test_video = (strcmp(argv[1], "true") == 0);
+    }
+
+    std::thread * readthread = nullptr;
+    if(test_video)
+        readthread = new std::thread(ReadFunc,1);   // producer thread: reads frames from the video file
+    else
+        gpcamera = iv::modulecomm::RegisterRecv(&cameraname[0],Listenpic);
+
+    gpdetect = iv::modulecomm::RegisterSend(&resultname[0],10000000,1);
+
+    if (calibrationstate)
+    {
+        cv::FileStorage calib_file(calibration_yamlpath, cv::FileStorage::READ);
+        calib_file["cameraMatrix"]>>camera_matrix;
+        calib_file["distCoeffs"]>>dist_coe;
+        cv::Mat R = cv::Mat::eye(3, 3, CV_64F);
+        cv::Size imgsize=cv::Size(1280,720);
+        cv::initUndistortRectifyMap(camera_matrix, dist_coe, R, camera_matrix,imgsize,CV_16SC2,map1,map2);
+    }
+    cv::Mat pers_mat_inv = get_pers_mat(PERS_FILE);  // build the perspective-transform mapping
+
+    // initialize the TensorRT lane-detection model
+    LaneATT model(trt_path);
+
+    double waittime = (double)cv::getTickCount();
+    while (1)
+    {
+        if(imageBuffer->isEmpty())
+        {
+            double waittotal = (double)cv::getTickCount() - waittime;
+            double totaltime = waittotal/cv::getTickFrequency();
+            //-------- quit if no frame has arrived for a long time ----------
+            if(totaltime>30.0)
+            {
+                cout<<"Can't get frame, quitting"<<endl;
+                cv::destroyAllWindows();
+                std::cout<<"------end program------"<<std::endl;
+                break;
+            }
+            cout<<"Wait for frame "<<totaltime<<"s"<<endl;
+            std::this_thread::sleep_for(std::chrono::milliseconds(10));  // avoid busy-waiting while the buffer is empty
+            continue;
+        }
+        auto start = std::chrono::system_clock::now();  // start timing
+        cv::Mat raw_image,img_warp;
+        imageBuffer->consume(raw_image);
+        std::vector<std::vector<cv::Point2f>> lanes;
+        lanes = model.DetectLane(raw_image);
+        transform (lanes,pers_mat_inv,raw_image,img_warp);
+        auto end = std::chrono::system_clock::now();  // stop timing
+        std::cout <<"total time lane : "<<
+                    std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() <<
+                    "ms" << std::endl;
+
+        cv::imshow("rst",raw_image);
+        cv::imshow("warp",img_warp );
+        if (cv::waitKey(10) == 'q')
+        {
+            stop_read = true;
+            break;
+        }
+
+        waittime = (double)cv::getTickCount();  // reset the empty-buffer timeout after a successful frame
+
+    }
+
+    cv::destroyAllWindows();
+    //iv::ivexit::RegIVExitCall(exitfunc);
+    return a.exec();
+}
+