
Change the way images are read

fujiankuan 2 years ago
parent
commit
b08a1ae8de

+ 1 - 1
src/detection/laneATT_trt/laneATT_trt.pro

@@ -13,7 +13,7 @@ QMAKE_LFLAGS += -no-pie
 
 #system("cd ./caffe/proto ; protoc *.proto -I=./ --cpp_out=./ ; cd ../../ ; echo $PWD")
 
-SOURCES += yolop.cpp \
+SOURCES += main.cpp \
             laneatt.cc \
     ../../include/msgtype/rawpic.pb.cc \
     ../../include/msgtype/lanearray.pb.cc

+ 27 - 26
src/detection/laneATT_trt/main.cpp

@@ -25,17 +25,25 @@
 
 using namespace std;
 
-const std::string trt_path = "./LaneATT_test.trt8"; // trt paths
+const std::string trt_path = "./model/LaneATT_test.trt8"; // trt paths
 
 const bool calibrationstate = false;
 const std::string calibration_yamlpath = "./yaml/camera_middle_640_360.yaml";
 cv::Mat camera_matrix,dist_coe,map1,map2;
 
-
 //points for the perspective transform
 std::string PERS_FILE = "./yaml/pers_ori.yaml";
 
-bool test_video = true;
+
+const bool cropstate = false;
+cv::Range crop_height = cv::Range(240,600);
+cv::Range crop_width = cv::Range(320,960);
+
+//whether to display the detection results
+bool imshow_flag = true;
+
+//whether to read from a video source
+bool test_video = false;
 string video_path = "v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink";
 //std::string video_path = "/home/nvidia/code/modularization/src/detection/laneATT_trt_old/test/camera_test1.mp4";
 bool stop_read = false;
@@ -45,10 +53,6 @@ void * gpcamera;
 const std::string cameraname="image00";  //name of the shared memory
 ConsumerProducerQueue<cv::Mat> * imageBuffer =  new ConsumerProducerQueue<cv::Mat>(3,true);
 
-const bool cropstate = false;
-cv::Range crop_height = cv::Range(240,600);
-cv::Range crop_width = cv::Range(320,960);
-
 std::vector<cv::Point2f> warped_point, origin_point;
 cv::Point2i transed_O;
 
 const std::string resultname="linearray";  //name of the shared memory
 iv::Ivfault *gfault = nullptr;
 iv::Ivlog *givlog = nullptr;
 
-
-
-cv::Mat img_warp,raw_image;
-
 cv::VideoWriter outputVideo;
 
 
@@ -463,14 +463,14 @@ int main(int argc, char *argv[]) {
         {
             double waittotal = (double)cv::getTickCount() - waittime;
             double totaltime = waittotal/cv::getTickFrequency();
-            //--------quit the program if no frame is received for a long time----------
-            if(totaltime>30.0)
-            {
-                cout<<"Can't get frame and quit"<<endl;
-                cv::destroyAllWindows();
-                std::cout<<"------end program------"<<std::endl;
-                break;
-            }
+//            //--------quit the program if no frame is received for a long time----------
+//            if(totaltime>120.0)
+//            {
+//                cout<<"Can't get frame and quit"<<endl;
+//                cv::destroyAllWindows();
+//                std::cout<<"------end program------"<<std::endl;
+//                break;
+//            }
             cout<<"Wait for frame "<<totaltime<<"s"<<endl;
             continue;
         }
@@ -484,15 +484,16 @@ int main(int argc, char *argv[]) {
         std::cout <<"total time lane : "<<
                     std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() <<
                     "ms" << std::endl;
-
-        cv::imshow("rst",raw_image);
-        cv::imshow("warp",img_warp );
-        if (cv::waitKey(10) == 'q')
+        if(imshow_flag)
         {
-            stop_read = true;
-            break;
+            cv::imshow("rst",raw_image);
+            cv::imshow("warp",img_warp );
+            if (cv::waitKey(10) == 'q')
+            {
+                stop_read = true;
+                break;
+            }
         }
-
         double waittime = (double)cv::getTickCount();
 
     }
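
Note: the hunks above only show where cropstate/crop_height/crop_width and imshow_flag are declared and how imshow_flag now gates the display; the crop itself is applied elsewhere in main.cpp. A minimal C++ sketch of the assumed usage, where the frame-fetch call and exact control flow are guesses, not taken from this diff:

    // hypothetical frame loop: crop the shared-memory frame, then display on demand
    cv::Mat raw_image = getFrameFromBuffer();          // placeholder for the real imageBuffer call
    if (cropstate)
        raw_image = raw_image(crop_height, crop_width).clone();   // ROI given by the cv::Range pair
    // ... LaneATT inference runs on raw_image and fills img_warp ...
    if (imshow_flag)                                   // display only when enabled
    {
        cv::imshow("rst", raw_image);
        cv::imshow("warp", img_warp);
        if (cv::waitKey(10) == 'q') stop_read = true;
    }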

BIN
src/detection/laneATT_trt/python/img_cali.jpg


+ 100 - 0
src/detection/laneATT_trt/python/show_coordinate.py

@@ -0,0 +1,100 @@
+import cv2
+import yaml
+import numpy as np
+import sys
+import os
+
+
+def test():
+
+
+    camera_files = "./yaml/camera_ori.yaml"
+    print("files",camera_files)
+    #W, H = 640, 480
+    W, H = 1280, 720
+    init_files=(os.path.realpath(sys.path[0]) + "/yaml/init.yaml")
+    
+    with open(init_files, "r") as f:
+        init_data = yaml.load(f)
+   
+    source = init_data["source"]
+    print("source=",source)
+
+
+
+
+    #cap = cv2.VideoCapture("v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink")
+    cap = cv2.VideoCapture("/home/nvidia/modularization/src/detection/laneATT_trt/test/camera_test.mp4")
+
+
+
+
+
+    #with open(camera_files, "r") as f:
+    #    data = yaml.load(f,Loader=yaml.FullLoader)
+    #K = np.array(data["K"])
+    #D = np.array(data["D"])    
+    
+    data = cv2.FileStorage(camera_files, cv2.FILE_STORAGE_READ)
+    K = data.getNode("cameraMatrix").mat()
+    D = data.getNode("distCoeffs").mat()
+    data.release()
+    print("K=",K)
+    print("D=",D)
+
+    map1, map2 = cv2.fisheye.initUndistortRectifyMap(
+            K,
+            D,
+            np.eye(3),
+            K,
+            (W, H),
+            cv2.CV_16SC2
+    )
+    
+
+    while True:
+        ret, frame = cap.read()
+        frame = cv2.resize(frame,(W,H))
+        #frame = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
+        cv2.imshow("capture", frame)
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            img = frame
+            cv2.imwrite('img.jpg', frame)  # save the frame as an image
+            break
+    cap.release()
+    cv2.destroyAllWindows()
+    #img = cv2.imread('img.jpg')
+
+
+    def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
+        if event == cv2.EVENT_LBUTTONDOWN:
+            xy = "%d,%d" % (x, y)
+            cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)
+            print((x,y))
+            cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
+                        1.5, (255, 255, 255), thickness=2)
+            cv2.imshow("image", img)
+
+    cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
+    cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
+    cv2.imshow("image", img)
+
+    while (True):
+        try:
+            if cv2.waitKey(10) & 0xFF == ord('q'):
+                break
+        except Exception:
+            cv2.destroyWindow("image")
+            break
+
+
+
+if __name__=='__main__':
+    test()
+
+
+
+
+
+
+
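
Note: show_coordinate.py only captures a frame (optionally undistorted via cv2.fisheye.initUndistortRectifyMap) and prints the pixel coordinates of clicked points; how those points are consumed is not part of this commit. A minimal C++ sketch of the assumed consumer side in main.cpp, which presumably loads such points from PERS_FILE (./yaml/pers_ori.yaml) and builds the bird's-eye warp; the literal coordinates and the 640x360 output size are placeholders, not values from this diff:

    // hypothetical: build img_warp from four picked source points
    std::vector<cv::Point2f> src_pts = { {400.f,420.f}, {900.f,420.f}, {1200.f,700.f}, {80.f,700.f} };  // clicks printed by show_coordinate.py
    std::vector<cv::Point2f> dst_pts = { {0.f,0.f}, {640.f,0.f}, {640.f,360.f}, {0.f,360.f} };          // target rectangle in the warped view
    cv::Mat M = cv::getPerspectiveTransform(src_pts, dst_pts);
    cv::warpPerspective(raw_image, img_warp, M, cv::Size(640, 360));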

+ 102 - 0
src/detection/laneATT_trt/python/show_coordinate_new.py

@@ -0,0 +1,102 @@
+import cv2
+import yaml
+import numpy as np
+import sys
+import os
+
+
+def test():
+
+    """
+    camera_files = "./yaml/camera_middle_640_360.yaml"
+    print("files",camera_files)
+    W, H = 640, 360
+    #W, H = 1280, 720
+    init_files=(os.path.realpath(sys.path[0]) + "/yaml/init.yaml")
+    
+    with open(init_files, "r") as f:
+        init_data = yaml.load(f)
+   
+    source = init_data["source"]
+    print("source=",source)
+
+
+
+
+    cap = cv2.VideoCapture("v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink")
+   # cap = cv2.VideoCapture(1)
+
+
+
+
+
+    #with open(camera_files, "r") as f:
+    #    data = yaml.load(f,Loader=yaml.FullLoader)
+    #K = np.array(data["K"])
+    #D = np.array(data["D"])    
+    
+    data = cv2.FileStorage(camera_files, cv2.FILE_STORAGE_READ)
+    K = data.getNode("cameraMatrix").mat()
+    D = data.getNode("distCoeffs").mat()
+    data.release()
+    print("K=",K)
+    print("D=",D)
+
+    map1, map2 = cv2.initUndistortRectifyMap(
+            K,
+            D,
+            np.eye(3),
+            K,
+            (W, H),
+            cv2.CV_16SC2
+    )
+    """
+
+    cap = cv2.VideoCapture("v4l2src device=/dev/video0 ! video/x-raw, width=(int)1280, height=(int)720 ! videoconvert ! appsink")
+    while True:
+        ret, frame = cap.read()
+        #frame = frame[410-90:590+90, 380+35:1020+35]
+        #frame = cv2.resize(frame,(W,H))
+        #frame = cv2.remap(frame, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
+        cv2.imshow("capture", frame)
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            img = frame
+            cv2.imwrite('img_cali.jpg', frame)  # save the frame as an image
+            break
+    cap.release()
+    cv2.destroyAllWindows()
+    #img = cv2.imread('img.jpg')
+
+
+    def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):
+        if event == cv2.EVENT_LBUTTONDOWN:
+            xy = "%d,%d" % (x, y)
+            cv2.circle(img, (x, y), 1, (255, 0, 0), thickness=-1)
+            print((x,y))
+            cv2.putText(img, xy, (x, y), cv2.FONT_HERSHEY_PLAIN,
+                        1.5, (255, 255, 255), thickness=2)
+            cv2.imshow("image", img)
+
+    cv2.namedWindow("image", cv2.WINDOW_AUTOSIZE)
+    cv2.setMouseCallback("image", on_EVENT_LBUTTONDOWN)
+    cv2.imshow("image", img)
+
+    while (True):
+        try:
+            if cv2.waitKey(10) & 0xFF == ord('q'):
+                break
+        except Exception:
+            cv2.destroyWindow("image")
+            break
+
+
+
+if __name__=='__main__':
+    test()
+
+
+
+
+
+
+

+ 3 - 3
src/detection/laneATT_trt/python/show_coordinate_shot.py

@@ -7,9 +7,9 @@ import os
 def test():
 
 
-    pers_file = "./yaml/pers.yaml"
+    pers_file = "../yaml/pers.yaml"
     W, H = 640, 360
-    init_files="./yaml/init.yaml"
+    init_files="../yaml/init.yaml"
     
     with open(init_files, "r") as f:
         init_data = yaml.load(f)
@@ -23,7 +23,7 @@ def test():
     
 
 
-    img = cv2.imread('./img.jpg')
+    img = cv2.imread('./img_cali.jpg')
 
 
     def on_EVENT_LBUTTONDOWN(event, x, y, flags, param):