浏览代码

front vision yolov4 tensorrt7

liqingxia 3 年之前
父节点
当前提交
3f7e6b10bf
共有 100 个文件被更改,包括 16057 次插入0 次删除
  1. 1 0
      src/detection/front_vision_ros_trt7/front_vison_ros/.catkin_workspace
  2. 73 0
      src/detection/front_vision_ros_trt7/front_vison_ros/README.MD
  3. 69 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/CMakeLists.txt
  4. 215 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/CMakeLists.txt
  5. 8 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/adc_depth.msg
  6. 7 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/bbox2d.msg
  7. 3 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/bboxes2d.msg
  8. 10 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/lane_info.msg
  9. 3 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/lane_info_array.msg
  10. 73 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/package.xml
  11. 217 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/CMakeLists.txt
  12. 9 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/launch/binocular.launch
  13. 11 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/launch/binocular_calibration.launch
  14. 74 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/package.xml
  15. 135 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/src/binocular_camera_node.cpp
  16. 162 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/CMakeLists.txt
  17. 211 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_app_node.cpp
  18. 832 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_depth_fusion_app_node.cpp
  19. 726 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_depth_fusion_app_node_old.cpp
  20. 325 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_lane_node.cpp
  21. 205 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_trt7_app_node.cpp
  22. 552 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/lane_depth_fusion_app_node.cpp
  23. 209 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/lane_detection_app_node.cpp
  24. 40 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/modulecomm.h
  25. 227 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/sgm_app_node.cpp
  26. 36 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/adc_3d/utils.h
  27. 76 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/cmw_app/detection_trt7_app.h
  28. 74 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/cmw_app/lane_detection_app.h
  29. 180 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/cmw_app/sgm_app.h
  30. 66 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/cuda_utils.h
  31. 2209 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/impl/cxxopts.hpp
  32. 34 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/project_root.h
  33. 35 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/time_adcsoft.h
  34. 131 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/utils.h
  35. 53 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/work.h
  36. 36 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/Hungarian.h
  37. 89 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/KalmanTracker.h
  38. 32 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/dataReader.h
  39. 57 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/detect_obstacle.h
  40. 94 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/Utils.h
  41. 503 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/logging.h
  42. 106 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/mish.h
  43. 70 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/trt_utils.h
  44. 164 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/yolo.h
  45. 52 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/yolodetect.h
  46. 126 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/yololayer.h
  47. 38 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/Hungarian.h
  48. 72 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/KalmanTracker.h
  49. 101 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/lanedetect.h
  50. 78 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/tensorrt/tensorrt.h
  51. 2209 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/common/impl/cxxopts.hpp
  52. 45 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/census_transform.hpp
  53. 104 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/device_buffer.hpp
  54. 45 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/horizontal_path_aggregation.hpp
  55. 69 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/oblique_path_aggregation.hpp
  56. 52 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/path_aggregation.hpp
  57. 98 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/path_aggregation_common.hpp
  58. 62 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/sgm.hpp
  59. 24 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/types.hpp
  60. 241 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/utility.hpp
  61. 45 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/vertical_path_aggregation.hpp
  62. 62 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/winner_takes_all.hpp
  63. 48 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/internal.h
  64. 171 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm.h
  65. 9 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm_config.h
  66. 9 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm_config.h.in
  67. 80 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm_wrapper.h
  68. 12 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/detection.launch
  69. 12 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/detection_depth_fusion.launch
  70. 12 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/detection_trt7.launch
  71. 13 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/lane_depth_fusion.launch
  72. 13 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/lane_detection.launch
  73. 59 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/smart_eys.launch
  74. 17 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/stereo_match.launch
  75. 80 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/package.xml
  76. 80 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/coco.names
  77. 20 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/front_vision.yaml
  78. 5 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/lane_detection.yaml
  79. 29 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/left.yaml
  80. 11 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/obstacle_detection_trt7.yaml
  81. 30 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/right.yaml
  82. 19 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/stereo.yaml
  83. 1158 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/yolov4.cfg
  84. 28 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/center_net.proto
  85. 33 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/lanearray.proto
  86. 87 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/obstacles.proto
  87. 48 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/tensor_rt.proto
  88. 28 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/CMakeLists.txt
  89. 181 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/detection_trt7_app.cpp
  90. 114 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/lane_detection_app.cpp
  91. 301 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/sgm_app.cpp
  92. 78 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/CMakeLists.txt
  93. 19 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/cuda/CMakeLists.txt
  94. 148 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/cuda/utils.cu
  95. 71 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/project_root.cpp
  96. 37 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/time_adcsoft.cpp
  97. 533 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/utils.cpp
  98. 65 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/work.cpp
  99. 36 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/detection_trt7/CMakeLists.txt
  100. 398 0
      src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/detection_trt7/Hungarian.cpp

+ 1 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/.catkin_workspace

@@ -0,0 +1 @@
+# This file currently only serves to mark the location of a catkin workspace for tool integration

+ 73 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/README.MD

@@ -0,0 +1,73 @@
+![实例](image/1.gif)
+# 概述
+本工程致力于将前方图像视觉落地化,总体思路为通过双目立体视觉获取三维数据,图像获得语义信息,将图像的语义信息和三维数据融合后获得相机坐标系下障碍物,可行驶区域以及车道线等。
+# 目标
+本工程目标希望完成一个可以输出在相机坐标系下的障碍物,可行驶区域,车道线以及双目视觉slam的完整功能。可作为一个智能相机部署到任意车端。
+# 依赖库
+- pcl  
+- ros  
+- opencv  
+- cuda  
+- yaml-cpp  
+  
+# 运行平台
+Nvidia AGX xavier
+# 硬件
+1.[LENACV/HNY-CV-003C](https://item.taobao.com/item.htm?id=577872947701&price=1580&sourceType=item&sourceType=item&suid=ff281182-d15d-4671-8d0c-d9f0c91f8002&shareUniqueId=2874781904&ut_sk=1.XDA55m0p9Z8DAJ9qj%2FX9M2Gi_21646297_1598426555405.Copy.1&un=e7683ce7f820d4c1132b0f6991806569&share_crt_v=1&spm=a2159r.13376460.0.0&sp_tk=T0UyT2MycXV3RWo=&cpp=1&shareurl=true&short_name=h.VEaqibP&bxsign=scd159842664470948493aa7e697335b263e43a8be0771c9&sm=db67cc&app=chrome)  
+
+![binocular](image/binocular.jpg)
+
+# 编译
+1. git clone http://192.168.14.18:3000/adc_pilot/front_vison_ros.git
+2. cd front_vison_ros
+3. catkin_make
+   
+# 运行
+1.打开roscore  
+> roscore  
+
+2.把双目摄像头插入到xavier,启动摄像头  
+>  cd front_vision_ros  
+>  source devel/setup.bash  
+>  roslaunch front_vision binocular.launch  
+
+3.开启双目立体匹配节点  
+> cd front_vision_ros  
+> source devel/setup.bash  
+> roslaunch front_vision stereo_match.launch  
+
+4.开启目标检测节点
+> cd front_vision_ros  
+> source devel/setup.bash  
+> roslaunch front_vision detection.launch  
+
+5.开启深度与检测融合节点
+> cd front_vision_ros  
+> source devel/setup.bash  
+> roslaunch front_vision detection_depth_fusion.launch  
+
+
+# 可视化
+## 目标检测可视化
+修改`src/front_vision/launch/detection_depth_fusion.launch`文件,将参数vis_detection设置成true  
+
+## 深度点云可视化
+1.开启坐标变换节点  
+> rosrun tf static_transform_publisher 0.0 0.0 0.0 0.0 0.0 0.0 map stereo_camera
+
+2.启动rviz
+> rviz
+
+3.通过topic添加点云,可视化点云
+
+## 深度与检测融合可视化
+1.开启坐标变换节点
+> rosrun tf static_transform_publisher 0.0 0.0 0.0 0.0 0.0 0.0 map stereo_camera  
+
+2.启动rviz
+> rviz
+
+# 目标检测与深度融合
+![融合1](image/2.gif)
+![融合2](image/3.gif)
+

+ 69 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/CMakeLists.txt

@@ -0,0 +1,69 @@
+# toplevel CMakeLists.txt for a catkin workspace
+# catkin/cmake/toplevel.cmake
+
+cmake_minimum_required(VERSION 3.0.2)
+
+project(Project)
+
+set(CATKIN_TOPLEVEL TRUE)
+
+# search for catkin within the workspace
+set(_cmd "catkin_find_pkg" "catkin" "${CMAKE_SOURCE_DIR}")
+execute_process(COMMAND ${_cmd}
+  RESULT_VARIABLE _res
+  OUTPUT_VARIABLE _out
+  ERROR_VARIABLE _err
+  OUTPUT_STRIP_TRAILING_WHITESPACE
+  ERROR_STRIP_TRAILING_WHITESPACE
+)
+if(NOT _res EQUAL 0 AND NOT _res EQUAL 2)
+  # searching for catkin resulted in an error
+  string(REPLACE ";" " " _cmd_str "${_cmd}")
+  message(FATAL_ERROR "Search for 'catkin' in workspace failed (${_cmd_str}): ${_err}")
+endif()
+
+# include catkin from workspace or via find_package()
+if(_res EQUAL 0)
+  set(catkin_EXTRAS_DIR "${CMAKE_SOURCE_DIR}/${_out}/cmake")
+  # include all.cmake without add_subdirectory to let it operate in same scope
+  include(${catkin_EXTRAS_DIR}/all.cmake NO_POLICY_SCOPE)
+  add_subdirectory("${_out}")
+
+else()
+  # use either CMAKE_PREFIX_PATH explicitly passed to CMake as a command line argument
+  # or CMAKE_PREFIX_PATH from the environment
+  if(NOT DEFINED CMAKE_PREFIX_PATH)
+    if(NOT "$ENV{CMAKE_PREFIX_PATH}" STREQUAL "")
+      if(NOT WIN32)
+        string(REPLACE ":" ";" CMAKE_PREFIX_PATH $ENV{CMAKE_PREFIX_PATH})
+      else()
+        set(CMAKE_PREFIX_PATH $ENV{CMAKE_PREFIX_PATH})
+      endif()
+    endif()
+  endif()
+
+  # list of catkin workspaces
+  set(catkin_search_path "")
+  foreach(path ${CMAKE_PREFIX_PATH})
+    if(EXISTS "${path}/.catkin")
+      list(FIND catkin_search_path ${path} _index)
+      if(_index EQUAL -1)
+        list(APPEND catkin_search_path ${path})
+      endif()
+    endif()
+  endforeach()
+
+  # search for catkin in all workspaces
+  set(CATKIN_TOPLEVEL_FIND_PACKAGE TRUE)
+  find_package(catkin QUIET
+    NO_POLICY_SCOPE
+    PATHS ${catkin_search_path}
+    NO_DEFAULT_PATH NO_CMAKE_FIND_ROOT_PATH)
+  unset(CATKIN_TOPLEVEL_FIND_PACKAGE)
+
+  if(NOT catkin_FOUND)
+    message(FATAL_ERROR "find_package(catkin) failed. catkin was neither found in the workspace nor in the CMAKE_PREFIX_PATH. One reason may be that no ROS setup.sh was sourced before.")
+  endif()
+endif()
+
+catkin_workspace()

+ 215 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/CMakeLists.txt

@@ -0,0 +1,215 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(adc_msg)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+  cv_bridge
+  image_transport
+  roscpp
+  std_msgs
+  sensor_msgs
+  message_generation
+  message_runtime
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+add_message_files(
+  FILES
+  adc_depth.msg
+  bbox2d.msg
+  bboxes2d.msg
+  lane_info.msg
+  lane_info_array.msg
+)
+
+## Generate services in the 'srv' folder
+# add_service_files(
+#   FILES
+#   Service1.srv
+#   Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+#   FILES
+#   Action1.action
+#   Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+generate_messages(
+  DEPENDENCIES
+  std_msgs
+  sensor_msgs
+)
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+##   * add "dynamic_reconfigure" to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * uncomment the "generate_dynamic_reconfigure_options" section below
+##     and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+#   cfg/DynReconf1.cfg
+#   cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+  # message_runtime
+#  INCLUDE_DIRS include
+#  LIBRARIES adc_msg
+#  CATKIN_DEPENDS cv_bridge image_transport roscpp roscpp roscpp std_msgs
+#  DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+  ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+#   src/${PROJECT_NAME}/adc_msg.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/adc_msg_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+#   ${catkin_LIBRARIES}
+# )
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+#   scripts/my_python_script
+#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+#   FILES_MATCHING PATTERN "*.h"
+#   PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+#   # myfile1
+#   # myfile2
+#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_adc_msg.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)

+ 8 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/adc_depth.msg

@@ -0,0 +1,8 @@
+std_msgs/Header header
+
+sensor_msgs/Image depth
+
+float32 fu
+float32 fv
+float32 u0
+float32 v0

+ 7 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/bbox2d.msg

@@ -0,0 +1,7 @@
+int32 x1
+int32 y1
+int32 x2
+int32 y2
+
+string category
+float32 score

+ 3 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/bboxes2d.msg

@@ -0,0 +1,3 @@
+std_msgs/Header header
+
+bbox2d[] bboxes

+ 10 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/lane_info.msg

@@ -0,0 +1,10 @@
+int32 index
+int32 type
+int32 color
+float32 a
+float32 b
+float32 c
+float32 curvature
+float32 min_y
+float32 max_y
+

+ 3 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/msg/lane_info_array.msg

@@ -0,0 +1,3 @@
+std_msgs/Header header
+
+lane_info[] lane_infos

+ 73 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/adc_msg/package.xml

@@ -0,0 +1,73 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>adc_msg</name>
+  <version>0.0.0</version>
+  <description>The adc_msg package</description>
+
+  <!-- One maintainer tag required, multiple allowed, one person per tag -->
+  <!-- Example:  -->
+  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
+  <maintainer email="nvidia@todo.todo">nvidia</maintainer>
+
+
+  <!-- One license tag required, multiple allowed, one license per tag -->
+  <!-- Commonly used license strings: -->
+  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
+  <license>TODO</license>
+
+
+  <!-- Url tags are optional, but multiple are allowed, one per tag -->
+  <!-- Optional attribute type can be: website, bugtracker, or repository -->
+  <!-- Example: -->
+  <!-- <url type="website">http://wiki.ros.org/adc_msg</url> -->
+
+
+  <!-- Author tags are optional, multiple are allowed, one per tag -->
+  <!-- Authors do not have to be maintainers, but could be -->
+  <!-- Example: -->
+  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
+
+
+  <!-- The *depend tags are used to specify dependencies -->
+  <!-- Dependencies can be catkin packages or system dependencies -->
+  <!-- Examples: -->
+  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
+  <!--   <depend>roscpp</depend> -->
+  <!--   Note that this is equivalent to the following: -->
+  <!--   <build_depend>roscpp</build_depend> -->
+  <!--   <exec_depend>roscpp</exec_depend> -->
+  <!-- Use build_depend for packages you need at compile time: -->
+  <!--   <build_depend>message_generation</build_depend> -->
+  <!-- Use build_export_depend for packages you need in order to build against this package: -->
+  <!--   <build_export_depend>message_generation</build_export_depend> -->
+  <!-- Use buildtool_depend for build tool packages: -->
+  <!--   <buildtool_depend>catkin</buildtool_depend> -->
+  <!-- Use exec_depend for packages you need at runtime: -->
+  <!--   <exec_depend>message_runtime</exec_depend> -->
+  <!-- Use test_depend for packages you need only for testing: -->
+  <!--   <test_depend>gtest</test_depend> -->
+  <!-- Use doc_depend for packages you need only for building documentation: -->
+  <!--   <doc_depend>doxygen</doc_depend> -->
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>cv_bridge</build_depend>
+  <build_depend>image_transport</build_depend>
+  <build_depend>roscpp</build_depend>
+  <build_depend>std_msgs</build_depend>
+  <build_export_depend>cv_bridge</build_export_depend>
+  <build_export_depend>image_transport</build_export_depend>
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>std_msgs</build_export_depend>
+  <build_export_depend>message_generation</build_export_depend>
+  <exec_depend>cv_bridge</exec_depend>
+  <exec_depend>image_transport</exec_depend>
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+  <exec_depend>message_runtime</exec_depend>
+
+
+  <!-- The export tag contains other, unspecified, tags -->
+  <export>
+    <!-- Other tools can request additional information be placed here -->
+
+  </export>
+</package>

+ 217 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/CMakeLists.txt

@@ -0,0 +1,217 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(binocular_camera)
+
+set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD_REQUIRED TRUE)
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(PCL REQUIRED) 
+link_directories(${PCL_LIBRARY_DIRS}) 
+
+find_package(catkin REQUIRED COMPONENTS
+  cv_bridge
+  image_transport
+  roscpp
+  sensor_msgs
+  std_msgs
+)
+find_package(OpenCV REQUIRED)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+# add_message_files(
+#   FILES
+#   Message1.msg
+#   Message2.msg
+# )
+
+## Generate services in the 'srv' folder
+# add_service_files(
+#   FILES
+#   Service1.srv
+#   Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+#   FILES
+#   Action1.action
+#   Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+#   DEPENDENCIES
+#   sensor_msgs#   std_msgs
+# )
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+##   * add "dynamic_reconfigure" to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * uncomment the "generate_dynamic_reconfigure_options" section below
+##     and list every .cfg file to be processed
+
+## Generate dynamic reconfigure parameters in the 'cfg' folder
+# generate_dynamic_reconfigure_options(
+#   cfg/DynReconf1.cfg
+#   cfg/DynReconf2.cfg
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+#  INCLUDE_DIRS include
+#  LIBRARIES binocular_camera
+#  CATKIN_DEPENDS cv_bridge image_transport roscpp sensor_msgs std_msgs
+#  DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+  ${catkin_INCLUDE_DIRS}
+  ${OpenCV_INCLUDE_DIRS}
+  ${PCL_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+#   src/${PROJECT_NAME}/binocular_camera.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+add_executable(${PROJECT_NAME}_node src/binocular_camera_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+target_link_libraries(${PROJECT_NAME}_node
+  ${catkin_LIBRARIES}
+  ${OpenCV_LIBS}
+)
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+#   scripts/my_python_script
+#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+#   FILES_MATCHING PATTERN "*.h"
+#   PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+#   # myfile1
+#   # myfile2
+#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_binocular_camera.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)

+ 9 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/launch/binocular.launch

@@ -0,0 +1,9 @@
+<launch>
+    <node name="binocular_node" pkg="binocular_camera" type="binocular_camera_node" output="screen">
+        <param name="left_camera_topic" value="/camera/left" />
+        <param name="right_camera_topic" value="/camera/right" />
+        <param name="pub_rate" value="60.0" />
+        <param name="vis" value="false" />
+        <param name="split_publish" value="false" />
+     </node>
+</launch>

+ 11 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/launch/binocular_calibration.launch

@@ -0,0 +1,11 @@
+<launch>
+    <node name="binocular_calibration" pkg="binocular_camera" type="binocular_camera_node" output="screen">
+        <param name="left_camera_topic" value="/camera/left" />
+        <param name="right_camera_topic" value="/camera/right" />
+        <param name="pub_rate" value="60.0" />
+        <param name="stereo_config" value="stereo.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="vis" value="false" />
+        <param name="split_publish" value="true" />
+     </node>
+</launch>

+ 74 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/package.xml

@@ -0,0 +1,74 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>binocular_camera</name>
+  <version>0.0.0</version>
+  <description>The binocular_camera package</description>
+
+  <!-- One maintainer tag required, multiple allowed, one person per tag -->
+  <!-- Example:  -->
+  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
+  <maintainer email="nvidia@todo.todo">nvidia</maintainer>
+
+
+  <!-- One license tag required, multiple allowed, one license per tag -->
+  <!-- Commonly used license strings: -->
+  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
+  <license>TODO</license>
+
+
+  <!-- Url tags are optional, but multiple are allowed, one per tag -->
+  <!-- Optional attribute type can be: website, bugtracker, or repository -->
+  <!-- Example: -->
+  <!-- <url type="website">http://wiki.ros.org/binocular_camera</url> -->
+
+
+  <!-- Author tags are optional, multiple are allowed, one per tag -->
+  <!-- Authors do not have to be maintainers, but could be -->
+  <!-- Example: -->
+  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
+
+
+  <!-- The *depend tags are used to specify dependencies -->
+  <!-- Dependencies can be catkin packages or system dependencies -->
+  <!-- Examples: -->
+  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
+  <!--   <depend>roscpp</depend> -->
+  <!--   Note that this is equivalent to the following: -->
+  <!--   <build_depend>roscpp</build_depend> -->
+  <!--   <exec_depend>roscpp</exec_depend> -->
+  <!-- Use build_depend for packages you need at compile time: -->
+  <!--   <build_depend>message_generation</build_depend> -->
+  <!-- Use build_export_depend for packages you need in order to build against this package: -->
+  <!--   <build_export_depend>message_generation</build_export_depend> -->
+  <!-- Use buildtool_depend for build tool packages: -->
+  <!--   <buildtool_depend>catkin</buildtool_depend> -->
+  <!-- Use exec_depend for packages you need at runtime: -->
+  <!--   <exec_depend>message_runtime</exec_depend> -->
+  <!-- Use test_depend for packages you need only for testing: -->
+  <!--   <test_depend>gtest</test_depend> -->
+  <!-- Use doc_depend for packages you need only for building documentation: -->
+  <!--   <doc_depend>doxygen</doc_depend> -->
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>cv_bridge</build_depend>
+  <build_depend>image_transport</build_depend>
+  <build_depend>roscpp</build_depend>
+  <build_depend>sensor_msgs</build_depend>
+  <build_depend>std_msgs</build_depend>
+  <build_export_depend>cv_bridge</build_export_depend>
+  <build_export_depend>image_transport</build_export_depend>
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>sensor_msgs</build_export_depend>
+  <build_export_depend>std_msgs</build_export_depend>
+  <exec_depend>cv_bridge</exec_depend>
+  <exec_depend>image_transport</exec_depend>
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+
+
+  <!-- The export tag contains other, unspecified, tags -->
+  <export>
+    <!-- Other tools can request additional information be placed here -->
+
+  </export>
+</package>

+ 135 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/binocular_camera/src/binocular_camera_node.cpp

@@ -0,0 +1,135 @@
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/videoio.hpp>
+#include <iostream>
+
+#include <ros/ros.h>
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+
+
+#define IMAGE_WIDTH 640
+#define IMAGE_HEIGHT 480
+
+// Binocular (side-by-side stereo) camera publisher node.
+// Reads a combined left|right frame stream and either:
+//   * split_publish == true : splits each frame into left/right halves and
+//     publishes them on separate image topics, or
+//   * split_publish == false: publishes the raw combined frame on a single
+//     binocular topic (optionally overlaying calibration guide lines).
+int main(int argc, char** argv)
+{
+
+    // NOTE(review): "binocular_canera" is a typo for "binocular_camera".
+    // Launch files override the node name via the name attribute, so this
+    // is cosmetic, but worth fixing in a follow-up.
+    ros::init(argc, argv, "binocular_canera");
+    ros::NodeHandle nh("~");
+    std::string left_camera_topic;
+    std::string right_camera_topic;
+    std::string binocular_camera_topic;
+    int camera_device;
+    double pub_rate;
+    bool vis;
+    bool split_publish;
+
+    // Topic names, publish rate and mode flags, all overridable from launch.
+    nh.param("left_camera_topic", left_camera_topic, std::string("/camera/left"));
+    nh.param("right_camera_topic", right_camera_topic, std::string("/camera/right"));
+    nh.param("binocular_camera_topic", binocular_camera_topic, std::string("/camera/binocular"));
+    nh.param("pub_rate", pub_rate, 60.0);
+    nh.param("camera_device", camera_device, 0);
+    nh.param("vis", vis, false);
+    nh.param("split_publish", split_publish, false);
+
+
+
+    cv::Mat image;
+    cv::VideoCapture capture;
+    // TODO(review): the camera_device parameter is read above but never
+    // used -- the capture source is this hard-coded video file path.
+    capture.open("/media/7CB400FAB400B8A2/savevideo_5.28/20210528163022.avi");
+    
+    if(capture.isOpened())
+    {
+        
+        ROS_INFO("Capture is opened!");
+        // NOTE(review): CAP_PROP_FRAME_WIDTH/HEIGHT are typically ignored
+        // for file-backed captures; the code below assumes frames are
+        // exactly 2*IMAGE_WIDTH x IMAGE_HEIGHT -- confirm the recording.
+        capture.set(cv::CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH*2);
+        capture.set(cv::CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT);
+        
+        image_transport::ImageTransport transport_L(nh);
+        image_transport::ImageTransport transport_R(nh);
+
+        // Per-eye publishers used in split_publish mode.
+        image_transport::Publisher image_pub_L = transport_L.advertise(left_camera_topic, 1);
+        image_transport::Publisher image_pub_R = transport_R.advertise(right_camera_topic, 1);
+
+        cv_bridge::CvImage cv_img_L;
+        cv_bridge::CvImage cv_img_R;
+        sensor_msgs::Image img_msg_L;
+        sensor_msgs::Image img_msg_R;
+    
+        // Combined-frame publisher used when split_publish is false.
+        image_transport::ImageTransport transport_binocular(nh);
+        image_transport::Publisher image_pub_binocular = transport_binocular.advertise(binocular_camera_topic, 1);
+        cv_bridge::CvImage cv_img_binocular;
+        sensor_msgs::Image img_msg_binocular;            
+        
+        ros::Rate rate(pub_rate);
+
+        cv::Mat frame_L, frame_R;
+	//cv::nameWindow("binocular", cv::WINDOW_NORMAL);  // NOTE(review): would be cv::namedWindow if re-enabled
+        while(ros::ok())
+        {
+            // One timestamp per captured frame, shared by both halves so
+            // downstream time-synchronized subscribers can pair them.
+            ros::Time time = ros::Time::now();
+            capture>>image;
+            if(image.empty())
+            {
+                break;  // end of video file
+            }
+            if(split_publish)
+            {
+                // Left half is [0, IMAGE_WIDTH), right half follows it.
+                frame_L = image(cv::Rect(0,0, IMAGE_WIDTH, IMAGE_HEIGHT));
+                
+                frame_R = image(cv::Rect(IMAGE_WIDTH, 0, IMAGE_WIDTH, IMAGE_HEIGHT));
+                if(vis)
+                {
+                    cv::imshow("L", frame_L);
+                    cv::imshow("R", frame_R);
+                    cv::waitKey(10);
+                }
+                
+                cv_img_L.header.stamp = time;
+                cv_img_L.header.frame_id = "image_left";
+                cv_img_L.encoding = "bgr8";
+                cv_img_L.image = frame_L;
+                cv_img_L.toImageMsg(img_msg_L);
+
+                cv_img_R.header.stamp = time;
+                cv_img_R.header.frame_id = "image_right";
+                cv_img_R.encoding = "bgr8";
+                cv_img_R.image = frame_R;
+                cv_img_R.toImageMsg(img_msg_R);
+
+
+                image_pub_L.publish(img_msg_L);
+                image_pub_R.publish(img_msg_R);
+            }
+            else
+            {
+                if(vis)
+                {
+                    // Draw a vertical center line plus horizontal rulers on
+                    // a COPY of the left half -- visual aid for aligning the
+                    // stereo rig; the published image stays unmodified.
+		            frame_L = image(cv::Rect(0,0, IMAGE_WIDTH, IMAGE_HEIGHT)).clone();
+                    int half_width = int(0.5*IMAGE_WIDTH);
+                    int half_height = int(0.5*IMAGE_HEIGHT);
+                    cv::line(frame_L,cv::Point(half_width,0),cv::Point(half_width,IMAGE_HEIGHT),cv::Scalar(0,0,255),5,8);
+                    for(int i=half_height;i<IMAGE_HEIGHT;i += 20)
+                        cv::line(frame_L,cv::Point(0,i),cv::Point(IMAGE_WIDTH,i),cv::Scalar(0,0,255),3,8);                
+                    cv::imshow("binocular", frame_L);
+                    cv::waitKey(10);
+                }
+                cv_img_binocular.header.stamp = time;
+                cv_img_binocular.header.frame_id = "binocular";
+                cv_img_binocular.encoding = "bgr8";
+                cv_img_binocular.image = image;
+                cv_img_binocular.toImageMsg(img_msg_binocular);
+                image_pub_binocular.publish(img_msg_binocular);
+            }
+            rate.sleep();
+        }
+        capture.release();
+    }
+    else
+    {
+        ROS_ERROR("No Capture");
+    }
+    return 0;
+}
+
+

+ 162 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/CMakeLists.txt

@@ -0,0 +1,162 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(front_vision)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+option(BUILD_ROS "" ON)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -lglog -std=c++11 -Wall -Wextra -Ofast -pthread -lpthread  -Wno-deprecated-declarations ")
+set(CMAKE_C_FLAGS " -std=c11 -fPIE -fPIC -Wall -Wextra ")
+if (CMAKE_COMPILER_IS_GNUCXX)
+	set(CMAKE_CXX_FLAGS "-O3 -Wall")
+	set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11")
+endif()
+set(CMAKE_BUILD_TYPE Release)
+set(GPU_ARCHS 61)  ## config your GPU_ARCHS,See [here](https://developer.nvidia.com/cuda-gpus) for finding what maximum compute capability your specific GPU supports.
+
+find_package(OpenCV REQUIRED)
+find_package(CUDA REQUIRED)
+find_package(Qt5 COMPONENTS Core REQUIRED)
+find_library(YAML_CPP_LIBRARIES yaml-cpp)
+if(NOT YAML_CPP_LIBRARIES)
+       find_package(yaml-cpp REQUIRED)
+include_directories(${YAML_CPP_INCLUDE_DIRS})
+endif(NOT YAML_CPP_LIBRARIES)
+
+include_directories(
+    ${PROJECT_SOURCE_DIR}/include
+    ${PROJECT_SOURCE_DIR}/app
+    ${OpenCV_INCLUDE_DIRS}
+    ${CUDA_INCLUDE_DIRS}
+    ${Qt5Core_INCLUDE_DIRS}
+    ${Qt5Core_INCLUDE_DIRS}
+    ${CMAKE_CURRENT_SOURCE_DIR}/../../../include
+    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/src/common>
+    $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/src/common>
+    $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}>
+)
+
+add_subdirectory(src/common)
+add_subdirectory(src/common/cuda)
+add_subdirectory(src/stereo_vision)
+add_subdirectory(src/cmw_app)
+add_subdirectory(src/detection_trt7)
+add_subdirectory(src/lane_detection)
+
+if (BUILD_ROS)
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -lglog  -std=c++11 -Wall -Wextra -Ofast -pthread -lpthread  -Wno-deprecated-declarations -Wno-attributes")
+set(CMAKE_C_FLAGS " -std=c11 -fPIE -fPIC -Wall -Wextra -Wno-attributes ")
+  find_package(catkin REQUIRED COMPONENTS
+                cv_bridge
+                image_transport
+                roscpp
+                sensor_msgs
+                std_msgs
+                pcl_ros
+                adc_msg
+                message_filters
+                )
+    include_directories(
+                ${catkin_INCLUDE_DIRS}
+    )
+  
+
+catkin_package(
+  #  INCLUDE_DIRS include
+  #  LIBRARIES front_vision
+  #  CATKIN_DEPENDS cv_bridge image_transport roscpp sensor_msgs std_msgs
+  #  DEPENDS system_lib
+)
+
+
+add_executable(stereo_node app/sgm_app_node.cpp)
+add_executable(detection_depth_fusion_node app/detection_depth_fusion_app_node.cpp)
+add_executable(detection_trt7_node app/detection_trt7_app_node.cpp)
+add_executable(lane_detection_node app/lane_detection_app_node.cpp)
+
+
+target_link_libraries(stereo_node
+  ${catkin_LIBRARIES}
+  sgm
+  cmw_app
+  common
+)
+target_link_libraries(detection_trt7_node
+  ${catkin_LIBRARIES}
+  cmw_app
+)
+
+
+target_link_libraries(detection_depth_fusion_node
+  ${PCL_LIBRARIES}
+  ${catkin_LIBRARIES}
+  cmw_app
+  ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/libmodulecomm.so
+  Qt5::Core
+)
+
+target_link_libraries(lane_detection_node
+  ${catkin_LIBRARIES}
+  cmw_app
+  ${CMAKE_CURRENT_SOURCE_DIR}/../../lib/libmodulecomm.so
+  Qt5::Core
+)
+
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+#   scripts/my_python_script
+#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+#   FILES_MATCHING PATTERN "*.h"
+#   PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+#   # myfile1
+#   # myfile2
+#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_front_vision.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
+
+endif(BUILD_ROS)

+ 211 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_app_node.cpp

@@ -0,0 +1,211 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-08-21
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include <ros/ros.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/time_synchronizer.h>
+#include <message_filters/sync_policies/approximate_time.h> 
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+#include <memory>
+#include <stdlib.h>
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/videoio.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include "cmw_app/detection_app.h"
+#include <yaml-cpp/yaml.h>
+#include <vector>
+
+#include "common/project_root.h"
+#include "adc_msg/bboxes2d.h"
+#include "adc_msg/bbox2d.h"
+
+
+#define IMAGE_WIDTH 640
+#define IMAGE_HEIGHT 480
+
+namespace adc
+{
+namespace ros_cmw
+{
+
+/**
+ * \brief ROS front-end for adc::DetectionApp.
+ *
+ * Subscribes to a side-by-side binocular image topic, crops and rectifies
+ * the left half, and feeds it to the detector.  Results come back through
+ * CallbackDetection and are published as adc_msg::bboxes2d (optionally
+ * together with the rectified input image).
+ */
+class DetectionCMW
+{
+public:
+    // typedef message_filters::sync_policies::ApproximateTime<adc_msg::adc_depth, sensor_msgs::Image> SyncPolicy;
+    typedef std::chrono::system_clock::time_point  TimeType;
+    typedef  std::shared_ptr<std::list<vector<Tn::Bbox>>> Bboxes2DType;
+    typedef  cv::Mat ImageType;
+    typedef std::vector<std::string> ClassNameType;
+
+
+public:
+    /// Initializes ROS plus all publishers/subscribers and the detector;
+    /// blocks inside InitConfig() (which ends in ros::spin()).
+    DetectionCMW(int argc, char** argv)
+    {
+        ros::init(argc, argv, "detection");
+        nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
+        InitConfig();
+    }
+private:
+    /// Detector result callback: converts the first frame's Tn::Bbox list
+    /// to adc_msg::bbox2d messages, stamps them with the capture time and
+    /// publishes; optionally republishes the rectified input image.
+    void CallbackDetection(Bboxes2DType& bboxes, TimeType& tm, ImageType& img, ClassNameType& class_name)
+    {
+        std::vector<adc_msg::bbox2d> detections;
+        auto detection_res_ptr = (*bboxes).begin();
+        for(std::vector<Tn::Bbox>::iterator bbox = detection_res_ptr->begin(); bbox != detection_res_ptr->end(); ++bbox)
+        {
+            adc_msg::bbox2d bbox2d_msg;
+            bbox2d_msg.x1 = bbox->left;
+            bbox2d_msg.y1 = bbox->top;
+            bbox2d_msg.x2 = bbox->right;
+            bbox2d_msg.y2 = bbox->bot;
+            bbox2d_msg.score = bbox->score;
+            bbox2d_msg.category = class_name[bbox->classId];
+            detections.push_back(bbox2d_msg);
+        }
+        adc_msg::bboxes2d bboxes2s_msg;
+        // Split the chrono time point back into ROS sec/nsec components.
+        int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tm.time_since_epoch()).count();
+        int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tm.time_since_epoch()).count() % 1000000000UL;
+        ros::Time stamp(sec, nsec);
+        std_msgs::Header header;
+        header.stamp = stamp;
+        bboxes2s_msg.header = header;
+        bboxes2s_msg.bboxes = detections;
+        bboxes_pushisher_.publish(bboxes2s_msg);
+        if(publis_origin_img_)
+        {
+            cv_bridge::CvImage cv_img_l;
+            sensor_msgs::Image img_msg_l;
+            cv_img_l.header.stamp = stamp;
+            cv_img_l.encoding = "bgr8";
+            cv_img_l.image = img;
+            cv_img_l.toImageMsg(img_msg_l);
+            left_image_publisher_.publish(img_msg_l);
+        }
+    }
+
+    /// Binocular image callback: crops the left half, undistorts/rectifies
+    /// it with the precomputed maps, and hands it to the detector cache.
+    void CallbackBinocular(const sensor_msgs::ImageConstPtr& binocular_image_msg)
+    {
+
+        ros::Time image_time = binocular_image_msg->header.stamp;
+        // ros::Time image_time = ros::Time::now();
+        cv::Mat binocular = cv_bridge::toCvCopy(binocular_image_msg, binocular_image_msg->encoding)->image;
+        cv::Mat left_image = binocular(left_rect_).clone();
+        uint64_t nsecs = image_time.toNSec();
+        std::chrono::nanoseconds ns(nsecs);
+        TimeType time_stamp(ns);
+        cv::remap(left_image, left_image, map_x_, map_y_, cv::INTER_LINEAR);
+        detection_ptr_->CacheImages(left_image, time_stamp);
+    }
+    /// Loads intrinsics from a ROS camera-calibration YAML file and
+    /// precomputes the undistortion/rectification maps map_x_/map_y_.
+    /// Exits the process if the file cannot be read or parsed.
+    void ReadCameraParams(const std::string camera_yaml)
+    {
+        try
+        {
+            YAML::Node node = YAML::LoadFile(camera_yaml);
+            int height = node["image_height"].as<int>();
+            int width = node["image_width"].as<int>();
+            cv::Size size(width, height);
+            std::vector<double> K_v= node["camera_matrix"]["data"].as<std::vector<double>>();
+            std::vector<double> P_v = node["projection_matrix"]["data"].as<std::vector<double>>();
+            std::vector<double> D_v = node["distortion_coefficients"]["data"].as<std::vector<double>>();
+            std::vector<double> R_v = node["rectification_matrix"]["data"].as<std::vector<double>>();
+
+            cv::Mat _K(K_v);
+            cv::Mat _P(P_v);
+            cv::Mat _D(D_v);
+            cv::Mat _R(R_v);
+
+            // cv::Mat::reshape(cn, rows) takes the CHANNEL count first.
+            // The YAML data is loaded as a flat Nx1 single-channel column,
+            // so keep cn == 1 and only change the row count.  The previous
+            // reshape(3,3)/reshape(4,3)/reshape(5,1) calls produced
+            // multi-channel matrices that cv::initUndistortRectifyMap
+            // rejects.
+            _K = _K.reshape(1, 3);   // 3x3 camera matrix
+            _P = _P.reshape(1, 3);   // 3x4 projection matrix
+            _D = _D.reshape(1, 1);   // 1x5 distortion coefficients
+            _R = _R.reshape(1, 3);   // 3x3 rectification matrix
+            cv::initUndistortRectifyMap(_K, _D, _R, _P, size, CV_32FC1, map_x_, map_y_);
+            
+        }
+        catch (const std::exception& e)
+        {
+            std::cerr << e.what() << '\n';
+            std::cerr << " detection Ros app read camera params file failed!" << std::endl;
+            exit(1);
+
+        }
+    }
+    /// Reads all node parameters, wires the detector callback and topic
+    /// publishers/subscribers, starts the detector and spins forever.
+    void InitConfig()
+    {
+
+        std::string detection_config;
+        std::string root_path;
+        std::string left_camera_params_filename;
+        std::string detection_bbox2d_topic;
+        std::string origin_imgage_topic;
+        float hz;
+        // Parameter keys (including the misspelled "publis_origin_img" /
+        // "origin_imgage_topic") are kept as-is: launch files depend on them.
+        nh_ptr_->param("binocular_topic", binocular_topic_, std::string("/camera/binocular"));
+        nh_ptr_->param("hz", hz, 30.0f);
+        nh_ptr_->param("detection_config", detection_config, std::string("detection.yaml"));
+        nh_ptr_->param("root_path", root_path, std::string("/opt/adc"));
+        nh_ptr_->param("left_camera_params_filename", left_camera_params_filename, std::string("left.yaml"));
+        nh_ptr_->param("detection_bbox2d_topic", detection_bbox2d_topic, std::string("/detection/bbox2d"));
+        nh_ptr_->param("publis_origin_img", publis_origin_img_, true);
+        nh_ptr_->param("origin_imgage_topic", origin_imgage_topic, std::string("/detection/image"));
+        // setenv copies its argument; the previous putenv() stored a pointer
+        // into a local std::string, leaving a dangling environ entry once
+        // that string was destroyed.
+        setenv("ADC_ROOT", root_path.c_str(), 1);
+        std::string left_camera_params_file_path = adc::RootPath::GetAbsolutePath(left_camera_params_filename);
+        ReadCameraParams(left_camera_params_file_path);
+        detection_ptr_ = std::make_shared<adc::DetectionApp>(detection_config, hz);
+        detection_ptr_->SetDetectionCallback(
+            std::bind(
+                &DetectionCMW::CallbackDetection, this, 
+                std::placeholders::_1, std::placeholders::_2,
+                std::placeholders::_3, std::placeholders::_4)
+        );
+
+        // Left half of the side-by-side binocular frame.
+        left_rect_ = cv::Rect(0,0,IMAGE_WIDTH, IMAGE_HEIGHT);
+        binocular_subscriber_ = nh_ptr_->subscribe<sensor_msgs::Image>(
+            binocular_topic_, 1,
+            std::bind(&DetectionCMW::CallbackBinocular,
+            this, 
+            std::placeholders::_1)
+            );
+        bboxes_pushisher_ = nh_ptr_->advertise<adc_msg::bboxes2d>(detection_bbox2d_topic,1);
+        transport_img_ptr_ = std::make_shared<image_transport::ImageTransport>(*nh_ptr_);
+        left_image_publisher_ = transport_img_ptr_->advertise(origin_imgage_topic, 1);
+        detection_ptr_->Run();
+        ros::spin();    
+
+    }
+
+private:
+    std::shared_ptr<ros::NodeHandle> nh_ptr_;             // private ("~") node handle
+    std::string binocular_topic_;                         // input image topic name
+    std::shared_ptr<adc::DetectionApp> detection_ptr_;    // detector backend
+    cv::Rect left_rect_;                                  // crop rect of the left image
+    ros::Subscriber binocular_subscriber_;
+    ros::Publisher bboxes_pushisher_;                     // NOTE(review): "pushisher" typo, private so left alone
+    std::shared_ptr<image_transport::ImageTransport> transport_img_ptr_;
+    image_transport::Publisher left_image_publisher_;
+
+    cv::Mat map_x_;                                       // rectify maps built by ReadCameraParams
+    cv::Mat map_y_;
+    bool publis_origin_img_;                              // republish rectified input image?
+
+};
+}
+}
+
+int main(int argc,  char** argv)
+{   
+    adc::ros_cmw::DetectionCMW app(argc, argv);
+    return 0;
+}

+ 832 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_depth_fusion_app_node.cpp

@@ -0,0 +1,832 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-08-25
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include <ros/ros.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/time_synchronizer.h>
+#include <message_filters/sync_policies/approximate_time.h> 
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+#include <algorithm>
+#include <cstdio>
+#include <memory>
+#include <queue>
+#include <mutex>
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/videoio.hpp>
+#include "adc_msg/adc_depth.h"
+#include "cmw_app/detection_trt7_app.h"
+#include <sensor_msgs/PointCloud2.h> 
+#include "adc_msg/bboxes2d.h"
+#include "adc_msg/bbox2d.h"
+#include <pcl/point_cloud.h>
+#include <pcl_conversions/pcl_conversions.h>
+#include "common/utils.h"
+#include <cuda_runtime_api.h>
+#include "cuda_runtime.h"
+#include <visualization_msgs/Marker.h>
+#include "modulecomm.h"
+#include "obstacles.pb.h"
+#include <time.h>
+
+#define IMAGE_WIDTH 640
+#define IMAGE_HEIGHT 480
+
+#define DEBUG
+
+namespace adc
+{
+namespace ros_cmw
+{
+
+class DetectionFusionCMW
+{
+public:
+    typedef message_filters::sync_policies::ApproximateTime<adc_msg::adc_depth, adc_msg::bboxes2d> SyncPolicy;
+    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, adc_msg::bboxes2d> SyncPolicyDetection;
+    typedef std::vector<std::vector<float32_t>> ObstaclesType;
+    typedef std::vector<float> PointType;
+    
+public:
+    // Initializes the ROS node, creates a private ("~") node handle and hands
+    // control to InitConfig(), which wires everything up and ends in
+    // ros::spin() -- i.e. this constructor blocks until node shutdown.
+    DetectionFusionCMW(int argc, char** argv)
+    {
+        ros::init(argc, argv, "detection");
+        nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
+        InitConfig();
+        
+    }
+
+    // Releases the device and pinned-host buffers (allocated lazily in
+    // DepthBboxesHandle on the first processed frame) and destroys the four
+    // CUDA streams created in InitConfig.  x_gpu_buf_ doubles as the
+    // "buffers were allocated" flag.
+    ~DetectionFusionCMW()
+    {
+        if(nullptr != x_gpu_buf_)
+        {
+            SafeCudaFree(depth_gpu_buf_);
+            SafeCudaFree(x_gpu_buf_);
+            SafeCudaFree(y_gpu_buf_);
+            SafeCudaFree(z_gpu_buf_);
+            // Pinned host memory: must be released via cudaFreeHost, so these
+            // pointers must only ever hold SafeHostMalloc allocations.
+            CUDA_CHECK(cudaFreeHost(depth_cpu_buf_));
+            CUDA_CHECK(cudaFreeHost(x_cpu_buf_));
+            CUDA_CHECK(cudaFreeHost(y_cpu_buf_));
+            CUDA_CHECK(cudaFreeHost(z_cpu_buf_));
+
+            depth_gpu_buf_ = nullptr;
+            x_gpu_buf_ = nullptr;
+            y_gpu_buf_ = nullptr;
+            z_gpu_buf_ = nullptr;
+            depth_cpu_buf_ = nullptr;
+            x_cpu_buf_ = nullptr;
+            y_cpu_buf_ = nullptr;
+            z_cpu_buf_ = nullptr;
+        }
+
+        // Streams are destroyed unconditionally: InitConfig (called from the
+        // constructor) always creates them before this object can die.
+        CUDA_CHECK(cudaStreamDestroy(stream_depth_));
+        CUDA_CHECK(cudaStreamDestroy(stream_x_));
+        CUDA_CHECK(cudaStreamDestroy(stream_y_));
+        CUDA_CHECK(cudaStreamDestroy(stream_z_));
+    }
+private:
+
+    // One synchronized frame: a depth message plus the 2-D detections that
+    // share (approximately) the same capture time.
+    struct depth_bbox
+    {
+        adc_msg::adc_depth depth;
+        adc_msg::bboxes2d boxes;
+        ros::Time stamp;   // depth message timestamp, reused downstream
+    };
+
+    // A single detected obstacle: the 8 corners of its oriented bounding box
+    // and the box center (filled by GenrateObstacleBox).
+    struct Obstacle
+    {
+        std::vector<pcl::PointXYZRGB> corners;
+        pcl::PointXYZRGB center;
+
+    };
+
+    // A timestamped obstacle set.  NOTE(review): appears unused in the code
+    // visible here -- presumably kept for a planned message layout; confirm.
+    struct Obstacles
+    {
+        double time;
+        std::vector<Obstacle> obstacles;
+    };
+
+    // Computes the oriented bounding box of `cloud` and fills `obtsacle` with
+    // the 8 OBB corners and the box center, taken as the midpoint of the
+    // diagonal corner pair (index 0 and 7).  The length/width/height values
+    // returned by GetOBB are discarded here.
+    void GenrateObstacleBox(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud, Obstacle& obtsacle)
+    {
+        float length, width, height;
+        std::vector<pcl::PointXYZRGB>& corners_ref = obtsacle.corners;
+        obb_extrator_.GetOBB(cloud, corners_ref, length, width, height);
+        (void) length;
+        (void) height;
+        (void) width;
+        pcl::PointXYZRGB& center_ref =  obtsacle.center;
+        center_ref.x = (corners_ref[0].x + corners_ref[7].x) / 2.0;
+        center_ref.y = (corners_ref[0].y + corners_ref[7].y) / 2.0;
+        center_ref.z = (corners_ref[0].z + corners_ref[7].z) / 2.0;
+    }
+
+    // Appends the 12 wireframe edges of the cloud's oriented bounding box to
+    // `bbox_lines` as LINE_LIST point pairs (also logs length/width/height).
+    // The `time` parameter is currently unused.
+    void GenerateBoxMarker(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud, ros::Time time, visualization_msgs::Marker& bbox_lines)
+    {
+        (void) time;
+        std::vector<pcl::PointXYZRGB> bbox;
+        float length, width, height;
+        obb_extrator_.GetOBB(cloud, bbox, length, width, height);
+
+        std::cout<<"l,w,h: "<<length<<","<<width<<","<<height<<std::endl;
+
+        // Corner-index pairs, listed in the exact order the marker lines were
+        // originally emitted: one face, the opposite face, then the verticals.
+        static const int kEdges[12][2] = {
+            {0,1},{2,1},{2,3},{0,3},
+            {4,5},{6,5},{6,7},{4,7},
+            {0,4},{1,5},{2,6},{3,7}};
+
+        for(const auto& edge : kEdges)
+        {
+            geometry_msgs::Point start, end;
+            start.x = bbox[edge[0]].x;
+            start.y = bbox[edge[0]].y;
+            start.z = bbox[edge[0]].z;
+            end.x = bbox[edge[1]].x;
+            end.y = bbox[edge[1]].y;
+            end.z = bbox[edge[1]].z;
+            bbox_lines.points.push_back(start);
+            bbox_lines.points.push_back(end);
+        }
+    }
+    
+
+    /**
+     * Serializes every obstacle (8 OBB corners + center, the originating 2-D
+     * detection box and its category) into an iv::vision::ObstacleInfo
+     * protobuf and pushes it over the shared-memory channel registered in
+     * InitConfig().
+     * \param obs_points per-obstacle 3-D point clusters
+     * \param time       frame timestamp stored in the message
+     * \param categorys  per-obstacle class labels (same length as obs_points)
+     * \param obs_bbox2d per-obstacle 2-D detection boxes (same length)
+     */
+     void PushsObstaclesBySharedMem(std::vector<pcl::PointCloud<pcl::PointXYZRGB>>& obs_points, 
+                                   ros::Time time, std::vector<std::string>& categorys,
+                                   std::vector<cv::Rect>& obs_bbox2d)
+    { 
+        iv::vision::ObstacleInfo obstacle_info;
+        obstacle_info.set_time(time.toSec());
+        assert(obs_points.size() == categorys.size());
+        assert(obs_points.size() == obs_bbox2d.size()); // was indexed unchecked
+
+        // Copies the x/y/z of a PCL point into a protobuf Point3D.
+        auto fill_point = [](iv::vision::Point3D* p, const pcl::PointXYZRGB& q)
+        {
+            p->set_x(q.x);
+            p->set_y(q.y);
+            p->set_z(q.z);
+        };
+
+        int count = 0;
+        // BUGFIX: iterate by const reference -- the original range-for copied
+        // an entire point cloud on every iteration.
+        for(const auto& obstacle_points : obs_points)
+        {
+            Obstacle obs;
+            GenrateObstacleBox(obstacle_points.makeShared(), obs);
+
+            iv::vision::Bbox3D* bbox_3d = obstacle_info.add_bbox_3d();
+
+            bbox_3d->set_category(categorys[count]);
+            bbox_3d->set_top_left_x(obs_bbox2d[count].x);
+            bbox_3d->set_top_left_y(obs_bbox2d[count].y);
+            bbox_3d->set_bottom_right_x(obs_bbox2d[count].x+obs_bbox2d[count].width);
+            bbox_3d->set_bottom_right_y(obs_bbox2d[count].y+obs_bbox2d[count].height);
+
+            // Eight OBB corners followed by the center point.
+            fill_point(bbox_3d->mutable_p1(), obs.corners[0]);
+            fill_point(bbox_3d->mutable_p2(), obs.corners[1]);
+            fill_point(bbox_3d->mutable_p3(), obs.corners[2]);
+            fill_point(bbox_3d->mutable_p4(), obs.corners[3]);
+            fill_point(bbox_3d->mutable_p5(), obs.corners[4]);
+            fill_point(bbox_3d->mutable_p6(), obs.corners[5]);
+            fill_point(bbox_3d->mutable_p7(), obs.corners[6]);
+            fill_point(bbox_3d->mutable_p8(), obs.corners[7]);
+            fill_point(bbox_3d->mutable_c(), obs.center);
+
+            count++;
+        }
+
+        std::string pub_string = obstacle_info.SerializeAsString();
+        iv::modulecomm::ModuleSendMsg(obstacles_shared_mem_handle_, pub_string.data(), pub_string.length());
+    }
+
+    // Builds one LINE_LIST marker containing the OBB wireframes of every
+    // obstacle point set and publishes it on marker_publisher_ (DEBUG only).
+    void GenerateBoxMarkersAndPublish(std::vector<pcl::PointCloud<pcl::PointXYZRGB>>& obs_points, ros::Time time )
+    {
+        visualization_msgs::Marker bbox_lines;
+        bbox_lines.header.frame_id = "/obs_vis";  // NOTE(review): tf2 rejects frame ids with a leading '/' -- confirm intended name
+        bbox_lines.header.stamp = time;
+        bbox_lines.ns = "bbox_obb_lines";
+        bbox_lines.pose.orientation.w = 1.0;
+
+        bbox_lines.id = 0;
+        bbox_lines.scale.x = 0.008;
+        bbox_lines.type = visualization_msgs::Marker::LINE_LIST;
+        bbox_lines.color.r = 1.0;
+        bbox_lines.color.a = 1.0;
+
+        // All obstacles go into a single marker message.
+        for(size_t i = 0; i < obs_points.size(); i++)
+        {
+            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud = obs_points[i].makeShared();
+            GenerateBoxMarker(cloud, time, bbox_lines);
+        }
+        marker_publisher_.publish(bbox_lines);
+    }
+
+    /**
+     * Max-min-distance clustering.  sample[0] seeds the first center and the
+     * point farthest from it seeds the second; further centers are added
+     * while the largest point-to-nearest-center distance still exceeds
+     * theta * (distance between the first two centers).
+     * \param sample points to cluster (by value, matching the original API)
+     * \param theta  relative distance threshold in (0,1)
+     * \param clas   out: cluster label per point (must hold sample.size() ints)
+     * \return number of clusters found (0 for empty input)
+     */
+    int MaxMinCluster(std::vector<cv::Point3f> sample,float theta,int *clas)
+    {
+        const int n = static_cast<int>(sample.size());
+        if(n <= 0) return 0;  // original code had UB here (zero-length VLA)
+        // Heap-allocated work buffers replace the original variable-length
+        // arrays (non-standard C++; stack-overflow risk for large n).
+        constexpr int kMaxCenters = 20;          // capacity of the center table
+        std::vector<int> center(kMaxCenters, 0); // sample indices of centers
+        std::vector<std::vector<float>> D(n, std::vector<float>(kMaxCenters, 0.0f)); // point-to-center distances
+        std::vector<float> min_dist(n, 0.0f);    // distance to nearest center so far
+        std::vector<int> minindex(n, 0);         // BUGFIX: zero-init (was read uninitialized when all points coincide)
+        float theshold;
+        float D12 = 0.0f;  // distance between the first two cluster centers
+        float tmp = 0;
+        int index = 0;
+        center[0] = 0;     // first center: sample[0]
+        int k = 0, i, m;
+        // Distance of every sample to the first center (sample[0]); the
+        // farthest point becomes the second center.
+        for(i=0;i<n;i++)
+        {
+            tmp=pow(sample[i].x-sample[0].x,2)+pow(sample[i].y-sample[0].y,2)
+                    +pow(sample[i].z-sample[0].z,2);
+            D[i][0]=(float)sqrt(tmp);
+            if(D[i][0]>D12)
+            {
+                D12=D[i][0];
+                index=i;
+            }
+        }
+        center[1]=index;   // second center found and stored
+        k=1;
+        index=0;
+        theshold=D12;
+        // Keep adding centers while the farthest "nearest-center" distance is
+        // still larger than theta*D12.
+        while(theshold>theta*D12)
+        {
+            // Distance of every sample to the newest center.
+            for(i=0;i<n;i++)
+            {
+                tmp=pow(sample[i].x-sample[center[k]].x,2)+ pow(sample[i].y-sample[center[k]].y,2)+
+                        pow(sample[i].z-sample[center[k]].z,2);
+                D[i][k]=(float)sqrt(tmp);
+            }
+            // For each sample, find the nearest center and remember its label.
+            // (index is deliberately shared across iterations, as in the
+            // original, so behavior is unchanged.)
+            for(i=0;i<n;i++)
+            {
+                float best=D12;
+                for(m=0;m<=k;m++)
+                    if (D[i][m]<best)
+                    {
+                        best=D[i][m];
+                        index=m;
+                    }
+                min_dist[i]=best;
+                minindex[i]=index;
+            }
+            // The sample farthest from all existing centers is the candidate
+            // for the next center.
+            float max=0;index=0;
+            for(i=0;i<n;i++)
+            {
+                if(min_dist[i]>max)
+                {
+                    max=min_dist[i];
+                    index=i;
+                }
+            }
+            if (max>theta*D12)
+            {
+                // BUGFIX: the original wrote past center[20] when more than
+                // 20 clusters were found; stop growing instead.
+                if (k+1 >= kMaxCenters) break;
+                k++;
+                center[k]=index;
+            }
+            theshold=max;  // prepare next loop iteration
+        }
+        for(i=0;i<n;i++)
+            clas[i]=minindex[i];
+        return k+1;
+    }
+
+    /* sort in descending order according to the number of samples in the class
+       (used to put the most populated cluster first) */
+    static bool SortClusterBySampleNum(std::vector<std::pair<cv::Point2i,cv::Point3f>> &cluster1, 
+                                std::vector<std::pair<cv::Point2i,cv::Point3f>> &cluster2)
+    {
+        return cluster1.size() > cluster2.size();
+    }
+
+    /* sort in DESCENDING order by the depth (z) of the samples in the class
+       (the original comment said "ascending", but operator> sorts largest-z
+       first) */
+    static bool SortPointByDepth(std::pair<cv::Point2i,cv::Point3f> &point1, 
+                          std::pair<cv::Point2i,cv::Point3f> &point2)
+    {
+        return point1.second.z > point2.second.z;
+    }
+    /* sort in DESCENDING order by depth (z); same comparator for raw Point3f
+       samples (the original comment said "ascending", but '>' sorts
+       largest-z first) */
+    static bool SortPointByDepth1(cv::Point3f &point1, cv::Point3f &point2)
+    {
+        return point1.z > point2.z;
+    }
+    /**
+     * Worker loop run on thread_depth_bbox_: pops synchronized depth+bbox
+     * frames from cache_queue_, back-projects the depth map to per-pixel XYZ
+     * on the GPU, then for every 2-D detection box subsamples the points
+     * inside it, clusters them with MaxMinCluster and keeps the largest
+     * cluster as that obstacle's 3-D point set.  Box centers are estimated
+     * from the median and mean depth of the kept cluster.  Under DEBUG the
+     * clusters / OBB markers are published and annotated depth images are
+     * written to disk.  Never returns; sleeps 2 ms when the queue is empty.
+     */
+    void DepthBboxesHandle()
+    {
+        size_t cache_size = 0;
+        int frame_count = 0;
+        while(true)
+        {   
+            depth_bbox  current_depth_bbox;
+            {
+                std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_bbox_);
+                cache_size = cache_queue_.size();
+            }
+            if(cache_size > 0)
+            {
+                {
+                    std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_bbox_);
+                    current_depth_bbox = cache_queue_.front();
+                    cache_queue_.pop();
+                }
+
+                // Pinhole intrinsics delivered with the depth message.
+                float32_t fu = current_depth_bbox.depth.fu;
+                float32_t fv = current_depth_bbox.depth.fv;
+                float32_t u0 = current_depth_bbox.depth.u0;
+                float32_t v0 = current_depth_bbox.depth.v0;
+                float32_t inv_fu = 1 / fu;
+                float32_t inv_fv = 1 / fv;
+
+                cv::Mat depth_mono16;
+                sensor_msgs::Image depth_mono16_msg = current_depth_bbox.depth.depth;
+                depth_mono16 = cv_bridge::toCvCopy(depth_mono16_msg, "mono16")->image;
+
+                // Debug visualization: depth (mm) -> meters -> rainbow colormap.
+                cv::Mat color, depth_8u;
+                depth_8u = depth_mono16 / 1000;
+                // BUGFIX: 255/80 was integer division (== 3); use the intended
+                // floating-point scale for the 0..80 m display range.
+                depth_8u.convertTo(depth_8u, CV_8U, 255.0 / 80.0);
+                cv::applyColorMap(depth_8u, color, cv::COLORMAP_RAINBOW);
+                color.setTo(cv::Scalar(0, 0, 0), depth_mono16 <= 0);
+
+                int rows = depth_mono16.rows;
+                int cols = depth_mono16.cols;
+                size_t mem_grid_size = static_cast<size_t>(rows * cols);
+                if(nullptr == x_gpu_buf_)
+                {
+                    // Lazily allocate device + pinned host buffers on the
+                    // first frame (freed in the destructor).
+                    depth_gpu_buf_ = (short*)SafeCudaMalloc(mem_grid_size * sizeof(short));
+                    x_gpu_buf_ = (float*)SafeCudaMalloc(mem_grid_size * sizeof(float));
+                    y_gpu_buf_ = (float*)SafeCudaMalloc(mem_grid_size * sizeof(float));
+                    z_gpu_buf_ = (float*)SafeCudaMalloc(mem_grid_size * sizeof(float));
+                    
+                    depth_cpu_buf_ = (short*)SafeHostMalloc(mem_grid_size * sizeof(short));
+                    x_cpu_buf_ = (float*)SafeHostMalloc(mem_grid_size * sizeof(float));
+                    y_cpu_buf_ = (float*)SafeHostMalloc(mem_grid_size * sizeof(float));
+                    z_cpu_buf_ = (float*)SafeHostMalloc(mem_grid_size * sizeof(float));
+                    // BUGFIX: removed "z_cpu_buf_ = new float[mem_grid_size];"
+                    // which leaked the pinned buffer above and later made the
+                    // destructor call cudaFreeHost on new[] memory (UB).
+                }
+
+                void* depht_data_ptr = (void*)depth_mono16.data;
+     
+                CUDA_CHECK(cudaMemcpyAsync(depth_gpu_buf_, depht_data_ptr, mem_grid_size * sizeof(short),cudaMemcpyHostToDevice,stream_depth_));
+                // BUGFIX: memset(z_cpu_buf_, 1.0, ...) filled each byte with 1
+                // instead of storing float 1.0; fill with real float values.
+                // (The buffer is overwritten by the device->host copy below.)
+                std::fill_n(z_cpu_buf_, mem_grid_size, 1.0f);
+                
+                adc::common::ComputeXYZGPU((short *)depth_gpu_buf_, 0, 0, inv_fu, inv_fv, u0, v0, min_depth_, max_depth_, rows, cols, x_gpu_buf_, y_gpu_buf_,z_gpu_buf_, stream_depth_);
+                CUDA_CHECK(cudaStreamSynchronize(stream_depth_));
+
+                CUDA_CHECK(cudaMemcpyAsync(x_cpu_buf_, x_gpu_buf_, mem_grid_size * sizeof(float), cudaMemcpyDeviceToHost,stream_x_));
+                CUDA_CHECK(cudaMemcpyAsync(y_cpu_buf_, y_gpu_buf_, mem_grid_size * sizeof(float), cudaMemcpyDeviceToHost,stream_y_));
+                CUDA_CHECK(cudaMemcpyAsync(z_cpu_buf_, z_gpu_buf_, mem_grid_size * sizeof(float), cudaMemcpyDeviceToHost,stream_z_));
+               
+                CUDA_CHECK(cudaStreamSynchronize(stream_x_));
+                CUDA_CHECK(cudaStreamSynchronize(stream_y_));
+                CUDA_CHECK(cudaStreamSynchronize(stream_z_));
+
+                // Non-owning cv::Mat headers over the pinned host buffers.
+                cv::Mat x_map(rows, cols, CV_32FC1, x_cpu_buf_);
+                cv::Mat y_map(rows, cols, CV_32FC1, y_cpu_buf_);
+                cv::Mat z_map(rows, cols, CV_32FC1, z_cpu_buf_);
+
+                // Reference the message's box list instead of copying it.
+                const std::vector<adc_msg::bbox2d>& bbooxes = current_depth_bbox.boxes.bboxes;
+                
+                #ifdef DEBUG 
+                int color_num = 16;
+                uchar r[] = {0,125,121,116,112,108,103,99,95,91,86,82,78,73,69,65};
+                uchar g[] = {0,0,30,0,0,250,0,0,0,150,0,0,0,0,0,0};
+                uchar b[] = {120,125,255,10,0,255,255,255,255,255,225,255,255,255,255,255,255};
+
+                pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+                sensor_msgs::PointCloud2 point_cloud_msg;
+                int count = 0;
+                bool save_flag = false;
+                #endif
+
+                std::vector<pcl::PointCloud<pcl::PointXYZRGB>> obstacles_points;
+                std::vector<std::string> categorys;
+                std::vector<cv::Rect> obstacles_bbox2d;
+                std::vector<cv::Point3f> box_centers;
+
+                std::vector<cv::Point3f> xyzpoints;
+                std::vector<cv::Point2i> clusterposition;
+                std::vector<std::pair<cv::Point2i,cv::Point3f>> xyzcluster;
+                std::vector<std::vector<std::pair<cv::Point2i,cv::Point3f>>> xyzclusters;
+                cv::Mat canvas = cv::Mat::zeros(rows, cols, CV_8UC3);
+                for(const adc_msg::bbox2d& box : bbooxes)
+                {
+                    int x1 = box.x1;
+                    int y1 = box.y1;
+                    int x2 = box.x2;
+                    int y2 = box.y2;
+                    std::cout<<"********xywh********"<<std::endl;
+                    std::cout<<x1<<" "<<y1<<" "<<x2<<" "<<y2<<std::endl;
+
+                    int width = x2 - x1;
+                    int height = y2 - y1;
+                    // BUGFIX: clamp the detection box to the depth image so an
+                    // out-of-range box cannot make cv::Mat::operator() throw.
+                    cv::Rect roi_rect = cv::Rect(x1, y1, width, height) & cv::Rect(0, 0, cols, rows);
+                    if(roi_rect.width <= 0 || roi_rect.height <= 0) continue;
+                    cv::Mat roi_z = z_map(roi_rect).clone();
+                    cv::Mat roi_x = x_map(roi_rect).clone();
+                    cv::Mat roi_y = y_map(roi_rect).clone();
+                    
+                    int roi_rows = roi_z.rows;
+                    int roi_cols = roi_z.cols;
+
+                    xyzpoints.clear();
+                    clusterposition.clear();
+
+                    // Subsample the ROI to at most ~50x50 candidate pixels.
+                    int ratio_y = round(roi_rows / 50.0); // round to nearest
+                    int ratio_x = round(roi_cols / 50.0);
+                    ratio_y = ratio_y > 0 ? ratio_y : 1;
+                    ratio_x = ratio_x > 0 ? ratio_x : 1;
+
+                    for(int row = 0; row < roi_rows ; row+=ratio_y)
+                    {
+                        float* data_ptr_z = roi_z.ptr<float>(row);
+                        float* data_ptr_x = roi_x.ptr<float>(row);
+                        float* data_ptr_y = roi_y.ptr<float>(row);
+                        for(int col = 0; col < roi_cols ; col+=ratio_x)
+                        {
+                            if(data_ptr_z[col]>min_depth_ && data_ptr_z[col]<=max_depth_)
+                            {
+                                xyzpoints.push_back(cv::Point3f(data_ptr_x[col],
+                                data_ptr_y[col],data_ptr_z[col]));
+                                clusterposition.push_back(cv::Point2i(col+roi_rect.x,row+roi_rect.y));
+                            }
+                        }
+                    }
+                    // Cluster the sampled 3-D points.
+                    if(xyzpoints.size()<2) continue;
+                    float theta = 0.4;
+                    int n = static_cast<int>(xyzpoints.size());
+                    std::vector<int> labels(n, 0); // heap buffer replaces the original VLA
+                    int m = MaxMinCluster(xyzpoints,theta,labels.data());
+                    xyzclusters.clear();                       
+                    // Gather the points of each cluster together.
+                    for(int i=0;i<m;i++)
+                    {
+                        xyzcluster.clear(); 
+                        for(int j=0;j<n;j++)
+                        {
+                            if(labels[j]==i)
+                            {
+                                    std::pair<cv::Point2i,cv::Point3f> value(clusterposition[j],xyzpoints[j]);
+                                    xyzcluster.push_back(value);
+                            }
+                        }
+                        xyzclusters.push_back(xyzcluster);
+                    }
+                    if (xyzclusters.size() == 0) continue;                    
+                    // Work on the most populated cluster, ordered by depth.
+                    std::sort(xyzclusters.begin(), xyzclusters.end(), SortClusterBySampleNum);                    
+                    std::sort(xyzclusters[0].begin(), xyzclusters[0].end(), SortPointByDepth);
+                    // Drop the nearest and farthest point (outlier trimming).
+                    if(xyzclusters[0].size()>2)
+                    {                        
+                        xyzclusters[0].pop_back();
+                        xyzclusters[0].erase(xyzclusters[0].begin());
+                    }                    
+                    // Median depth of the cluster plus the box center (u,v)
+                    // gives one estimate of the obstacle center (X,Y,Z).
+                    int center_u = x1 + 0.5*width;
+                    int center_v = y1 + 0.5*height;
+                    int middle_idx = floor(xyzclusters[0].size()*0.5);
+                    float mid_z = xyzclusters[0][middle_idx].second.z;
+                    cv::Point3f box_center_mid;
+                    box_center_mid.x = inv_fu * (center_u - u0) * mid_z;
+                    box_center_mid.y = inv_fv * (center_v - v0) * mid_z;
+                    box_center_mid.z = mid_z;
+                    // Mean depth of the cluster gives a second estimate.
+                    float sum_z = 0;
+                    for(size_t i=0;i<xyzclusters[0].size();i++)
+                    {
+                        sum_z = sum_z + xyzclusters[0][i].second.z;
+                    }
+                    float avg_z = sum_z / xyzclusters[0].size();
+                    cv::Point3f box_center_avg;
+                    box_center_avg.x = inv_fu * (center_u - u0) * avg_z;
+                    box_center_avg.y = inv_fv * (center_v - v0) * avg_z;
+                    box_center_avg.z = avg_z;
+
+                    // Reference estimate WITHOUT clustering, for comparison.
+                    std::sort(xyzpoints.begin(),xyzpoints.end(),SortPointByDepth1);
+                    // Again drop the nearest and farthest point.
+                    if(xyzpoints.size()>2)
+                    {                        
+                        xyzpoints.pop_back();
+                        xyzpoints.erase(xyzpoints.begin());
+                    } 
+                    int middle_idx_nc = floor(xyzpoints.size()*0.5);
+                    float mid_z_nc = xyzpoints[middle_idx_nc].z;
+
+                    cv::Point3f box_center_mid_nc;
+                    box_center_mid_nc.x = inv_fu * (center_u - u0) * mid_z_nc;
+                    box_center_mid_nc.y = inv_fv * (center_v - v0) * mid_z_nc;
+                    box_center_mid_nc.z = mid_z_nc;
+                    // Convert the winning cluster into a PCL cloud so the OBB
+                    // and publishing helpers can consume it.
+                    pcl::PointCloud<pcl::PointXYZRGB> cluster_maximum;
+                    pcl::PointXYZRGB single_point;
+                    for(size_t i=0;i<xyzclusters[0].size();i++)
+                    {
+                        single_point.x = xyzclusters[0][i].second.x;
+                        single_point.y = xyzclusters[0][i].second.y;
+                        single_point.z = xyzclusters[0][i].second.z;
+
+                        #ifdef DEBUG
+                        single_point.r = r[count%color_num];
+                        single_point.g = g[count%color_num];
+                        single_point.b = b[count%color_num];
+                        #endif // DEBUG
+
+                        cluster_maximum.push_back(single_point);
+                    }
+                    categorys.emplace_back(box.category);
+                    obstacles_points.push_back(cluster_maximum);
+                    obstacles_bbox2d.push_back(cv::Rect(x1,y1,width,height));
+                    box_centers.push_back(box_center_avg);
+
+                    #ifdef DEBUG
+                    // Visualize every cluster of this box on the canvas.
+                    for(size_t i=0;i<xyzclusters.size();i++)
+                    {
+                        for(size_t j=0;j<xyzclusters[i].size();j++)
+                        {
+                            cv::circle(canvas,xyzclusters[i][j].first,2,
+                            cv::Scalar(b[i%color_num],g[i%color_num],r[i%color_num]),-1);
+                        }
+                    }
+                    cv::rectangle(canvas, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(0,150,200),2,8,0);
+                    std::string str1 = cv::format("%.2f", box_center_mid.x); 
+                    std::string str2 = cv::format("%.2f", box_center_mid_nc.x);
+                    std::string str3 = cv::format("%.2f", box_center_mid.z);
+                    std::string str4 = cv::format("%.2f", box_center_mid_nc.z);
+                    std::string str5 = str1 +"," + str2 + "," + str3 + "," + str4;
+                    cv::putText(canvas,str5, cv::Point(x1, y1-15), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(0,150,200));
+                    std::cout<<str5<<std::endl;
+                    save_flag = true;
+                    (*point_cloud_ptr) += cluster_maximum;
+                    count++;
+                    #endif // DEBUG
+                }
+
+                //send message by share memory
+                //PushsObstaclesBySharedMem(obstacles_points,current_depth_bbox.stamp,categorys,obstacles_bbox2d);
+                
+                #ifdef DEBUG
+                // 3-D bounding-box wireframes for RViz.
+                GenerateBoxMarkersAndPublish(obstacles_points, current_depth_bbox.stamp);
+                
+                if( point_cloud_ptr->size() > 0)
+                {
+                    pcl::toROSMsg(*point_cloud_ptr, point_cloud_msg);
+                    point_cloud_msg.header.frame_id = "fusion";
+                    point_cloud_msg.header.stamp = current_depth_bbox.stamp;
+                    point_publisher_.publish(point_cloud_msg);
+                    ros::spinOnce();
+                }    
+                // Dump the annotated cluster image for offline inspection.
+                if(save_flag)
+                {
+                    char image_path[100];
+                    // snprintf instead of sprintf: can never overrun image_path.
+                    snprintf(image_path, sizeof(image_path), "/media/nvidia/Elements/depth_img/%04d.jpg",frame_count);
+                    cv::imwrite(image_path,canvas);
+                }
+                frame_count ++; 
+                #endif // DEBUG                 
+            }
+            else
+            { 
+                // Nothing queued yet: back off briefly to avoid busy-waiting.
+                usleep(2000);
+            }
+        }
+    }
+    /**
+     * Debug visualization callback for time-synchronized (image, 2-D boxes)
+     * pairs: draws every detection rectangle with its category label on the
+     * image and shows it in the "show" window.
+     */
+    void CallbackImageBoox(const sensor_msgs::ImageConstPtr& image_msg, const adc_msg::bboxes2dConstPtr& detection_bbox2d)
+    {
+        // BUGFIX: reference the message's box list instead of copying the
+        // whole vector, and iterate by const reference instead of copying
+        // each box.
+        const std::vector<adc_msg::bbox2d>& bboxes = detection_bbox2d->bboxes;
+        cv::Mat img = cv_bridge::toCvCopy(image_msg, image_msg->encoding)->image;        
+        for(const adc_msg::bbox2d& bbox : bboxes)
+        {
+            int x1 = bbox.x1;
+            int y1 = bbox.y1;
+            int x2 = bbox.x2;
+            int y2 = bbox.y2;
+            cv::rectangle(img, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(0,150,200),2,8,0);
+            cv::putText(img, bbox.category, cv::Point(x1, y1-15), cv::FONT_HERSHEY_PLAIN, 2, cv::Scalar(0,150,200));
+        }
+        cv::imshow("show", img);
+        cv::waitKey(10);
+
+    }
+    // Synchronizer callback: bundles a depth frame with its matching 2-D
+    // detections (stamped with the depth time) and queues it for the worker
+    // thread.  The queue is capped at KCacheNum entries; the oldest frame is
+    // dropped on overflow so the worker never falls behind the stream.
+    void CallbackBboxDepth(const adc_msg::adc_depthConstPtr& depth_msg, const adc_msg::bboxes2dConstPtr& detection_bbox2d)
+    {
+        ros::Time depth_time = depth_msg->header.stamp;
+        depth_bbox merge_msg;
+        merge_msg.depth = *depth_msg;
+        merge_msg.boxes = *detection_bbox2d;
+        merge_msg.stamp = depth_time;
+        {
+            std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_bbox_);
+            cache_queue_.push(merge_msg);
+            if(cache_queue_.size() > KCacheNum) cache_queue_.pop();
+        }
+
+    }
+    // Reads the node parameters, registers the shared-memory output channel,
+    // creates the message-filter subscribers/synchronizers and the four CUDA
+    // streams, starts the depth+bbox worker thread, then blocks in
+    // ros::spin() (so the constructor never returns until shutdown).
+    void InitConfig()
+    {
+        std::string detection_image_topic;
+        std::string obstacles_shared_mem_topic;
+        bool is_vis_detection;
+        nh_ptr_->param("detection_bbox2d_topic", detection_bbox2d_topic_, std::string("/detection/bbox2d"));
+        nh_ptr_->param("stereo_depth_topic", stereo_depth_topic_, std::string("/stereo/depth"));
+        nh_ptr_->param("detection_image_topic", detection_image_topic, std::string("/detection/image"));
+        nh_ptr_->param("obstacles_shared_mem_topic", obstacles_shared_mem_topic, std::string("/detection/vision/obstacles"));
+        nh_ptr_->param("vis_detection", is_vis_detection, true);
+        nh_ptr_->param("dbscan_min_pts", dbscan_min_pts_, 50);
+        nh_ptr_->param("dbscan_radius", dbscan_radius_, 0.0001);
+        // cv::namedWindow("show",cv::WINDOW_NORMAL);
+
+        // Shared-memory channel consumed outside ROS (modulecomm).
+        obstacles_shared_mem_handle_ = iv::modulecomm::RegisterSend(obstacles_shared_mem_topic.c_str(), 1000000, 1);
+        depth_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::adc_depth>>(*nh_ptr_, stereo_depth_topic_, 1);
+        detection_bbox2s_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::bboxes2d>>(*nh_ptr_, detection_bbox2d_topic_, 1);
+        detection_image_sub_ptr_ = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*nh_ptr_, detection_image_topic, 1);
+        CUDA_CHECK(cudaStreamCreate(&stream_depth_));
+        CUDA_CHECK(cudaStreamCreate(&stream_x_));
+        CUDA_CHECK(cudaStreamCreate(&stream_y_));
+        CUDA_CHECK(cudaStreamCreate(&stream_z_));
+        #ifdef DEBUG
+        point_publisher_ = nh_ptr_->advertise<sensor_msgs::PointCloud2>("/fusion/points", 1);
+        marker_publisher_ = nh_ptr_->advertise<visualization_msgs::Marker>("bbox_marker", 10);
+        #endif // DEBUG
+        thread_depth_bbox_ = std::thread(std::bind(&DetectionFusionCMW::DepthBboxesHandle, this));
+        // NOTE(review): the detached worker keeps running through object
+        // destruction; a join with a stop flag would be safer -- confirm.
+        thread_depth_bbox_.detach();
+        if(is_vis_detection)
+        {
+            // Optional on-screen visualization of image + boxes.
+            sync_detection_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicyDetection>>(SyncPolicyDetection(100), *detection_image_sub_ptr_, *detection_bbox2s_sub_ptr_);
+            sync_detection_ptr_->registerCallback(std::bind(&DetectionFusionCMW::CallbackImageBoox,this,std::placeholders::_1, std::placeholders::_2));
+        }
+        // Main fusion path: depth + bboxes -> worker queue.
+        sync_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicy>>(SyncPolicy(100), *depth_sub_ptr_, *detection_bbox2s_sub_ptr_);
+        sync_ptr_->registerCallback(std::bind(&DetectionFusionCMW::CallbackBboxDepth,this,std::placeholders::_1, std::placeholders::_2));
+        ros::spin();
+    }
+
+private:
+    std::shared_ptr<ros::NodeHandle> nh_ptr_;
+    std::string stereo_depth_topic_;
+    std::string detection_bbox2d_topic_;
+    std::shared_ptr<message_filters::Subscriber<adc_msg::adc_depth>> depth_sub_ptr_;
+    std::shared_ptr<message_filters::Subscriber<adc_msg::bboxes2d>> detection_bbox2s_sub_ptr_;
+    std::shared_ptr<message_filters::Subscriber<sensor_msgs::Image>> detection_image_sub_ptr_;
+    std::shared_ptr<message_filters::Synchronizer<SyncPolicy>> sync_ptr_;
+    std::shared_ptr<message_filters::Synchronizer<SyncPolicyDetection>> sync_detection_ptr_;
+    std::queue<depth_bbox>  cache_queue_;     // frames pending fusion, guarded by mutex_sync_depth_bbox_
+    std::mutex mutex_sync_depth_bbox_;
+    std::thread thread_depth_bbox_;           // detached worker running DepthBboxesHandle
+    float32_t max_depth_ = 20.0;              // valid depth range [min_depth_, max_depth_] in meters
+    float32_t min_depth_ = 0;
+
+
+    // Device-side buffers for the depth map and back-projected X/Y/Z maps
+    // (allocated lazily on the first frame; nullptr means "not yet allocated").
+    float* x_gpu_buf_ = nullptr;
+    float* y_gpu_buf_ = nullptr;
+    float* z_gpu_buf_ = nullptr;
+    short* depth_gpu_buf_ = nullptr;
+    
+    // Matching pinned host buffers (cudaFreeHost in the destructor).
+    float* x_cpu_buf_ = nullptr;
+    float* y_cpu_buf_ = nullptr;
+    float* z_cpu_buf_ = nullptr;
+    short* depth_cpu_buf_ = nullptr;
+
+    // One stream per transfer so the three D2H copies can overlap.
+    cudaStream_t stream_depth_;
+    cudaStream_t stream_x_;
+    cudaStream_t stream_y_;
+    cudaStream_t stream_z_;
+    
+
+    int dbscan_min_pts_;      // NOTE(review): read as params but unused here -- presumably legacy DBSCAN tuning
+    double dbscan_radius_;
+    adc::common::OBBExtrator obb_extrator_;
+
+    #ifdef DEBUG
+    ros::Publisher point_publisher_;
+    ros::Publisher marker_publisher_;
+    #endif // DEBUG
+
+    const unsigned int  KCacheNum = 2;        // max frames buffered for the worker
+    void* obstacles_shared_mem_handle_;       // modulecomm shared-memory channel
+};
+}
+}
+
+// Entry point: constructing the node object performs ros::init, wires up all
+// subscribers/publishers and blocks inside ros::spin() until shutdown.
+int main(int argc, char** argv)
+{
+    adc::ros_cmw::DetectionFusionCMW detection_fusion_node(argc, argv);
+    return 0;
+}

+ 726 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_depth_fusion_app_node_old.cpp

@@ -0,0 +1,726 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-08-25
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include <ros/ros.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/time_synchronizer.h>
+#include <message_filters/sync_policies/approximate_time.h> 
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+#include <memory>
+#include <queue>
+#include <mutex>
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/videoio.hpp>
+#include "adc_msg/adc_depth.h"
+#include "cmw_app/detection_app.h"
+#include <sensor_msgs/PointCloud2.h> 
+#include "adc_msg/bboxes2d.h"
+#include "adc_msg/bbox2d.h"
+#include <pcl/point_cloud.h>
+#include <pcl_conversions/pcl_conversions.h>
+#include "common/utils.h"
+#include <cuda_runtime_api.h>
+#include "cuda_runtime.h"
+#include <visualization_msgs/Marker.h>
+
+#include "modulecomm.h"
+#include "obstacles.pb.h"
+#define IMAGE_WIDTH 640
+#define IMAGE_HEIGHT 480
+
+#define DEBUG
+
+namespace adc
+{
+namespace ros_cmw
+{
+
+class DetectionFusionCMW
+{
+public:
+    typedef message_filters::sync_policies::ApproximateTime<adc_msg::adc_depth, adc_msg::bboxes2d> SyncPolicy;
+    typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, adc_msg::bboxes2d> SyncPolicyDetection;
+    typedef std::vector<std::vector<float32_t>> ObstaclesType;
+    typedef std::vector<float> PointType;
+    
+public:
    // Initialize the ROS node named "detection", create the private ("~")
    // node handle and run InitConfig().  Note: InitConfig() ends in
    // ros::spin(), so this constructor blocks until the node shuts down.
    DetectionFusionCMW(int argc, char** argv)
    {
        ros::init(argc, argv, "detection");
        nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
        InitConfig();
        
    }
+
+    ~DetectionFusionCMW()
+    {
+        if(nullptr != x_gpu_buf_)
+        {
+            SafeCudaFree(depth_gpu_buf_);
+            SafeCudaFree(x_gpu_buf_);
+            SafeCudaFree(y_gpu_buf_);
+            SafeCudaFree(z_gpu_buf_);
+            CUDA_CHECK(cudaFreeHost(depth_cpu_buf_));
+            CUDA_CHECK(cudaFreeHost(x_cpu_buf_));
+            CUDA_CHECK(cudaFreeHost(y_cpu_buf_));
+            CUDA_CHECK(cudaFreeHost(z_cpu_buf_));
+
+            depth_gpu_buf_ = nullptr;
+            x_gpu_buf_ = nullptr;
+            y_gpu_buf_ = nullptr;
+            z_gpu_buf_ = nullptr;
+            depth_cpu_buf_ = nullptr;
+            x_cpu_buf_ = nullptr;
+            y_cpu_buf_ = nullptr;
+            z_cpu_buf_ = nullptr;
+        }
+
+        CUDA_CHECK(cudaStreamDestroy(stream_depth_));
+        CUDA_CHECK(cudaStreamDestroy(stream_x_));
+        CUDA_CHECK(cudaStreamDestroy(stream_y_));
+        CUDA_CHECK(cudaStreamDestroy(stream_z_));
+    }
+private:
+
    // One synchronized pair of a depth frame and the 2D detection boxes that
    // share (approximately) the same timestamp; produced by CallbackBboxDepth
    // and consumed by the DepthBboxesHandle worker thread.
    struct depth_bbox
    {
        adc_msg::adc_depth depth;  // depth image plus camera intrinsics (fu, fv, u0, v0)
        adc_msg::bboxes2d boxes;   // 2D detection boxes for the same frame
        ros::Time stamp;           // timestamp copied from the depth message header
    };
+
    // One obstacle expressed as an oriented 3D bounding box: the eight OBB
    // corners returned by OBBExtrator::GetOBB plus the box center (midpoint
    // of the diagonally opposite corners 0 and 7, see GenrateObstacleBox).
    struct Obstacle
    {
        std::vector<pcl::PointXYZRGB> corners;  // 8 corner points of the OBB
        pcl::PointXYZRGB center;                // geometric center of the box

    };
+
    // A timestamped collection of obstacles for a single frame.
    // NOTE(review): not referenced anywhere in this file — confirm whether a
    // downstream consumer still needs it or it can be removed.
    struct Obstacles
    {
        double time;                      // frame time in seconds
        std::vector<Obstacle> obstacles;  // all obstacles of the frame
    };
+
+    void GenrateObstacleBox(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud, Obstacle& obtsacle)
+    {
+        float length, width, height;
+        std::vector<pcl::PointXYZRGB>& corners_ref = obtsacle.corners;
+        obb_extrator_.GetOBB(cloud, corners_ref, length, width, height);
+        (void) length;
+        (void) height;
+        (void) width;
+        pcl::PointXYZRGB& center_ref =  obtsacle.center;
+        center_ref.x = (corners_ref[0].x + corners_ref[7].x) / 2.0;
+        center_ref.y = (corners_ref[0].y + corners_ref[7].y) / 2.0;
+        center_ref.z = (corners_ref[0].z + corners_ref[7].z) / 2.0;
+    }
+
+    void GenerateBoxMarker(const pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud, ros::Time time, visualization_msgs::Marker& bbox_lines)
+    {
+        (void) time;
+        std::vector<pcl::PointXYZRGB> bbox;
+        float length, width, height;
+        obb_extrator_.GetOBB(cloud, bbox, length, width, height);
+
+        std::cout<<"l,w,h: "<<length<<","<<width<<","<<height<<std::endl;
+
+        geometry_msgs::Point p1, p2;
+
+        // line1
+        p1.x = bbox[0].x;
+        p1.y = bbox[0].y;
+        p1.z = bbox[0].z;
+
+        p2.x = bbox[1].x;
+        p2.y = bbox[1].y;
+        p2.z = bbox[1].z;
+
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+        // line 2
+        p1.x = bbox[2].x;
+        p1.y = bbox[2].y;
+        p1.z = bbox[2].z;
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+
+        // line3
+        p2.x = bbox[3].x;
+        p2.y = bbox[3].y;
+        p2.z = bbox[3].z;
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+
+        //line 4
+        p1.x = bbox[0].x;
+        p1.y = bbox[0].y;
+        p1.z = bbox[0].z;
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+
+        //line 5
+        p1.x = bbox[4].x;
+        p1.y = bbox[4].y;
+        p1.z = bbox[4].z;
+        p2.x = bbox[5].x;
+        p2.y = bbox[5].y;
+        p2.z = bbox[5].z;
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+
+        // line 6
+        p1.x = bbox[6].x;
+        p1.y = bbox[6].y;
+        p1.z = bbox[6].z;
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+
+         // line 7
+        p2.x = bbox[7].x;
+        p2.y = bbox[7].y;
+        p2.z = bbox[7].z;
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+
+         // line 8
+        p1.x = bbox[4].x;
+        p1.y = bbox[4].y;
+        p1.z = bbox[4].z;
+        bbox_lines.points.push_back(p1);
+        bbox_lines.points.push_back(p2);
+
+        // line1
+        for(size_t i = 0 ; i < 4; i++)
+        {
+            p1.x = bbox[i].x;
+            p1.y = bbox[i].y;
+            p1.z = bbox[i].z;
+
+            p2.x = bbox[i+4].x;
+            p2.y = bbox[i+4].y;
+            p2.z = bbox[i+4].z;
+
+            bbox_lines.points.push_back(p1);
+            bbox_lines.points.push_back(p2);
+        }
+
+        // marker_publisher_.publish(bbox_lines);
+        // ROS_INFO("publis maeker");
+    }
+    
+
+    void PushsObstaclesBySharedMem(std::vector<pcl::PointCloud<pcl::PointXYZRGB>>& obs_points, 
+                                   ros::Time time, std::vector<std::string>& categorys,
+                                   std::vector<cv::Rect>& obs_bbox2d)
+    { 
+            iv::vision::ObstacleInfo obstacle_info;
+            obstacle_info.set_time(time.toSec());
+            assert(obs_points.size() == categorys.size());
+            assert(obs_points.size() == obs_bbox2d.size());
+            int count = 0;
+            for(auto obstacle_points : obs_points)
+            {
+                Obstacle obs;
+                GenrateObstacleBox(obstacle_points.makeShared(), obs);
+                
+                iv::vision::Bbox3D* bbox_3d = obstacle_info.add_bbox_3d();
+
+                bbox_3d->set_category(categorys[count]);
+                bbox_3d->set_top_left_x(obs_bbox2d[count].x);
+                bbox_3d->set_top_left_y(obs_bbox2d[count].y);
+                bbox_3d->set_bottom_right_x(obs_bbox2d[count].x+obs_bbox2d[count].width)
+                bbox_3d->set_bottom_right_y(obs_bbox2d[count].y+obs_bbox2d[count].height);
+
+                iv::vision::Point3D* p = bbox_3d->mutable_p1();
+                p->set_x(obs.corners[0].x);
+                p->set_y(obs.corners[0].y);
+                p->set_z(obs.corners[0].z);
+
+                p = bbox_3d->mutable_p2();
+                p->set_x(obs.corners[1].x);
+                p->set_y(obs.corners[1].y);
+                p->set_z(obs.corners[1].z);
+         
+                p = bbox_3d->mutable_p3();
+                p->set_x(obs.corners[2].x);
+                p->set_y(obs.corners[2].y);
+                p->set_z(obs.corners[2].z);
+
+                p = bbox_3d->mutable_p4();
+                p->set_x(obs.corners[3].x);
+                p->set_y(obs.corners[3].y);
+                p->set_z(obs.corners[3].z);
+
+                p = bbox_3d->mutable_p5();
+                p->set_x(obs.corners[4].x);
+                p->set_y(obs.corners[4].y);
+                p->set_z(obs.corners[4].z);
+
+                p = bbox_3d->mutable_p6();
+                p->set_x(obs.corners[5].x);
+                p->set_y(obs.corners[5].y);
+                p->set_z(obs.corners[5].z);
+
+                p = bbox_3d->mutable_p7();
+                p->set_x(obs.corners[6].x);
+                p->set_y(obs.corners[6].y);
+                p->set_z(obs.corners[6].z);
+
+                p = bbox_3d->mutable_p8();
+                p->set_x(obs.corners[7].x);
+                p->set_y(obs.corners[7].y);
+                p->set_z(obs.corners[7].z);
+
+                p = bbox_3d->mutable_c();
+                p->set_x(obs.center.x);
+                p->set_y(obs.center.y);
+                p->set_z(obs.center.z);
+
+                count++;
+            }
+
+        std::string pub_string = obstacle_info.SerializeAsString();
+        iv::modulecomm::ModuleSendMsg(obstacles_shared_mem_handle_, pub_string.data(), pub_string.length());
+    }
+
+    void GenerateBoxMarkersAndPublish(std::vector<pcl::PointCloud<pcl::PointXYZRGB>>& obs_points, ros::Time time )
+    {
+        visualization_msgs::Marker bbox_lines;
+        bbox_lines.header.frame_id = "/obs_vis";
+        bbox_lines.header.stamp = time;
+        bbox_lines.ns = "bbox_obb_lines";
+        bbox_lines.pose.orientation.w = 1.0;
+
+        bbox_lines.id = 0;
+        bbox_lines.scale.x = 0.008;
+        bbox_lines.type = visualization_msgs::Marker::LINE_LIST;
+        bbox_lines.color.r = 1.0;
+        bbox_lines.color.a = 1.0;
+
+        for(size_t i = 0; i < obs_points.size(); i++)
+        {
+            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud = obs_points[i].makeShared();
+            GenerateBoxMarker(cloud, time, bbox_lines);
+        }
+        marker_publisher_.publish(bbox_lines);
+    }
+    void DepthBboxesHandle()
+    {
+        size_t cache_size = 0;
+        int frame_count = 0;
+        while(true)
+        {   
+            depth_bbox  current_depth_bbox;
+            {
+                std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_bbox_);
+                cache_size = cache_queue_.size();
+            }
+            if(cache_size > 0)
+            {
+           
+
+                {
+                    std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_bbox_);
+                    current_depth_bbox = cache_queue_.front();
+                    cache_queue_.pop();
+                }
+
+                float32_t fu = current_depth_bbox.depth.fu;
+                float32_t fv = current_depth_bbox.depth.fv;
+                float32_t u0 = current_depth_bbox.depth.u0;
+                float32_t v0 = current_depth_bbox.depth.v0;
+                float32_t inv_fu = 1 / fu;
+                float32_t inv_fv = 1 / fv;
+
+                cv::Mat depth_mono16;
+                sensor_msgs::Image depth_mono16_msg = current_depth_bbox.depth.depth;
+                depth_mono16 = cv_bridge::toCvCopy(depth_mono16_msg, "mono16")->image;
+
+                ///////////imshow
+                cv::Mat color, depth_8u;
+                depth_8u = depth_mono16 / 1000;
+                depth_8u.convertTo(depth_8u, CV_8U, 255/80);
+                cv::applyColorMap(depth_8u, color, cv::COLORMAP_RAINBOW);
+                color.setTo(cv::Scalar(0, 0, 0), depth_mono16 <= 0);
+                //////////
+
+                int rows = depth_mono16.rows;
+                int cols = depth_mono16.cols;
+                size_t mem_grid_size = static_cast<size_t>(rows * cols);
+                if(nullptr == x_gpu_buf_)
+                {
+                    depth_gpu_buf_ = (short*)SafeCudaMalloc(mem_grid_size * sizeof(short));
+                    x_gpu_buf_ = (float*)SafeCudaMalloc(mem_grid_size * sizeof(float));
+                    y_gpu_buf_ = (float*)SafeCudaMalloc(mem_grid_size * sizeof(float));
+                    z_gpu_buf_ = (float*)SafeCudaMalloc(mem_grid_size * sizeof(float));
+                    
+                    depth_cpu_buf_ = (short*)SafeHostMalloc(mem_grid_size * sizeof(short));
+                    x_cpu_buf_ = (float*)SafeHostMalloc(mem_grid_size * sizeof(float));
+                    y_cpu_buf_ = (float*)SafeHostMalloc(mem_grid_size * sizeof(float));
+                    z_cpu_buf_ = (float*)SafeHostMalloc(mem_grid_size * sizeof(float));
+                    z_cpu_buf_ = new float[mem_grid_size];
+                }
+
+                void* depht_data_ptr = (void*)depth_mono16.data;
+     
+                CUDA_CHECK(cudaMemcpyAsync(depth_gpu_buf_, depht_data_ptr, mem_grid_size * sizeof(short),cudaMemcpyHostToDevice,stream_depth_));
+                memset(z_cpu_buf_, 1.0, mem_grid_size * sizeof(float));
+                
+                adc::common::ComputeXYZGPU((short *)depth_gpu_buf_, 0, 0, inv_fu, inv_fv, u0, v0, min_depth_, max_depth_, rows, cols, x_gpu_buf_, y_gpu_buf_,z_gpu_buf_, stream_depth_);
+                CUDA_CHECK(cudaStreamSynchronize(stream_depth_));;
+
+                CUDA_CHECK(cudaMemcpyAsync(x_cpu_buf_, x_gpu_buf_, mem_grid_size * sizeof(float), cudaMemcpyDeviceToHost,stream_x_));
+                CUDA_CHECK(cudaMemcpyAsync(y_cpu_buf_, y_gpu_buf_, mem_grid_size * sizeof(float), cudaMemcpyDeviceToHost,stream_y_));
+                CUDA_CHECK(cudaMemcpyAsync(z_cpu_buf_,z_gpu_buf_, mem_grid_size * sizeof(float), cudaMemcpyDeviceToHost,stream_z_));
+               
+                CUDA_CHECK(cudaStreamSynchronize(stream_x_));
+                CUDA_CHECK(cudaStreamSynchronize(stream_y_));
+                CUDA_CHECK(cudaStreamSynchronize(stream_z_));
+
+                cv::Mat x_map(rows, cols, CV_32FC1, x_cpu_buf_);
+                cv::Mat y_map(rows, cols, CV_32FC1, y_cpu_buf_);
+                cv::Mat z_map(rows, cols, CV_32FC1, z_cpu_buf_);
+
+                std::vector<adc_msg::bbox2d> bbooxes = current_depth_bbox.boxes.bboxes;
+                // std::vector<ObstaclesType> obstacles;
+                #ifdef DEBUG 
+                int color_num = 16;
+                uchar r[] = {0,125,121,116,112,108,103,99,95,91,86,82,78,73,69,65};
+                uchar g[] = {0,0,30,0,0,250,0,0,0,150,0,0,0,0,0,0};
+                uchar b[] = {120,125,255,10,0,255,255,255,255,255,225,255,255,255,255,255,255};
+                pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+                sensor_msgs::PointCloud2 point_cloud_msg;
+                int count = 0;
+                #endif
+
+                std::vector<pcl::PointCloud<pcl::PointXYZRGB>> obstacles_points;
+                std::vector<std::string> categorys;
+                std::vector<cv::Rect> obstacles_bbox2d;
+                for(adc_msg::bbox2d box : bbooxes)
+                {
+                    int x1 = box.x1;
+                    int y1 = box.y1;
+                    int x2 = box.x2;
+                    int y2 = box.y2;
+                    int width = x2 - x1;
+                    int height = y2 - y1;
+                    // x y wideh height
+                    cv::Mat roi_z = z_map(cv::Rect(x1, y1, width, height)).clone();
+                    cv::Mat roi_x = x_map(cv::Rect(x1, y1, width, height)).clone();
+                    cv::Mat roi_y = y_map(cv::Rect(x1, y1, width, height)).clone();
+                    std::vector<pcl::PointCloud<pcl::PointXYZRGB>> clusters;
+                    
+                    int roi_rows = roi_z.rows;
+                    int roi_cols = roi_z.cols;
+
+                    if(roi_rows >= 60 || roi_cols >= 60)
+                    // if(false)
+                    {
+                        cudaStream_t x_stream;
+                        cudaStream_t y_stream;
+                        cudaStream_t z_stream;
+                        CUDA_CHECK(cudaStreamCreate(&x_stream));
+                        CUDA_CHECK(cudaStreamCreate(&y_stream));
+                        CUDA_CHECK(cudaStreamCreate(&z_stream));
+
+                        void* x_gpu_buf;
+                        void* z_gpu_buf;
+                        void* y_gpu_buf;
+
+                        void* new_x_gpu_buf;
+                        void* new_z_gpu_buf;
+                        void* new_y_gpu_buf;
+
+                        void* new_x_cpu_buf;
+                        void* new_z_cpu_buf;
+                        void* new_y_cpu_buf;
+
+                        int ratio_y = roi_rows / 60;
+                        int ratio_x = roi_cols / 60;
+
+                        ratio_y = ratio_y > 0 ? ratio_y : 1;
+                        ratio_x = ratio_x > 0 ? ratio_x : 1;
+
+                        int new_rows = roi_rows / ratio_y;
+                        int new_cols = roi_cols / ratio_x;
+
+                        size_t old_grid_size = static_cast<size_t>(roi_rows * roi_cols);
+                        size_t new_grid_size = static_cast<size_t>(new_rows * new_cols);
+                        x_gpu_buf = SafeCudaMalloc(old_grid_size * sizeof(float));
+                        y_gpu_buf = SafeCudaMalloc(old_grid_size * sizeof(float));
+                        z_gpu_buf = SafeCudaMalloc(old_grid_size * sizeof(float));
+
+                        new_x_gpu_buf = SafeCudaMalloc(new_grid_size * sizeof(float));
+                        new_y_gpu_buf = SafeCudaMalloc(new_grid_size * sizeof(float));
+                        new_z_gpu_buf = SafeCudaMalloc(new_grid_size * sizeof(float));
+
+                        new_x_cpu_buf = SafeHostMalloc(new_grid_size * sizeof(float));
+                        new_y_cpu_buf = SafeHostMalloc(new_grid_size * sizeof(float));
+                        new_z_cpu_buf = SafeHostMalloc(new_grid_size * sizeof(float));
+
+                        CUDA_CHECK(cudaMemcpyAsync(x_gpu_buf, roi_x.data, old_grid_size * sizeof(float),cudaMemcpyHostToDevice,x_stream));
+                        CUDA_CHECK(cudaMemcpyAsync(y_gpu_buf, roi_y.data, old_grid_size * sizeof(float),cudaMemcpyHostToDevice,y_stream));
+                        CUDA_CHECK(cudaMemcpyAsync(z_gpu_buf, roi_z.data, old_grid_size * sizeof(float),cudaMemcpyHostToDevice,z_stream));
+                        // CUDA_CHECK(cudaStreamSynchronize(x_stream));
+                        CUDA_CHECK(cudaStreamSynchronize(y_stream));
+                        CUDA_CHECK(cudaStreamSynchronize(z_stream));
+
+                        adc::common::SampleDownGPU((float*)x_gpu_buf, (float*)y_gpu_buf, (float*)z_gpu_buf, ratio_x, ratio_y, 
+                                                    roi_cols, new_rows, new_cols, (float*)new_x_gpu_buf, (float*)new_y_gpu_buf, 
+                                                    (float*)new_z_gpu_buf,x_stream);
+                        CUDA_CHECK(cudaStreamSynchronize(x_stream));
+
+                        CUDA_CHECK(cudaMemcpyAsync(new_x_cpu_buf, new_x_gpu_buf, new_grid_size * sizeof(float),cudaMemcpyDeviceToHost,x_stream));
+                        CUDA_CHECK(cudaMemcpyAsync(new_y_cpu_buf ,new_y_gpu_buf, new_grid_size * sizeof(float),cudaMemcpyDeviceToHost,y_stream));
+                        CUDA_CHECK(cudaMemcpyAsync(new_z_cpu_buf ,new_z_gpu_buf, new_grid_size * sizeof(float),cudaMemcpyDeviceToHost,z_stream));
+
+                        CUDA_CHECK(cudaStreamSynchronize(x_stream));
+                        CUDA_CHECK(cudaStreamSynchronize(y_stream));
+                        CUDA_CHECK(cudaStreamSynchronize(z_stream));
+
+                        cv::Mat new_roi_x(new_rows, new_cols, CV_32FC1, new_x_cpu_buf);
+                        cv::Mat new_roi_y(new_rows, new_cols, CV_32FC1, new_y_cpu_buf);
+                        cv::Mat new_roi_z(new_rows, new_cols, CV_32FC1, new_z_cpu_buf);
+
+                        double start_time = (double)cv::getTickCount();
+                        adc::common::Clustering<pcl::PointXYZRGB>().SpatialClustering(new_roi_x, new_roi_y, new_roi_z, 9, 35, 0.01, min_depth_, max_depth_,  clusters);
+                        double end_time = (double)cv::getTickCount() - start_time;
+                        std::cout<< "Total Cost of cluster: "  << end_time*1000.0/cv::getTickFrequency()<<" ms"<<endl;
+
+                        SafeCudaFree(x_gpu_buf);
+                        SafeCudaFree(y_gpu_buf);
+                        SafeCudaFree(z_gpu_buf);
+                        SafeCudaFree(new_x_gpu_buf);
+                        SafeCudaFree(new_y_gpu_buf);
+                        SafeCudaFree(new_z_gpu_buf);
+
+                        CUDA_CHECK(cudaFreeHost(new_x_cpu_buf));
+                        CUDA_CHECK(cudaFreeHost(new_y_cpu_buf));
+                        CUDA_CHECK(cudaFreeHost(new_z_cpu_buf));
+
+                        CUDA_CHECK(cudaStreamDestroy(x_stream));
+                        CUDA_CHECK(cudaStreamDestroy(y_stream));
+                        CUDA_CHECK(cudaStreamDestroy(z_stream));
+
+                    }
+                    else
+                    {
+                       double start_time = (double)cv::getTickCount();
+                       adc::common::Clustering<pcl::PointXYZRGB>().SpatialClustering(roi_x, roi_y, roi_z, 9, 45, 0.01, min_depth_, max_depth_,  clusters);
+                       double end_time = (double)cv::getTickCount() - start_time;
+                       std::cout<< "Total Cost of cluster: "  << end_time*1000.0/cv::getTickFrequency()<<" ms"<<endl;
+                    }
+                    if(clusters.size() == 0) continue;
+                    categorys.emplace_back(box.category);
+                    size_t max_index = 0;
+                    size_t max_size = clusters[0].size();
+                    for(size_t i = 1; i < clusters.size(); i++)
+                    {
+                        if(max_size < clusters[i].size())
+                        {
+                            max_index = i;
+                            max_size = clusters[i].size();
+                        }
+                    }
+                    (void) max_size;
+                
+                    for(size_t i = 0; i < clusters[max_index].size(); i++)
+                    {
+                        clusters[max_index].points[i].r = r[count%color_num];
+                        clusters[max_index].points[i].g = g[count%color_num];
+                        clusters[max_index].points[i].b = b[count%color_num];                        
+                    }                    
+                    (*point_cloud_ptr) += clusters[max_index];
+                    obstacles_points.push_back(clusters[max_index]);
+                    obstacles_bbox2d.push_back(cv::Rect(x1,y1,width,height));
+
+                    //////////
+                    #ifdef DEBUG
+                    Obstacle obs;
+                    GenrateObstacleBox(clusters[max_index].makeShared(), obs);
+                    cv::rectangle(color, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(255,0,0),2,8,0);
+                    std::sort(clusters[max_index].begin(), clusters[max_index].end(),
+                    [](pcl::PointXYZRGB pt1, pcl::PointXYZRGB pt2) {return pt1.z > pt2.z; });
+                    //求Z的中值,并用box的中心(u,v)求对应的X,Y,结果作为box的center(X,Y,Z)
+                    int center_u = x1 + 0.5*width;
+                    int center_v = y1 + 0.5*height;
+                    int middle_idx = floor(clusters[max_index].width*0.5);
+                    float mid_z = clusters[max_index].points[middle_idx].z;
+                    cv::Point3f box_center_mid;
+                    box_center_mid.x = inv_fu * (center_u - u0) * mid_z;
+                    box_center_mid.y = inv_fv * (center_v - v0) * mid_z;
+                    box_center_mid.z = mid_z;
+                    std::string str1 = cv::format("%.2f", obs.center.x); 
+                    std::string str2 = cv::format("%.2f", box_center_mid.x);
+                    std::string str3 = cv::format("%.2f", obs.center.z);
+                    std::string str4 = cv::format("%.2f", box_center_mid.z);
+                    std::string str5 = str1 +"," + str2 + "," + str3 + "," + str4;
+                    cv::putText(color,str5, cv::Point(x1, y1-15), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,0,0));
+                    std::cout<<str5<<std::endl;
+                    // GenerateBoxMarker(clusters[max_index].makeShared(), current_depth_bbox.stamp);
+                    count++;
+                    #endif // DEBUG
+                }
+
+                //send message by share memory
+                //PushsObstaclesBySharedMem(obstacles_points,current_depth_bbox.stamp,categorys,obstacles_bbox2d);
+                
+                #ifdef DEBUG
+                //3D包围框
+                GenerateBoxMarkersAndPublish(obstacles_points, current_depth_bbox.stamp);
+
+                if( point_cloud_ptr->size() > 0)
+                {
+                    pcl::toROSMsg(*point_cloud_ptr, point_cloud_msg);
+                    point_cloud_msg.header.frame_id = "fusion";
+                    point_cloud_msg.header.stamp = current_depth_bbox.stamp;
+                    // ROS_INFO("publish fusion %d", clusters[max_index].size());
+                    point_publisher_.publish(point_cloud_msg);
+                    ros::spinOnce();
+                } 
+                //////////imwrite
+                char image_path[100];
+                sprintf(image_path, "/media/nvidia/Elements/depth_img/old_%04d.jpg",frame_count);
+                cv::imwrite(image_path,color);
+                frame_count ++;                              
+                #endif // DEBUG                               
+            }
+            else
+            { 
+                usleep(2000);
+            }
+        }
+    }
+    void CallbackImageBoox(const sensor_msgs::ImageConstPtr& image_msg, const adc_msg::bboxes2dConstPtr& detection_bbox2d)
+    {
+        std::vector<adc_msg::bbox2d> bboxes = detection_bbox2d->bboxes;
+        cv::Mat img = cv_bridge::toCvCopy(image_msg, image_msg->encoding)->image;
+        
+        for(adc_msg::bbox2d bbox : bboxes)
+        {
+            int x1 = bbox.x1;
+            int y1 = bbox.y1;
+            int x2 = bbox.x2;
+            int y2 = bbox.y2;
+            cv::rectangle(img, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(0,150,200),2,8,0);
+            std::string str1=bbox.category;
+            cv::putText(img,str1, cv::Point(x1, y1-15), cv::FONT_HERSHEY_PLAIN, 2, cv::Scalar(0,150,200));
+        }
+
+        cv::imshow("show", img);
+        cv::waitKey(10);
+
+    }
+    void CallbackBboxDepth(const adc_msg::adc_depthConstPtr& depth_msg, const adc_msg::bboxes2dConstPtr& detection_bbox2d)
+    {
+        ros::Time depth_time = depth_msg->header.stamp;
+        depth_bbox merge_msg;
+        merge_msg.depth = *depth_msg;
+        merge_msg.boxes = *detection_bbox2d;
+        merge_msg.stamp = depth_time;
+        {
+            std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_bbox_);
+            cache_queue_.push(merge_msg);
+            if(cache_queue_.size() > KCacheNum) cache_queue_.pop();
+        }
+
+    }
    // Read the node parameters, register the shared-memory channel, create
    // the message_filters subscribers/synchronizers and CUDA streams, start
    // the worker thread, then block in ros::spin() until shutdown (so the
    // constructor that calls this never returns before the node stops).
    void InitConfig()
    {
        std::string detection_image_topic;
        std::string obstacles_shared_mem_topic;
        bool is_vis_detection;
        nh_ptr_->param("detection_bbox2d_topic", detection_bbox2d_topic_, std::string("/detection/bbox2d"));
        nh_ptr_->param("stereo_depth_topic", stereo_depth_topic_, std::string("/stereo/depth"));
        nh_ptr_->param("detection_image_topic", detection_image_topic, std::string("/detection/image"));
        nh_ptr_->param("obstacles_shared_mem_topic", obstacles_shared_mem_topic, std::string("/detection/vision/obstacles"));
        nh_ptr_->param("vis_detection", is_vis_detection, true);
        // NOTE(review): dbscan_min_pts_/dbscan_radius_ are read here but the
        // clustering calls use hard-coded literals — confirm which is intended.
        nh_ptr_->param("dbscan_min_pts", dbscan_min_pts_, 50);
        nh_ptr_->param("dbscan_radius", dbscan_radius_, 0.0001);
        // cv::namedWindow("show",cv::WINDOW_NORMAL);

        // Shared-memory channel used to publish serialized obstacle results.
        obstacles_shared_mem_handle_ = iv::modulecomm::RegisterSend(obstacles_shared_mem_topic.c_str(), 1000000, 1);
        depth_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::adc_depth>>(*nh_ptr_, stereo_depth_topic_, 1);
        detection_bbox2s_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::bboxes2d>>(*nh_ptr_, detection_bbox2d_topic_, 1);
        detection_image_sub_ptr_ = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*nh_ptr_, detection_image_topic, 1);
        // One stream per transfer so the device-to-host copies can overlap.
        CUDA_CHECK(cudaStreamCreate(&stream_depth_));
        CUDA_CHECK(cudaStreamCreate(&stream_x_));
        CUDA_CHECK(cudaStreamCreate(&stream_y_));
        CUDA_CHECK(cudaStreamCreate(&stream_z_));
        #ifdef DEBUG
        point_publisher_ = nh_ptr_->advertise<sensor_msgs::PointCloud2>("/fusion/points", 1);
        marker_publisher_ = nh_ptr_->advertise<visualization_msgs::Marker>("bbox_marker", 10);
        #endif // DEBUG
        // Worker thread consuming the depth+bbox queue; detached, so it lives
        // for the remainder of the process.
        thread_depth_bbox_ = std::thread(std::bind(&DetectionFusionCMW::DepthBboxesHandle, this));
        thread_depth_bbox_.detach();
        if(is_vis_detection)
        {
            sync_detection_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicyDetection>>(SyncPolicyDetection(100), *detection_image_sub_ptr_, *detection_bbox2s_sub_ptr_);
            sync_detection_ptr_->registerCallback(std::bind(&DetectionFusionCMW::CallbackImageBoox,this,std::placeholders::_1, std::placeholders::_2));
        }
        sync_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicy>>(SyncPolicy(100), *depth_sub_ptr_, *detection_bbox2s_sub_ptr_);
        sync_ptr_->registerCallback(std::bind(&DetectionFusionCMW::CallbackBboxDepth,this,std::placeholders::_1, std::placeholders::_2));
        // Blocks until shutdown; must stay the last statement of InitConfig().
        ros::spin();
    }
+
private:
    std::shared_ptr<ros::NodeHandle> nh_ptr_;   // private ("~") node handle
    std::string stereo_depth_topic_;            // depth input topic name
    std::string detection_bbox2d_topic_;        // 2D detection input topic name
    // message_filters subscribers feeding the approximate-time synchronizers.
    std::shared_ptr<message_filters::Subscriber<adc_msg::adc_depth>> depth_sub_ptr_;
    std::shared_ptr<message_filters::Subscriber<adc_msg::bboxes2d>> detection_bbox2s_sub_ptr_;
    std::shared_ptr<message_filters::Subscriber<sensor_msgs::Image>> detection_image_sub_ptr_;
    std::shared_ptr<message_filters::Synchronizer<SyncPolicy>> sync_ptr_;                     // depth + bbox sync
    std::shared_ptr<message_filters::Synchronizer<SyncPolicyDetection>> sync_detection_ptr_;  // image + bbox sync (visualization)
    std::queue<depth_bbox>  cache_queue_;  // bounded frame queue (see KCacheNum)
    std::mutex mutex_sync_depth_bbox_;     // protects cache_queue_
    std::thread thread_depth_bbox_;        // detached worker running DepthBboxesHandle()
    float32_t max_depth_ = 20.0;           // clustering depth range upper bound
    float32_t min_depth_ = 0;              // clustering depth range lower bound


    // Full-frame device buffers for depth back-projection (lazily allocated
    // in DepthBboxesHandle, released in the destructor).
    float* x_gpu_buf_ = nullptr;
    float* y_gpu_buf_ = nullptr;
    float* z_gpu_buf_ = nullptr;
    short* depth_gpu_buf_ = nullptr;
    
    // Matching pinned host buffers (SafeHostMalloc / cudaFreeHost).
    float* x_cpu_buf_ = nullptr;
    float* y_cpu_buf_ = nullptr;
    float* z_cpu_buf_ = nullptr;
    short* depth_cpu_buf_ = nullptr;

    // One CUDA stream per transfer so the copies can overlap.
    cudaStream_t stream_depth_;
    cudaStream_t stream_x_;
    cudaStream_t stream_y_;
    cudaStream_t stream_z_;
    

    // NOTE(review): read from the parameter server but never used here — confirm.
    int dbscan_min_pts_;
    double dbscan_radius_;
    adc::common::OBBExtrator obb_extrator_;  // oriented-bounding-box extractor

    #ifdef DEBUG
    ros::Publisher point_publisher_;   // colored obstacle point cloud (rviz)
    ros::Publisher marker_publisher_;  // OBB wireframe markers (rviz)
    #endif // DEBUG

    const unsigned int  KCacheNum = 2;       // max queued frames before dropping the oldest
    void* obstacles_shared_mem_handle_;      // modulecomm shared-memory send handle
+};
+}
+}
+
+int main(int argc,  char** argv)
+{   
+    adc::ros_cmw::DetectionFusionCMW app(argc, argv);
+    return 0;
+}

+ 325 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_lane_node.cpp

@@ -0,0 +1,325 @@
+#include <ros/ros.h>
+#include <sensor_msgs/PointCloud2.h> 
+#include <pcl_conversions/pcl_conversions.h> 
+#include <pcl/point_cloud.h>
+#include <chrono>
+#include <stdio.h>  
+#include <iostream>  
+#include <fstream>
+#include "opencv2/opencv.hpp"
+#include <jsoncpp/json/json.h>
+#define IVAIL_VALUE  -10 
+
+const uchar COLOR_MAP[4][3] = {{255,0,0},{0,255,0},{0,0,255},{125,125,0}};
+using namespace std;
+using namespace cv;
+
+namespace adc
+{
+
+namespace ros_cmw
+{
+
+class TestStereoLane
+{
+public:
+    typedef std::chrono::system_clock::time_point  TimeType;
+public:
+    TestStereoLane(int argc, char** argv, std::string node_name = "stereo_lane")
+    {
+        ros::init(argc, argv, node_name);
+        nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
+        InitConfig();
+    }
+private:
+    void readInToMatrix(fstream &in, string FilePath, float data[480][640]) 
+    {
+	    in.open(FilePath, ios::in);//打开一个file
+	    if (!in.is_open()) 
+        {
+		    cout << "Can not find " << FilePath << endl;
+		    // system("pause");
+	    }
+        string buff;
+        int i = 0;//行数i
+        while (getline(in, buff)) 
+        {
+            vector<float> nums;
+            // string->char *
+            char *s_input = (char *)buff.c_str();
+            const char * split = ",";
+            // 以‘,’为分隔符拆分字符串
+            char *p = strtok(s_input, split);
+            float a;
+            while (p != NULL) {
+                // char * -> int
+                a = atof(p);
+                //cout << a << endl;
+                nums.push_back(a);
+                p = strtok(NULL, split);
+            }//end while
+            for (size_t b = 0; b < nums.size(); b++) {
+                data[i][b] = nums[b]/1000.0;  //单位是米
+                //cout<<data[i][b]<<endl;
+            }//end for
+            i++;
+        }//end while
+        in.close();
+        cout << "get  data" << endl;
+    }
+
+    void ComputeXYZ(const float depth[480][640],  const int left_x, const int left_y, 
+                const float inv_fu, const float inv_fv, const float u0, const float v0,
+                const float min_depth, const float max_depth, const int rows, const int cols,
+                float* z, float* x, float* y)
+    {
+
+        for(int i=0;i<rows;i++)
+        {
+            for(int j=0;j<cols;j++)
+            {
+                int index_id = i * cols + j;
+                
+                //float z_f = static_cast<float>(depth[i][j]) / 1000.0;
+                float z_f = depth[i][j];
+                if(z_f > max_depth || z_f < min_depth)
+                {
+                    z[index_id] = IVAIL_VALUE;
+                    x[index_id] = IVAIL_VALUE;
+                    y[index_id] = IVAIL_VALUE;
+                }
+                else
+                {
+                    z[index_id] = z_f;
+                    float x_f = inv_fu * (j + left_x - u0) * z_f;
+                    float y_f = inv_fv * (i + left_y - v0) * z_f;
+                    x[index_id] = x_f;
+                    y[index_id] = y_f;
+                }
+
+            }
+        }
+
+    }
+    //最小二乘法拟合曲线,用y拟合x,拟合成功返回true,失败返回false
+    bool polyfit(vector<Point> &in_point,int n, Mat &fit)
+    {
+        //double Time = (double)cvGetTickCount();
+        int m = in_point.size();
+        //构造矩阵X
+        Mat X = Mat::zeros(n+1,n+1,CV_64FC1);
+        for(int i=0;i<n+1;i++)
+        {
+            for(int j=0;j<n+1;j++)
+            {
+                for(int k=0;k<m;k++)
+                {
+                    X.at<double>(i,j)=X.at<double>(i,j)+pow(in_point[k].y,i+j);
+                }
+            }
+        }
+        //构造Y矩阵
+        Mat Y = Mat::zeros(n+1,1,CV_64FC1);
+        for(int i=0;i<n+1;i++)
+        {
+            for(int k=0;k<m;k++)
+            {
+                Y.at<double>(i, 0) = Y.at<double>(i, 0) +
+                        std::pow(in_point[k].y, i) * in_point[k].x;
+            }
+        }
+
+        fit = cv::Mat::zeros(n + 1, 1, CV_64FC1);
+        //求解矩阵A
+        cv::solve(X, Y, fit, cv::DECOMP_LU);
+        //Time = (double)cvGetTickCount() - Time ;
+        //cout<<"polyfit = "<<Time/cvGetTickFrequency()/1000<<endl;
+
+        if(countNonZero(fit))
+            return true;
+        else 
+            return false;
+    }
+
+    static bool SortPointByY(const cv::Point &point1, const cv::Point &point2)
+    {
+        return point1.y < point2.y;
+    }
+
+    bool LaneFit(std::vector<std::vector<Point>> &lane_coords, int n,std::vector<std::vector<Point>> &fit_coords)
+    {
+        std::vector<Point> single_lane_coords;
+        std::vector<Point> out_single_lane_coords;
+        for(size_t lane_index=0;lane_index<lane_coords.size();lane_index++)
+        {
+            single_lane_coords = lane_coords[lane_index];
+            //remove too short lane
+            sort(single_lane_coords.begin(),single_lane_coords.end(),SortPointByY);
+            float min_y=single_lane_coords.front().y,max_y=single_lane_coords.back().y;
+            Mat mat_k;
+            bool fitflag = polyfit(single_lane_coords, n, mat_k);
+            if(fitflag)
+            {
+                //将拟合出来的点集存储
+                for (int i = min_y; i < max_y; i++)
+                {
+                    Point2d ipt;
+                    ipt.x = 0;
+                    ipt.y = i;
+                    for (int j = 0; j < n + 1; ++j)
+                    {
+                        ipt.x += mat_k.at<double>(j, 0)*pow(i,j);
+                    }
+                    out_single_lane_coords.push_back(ipt);
+                }
+                fit_coords.push_back(out_single_lane_coords);
+                out_single_lane_coords.clear();
+            }
+
+        }
+        return true;
+    }
+
+    void PublishPoints(Mat& points, cv::Mat& color_image, 
+                    vector<vector<Point>>& lane_point,ros::Time& stamp)
+    {
+        (void) color_image;
+        pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+        sensor_msgs::PointCloud2 point_cloud_msg;
+        for(size_t i=0;i<lane_point.size();i++)
+        {
+            for(size_t j=0;j<lane_point[i].size();j++)
+            {
+                Point pixel = lane_point[i][j];
+		Vec3f xyz = points.at<Vec3f>(pixel.y,pixel.x);
+                float x = xyz[0];
+                float y = xyz[1];
+                float z = xyz[2];
+
+                /*
+                Vec3b color = color_image.at<Vec3b>(pixel.y,pixel.x);
+                uchar b = color[0];
+                uchar g = color[1];
+                uchar r = color[2];
+                */
+
+                uchar b = COLOR_MAP[i][0];
+                uchar g = COLOR_MAP[i][1];
+                uchar r = COLOR_MAP[i][2];
+
+                if(z>0)
+                {
+                    pcl::PointXYZRGB point;
+                    point.x = x;
+                    point.y = y;
+                    point.z = z;
+                    point.r = r;
+                    point.g = g;
+                    point.b = b;
+                    point_cloud_ptr->push_back(point);
+                }
+
+            }
+        }
+  
+        //pcl::PointCloud<pcl::PointXYZRGB>::Ptr filters_point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+        // adc::adc_3d::PointUtils::FilterRemoveOutlierPointsOnStatistical(point_cloud_ptr , filters_point_cloud_ptr);
+        // ROS_INFO("Send point cloud, size: %d", point_cloud_ptr->size());
+        pcl::toROSMsg(*point_cloud_ptr, point_cloud_msg);
+        point_cloud_msg.header.frame_id = "stereo_lanepoint"; 
+        point_cloud_msg.header.stamp = stamp;
+        point_vis_publisher_.publish(point_cloud_msg);
+        ros::spinOnce();
+    }
+
+    void GetWorldMatAndLanePoint(Mat& world_mat, Mat& img, vector<vector<Point>>& lane_point)
+    {
+        string json_path = "/media/nvidia/LIQINGXIA/6/1.json";
+        string txt_path = "/media/nvidia/LIQINGXIA/6/1.txt";
+        string img_path = "/media/nvidia/LIQINGXIA/6/1.png";
+        img = imread(img_path);
+        vector<vector<Point>>lanes;
+        vector<Point>line;
+        Json::Reader reader;
+        Json::Value root;
+        std::ifstream ifs;
+        string img_name;
+        int rows;
+        int cols;
+        ifs.open(json_path, std::ios::binary);
+        if (!ifs.is_open())
+        {
+            cout << "Error opening file\n";
+        }
+        if(reader.parse(ifs, root))
+        {
+            img_name = root["imagePath"].asString();
+            rows = root["imageHeight"].asInt();
+            cols = root["imageWidth"].asInt();
+            for (unsigned int i = 0; i < root["shapes"].size(); i++)
+            {
+                Json::Value Array = root["shapes"][i]["points"];
+                int rows = Array.size();
+                // int cols = Array[0].size();
+                for(int m=0;m<rows;m++)
+                {
+                    line.push_back(Point(Array[m][0].asInt(),Array[m][1].asInt()));
+                }
+                lanes.push_back(line);
+                line.clear();
+            }
+        
+            fstream file1; //创建文件流对象
+            float data[480][640];
+            readInToMatrix(file1,txt_path,data);
+            double inv_fu = 1.0/609.368164062;
+            double inv_fv = 1.0/609.368164062;
+            double u0 = 323.633148193;
+            double v0 = 244.768936157;
+            float x[rows*cols];
+            float y[rows*cols];
+            float z[rows*cols];
+            ComputeXYZ(data,0,0,inv_fu,inv_fu,u0,v0,1.0,8.0,480,640,z,x,y);
+            (void) inv_fu;
+            (void) inv_fv;
+            cv::Mat x_mat(rows, cols, CV_32FC1, x);
+            cv::Mat y_mat(rows, cols, CV_32FC1, y);
+            cv::Mat z_mat(rows, cols, CV_32FC1, z);
+            vector<Mat>channels_all;
+            channels_all.push_back(x_mat);
+            channels_all.push_back(y_mat);
+            channels_all.push_back(z_mat);
+            cv::merge(channels_all, world_mat);
+            LaneFit(lanes,2,lane_point);
+        }
+    }
+    void InitConfig()
+    {
+
+
+        nh_ptr_->param("points_vis_topic", points_vis_topic_, std::string("/stereo/lane_point_cloud"));
+
+        Mat world_mat,img;
+        vector<vector<Point>>lane_point;
+        GetWorldMatAndLanePoint(world_mat,img,lane_point);
+        point_vis_publisher_ = nh_ptr_->advertise<sensor_msgs::PointCloud2>(points_vis_topic_, 1);
+        TimeType tm;
+        int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tm.time_since_epoch()).count();
+        int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tm.time_since_epoch()).count() % 1000000000UL;
+        ros::Time stamp(sec, nsec);
+        while(true)
+            PublishPoints(world_mat,img,lane_point,stamp);
+    }
+private:
+    std::shared_ptr<ros::NodeHandle> nh_ptr_;
+    std::string points_vis_topic_;
+    ros::Publisher point_vis_publisher_;
+};
+}
+}
+
+int main(int argc, char** argv)
+{
+    adc::ros_cmw::TestStereoLane stereo_lane(argc, argv);
+    return 0;
+}

+ 205 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/detection_trt7_app_node.cpp

@@ -0,0 +1,205 @@
+/**
+* \brief 
+* \author liqingxia (pengcheng@yslrpch@126.com)
+* \date 2021-06-21
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include <ros/ros.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/time_synchronizer.h>
+#include <message_filters/sync_policies/approximate_time.h> 
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+#include <memory>
+#include <stdlib.h>
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/videoio.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include "cmw_app/detection_trt7_app.h"
+#include <yaml-cpp/yaml.h>
+#include <vector>
+
+#include "common/project_root.h"
+#include "adc_msg/bboxes2d.h"
+#include "adc_msg/bbox2d.h"
+
+
+#define IMAGE_WIDTH 640
+#define IMAGE_HEIGHT 480
+
+namespace adc
+{
+namespace ros_cmw
+{
+
+class DetectionTrt7CMW
+{
+public:
+    // typedef message_filters::sync_policies::ApproximateTime<adc_msg::adc_depth, sensor_msgs::Image> SyncPolicy;
+    typedef std::chrono::system_clock::time_point  TimeType;
+    typedef  std::shared_ptr<std::list<vector<Tn::Bbox>>> Bboxes2DType;
+    typedef  cv::Mat ImageType;
+    typedef std::vector<std::string> ClassNameType;
+public:
+    DetectionTrt7CMW(int argc, char** argv)
+    {
+        ros::init(argc, argv, "detection");
+        nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
+        InitConfig();
+    }
+private:
+    void CallbackDetection(Bboxes2DType& bboxes, TimeType& tm, ImageType& img, ClassNameType& class_name)
+    {
+        std::vector<adc_msg::bbox2d> detections;
+        auto detection_res_ptr = (*bboxes).begin();
+        for(std::vector<Tn::Bbox>::iterator bbox = detection_res_ptr->begin(); bbox != detection_res_ptr->end(); ++bbox)
+        {
+            adc_msg::bbox2d bbox2d_msg;
+            bbox2d_msg.x1 = bbox->left;
+            bbox2d_msg.y1 = bbox->top;
+            bbox2d_msg.x2 = bbox->right;
+            bbox2d_msg.y2 = bbox->bot;
+            bbox2d_msg.score = bbox->score;
+            bbox2d_msg.category = class_name[bbox->classId];
+            detections.push_back(bbox2d_msg);
+        }
+        adc_msg::bboxes2d bboxes2s_msg;
+        int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tm.time_since_epoch()).count();
+        int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tm.time_since_epoch()).count() % 1000000000UL;
+        ros::Time stamp(sec, nsec);
+        std_msgs::Header header;
+        header.stamp = stamp;
+        bboxes2s_msg.header = header;
+        bboxes2s_msg.bboxes = detections;
+        bboxes_pushisher_.publish(bboxes2s_msg);
+        if(publis_origin_img_)
+        {
+            cv_bridge::CvImage cv_img_l;
+            sensor_msgs::Image img_msg_l;
+            cv_img_l.header.stamp = stamp;
+            cv_img_l.encoding = "bgr8";
+            cv_img_l.image = img;
+            cv_img_l.toImageMsg(img_msg_l);
+            left_image_publisher_.publish(img_msg_l);
+        }
+    }
+
+    void CallbackBinocular(const sensor_msgs::ImageConstPtr& binocular_image_msg)
+    {
+
+        ros::Time image_time = binocular_image_msg->header.stamp;
+        // ros::Time image_time = ros::Time::now();
+        cv::Mat binocular = cv_bridge::toCvCopy(binocular_image_msg, binocular_image_msg->encoding)->image;
+        cv::Mat left_image = binocular(left_rect_).clone();
+        uint64_t nsecs = image_time.toNSec();
+        std::chrono::nanoseconds ns(nsecs);
+        TimeType time_stamp(ns);
+        cv::remap(left_image, left_image, map_x_, map_y_, cv::INTER_LINEAR);
+        detection_ptr_->CacheImages(left_image, time_stamp);
+    }
+    void ReadCameraParams(const std::string camera_yaml)
+    {
+        try
+        {
+            YAML::Node node = YAML::LoadFile(camera_yaml);
+            int height = node["image_height"].as<int>();
+            int width = node["image_width"].as<int>();
+            cv::Size size(width, height);
+            std::vector<double> K_v= node["camera_matrix"]["data"].as<std::vector<double>>();
+            std::vector<double> P_v = node["projection_matrix"]["data"].as<std::vector<double>>();
+            std::vector<double> D_v = node["distortion_coefficients"]["data"].as<std::vector<double>>();
+            std::vector<double> R_v = node["rectification_matrix"]["data"].as<std::vector<double>>();
+
+            cv::Mat _K(K_v);
+            cv::Mat _P(P_v);
+            cv::Mat _D(D_v);
+            cv::Mat _R(R_v);
+
+            _K = _K.reshape(3,3);
+            _P = _P.reshape(4,3);
+            _D = _D.reshape(5,1);
+            _R = _R.reshape(3,3);
+            cv::initUndistortRectifyMap(_K, _D, _R, _P, size, CV_32FC1, map_x_, map_y_);
+            
+        }
+        catch (const std::exception& e)
+        {
+            std::cerr << e.what() << '\n'<<std::endl;;
+            std::cerr<<" detection Ros app read camera params file faile!"<<std::endl;
+            exit(1);
+
+        }
+    }
+    void InitConfig()
+    {
+
+        std::string detection_config;
+        std::string root_path;
+        std::string left_camera_params_filename;
+        std::string detection_bbox2d_topic;
+        std::string origin_imgage_topic;
+        float hz;
+        nh_ptr_->param("binocular_topic", binocular_topic_, std::string("/camera/binocular"));
+        nh_ptr_->param("hz", hz, 30.0f);
+        nh_ptr_->param("detection_config", detection_config, std::string("detection.yaml"));
+        nh_ptr_->param("root_path", root_path, std::string("/opt/adc"));
+        nh_ptr_->param("left_camera_params_filename", left_camera_params_filename, std::string("left.yaml"));
+        nh_ptr_->param("detection_bbox2d_topic", detection_bbox2d_topic, std::string("/detection/bbox2d"));
+        nh_ptr_->param("publis_origin_img", publis_origin_img_, true);
+        nh_ptr_->param("origin_imgage_topic", origin_imgage_topic, std::string("/detection/image"));
+        std::string param_root = "ADC_ROOT="+root_path;
+        char* path_c = const_cast<char*>(param_root.c_str());
+        putenv(path_c);
+        std::string left_camera_params_file_path = adc::RootPath::GetAbsolutePath(left_camera_params_filename);
+        ReadCameraParams(left_camera_params_file_path);
+        detection_ptr_ = std::make_shared<adc::DetectionTrt7App>(detection_config, hz);
+        detection_ptr_->SetDetectionCallback(
+            std::bind(
+                &DetectionTrt7CMW::CallbackDetection, this, 
+                std::placeholders::_1, std::placeholders::_2,
+                std::placeholders::_3, std::placeholders::_4)
+        );
+//std::bind(&StereoMatchCMW::CallbackDepthHandler, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
+
+        left_rect_ = cv::Rect(0,0,IMAGE_WIDTH, IMAGE_HEIGHT);
+        binocular_subscriber_ = nh_ptr_->subscribe<sensor_msgs::Image>(
+            binocular_topic_, 1,
+            std::bind(&DetectionTrt7CMW::CallbackBinocular,
+            this, 
+            std::placeholders::_1)
+            );
+        bboxes_pushisher_ = nh_ptr_->advertise<adc_msg::bboxes2d>(detection_bbox2d_topic,1);
+        transport_img_ptr_ = std::make_shared<image_transport::ImageTransport>(*nh_ptr_);
+        left_image_publisher_ = transport_img_ptr_->advertise(origin_imgage_topic, 1);
+        detection_ptr_->Run();
+        // depth_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::adc_depth>>(*nh_ptr_, stereo_depth_topic_, 1);
+        // image_sub_ptr_ = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*nh_ptr_, binocular_topic_, 1);
+        // sync_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicy>>(SyncPolicy(100), *depth_sub_ptr_, *image_sub_ptr_);
+        // sync_ptr_->registerCallback(std::bind(&DetectionCMW::CallbackImageDepth,this,std::placeholders::_1, std::placeholders::_2));
+        ros::spin();    
+
+    }
+private:
+    std::shared_ptr<ros::NodeHandle> nh_ptr_;
+    std::string binocular_topic_;
+    std::shared_ptr<adc::DetectionTrt7App> detection_ptr_;
+    cv::Rect left_rect_;
+    ros::Subscriber binocular_subscriber_;
+    ros::Publisher bboxes_pushisher_;
+    std::shared_ptr<image_transport::ImageTransport> transport_img_ptr_;
+    image_transport::Publisher left_image_publisher_;
+    cv::Mat map_x_;
+    cv::Mat map_y_;
+    bool publis_origin_img_;
+};
+}
+}
+int main(int argc,  char** argv)
+{   
+    adc::ros_cmw::DetectionTrt7CMW app(argc, argv);
+    return 0;
+}

+ 552 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/lane_depth_fusion_app_node.cpp

@@ -0,0 +1,552 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-08-25
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include <ros/ros.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/time_synchronizer.h>
+#include <message_filters/sync_policies/approximate_time.h> 
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+#include <memory>
+#include <queue>
+#include <mutex>
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/videoio.hpp>
+#include "adc_msg/adc_depth.h"
+#include "cmw_app/detection_app.h"
+#include <sensor_msgs/PointCloud2.h> 
+#include "adc_msg/lane_info.h"
+#include "adc_msg/lane_info_array.h"
+#include <pcl/point_cloud.h>
+#include <pcl_conversions/pcl_conversions.h>
+#include <pcl/sample_consensus/ransac.h>
+#include <pcl/sample_consensus/sac_model_plane.h>
+#include <pcl/sample_consensus/sac_model_sphere.h>
+#include "eigen3/Eigen/Core"
+#include "common/utils.h"
+#include <cuda_runtime_api.h>
+#include "cuda_runtime.h"
+#include <visualization_msgs/Marker.h>
+
+#define IMAGE_WIDTH 640
+#define IMAGE_HEIGHT 480
+
+#define DEBUG
+
+
+ namespace adc
+ {
+ namespace ros_cmw
+ {
+
+ const int COLOR_MAP[13][3]={{255, 0, 0},{0, 255, 0},{0, 0, 255},{125, 125, 0},{0,125,125},
+                            {125, 0, 125},{50, 100, 50},{100, 50, 100},{50, 50, 100},
+                            {255,255,0},{0,255,255},{255,0,255},{100,100,100}};
+
+ class LaneFusionCMW
+ {
+ public:
+     struct depth_lane
+     {
+         adc_msg::adc_depth depth;
+         adc_msg::lane_info_array lanes;
+         ros::Time stamp;
+     };
+
+     typedef struct lane_information
+     {
+         int id;
+         int type;
+         int color;
+         float a;
+         float b;
+         float c;
+         float min_y;
+         float max_y;
+         float curvature;
+     }lane_information;
+     typedef message_filters::sync_policies::ApproximateTime<adc_msg::adc_depth, adc_msg::lane_info_array> SyncPolicy;
+     typedef message_filters::sync_policies::ApproximateTime<sensor_msgs::Image, adc_msg::lane_info_array> SyncPolicyDetection;
+     typedef std::shared_ptr<std::list<vector<lane_information>>> LaneInfoArrayType;
+ public:
+     LaneFusionCMW(int argc, char** argv)
+     {
+         ros::init(argc, argv, "detection");
+         nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
+         InitConfig();
+     }
+ private:
+     //最小二乘法拟合曲线,用y拟合x,拟合成功返回true,失败返回false
+     bool polyfit(std::vector<cv::Point2f> &in_point, int n, cv::Mat &fit)
+     {
+             //double Time = (double)cvGetTickCount();
+             int m = in_point.size();
+             //构造矩阵X
+             cv::Mat X = cv::Mat::zeros(n + 1, n + 1, CV_64FC1);
+             for (int i = 0; i < n + 1; i++)
+             {
+                     for (int j = 0; j < n + 1; j++)
+                     {
+                             for (int k = 0; k < m; k++)
+                             {
+                                     X.at<double>(i, j) = X.at<double>(i, j) + pow(in_point[k].y, i + j);
+                             }
+                     }
+             }
+             //构造Y矩阵
+             cv::Mat Y = cv::Mat::zeros(n + 1, 1, CV_64FC1);
+             for (int i = 0; i < n + 1; i++)
+             {
+                     for (int k = 0; k < m; k++)
+                     {
+                             Y.at<double>(i, 0) = Y.at<double>(i, 0) +
+                                                                      std::pow(in_point[k].y, i) * in_point[k].x;
+                     }
+             }
+
+             fit = cv::Mat::zeros(n + 1, 1, CV_64FC1);
+             //求解矩阵A
+             cv::solve(X, Y, fit, cv::DECOMP_LU);
+             //Time = (double)cvGetTickCount() - Time ;
+             //cout<<"polyfit = "<<Time/cvGetTickFrequency()/1000<<endl;
+
+             if (countNonZero(fit))
+                     return true;
+             else
+                     return false;
+     }
+
+     pcl::PointCloud<pcl::PointXYZ>::Ptr ComputeXYZ(cv::Mat &depth, const int left_x, const int left_y,
+                                     const float inv_fu, const float inv_fv, const float u0, const float v0,
+                                     const float min_depth, const float max_depth,
+                                     const float corret_a=0, const float corret_b=0, const float corret_c=0)
+     {
+
+
+             pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_plane(new pcl::PointCloud<pcl::PointXYZ>);
+             //用图像下半部分拟合平面,或者取梯形区域
+             int rows = depth.rows;
+             int cols = depth.cols;
+             int half_rows = int(0.5*rows);
+
+             for (int i = half_rows; i < rows; i++)
+             {
+                 short* depth_p;
+                 depth_p = depth.ptr<short>(i);
+                 for (int j = 0; j < cols; j++)
+                     {
+                             //int index_id = i * cols + j;
+                             //float z_f = static_cast<float>(depth[i][j]) / 1000.0;
+                             float z_f = depth_p[j] / 1000.0;  //单位m
+                             //correct
+                             z_f = z_f + corret_a + corret_b*z_f + corret_c*z_f*z_f;
+
+                             pcl::PointXYZ point_plane;
+                             if (z_f < max_depth && z_f > min_depth)
+                             {
+                                     float x_f = inv_fu * (j + left_x - u0) * z_f;
+                                     float y_f = inv_fv * (i + left_y - v0) * z_f;
+                                     if(y_f>min_height_ && y_f<max_height_ && x_f>min_width_ && x_f<max_width_)
+                                     {
+                                             point_plane.x = x_f;
+                                             point_plane.y = y_f;
+                                             point_plane.z = z_f;
+                                             cloud_plane->push_back(point_plane);
+                                     }
+                             }
+                     }
+             }
+             return cloud_plane;
+     }
+
     // Projects each pixel-space lane polynomial into the camera frame: every
     // lane pixel is back-projected to z=1, a ray is cast from the camera
     // origin through it, and the ray is intersected with the fitted ground
     // plane. The resulting 3D points are then refitted with a quadratic
     // x = a + b*z + c*z^2 in the camera frame.
     //
     // inv_fu/inv_fv/u0/v0  pinhole intrinsics (inverse focal lengths, principal point)
     // min_depth/max_depth  accepted z range for intersection points
     // coeff                ground-plane coefficients [nx, ny, nz, d]
     // pixel_lane_result    input lanes as pixel-space quadratics x(y) = a + b*y + c*y^2
     // camera_lane_result   output camera-frame lane parameters; NOTE min_y/max_y
     //                      actually hold the z extent of the fitted points
     // coords_camera        output 3D points per successfully fitted lane
     void ComputeVirtualXYZ(const float inv_fu, const float inv_fv, const float u0, const float v0,
                            const float min_depth, const float max_depth,Eigen::VectorXf &coeff,
                            std::vector<lane_information> &pixel_lane_result,
                            std::vector<lane_information> &camera_lane_result,
                            std::vector<std::vector<cv::Point3f>> &coords_camera)
     {

         float z_f = 1.0;
         float p1[3] = {0};   // ray origin: the camera centre
         float p2[3];         // ray target: the back-projected pixel at z = 1
         float plane_normal[3] = {coeff[0],coeff[1],coeff[2]};
         cv::Point3f crosspoint;
         cv::Point2f fitpoint; //(x,z)
         std::vector<cv::Point3f> single_coords_camera;
         std::vector<cv::Point2f> single_fit_coords_camera;
         float z_curvature = 1.0;  // depth (m) at which the curvature is evaluated
         for(size_t i=0;i<pixel_lane_result.size();i++)
         {

             int min_y = int(pixel_lane_result[i].min_y);
             int max_y = int(pixel_lane_result[i].max_y);
             for(int y=min_y;y<max_y;y++)
             {
                 // Evaluate the pixel-space polynomial, then back-project to z=1.
                 float x = pixel_lane_result[i].a + pixel_lane_result[i].b*y + pixel_lane_result[i].c*y*y;
                 float x_f = inv_fu * (x - u0) * z_f;
                 float y_f = inv_fv * (y - v0) * z_f;
                 p2[0] = x_f;
                 p2[1] = y_f;
                 p2[2] = z_f;
                 float p1p2[3];  // ray direction p1 -> p2
                 p1p2[0] = p2[0]-p1[0];
                 p1p2[1] = p2[1]-p1[1];
                 p1p2[2] = p2[2]-p1[2];
                 // If the ray is parallel to the plane it is perpendicular to
                 // the plane normal, i.e. this dot product is zero.
                 float dot_p1p2_normal = p1p2[0]*plane_normal[0] + p1p2[1]*plane_normal[1]
                                         + p1p2[2]*plane_normal[2];
                  if(dot_p1p2_normal>0 || dot_p1p2_normal<0)  // i.e. != 0
                  {

                      // Ray/plane intersection parameter. NOTE(review): the
                      // abs() discards the sign, so an intersection behind the
                      // camera would be mirrored in front of it — the depth
                      // range check below is what filters obviously bad points;
                      // confirm this is the intended behaviour.
                      float n = abs(p1[0]*plane_normal[0] + p1[1]*plane_normal[1]
                                     + p1[2]*plane_normal[2]+coeff[3])/
                                     abs(dot_p1p2_normal);
                      crosspoint.x = p1[0] + n*p1p2[0];
                      crosspoint.y = p1[1] + n*p1p2[1];
                      crosspoint.z = p1[2] + n*p1p2[2];
                      if (crosspoint.z < max_depth && crosspoint.z > min_depth)
                      {
                         single_coords_camera.push_back(crosspoint);
                         fitpoint = cv::Point2f(crosspoint.x,crosspoint.z);
                         single_fit_coords_camera.push_back(fitpoint);
                      }
                    }
             }
             // Refit the lane in the camera frame (x as a quadratic in z).
             cv::Mat fit_mat_camera;
             if(single_fit_coords_camera.size()>3)
             {
                 bool fitflag = polyfit(single_fit_coords_camera,2,fit_mat_camera);
                 if(fitflag)
                 {
                     lane_information single_lane_camera;
                     // polyfit fitted x against Point2f.y, which holds z here,
                     // so min_y/max_y are really the lane's z extent.
                     single_lane_camera.min_y = single_fit_coords_camera.front().y;
                     single_lane_camera.max_y = single_fit_coords_camera.back().y;
                     single_lane_camera.a = fit_mat_camera.at<double>(0,0);
                     single_lane_camera.b = fit_mat_camera.at<double>(1,0);
                     single_lane_camera.c = fit_mat_camera.at<double>(2,0);
                     single_lane_camera.id = pixel_lane_result[i].id;
                     single_lane_camera.type = pixel_lane_result[i].type;
                     single_lane_camera.color = pixel_lane_result[i].color;
                     // Curvature of x(z) at z = z_curvature: |x''| / (1 + x'^2)^1.5
                     single_lane_camera.curvature = abs(2*fit_mat_camera.at<double>(2,0))/pow(1 + pow((2*fit_mat_camera.at<double>(2,0)
                                                                      *z_curvature + fit_mat_camera.at<double>(1,0)),2),1.5);
                     camera_lane_result.push_back(single_lane_camera);
                     coords_camera.push_back(single_coords_camera);
                 }
             }
             single_coords_camera.clear();
             single_fit_coords_camera.clear();
        }
     }
+
+     void PublishLanes3d(std::vector<lane_information> &camera_lane_result, ros::Time &time_stamp)
+     {
+         std::vector<adc_msg::lane_info> lanes;
+         for(std::vector<lane_information>::iterator lane = camera_lane_result.begin(); lane != camera_lane_result.end(); ++lane)
+         {
+             adc_msg::lane_info lane3d_msg;
+             lane3d_msg.index = lane->id;
+             lane3d_msg.type = lane->type;
+             lane3d_msg.color = lane->color;
+             lane3d_msg.a = lane->a;
+             lane3d_msg.b = lane->b;
+             lane3d_msg.c = lane->c;
+             lane3d_msg.min_y = lane->min_y;
+             lane3d_msg.max_y = lane->max_y;
+             lane3d_msg.curvature = lane->curvature;
+             lanes.push_back(lane3d_msg);
+         }
+         adc_msg::lane_info_array lane_array3d_msg;
+         std_msgs::Header header;
+         header.stamp = time_stamp;
+         lane_array3d_msg.header = header;
+         lane_array3d_msg.lane_infos = lanes;
+         lanes3d_publisher_.publish(lane_array3d_msg);
+     }
+
+     // Publish the per-lane 3-D sample points as a colored PointCloud2 in frame
+     // "lanes3d_point".  Points with non-positive depth are dropped; each lane is
+     // tinted with COLOR_MAP[lane index].
+     // NOTE(review): assumes coords_camera.size() never exceeds the COLOR_MAP
+     // table size — confirm against the detector's maximum lane count.
+     void PublishLanes3dPoint(std::vector<std::vector<cv::Point3f>> &coords_camera, ros::Time &time_stamp)
+     {
+         pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+         sensor_msgs::PointCloud2 point_cloud_msg;
+         for(size_t i=0;i<coords_camera.size();i++)
+         {
+             for(size_t j=0;j<coords_camera[i].size();j++)
+             {
+                 float x = coords_camera[i][j].x;
+                 float y = coords_camera[i][j].y;
+                 float z = coords_camera[i][j].z;
+                 // COLOR_MAP rows are stored B,G,R.
+                 uchar b = COLOR_MAP[i][0];
+                 uchar g = COLOR_MAP[i][1];
+                 uchar r = COLOR_MAP[i][2];
+                 if(z>0)
+                 {
+                     pcl::PointXYZRGB point;
+                     point.x = x;
+                     point.y = y;
+                     point.z = z;
+                     point.r = r;
+                     point.g = g;
+                     point.b = b;
+                     point_cloud_ptr->push_back(point);
+                 }
+
+             }
+         }
+         //pcl::PointCloud<pcl::PointXYZRGB>::Ptr filters_point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+         // adc::adc_3d::PointUtils::FilterRemoveOutlierPointsOnStatistical(point_cloud_ptr , filters_point_cloud_ptr);
+         // ROS_INFO("Send point cloud, size: %d", point_cloud_ptr->size());
+         pcl::toROSMsg(*point_cloud_ptr, point_cloud_msg);
+         point_cloud_msg.header.frame_id = "lanes3d_point";
+         point_cloud_msg.header.stamp = time_stamp;
+         lanes3d_point_publisher_.publish(point_cloud_msg);
+     }
+
+     // Worker-thread loop: pops synchronized (depth, 2-D lanes) pairs from
+     // cache_queue_, fits a ground plane to the depth cloud with RANSAC, lifts
+     // the 2-D lane polynomials onto that plane (ComputeVirtualXYZ), then
+     // publishes the 3-D lane models and (optionally) their point clouds.
+     // Runs forever; sleeps 2 ms when the queue is empty.
+     void DepthLanesHandle()
+     {
+         size_t cache_size = 0;
+         while(true)
+         {
+             depth_lane  current_depth_lane;
+             {
+                 std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_lane_);
+                 cache_size = cache_queue_.size();
+             }
+             if(cache_size > 0)
+             {
+
+                 {
+                     std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_lane_);
+                     current_depth_lane = cache_queue_.front();
+                     cache_queue_.pop();
+                 }
+
+                 float32_t fu = current_depth_lane.depth.fu;
+                 float32_t fv = current_depth_lane.depth.fv;
+                 float32_t u0 = current_depth_lane.depth.u0;
+                 float32_t v0 = current_depth_lane.depth.v0;
+                 float32_t inv_fu = 1 / fu;
+                 float32_t inv_fv = 1 / fv;
+
+                 // Decode the rectified depth image (mono16).
+                 cv::Mat depth_mono16;
+                 sensor_msgs::Image depth_mono16_msg = current_depth_lane.depth.depth;
+                 depth_mono16 = cv_bridge::toCvCopy(depth_mono16_msg, "mono16")->image;
+
+                 // int rows = depth_mono16.rows;
+                 // int cols = depth_mono16.cols;
+
+                 //double t0 = (double)cv::getTickCount();
+
+                 pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_plane(new pcl::PointCloud<pcl::PointXYZ>);
+                 // Bug fix: the vertical back-projection term must use inv_fv —
+                 // the old code passed inv_fu twice, skewing the y coordinates.
+                 cloud_plane = ComputeXYZ(depth_mono16, 0, 0, inv_fu, inv_fv, u0, v0, min_depth_, max_depth_);
+
+                 //t0 = (double)cv::getTickCount()- t0;
+
+                 //std::cout<< "ComputeXYZ: "  <<t0*1000.0/cv::getTickFrequency()<<" ms"<<std::endl;
+
+                 //std::cout<<"cloud_plane->width: "<<cloud_plane->width<<std::endl;
+
+                 if(cloud_plane->width > 10)
+                 {
+                     //double t1 = (double)cv::getTickCount();
+                     //vector<int> inliers; // storage for the consensus set
+                     pcl::SampleConsensusModelPlane<pcl::PointXYZ>::Ptr model(
+                     new pcl::SampleConsensusModelPlane<pcl::PointXYZ>(cloud_plane));// plane model initialised with the candidate ground cloud
+                     pcl::RandomSampleConsensus<pcl::PointXYZ> ransac(model);// RANSAC over the plane model
+                     ransac.setDistanceThreshold(0.05);// inlier distance threshold (m)
+                     ransac.computeModel();// fit
+                     // ransac.getInliers(inliers);// fetch the consensus set
+                     // vector<int> tmp;
+                     Eigen::VectorXf coeff;
+                     ransac.getModelCoefficients(coeff);// plane ax+by+cz+d=0; coeff stores a,b,c,d in order
+                     std::cout<<"coeff "<<coeff[0]<<" "<<coeff[1]<<" "<<coeff[2]<<" "<<coeff[3]<<std::endl;
+
+                     //double t2 = (double)cv::getTickCount()- t1;
+
+                     //std::cout<< "SampleConsensusModelPlane: "  <<t2*1000.0/cv::getTickFrequency()<<" ms"<<std::endl;
+
+                     // Unpack the 2-D lane detections from the synchronized message.
+                     std::vector<adc_msg::lane_info> lanes = current_depth_lane.lanes.lane_infos;
+                     std::vector<lane_information> pixel_lane_result;
+                     std::vector<lane_information> camera_lane_result;
+                     std::vector<std::vector<cv::Point3f>> coords_camera;
+                     for(adc_msg::lane_info lane : lanes)
+                     {
+                         lane_information single_lane;
+                         single_lane.min_y = lane.min_y;
+                         single_lane.max_y = lane.max_y;
+                         single_lane.a = lane.a;
+                         single_lane.b = lane.b;
+                         single_lane.c = lane.c;
+                         single_lane.id = lane.index;
+                         single_lane.type = lane.type;
+                         single_lane.color = lane.color;
+                         single_lane.curvature = lane.curvature;
+                         pixel_lane_result.push_back(single_lane);
+                     }
+
+                     //t1 = (double)cv::getTickCount()- t1;
+                     //std::cout<< "Total Cost: "  <<t1*1000.0/cv::getTickFrequency()<<" ms"<<std::endl;
+
+                     ComputeVirtualXYZ(inv_fu,inv_fv,u0,v0,min_depth_,max_depth_,coeff,
+                                       pixel_lane_result,camera_lane_result,coords_camera);
+
+                     ros::Time time_stamp = current_depth_lane.stamp;
+                     PublishLanes3d(camera_lane_result,time_stamp);
+                     if(is_vis_lanes3d_point)
+                         PublishLanes3dPoint(coords_camera,time_stamp);
+
+                     // Debug view: top-down plot of the lane points in the camera frame.
+                     // NOTE(review): cv::Mat(rows, cols) -> 1920x1080 portrait canvas; confirm intended size.
+                     cv::Mat mask(1920, 1080, CV_8UC3, cv::Scalar(255,255,255));
+                     if(coords_camera.size()>0)
+                     {
+                         for (std::size_t i = 0; i < coords_camera.size(); i++)
+                         {
+                                 for (std::size_t j = 0; j < coords_camera[i].size() - 1; j++)
+                                 {
+                                         float x_idx =coords_camera[i][j].x;
+                                         float y_idx = coords_camera[i][j].y;
+                                         float z_idx = coords_camera[i][j].z;
+                                         // keep only points whose height lies in the ground band
+                                         if (z_idx > 0 && y_idx > 0.3 && y_idx < 0.8)
+                                         {
+                                                 int x_pixel = int((x_idx + 5) * 100);
+                                                 int z_pixel = int(z_idx * 100);
+                                                 cv::circle(mask, cv::Point(x_pixel, z_pixel), 5,
+                                                         cv::Scalar(COLOR_MAP[i][0], COLOR_MAP[i][1], COLOR_MAP[i][2]));
+                                         }
+                                 }
+                         }
+
+                      }
+                     cv::flip(mask,mask,0);
+                     cv::imshow("camera_lane",mask);
+                     cv::waitKey(1);
+                 }
+            }
+            else
+            {
+                 usleep(2000);
+            }
+         }
+
+     }
+
+     // Visualisation callback (time-synchronized image + 2-D lanes): draws each
+     // lane polynomial x = a + b*y + c*y^2 between [min_y, max_y) onto the image
+     // and shows it in the "show" window.
+     // NOTE(review): COLOR_MAP is indexed by lane.index without a bounds check —
+     // confirm the detector caps the index below the COLOR_MAP size.
+     void CallbackImageLane(const sensor_msgs::ImageConstPtr& image_msg, const adc_msg::lane_info_arrayConstPtr& detection_lane2d)
+     {
+         std::vector<adc_msg::lane_info> lanes = detection_lane2d->lane_infos;
+         cv::Mat img = cv_bridge::toCvCopy(image_msg, image_msg->encoding)->image;
+        
+         for(adc_msg::lane_info lane : lanes)
+         {
+             int min_y = int(lane.min_y);
+             int max_y = int(lane.max_y);
+             float a = lane.a;
+             float b = lane.b;
+             float c = lane.c;
+             int id = lane.index;
+             for(int i=min_y;i<max_y;i++)
+             {
+                 int x = int(a + b*i + c*i*i);
+                 cv::Point lane_point(x,i);
+                 cv::circle(img, lane_point,5,cv::Scalar(COLOR_MAP[id][0],
+                                                  COLOR_MAP[id][1],
+                                                  COLOR_MAP[id][2]),-1);
+             }
+         }
+         cv::imshow("show", img);
+         cv::waitKey(10);
+     }
+     // Time-synchronized callback pairing a depth frame with its 2-D lane
+     // detections.  The pair is queued (bounded to KCacheNum, oldest dropped)
+     // for the DepthLanesHandle worker thread.
+     void CallbackLaneDepth(const adc_msg::adc_depthConstPtr& depth_msg, const adc_msg::lane_info_arrayConstPtr& detection_lane2d)
+     {
+         ros::Time depth_time = depth_msg->header.stamp;
+         depth_lane merge_msg;
+         merge_msg.depth = *depth_msg;
+         merge_msg.lanes = *detection_lane2d;
+         merge_msg.stamp = depth_time;
+         {
+             std::lock_guard<std::mutex> mutex_sync(mutex_sync_depth_lane_);
+             cache_queue_.push(merge_msg);
+             if(cache_queue_.size() > KCacheNum) cache_queue_.pop();
+         }
+
+     }
+     // Reads topic names / flags from the parameter server, wires the
+     // message_filters synchronizers, starts the detached fusion worker thread,
+     // then blocks in ros::spin().
+     void InitConfig()
+     {
+
+         nh_ptr_->param("detection_lanes2d_topic", detection_lanes2d_topic_, std::string("/detection/lanes2d"));
+         nh_ptr_->param("detection_lanes3d_topic", detection_lanes3d_topic_, std::string("/detection/lanes3d"));
+         nh_ptr_->param("lanes3d_point_topic", lanes3d_point_topic_, std::string("/detection/lanes3d_point"));
+         nh_ptr_->param("stereo_depth_topic", stereo_depth_topic_, std::string("/stereo/depth"));
+         nh_ptr_->param("detection_image_topic", detection_image_topic, std::string("/detection/image_lane"));
+         nh_ptr_->param("vis_detection", is_vis_detection, true);
+         nh_ptr_->param("vis_lanes3d_point", is_vis_lanes3d_point, true);
+         // NOTE(review): the "show" window is created even when is_vis_detection
+         // is false; it is only used by CallbackImageLane.
+         cv::namedWindow("show",cv::WINDOW_NORMAL);
+
+         depth_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::adc_depth>>(*nh_ptr_, stereo_depth_topic_, 1);
+         detection_lanes_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::lane_info_array>>(*nh_ptr_, detection_lanes2d_topic_, 1);
+         detection_image_sub_ptr_ = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*nh_ptr_, detection_image_topic, 1);
+
+         lanes3d_publisher_ = nh_ptr_->advertise<adc_msg::lane_info_array>(detection_lanes3d_topic_,1);
+
+         if(is_vis_lanes3d_point)
+         {
+             lanes3d_point_publisher_ = nh_ptr_->advertise<sensor_msgs::PointCloud2>(lanes3d_point_topic_, 1);
+         }
+         // Detached worker consumes cache_queue_; it never joins (runs until exit).
+         thread_depth_lane_ = std::thread(std::bind(&LaneFusionCMW::DepthLanesHandle, this));
+         thread_depth_lane_.detach();
+         if(is_vis_detection)
+         {
+             sync_detection_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicyDetection>>(SyncPolicyDetection(100), *detection_image_sub_ptr_, *detection_lanes_sub_ptr_);
+             sync_detection_ptr_->registerCallback(std::bind(&LaneFusionCMW::CallbackImageLane,this,std::placeholders::_1, std::placeholders::_2));
+         }
+         sync_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicy>>(SyncPolicy(100), *depth_sub_ptr_, *detection_lanes_sub_ptr_);
+         sync_ptr_->registerCallback(std::bind(&LaneFusionCMW::CallbackLaneDepth,this,std::placeholders::_1, std::placeholders::_2));
+         ros::spin();
+     }
+
+ private:
+     std::shared_ptr<ros::NodeHandle> nh_ptr_;
+     std::string stereo_depth_topic_;
+     std::string detection_lanes2d_topic_;
+     std::string detection_lanes3d_topic_;
+     std::string lanes3d_point_topic_;
+     ros::Publisher lanes3d_publisher_;
+     ros::Publisher lanes3d_point_publisher_;
+     std::string detection_image_topic;
+     bool is_vis_detection;
+     bool is_vis_lanes3d_point;
+     std::shared_ptr<message_filters::Subscriber<adc_msg::adc_depth>> depth_sub_ptr_;
+     std::shared_ptr<message_filters::Subscriber<adc_msg::lane_info_array>> detection_lanes_sub_ptr_;
+     std::shared_ptr<message_filters::Subscriber<sensor_msgs::Image>> detection_image_sub_ptr_;
+     std::shared_ptr<message_filters::Synchronizer<SyncPolicy>> sync_ptr_;
+     std::shared_ptr<message_filters::Synchronizer<SyncPolicyDetection>> sync_detection_ptr_;
+     // Producer: CallbackLaneDepth; consumer: DepthLanesHandle (guarded by mutex below).
+     std::queue<depth_lane>  cache_queue_;
+     std::mutex mutex_sync_depth_lane_;
+     std::thread thread_depth_lane_;
+     float32_t max_depth_ = 10.0;
+     float32_t min_depth_ = 1.0;
+     float32_t max_height_ = 0.8;  // valid range of camera height above ground (y)
+     float32_t min_height_ = 0.3;
+     float32_t max_width_ = 3.0;  // x range used when fitting the ground plane
+     float32_t min_width_ = -3.0;
+     const unsigned int  KCacheNum = 2;  // max pending depth/lane pairs
+ };
+ }
+ }
+
+// Entry point for the lane-depth fusion node.
+int main(int argc,  char** argv)
+{
+    // argc/argv are forwarded to ros::init via the constructor; the previous
+    // (void) casts wrongly marked them as unused.
+    adc::ros_cmw::LaneFusionCMW app(argc, argv);
+    return 0;
+}

+ 209 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/lane_detection_app_node.cpp

@@ -0,0 +1,209 @@
+/**
+* \brief
+* \author liqingxia
+* \date 2020-11-30
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include <ros/ros.h>
+#include <message_filters/subscriber.h>
+#include <message_filters/time_synchronizer.h>
+#include <message_filters/sync_policies/approximate_time.h>
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+#include <memory>
+#include <stdlib.h>
+#include <opencv2/core.hpp>
+#include <opencv2/highgui.hpp>
+#include <opencv2/videoio.hpp>
+#include <opencv2/calib3d/calib3d.hpp>
+#include "cmw_app/lane_detection_app.h"
+#include <yaml-cpp/yaml.h>
+#include <vector>
+
+#include "common/project_root.h"
+#include "adc_msg/lane_info_array.h"
+#include "adc_msg/lane_info.h"
+
+#define IMAGE_WIDTH 640
+#define IMAGE_HEIGHT 480
+
+namespace adc
+{
+namespace ros_cmw
+{
+class LaneDetectionCMW
+{
+public:
+    //typedef message_filters::sync_policies::ApproximateTime<adc_msg::adc_depth, sensor_msgs::Image> SyncPolicy;
+    typedef std::chrono::system_clock::time_point  TimeType;
+    typedef  std::shared_ptr<std::list<vector<ld::lane_info>>> LaneInfoArrayType;
+    typedef  cv::Mat ImageType;
+public:
+    // Initialises the "detections" ROS node and a private node handle, then
+    // runs InitConfig(), which blocks in ros::spin().
+    LaneDetectionCMW(int argc, char** argv)
+    {
+        ros::init(argc, argv, "detections");
+        nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
+        InitConfig();
+    }
+private:
+    //向外发送车道线检测结果和左图
+    void CallbackDetection(LaneInfoArrayType& laneinfos, TimeType& tm, ImageType& img)
+    {
+        std::vector<adc_msg::lane_info> lanes;
+        auto lane_res_ptr = (*laneinfos).begin();
+        for(std::vector<ld::lane_info>::iterator lane = lane_res_ptr->begin(); lane != lane_res_ptr->end(); ++lane)
+        {
+            adc_msg::lane_info lane2d_msg;
+            lane2d_msg.index = lane->id;
+            lane2d_msg.type = lane->type;
+            lane2d_msg.color = lane->color;
+            lane2d_msg.a = lane->para.at<float>(0,0);
+            lane2d_msg.b = lane->para.at<float>(1,0);
+            lane2d_msg.c = lane->para.at<float>(2,0);
+            lane2d_msg.min_y = lane->min_y;
+            lane2d_msg.max_y = lane->max_y;
+            lane2d_msg.curvature = lane->curvature;
+            lanes.push_back(lane2d_msg);
+        }
+        adc_msg::lane_info_array lane_array2d_msg;
+        int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tm.time_since_epoch()).count();
+        int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tm.time_since_epoch()).count() % 1000000000UL;
+        ros::Time stamp(sec, nsec);
+        std_msgs::Header header;
+        header.stamp = stamp;
+        lane_array2d_msg.header = header;
+        lane_array2d_msg.lane_infos = lanes;
+        lanes_pushisher_.publish(lane_array2d_msg);
+        if(publis_origin_img_)
+        {
+            cv_bridge::CvImage cv_img_l;
+            sensor_msgs::Image img_msg_l;
+            cv_img_l.header.stamp = stamp;
+            cv_img_l.encoding = "bgr8";
+            cv_img_l.image = img;
+            cv_img_l.toImageMsg(img_msg_l);
+            left_image_publisher_.publish(img_msg_l);
+        }
+    }
+
+    // Receives the side-by-side binocular frame, crops and rectifies the left
+    // half, and hands it to the lane detector with the frame's capture time.
+    void CallbackBinocular(const sensor_msgs::ImageConstPtr& binocular_image_msg)
+    {
+
+        // NOTE(review): which timestamp should be used here — the capture
+        // stamp or receive time?  TODO confirm with the downstream consumers.
+        ros::Time image_time = binocular_image_msg->header.stamp;
+        //ros::Time image_time = ros::Time::now();
+        cv::Mat binocular = cv_bridge::toCvCopy(binocular_image_msg, binocular_image_msg->encoding)->image;
+        cv::Mat left_image = binocular(left_rect_).clone();
+        // Re-express the ROS stamp as a std::chrono time_point for the app layer.
+        uint64_t nsecs = image_time.toNSec();
+        std::chrono::nanoseconds ns(nsecs);
+        TimeType time_stamp(ns);
+        cv::remap(left_image, left_image, map_x_, map_y_, cv::INTER_LINEAR);
+        detection_ptr_->CacheImages(left_image, time_stamp);
+    }
+    // Load intrinsic/rectification parameters from a ROS camera_info-style YAML
+    // file and build the undistort-rectify maps map_x_/map_y_.
+    // Exits the process if the file cannot be read or parsed.
+    void ReadCameraParams(const std::string camera_yaml)
+    {
+        try
+        {
+            YAML::Node node = YAML::LoadFile(camera_yaml);
+            int height = node["image_height"].as<int>();
+            int width = node["image_width"].as<int>();
+            cv::Size size(width, height);
+            std::vector<double> K_v= node["camera_matrix"]["data"].as<std::vector<double>>();
+            std::vector<double> P_v = node["projection_matrix"]["data"].as<std::vector<double>>();
+            std::vector<double> D_v = node["distortion_coefficients"]["data"].as<std::vector<double>>();
+            std::vector<double> R_v = node["rectification_matrix"]["data"].as<std::vector<double>>();
+
+            cv::Mat _K(K_v);
+            cv::Mat _P(P_v);
+            cv::Mat _D(D_v);
+            cv::Mat _R(R_v);
+
+            // Bug fix: cv::Mat::reshape(cn, rows) takes the CHANNEL count first.
+            // The old code passed the column count as cn (and "_D.reshape(5.1)"
+            // was a 5,1 typo), producing multi-channel matrices that
+            // cv::initUndistortRectifyMap rejects.  Keep one channel and set rows.
+            _K = _K.reshape(1,3);   // 3x3 camera matrix
+            _P = _P.reshape(1,3);   // 3x4 projection matrix
+            _D = _D.reshape(1,1);   // 1x5 distortion coefficients
+            _R = _R.reshape(1,3);   // 3x3 rectification matrix
+            cv::initUndistortRectifyMap(_K, _D, _R, _P, size, CV_32FC1, map_x_, map_y_);
+
+        }
+        catch (const std::exception& e)
+        {
+            std::cerr << e.what() << '\n';
+            std::cerr<<" detection Ros app read camera params file failed!"<<std::endl;
+            exit(1);
+
+        }
+    }
+    // Reads parameters, loads the left-camera rectification maps, constructs
+    // the LaneDetectionApp with its publish callback, subscribes to the
+    // binocular topic, then blocks in ros::spin().
+    void InitConfig()
+    {
+
+        std::string detection_config;
+        std::string root_path;
+        std::string left_camera_params_filename;
+        std::string detection_lanes2d_topic;
+        std::string origin_imgage_topic;
+        float hz;
+        nh_ptr_->param("binocular_topic", binocular_topic_, std::string("/camera/binocular"));
+        nh_ptr_->param("hz", hz, 30.0f);
+        nh_ptr_->param("detection_config", detection_config, std::string("lane_detection.yaml"));
+        nh_ptr_->param("root_path", root_path, std::string("/opt/adc"));
+        nh_ptr_->param("left_camera_params_filename", left_camera_params_filename, std::string("left.yaml"));
+        nh_ptr_->param("detection_lanes2d_topic", detection_lanes2d_topic, std::string("/detection/lanes2d"));
+        nh_ptr_->param("publis_origin_img", publis_origin_img_, true);
+        nh_ptr_->param("origin_imgage_topic", origin_imgage_topic, std::string("/detection/image_lane"));
+        // NOTE(review): putenv stores a pointer into param_root's buffer; it
+        // stays valid only while this function (which holds ros::spin) runs —
+        // confirm nothing reads ADC_ROOT after spin returns.
+        std::string param_root = "ADC_ROOT="+root_path;
+        char* path_c = const_cast<char*>(param_root.c_str());
+        putenv(path_c);
+        std::string left_camera_params_file_path = adc::RootPath::GetAbsolutePath(left_camera_params_filename);
+        ReadCameraParams(left_camera_params_file_path);
+        detection_ptr_ = std::make_shared<adc::LaneDetectionApp>(detection_config, hz);
+        detection_ptr_->SetDetectionCallback(
+            std::bind(
+                &LaneDetectionCMW::CallbackDetection, this,
+                std::placeholders::_1, std::placeholders::_2,
+                std::placeholders::_3)
+        );
+//std::bind(&StereoMatchCMW::CallbackDepthHandler, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
+
+        // Left image occupies the left half of the side-by-side binocular frame.
+        left_rect_ = cv::Rect(0,0,IMAGE_WIDTH, IMAGE_HEIGHT);
+        binocular_subscriber_ = nh_ptr_->subscribe<sensor_msgs::Image>(
+            binocular_topic_, 1,
+            std::bind(&LaneDetectionCMW::CallbackBinocular,
+            this,
+            std::placeholders::_1)
+            );
+        lanes_pushisher_ = nh_ptr_->advertise<adc_msg::lane_info_array>(detection_lanes2d_topic,1);
+        transport_img_ptr_ = std::make_shared<image_transport::ImageTransport>(*nh_ptr_);
+        left_image_publisher_ = transport_img_ptr_->advertise(origin_imgage_topic, 1);
+        detection_ptr_->Run();
+        // depth_sub_ptr_ = std::make_shared<message_filters::Subscriber<adc_msg::adc_depth>>(*nh_ptr_, stereo_depth_topic_, 1);
+        // image_sub_ptr_ = std::make_shared<message_filters::Subscriber<sensor_msgs::Image>>(*nh_ptr_, binocular_topic_, 1);
+        // sync_ptr_ = std::make_shared<message_filters::Synchronizer<SyncPolicy>>(SyncPolicy(100), *depth_sub_ptr_, *image_sub_ptr_);
+        // sync_ptr_->registerCallback(std::bind(&DetectionCMW::CallbackImageDepth,this,std::placeholders::_1, std::placeholders::_2));
+        ros::spin();
+
+    }
+private:
+    std::shared_ptr<ros::NodeHandle> nh_ptr_;
+    std::string binocular_topic_;
+    std::shared_ptr<adc::LaneDetectionApp> detection_ptr_;
+    cv::Rect left_rect_;  // ROI of the left image inside the side-by-side frame
+    ros::Subscriber binocular_subscriber_;
+    ros::Publisher lanes_pushisher_;
+    std::shared_ptr<image_transport::ImageTransport> transport_img_ptr_;
+    image_transport::Publisher left_image_publisher_;
+
+    // Rectification maps built by ReadCameraParams.
+    cv::Mat map_x_;
+    cv::Mat map_y_;
+    bool publis_origin_img_;  // also publish the rectified left image
+};
+}
+}
+
+// Entry point for the lane-detection node.
+int main(int argc,  char** argv)
+{
+    adc::ros_cmw::LaneDetectionCMW app(argc, argv);
+    return 0;
+}

+ 40 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/modulecomm.h

@@ -0,0 +1,40 @@
+#ifndef MODULECOMM_H
+#define MODULECOMM_H
+
+#include <QtCore/qglobal.h>
+#include <QDateTime>
+
+#include <functional>
+
+// Export/import macro for the module-communication shared library.
+#if defined(MODULECOMM_LIBRARY)
+#  define MODULECOMMSHARED_EXPORT Q_DECL_EXPORT
+#else
+#  define MODULECOMMSHARED_EXPORT Q_DECL_IMPORT
+#endif
+
+
+
+//#include <iostream>
+//#include <thread>
+
+//using namespace std::placeholders;
+// Receive-callback signatures: (data, size, message index, timestamp, channel name).
+typedef std::function<void(const char * ,const unsigned int , const unsigned int , QDateTime * ,const char *)> ModuleFun;
+typedef void (* SMCallBack)(const char * strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname);
+
+namespace iv {
+namespace modulecomm {
+// Create a named send channel with nMsgBufCount buffers of nBufSize bytes each.
+void * MODULECOMMSHARED_EXPORT RegisterSend(const char * strcommname,const unsigned int nBufSize,const unsigned int nMsgBufCount);
+// Subscribe to a named channel (C-style or std::function callback variant).
+void * MODULECOMMSHARED_EXPORT RegisterRecv(const char * strcommname,SMCallBack pCall);
+void * MODULECOMMSHARED_EXPORT RegisterRecvPlus(const char * strcommname,ModuleFun xFun);
+// Publish nDataLen bytes on a handle obtained from RegisterSend.
+void MODULECOMMSHARED_EXPORT ModuleSendMsg(void * pHandle,const char * strdata,const unsigned int nDataLen);
+// Release / pause / resume a channel handle.
+void MODULECOMMSHARED_EXPORT Unregister(void * pHandle);
+void MODULECOMMSHARED_EXPORT PauseComm(void * pHandle);
+void MODULECOMMSHARED_EXPORT ContintuComm(void * pHandle);
+
+}
+
+}
+
+
+
+#endif 

+ 227 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/app/sgm_app_node.cpp

@@ -0,0 +1,227 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-08-21
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+#include <ros/ros.h>
+#include <image_transport/image_transport.h>
+#include <cv_bridge/cv_bridge.h>
+#include <sensor_msgs/image_encodings.h>
+#include <memory>
+#include <opencv2/opencv.hpp>
+#include <stdlib.h>
+#include <pcl/point_cloud.h>
+#include <pcl_conversions/pcl_conversions.h> 
+#include <sensor_msgs/PointCloud2.h> 
+
+
+#include "cmw_app/sgm_app.h"
+#include "adc_3d/utils.h"
+#include "adc_msg/adc_depth.h"
+
+
+
+namespace adc
+{
+    
+namespace ros_cmw
+{
+    
+class StereoMatchCMW
+{
+
+public:
+    typedef std::chrono::system_clock::time_point  TimeType;
+    typedef cv::Mat PointCloudMat;
+    typedef cv::Mat ImageColor;
+  
+
+public:
+    // Initialises the stereo-matching ROS node and a private node handle, then
+    // runs InitConfig(), which blocks in ros::spin().
+    StereoMatchCMW(int argc, char** argv, std::string node_name = "stereo_match")
+    {
+        ros::init(argc, argv, node_name);
+        nh_ptr_ = std::make_shared<ros::NodeHandle>("~");
+        InitConfig();
+    }
+private:
+    // Publish the dense 3-D reconstruction as a colored PointCloud2 in frame
+    // "stereo_camera".  `points` is an HxW 3-channel float Mat (x,y,z per
+    // pixel) and `color_image` the matching BGR image; points closer than
+    // 0.15 m or beyond max_depth_ are discarded.
+    // NOTE(review): `stamp` is deliberately unused — the message is stamped
+    // with ros::Time::now(); confirm that is intended for visualization only.
+    void PublishPoints(PointCloudMat& points, cv::Mat& color_image, ros::Time& stamp)
+    {
+        (void) stamp;
+        pcl::PointCloud<pcl::PointXYZRGB>::Ptr point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+        sensor_msgs::PointCloud2 point_cloud_msg;
+
+        // Walk both Mats in lockstep, three values per pixel.
+        float* point_data_ptr = points.ptr<float>();
+        uchar* color_data_ptr = color_image.ptr<uchar>();
+        uint32_t points_size = points.rows * points.cols;
+        for(uint32_t index = 0; index < points_size; index++)
+        {
+            float x = point_data_ptr[0];
+            float y = point_data_ptr[1];
+            float z = point_data_ptr[2];
+            uchar b = color_data_ptr[0];
+            uchar g = color_data_ptr[1];
+            uchar r = color_data_ptr[2];
+            if(z < 0.15 || z > max_depth_)
+            {
+                point_data_ptr += 3;
+                color_data_ptr += 3;
+                continue;
+            }
+            else
+            {
+                pcl::PointXYZRGB point;
+                point.x = x;
+                point.y = y;
+                point.z = z;
+                point.r = r;
+                point.g = g;
+                point.b = b;
+                point_cloud_ptr->push_back(point);
+                point_data_ptr += 3;
+                color_data_ptr += 3;
+
+            }
+        }
+        
+
+        pcl::PointCloud<pcl::PointXYZRGB>::Ptr filters_point_cloud_ptr(new pcl::PointCloud<pcl::PointXYZRGB>);
+
+        // adc::adc_3d::PointUtils::FilterRemoveOutlierPointsOnStatistical(point_cloud_ptr , filters_point_cloud_ptr);
+        // ROS_INFO("Send point cloud, size: %d", point_cloud_ptr->size());
+        pcl::toROSMsg(*point_cloud_ptr, point_cloud_msg);
+        point_cloud_msg.header.frame_id = "stereo_camera"; 
+        point_cloud_msg.header.stamp = ros::Time::now();
+        point_vis_publisher_.publish(point_cloud_msg);
+        ros::spinOnce();
+    }
+
+    // Publish the depth map as an adc_msg::adc_depth: the float depth (metres)
+    // is converted to mono16 millimetres (x1000) and the camera intrinsics
+    // (fu, fv, u0, v0) are attached so consumers can back-project.
+    void PublshDepth(cv::Mat& depth, ros::Time& stamp)
+    {
+        cv::Mat depth_nomo16;
+        adc_msg::adc_depth depth_msg;
+        std_msgs::Header header;
+
+        depth.convertTo(depth_nomo16, CV_16U, 1000, 0);
+        cv_bridge::CvImage cv_img(std_msgs::Header(), "mono16", depth_nomo16);
+        cv_img.header.stamp=stamp;
+        header.stamp = stamp;
+        depth_msg.header = header;
+        depth_msg.fu = camera_params_.fu;
+        depth_msg.fv = camera_params_.fv;
+        depth_msg.u0 = camera_params_.u0;
+        depth_msg.v0 = camera_params_.v0;
+        cv_img.toImageMsg(depth_msg.depth);
+        depth_publisher_.publish(depth_msg);
+        ros::spinOnce();
+    }
+    // SGM result callback: converts the std::chrono stamp to ros::Time, takes
+    // the z channel of the 3-channel xyz Mat as the depth map, publishes the
+    // depth message and (optionally) the colored visualization cloud.
+    void CallbackDepthHandler(PointCloudMat& depth_3d, ImageColor& image, TimeType& tm)
+    {
+        int32_t sec = std::chrono::duration_cast<std::chrono::seconds>(tm.time_since_epoch()).count();
+        int32_t nsec = std::chrono::duration_cast<std::chrono::nanoseconds>(tm.time_since_epoch()).count() % 1000000000UL;
+        ros::Time stamp(sec, nsec);
+        //
+        std::vector<cv::Mat> channels(3);
+        cv::split(depth_3d, channels);
+        cv::Mat depth = channels[2];
+        // cv::Mat depth_8u, depth_color;
+        // depth.convertTo(depth_8u, CV_8U, 255/15);
+        // cv::applyColorMap(depth_8u, depth_color,  cv::COLORMAP_RAINBOW);
+        // depth_color.setTo(cv::Scalar(0, 0, 0), depth_8u < 0);
+        if(send_vis_)
+        {
+            PublishPoints(depth_3d, image, stamp);
+        }
+        PublshDepth(depth, stamp);
+        // cv::imshow("depth", depth_color);
+        // cv::waitKey(10);
+    }
+    // Reads parameters, splits the side-by-side frame geometry, constructs the
+    // SGM app with its result callback, wires publishers/subscribers, then
+    // blocks in ros::spin().
+    void InitConfig()
+    {
+        std::string root_path;
+        std::string steroe_config;
+        std::string depth_topic;
+        float hz;
+        int image_height;
+        int image_width;
+        nh_ptr_->param("binocular_camera_topic", binocular_camera_topic_, std::string("/camera/binocular"));
+        nh_ptr_->param("stereo_depth_topic", stereo_depth_topic_, std::string("/stereo/depth"));
+        nh_ptr_->param("root_path", root_path, std::string("/opt/adc"));
+        nh_ptr_->param("steroe_config", steroe_config, std::string("stereo.yaml"));
+        nh_ptr_->param("hz", hz, 30.0f);
+        nh_ptr_->param("send_vis", send_vis_, false);
+        nh_ptr_->param("points_vis_topic", points_vis_topic_, std::string("/stereo/point_cloud"));
+        nh_ptr_->param("max_depth", max_depth_, 15.0f);
+        nh_ptr_->param("depth_topic", depth_topic, std::string("/stereo/depth"));
+        nh_ptr_->param("image_width", image_width, 640);
+        nh_ptr_->param("image_height", image_height, 480);
+        
+
+        std::string param_root = "ADC_ROOT="+root_path;
+        char* path_c = const_cast<char*>(param_root.c_str());
+        putenv(path_c);
+        left_rect = cv::Rect(0,0,image_width, image_height);
+        // Bug fix: the right half of the side-by-side binocular frame starts at
+        // x = image_width and is image_width wide — the old code passed
+        // image_height as the rectangle width.
+        right_rect = cv::Rect(image_width,0,image_width, image_height);
+        sgm_ptr_ = std::make_shared<adc::StereoSgmApp>(steroe_config, hz);
+        //add callback 
+        sgm_ptr_->SetPointCloudMatAndImageColorCallback(
+            std::bind(&StereoMatchCMW::CallbackDepthHandler, this, std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
+        sgm_ptr_->Run();
+        sgm_ptr_->GetDepthParams(camera_params_);
+        if(send_vis_)
+        {
+            point_vis_publisher_ = nh_ptr_->advertise<sensor_msgs::PointCloud2>(points_vis_topic_, 1);
+        }
+        depth_publisher_ = nh_ptr_->advertise<adc_msg::adc_depth>(depth_topic, 1);
+        binocular_subscriber_ = nh_ptr_->subscribe<sensor_msgs::Image>(
+                                binocular_camera_topic_, 1,
+                                std::bind(
+                                    &StereoMatchCMW::CallbackReceviceBinocular,
+                                    this,
+                                    std::placeholders::_1)
+                                    );
+        ros::spin();
+
+    }
+
+    // Splits the incoming side-by-side binocular frame into left/right crops,
+    // converts the ROS stamp to a std::chrono time_point, and queues the pair
+    // for SGM processing.
+    void CallbackReceviceBinocular(const sensor_msgs::ImageConstPtr& binocular_img_msg)
+    {
+        cv::Mat left_image, right_image, binocular_image;
+        binocular_image = cv_bridge::toCvCopy(binocular_img_msg, binocular_img_msg->encoding)->image;
+        left_image = binocular_image(left_rect).clone();
+        right_image = binocular_image(right_rect).clone();
+        ros::Time time = binocular_img_msg->header.stamp;
+        // ros::Time time = ros::Time::now();
+
+        uint64_t nsecs = time.toNSec();
+        std::chrono::nanoseconds ns(nsecs);
+        TimeType time_stamp(ns);
+        sgm_ptr_->CacheImages(left_image, right_image, time_stamp);
+    }
+
+private:
+    std::shared_ptr<ros::NodeHandle> nh_ptr_;
+    std::string binocular_camera_topic_;
+    std::string points_vis_topic_;
+    ros::Subscriber binocular_subscriber_;
+    ros::Publisher point_vis_publisher_;
+    ros::Publisher depth_publisher_;
+    std::string stereo_depth_topic_;
+    std::shared_ptr<adc::StereoSgmApp> sgm_ptr_;
+    cv::Rect left_rect, right_rect;  // halves of the side-by-side frame
+    bool send_vis_;                  // also publish the visualization cloud
+    float32_t max_depth_;            // far clip for published points (m)
+    adc::CameraParameters camera_params_;  // intrinsics reported by the SGM app
+
+};
+}
+}
+
+// Entry point for the stereo-matching (SGM) node.
+int main(int argc, char** argv)
+{
+    adc::ros_cmw::StereoMatchCMW stereo_cmw(argc, argv);
+    return 0;
+
+}
+

+ 36 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/adc_3d/utils.h

@@ -0,0 +1,36 @@
+/**
+* \brief Point-cloud filtering utility declarations (statistical outlier removal).
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-06-09
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+// BUG FIX: this header had no include guard — including it from two files in
+// the same translation unit would redefine class PointUtils.
+#pragma once
+
+#include <pcl/point_types.h>
+#include <pcl/filters/passthrough.h>
+#include <pcl/filters/statistical_outlier_removal.h>
+
+
+namespace adc
+{
+namespace adc_3d
+{
+
+/// Static helpers for point-cloud post-processing.
+class PointUtils
+{
+public:
+
+/// Removes statistical outliers from `input_pointcloud` into
+/// `output_pointcloud` (PCL StatisticalOutlierRemoval).
+/// \param knn_num  neighbours used for the mean-distance estimate
+/// \param std_dev  standard-deviation multiplier threshold
+///                 (renamed from `std`, which shadowed namespace std)
+/// NOTE(review): `float32_t` is presumably a project-wide typedef — confirm
+/// it is in scope for every includer of this header.
+static void FilterRemoveOutlierPointsOnStatistical(pcl::PointCloud<pcl::PointXYZ>::Ptr input_pointcloud,
+                         pcl::PointCloud<pcl::PointXYZ>::Ptr output_pointcloud,
+                         const int knn_num = 50,
+                         const float32_t std_dev = 1.0);
+/// Overload for RGB point clouds.
+static void FilterRemoveOutlierPointsOnStatistical(pcl::PointCloud<pcl::PointXYZRGB>::Ptr input_pointcloud,
+                         pcl::PointCloud<pcl::PointXYZRGB>::Ptr output_pointcloud,
+                         const int knn_num = 50,
+                         const float32_t std_dev = 1.0);
+
+};
+
+}
+
+}

+ 76 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/cmw_app/detection_trt7_app.h

@@ -0,0 +1,76 @@
+#ifndef DETECTION_TRT7_APP_H
+#define DETECTION_TRT7_APP_H
+
+#include <opencv2/core/core.hpp>
+#include "opencv2/imgproc/imgproc.hpp"
+#include <queue>
+#include <utility>
+#include <mutex>
+#include <functional>
+#include <cuda_runtime.h>
+#include <memory>
+#include <chrono>
+#include "common/work.h"
+#include "detection_trt7/yolov4Tensorrt/include/yolodetect.h"
+#include "detection_trt7/include/detect_obstacle.h"
+#include "detection_trt7/include/dataReader.h"
+#include <list>
+#include <string>
+#include <vector>
+#include <yaml-cpp/yaml.h>
+
+// BUG FIX(review): removed header-scope `using namespace std;` — a
+// using-directive in a header leaks into every translation unit that includes
+// it. All std types below are now fully qualified.
+
+namespace adc
+{
+    /// YOLOv4 / TensorRT7 detection worker. Frames are pushed through
+    /// CacheImages() and consumed by the Worker loop (DoWork/Inference);
+    /// results are delivered via the registered DetectionCallback.
+    class DetectionTrt7App : public Worker
+    {
+
+    public:
+        typedef std::shared_ptr<std::list<std::vector<Tn::Bbox>>> OutputDataType;
+        typedef std::chrono::system_clock::time_point TimeType;
+        typedef cv::Mat ImageType;
+        typedef std::vector<std::string> ClassNameType;
+        typedef std::function<void(OutputDataType&, TimeType&, ImageType&, std::vector<std::string>&)> DetectionCallback;
+
+        /// \param config_file YAML configuration path (parsed by InitConfig)
+        /// \param hz          worker frequency, forwarded to Worker
+        DetectionTrt7App(const std::string &config_file, const float hz);
+
+        void SetDetectionCallback(DetectionCallback callback);
+        /// Worker entry point: runs one detection cycle.
+        void DoWork();
+
+        /// Pushes an image + timestamp into the bounded input cache
+        /// (presumably guarded by mutex_image_ — implementation elsewhere).
+        void CacheImages(const cv::Mat &left_img, TimeType& time);
+
+        /// Pops the oldest cached frame into left_image_/current_time_;
+        /// presumably returns false when the cache is empty — TODO confirm.
+        bool ReceiveImage(void);
+
+        void Inference();
+
+        /// Reads class names from a file (one per line, presumably).
+        std::vector<std::string> GetNamesFromFile(std::string filePathAndName);
+
+    private:
+        void InitConfig(const std::string& config_file);
+
+    private:
+        std::mutex mutex_image_;  ///< guards image_cache_ / time_cache_
+    private:
+        /// cached input data, bounded by kQueueSize
+        std::queue<cv::Mat> image_cache_;
+        std::queue<std::chrono::system_clock::time_point> time_cache_;
+
+    private:
+        NetworkInfo network_info_;
+        std::string engine_path_;
+        IExecutionContext* yolo_context_;  // raw pointer: ownership unclear — TODO confirm who destroys it
+        std::shared_ptr<YoloDetect> Yolo_detect_;
+        std::shared_ptr<std::list<std::vector<Tn::Bbox>>> outputs_;
+        std::vector<KalmanTracker> trackers_;
+        int64 image_count_;
+        cv::Mat left_image_;
+        std::chrono::system_clock::time_point current_time_;
+        std::vector<std::string> class_name_;
+        DetectionCallback detection_callback_;
+        static const int kQueueSize = 10;
+        float nms_;     ///< NMS threshold (from config)
+        float ignore_;  ///< score/ignore threshold (from config)
+        bool track_;    ///< enable Kalman tracking
+    };
+}
+
+#endif // ! DETECTION_TRT7_APP_H

+ 74 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/cmw_app/lane_detection_app.h

@@ -0,0 +1,74 @@
+#ifndef LANE_DETECTION_APP_H
+#define LANE_DETECTION_APP_H
+
+
+#include <opencv2/core/core.hpp>
+#include "opencv2/imgproc/imgproc.hpp"
+#include <queue>
+#include <utility>
+#include <mutex>
+#include <functional>
+#include <cuda_runtime.h>
+#include <memory>
+#include <chrono>
+#include "common/work.h"
+#include "lane_detection/tensorrt/tensorrt.h"
+#include "lane_detection/lanedetect.h"
+#include <list>
+#include <string>
+#include <vector>
+#include <yaml-cpp/yaml.h>
+
+// BUG FIX(review): removed header-scope `using namespace std;` — it leaked
+// into every includer. All std types below are now fully qualified.
+
+namespace adc
+{
+    // Network input geometry (pixels / channels).
+    static const int INPUT_H = 256;
+    static const int INPUT_W = 512;
+    static const int INPUT_C = 3;
+
+    /// Lane-detection worker: frames pushed via CacheImages() are consumed by
+    /// the Worker loop (DoWork/Inference); results are delivered through the
+    /// registered DetectionCallback.
+    class LaneDetectionApp : public Worker
+    {
+
+    public:
+        typedef std::shared_ptr<std::list<std::vector<ld::lane_info>>> OutputDataType;
+        typedef std::chrono::system_clock::time_point TimeType;
+        typedef cv::Mat ImageType;
+        typedef std::function<void(OutputDataType&, TimeType&, ImageType&)> DetectionCallback;
+
+        /// \param config_file YAML configuration path (parsed by InitConfig)
+        /// \param hz          worker frequency, forwarded to Worker
+        LaneDetectionApp(const std::string &config_file, const float hz);
+        ~LaneDetectionApp();
+        void SetDetectionCallback(DetectionCallback callback);
+        void DoWork();
+        /// Pushes an image + timestamp into the bounded input cache.
+        void CacheImages(const cv::Mat &left_img, TimeType& time);
+        /// Pops the oldest cached frame; presumably returns false when empty.
+        bool ReceiveImage(void);
+        void Inference();
+    private:
+        void InitConfig(const std::string& config_file);
+
+    private:
+        std::mutex mutex_image_;  ///< guards image_cache_ / time_cache_
+    private:
+        /// cached input data, bounded by kQueueSize
+        std::queue<cv::Mat> image_cache_;
+        std::queue<std::chrono::system_clock::time_point> time_cache_;
+
+    private:
+        IExecutionContext* lane_context;  // raw pointer: ownership unclear — TODO confirm
+        std::shared_ptr<ld::TensorRT> lane_trt;
+        std::shared_ptr<ld::LaneNet> lanenet;
+        std::vector<KalmanTracker> trackers;
+        static int frame_count;
+        std::shared_ptr<std::list<std::vector<ld::lane_info>>> outputs_;
+        cv::Mat left_image_;
+        std::chrono::system_clock::time_point current_time_;
+        DetectionCallback detection_callback_;
+        static const int kQueueSize = 10;
+        int width_;
+        int hight_;    // [sic] "height" — renaming would break the definition elsewhere
+        int channel_;
+    public:
+        // ImageNet normalisation constants, RGB order.
+        // BUG FIX: added `f` suffixes — double literals in a float array
+        // brace-initialiser are a narrowing conversion (ill-formed in
+        // list-initialisation).
+        float _mean[INPUT_C] = {0.485f, 0.456f, 0.406f};
+        float _std[INPUT_C] = {0.229f, 0.224f, 0.225f};
+    };
+
+}
+
+#endif // ! LANE_DETECTION_APP_H

+ 180 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/cmw_app/sgm_app.h

@@ -0,0 +1,180 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-05-20
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#ifndef  STEREO_VISION_SGM_APP_H
+#define STEREO_VISION_SGM_APP_H
+
+
+#include <opencv2/core/core.hpp>
+#include <queue>
+#include <utility>
+#include <mutex>
+#include <functional>
+#include <cuda_runtime.h>
+#include <memory>
+#include <stereo_vision/sgm.h>
+#include <common/work.h>
+namespace adc
+{
+
+
+/// RAII wrapper around a raw CUDA device allocation.
+/// BUG FIXES(review):
+///  - `data` is now initialised to nullptr in the size ctor: previously, if
+///    cudaMalloc failed and left the pointer unmodified, the destructor would
+///    cudaFree an uninitialised pointer.
+///  - copy construction/assignment are deleted: the destructor frees `data`,
+///    so a copy would double-free the device allocation.
+///  - allocate() now releases any previous allocation instead of leaking it.
+struct device_buffer
+{
+	device_buffer() : data(nullptr) {}
+	device_buffer(size_t count) : data(nullptr) { allocate(count); }
+	device_buffer(const device_buffer&) = delete;
+	device_buffer& operator=(const device_buffer&) = delete;
+	/// (Re)allocates `count` bytes of device memory, freeing any prior buffer.
+	void allocate(size_t count) { cudaFree(data); data = nullptr; cudaMalloc(&data, count); }
+	~device_buffer() { cudaFree(data); }  // cudaFree(nullptr) is a no-op
+	void* data;
+};
+
+
+/// Intrinsic + mounting parameters of the (left) stereo camera, filled from
+/// the YAML config (see StereoSgmApp::GetDepthParams / ReadParams).
+/// Units are not stated in this header — presumably pixels for fu/fv/u0/v0
+/// and metres for baseline/height; TODO confirm against the calibration file.
+struct CameraParameters
+{
+	float fu;        ///< focal length along u
+	float fv;        ///< focal length along v
+	float u0;        ///< principal point, u coordinate
+	float v0;        ///< principal point, v coordinate
+	float baseline;  ///< stereo baseline (used as baseline*fu in back-projection)
+	float height;    ///< camera mounting height
+	float tilt;      ///< angle used for the R_z rotation in CoordinateTransform
+    float pitch;     ///< angle used for the R_x rotation in CoordinateTransform
+    float yaw;       ///< angle used for the R_y rotation in CoordinateTransform
+
+};
+
+/// Precomputes the constants needed to back-project a pixel + disparity to a
+/// 3D point, plus the composite rotation R = R_x(pitch) * R_y(yaw) * R_z(tilt).
+struct CoordinateTransform
+{
+	CoordinateTransform(const CameraParameters& camera) : camera(camera)
+	{
+		sinTilt = (sinf(camera.tilt));
+		cosTilt = (cosf(camera.tilt));
+		bf = camera.baseline * camera.fu;
+		invfu = 1.f / camera.fu;
+		invfv = 1.f / camera.fv;
+		sin_pitch = (sinf(camera.pitch));
+		cos_pitch = (cosf(camera.pitch));
+		sin_yaw = (sinf(camera.yaw));
+		cos_yaw = (cosf(camera.yaw));
+		// Rotation about the x axis (pitch).
+		// BUG FIX: entry (1,1) was sin_pitch in the original, which is not a
+		// valid rotation matrix; it must be cos_pitch.
+		cv::Mat R_x = (cv::Mat_<float>(3,3) <<
+				1,  0,          0,
+				0,  cos_pitch,  sin_pitch,
+				0, -sin_pitch,  cos_pitch);
+		// Rotation about the y axis (yaw).
+		cv::Mat R_y = (cv::Mat_<float>(3,3) <<
+				cos_yaw, 0, -sin_yaw,
+				0,       1,  0,
+				sin_yaw, 0,  cos_yaw);
+		// Rotation about the z axis (tilt).
+		cv::Mat R_z = (cv::Mat_<float>(3,3) <<
+				 cosTilt, sinTilt, 0,
+				-sinTilt, cosTilt, 0,
+				 0,       0,       1);
+		R = R_x * R_y * R_z;
+	}
+
+	/// Back-projects pixel `pt` with disparity `d` (d > 0) using the pinhole
+	/// model: Zc = baseline*fu / d.
+	/// NOTE(review): applying R to rotate into the world/vehicle frame was
+	/// commented out in the original; points are returned in camera
+	/// coordinates — confirm downstream consumers expect that.
+	inline cv::Point3f imageToWorld(const cv::Point2f& pt, float d) const
+	{
+		const float u = pt.x;
+		const float v = pt.y;
+
+		const float Zc = bf / d;
+		const float Xc = invfu * (u - camera.u0) * Zc;
+		const float Yc = invfv * (v - camera.v0) * Zc;
+		return cv::Point3f(Xc, Yc, Zc);
+	}
+
+	CameraParameters camera;
+	float sinTilt, cosTilt, bf, invfu, invfv;
+	float sin_pitch, cos_pitch, sin_yaw, cos_yaw;
+	cv::Mat R;
+};
+
+
+/// Semi-global-matching stereo worker: rectified left/right pairs cached via
+/// CacheImages() are matched on the GPU (StereoMatch), back-projected to 3D
+/// (ReprojectPointsTo3D) and delivered through whichever callbacks were set.
+class StereoSgmApp : public Worker
+{
+
+public:
+    typedef std::vector<cv::Vec3b> PointColorType;
+    typedef std::vector<cv::Point3f> PointCloudType;
+    typedef std::chrono::system_clock::time_point  TimeType;
+    typedef cv::Mat PointCloudMat;
+    typedef cv::Mat ImageColor;
+    typedef std::function<void(PointCloudMat&, TimeType&)> PointCloudMatCallback;
+    typedef std::function<void(PointCloudType&, TimeType&)> PointCloudCallback;
+    typedef std::function<void(PointColorType&, TimeType&)> PointColorCallback;
+    typedef std::function<void(PointCloudType&,  PointColorType&, TimeType&)> PointAndColorCallback;
+    typedef std::function<void(PointCloudMat&, ImageColor&, TimeType&)> PointCloudMatAndImageColorCallback;
+
+    /// \param config_file YAML configuration path (parsed by InitConfig)
+    /// \param hz          worker frequency, forwarded to Worker
+    explicit StereoSgmApp(const std::string &config_file, const float hz) ;
+
+    void SetPointCloudCallback(PointCloudCallback callback);
+    void SetPointColorCallback(PointColorCallback callback);
+    // [sic] "Adn" is a typo for "And"; kept — it is public interface and
+    // callers elsewhere already use this name.
+    void SetPointAdnColorCallback(PointAndColorCallback callback);
+    void SetPointCloudMatCallback(PointCloudMatCallback callback);
+    void SetPointCloudMatAndImageColorCallback(PointCloudMatAndImageColorCallback callback);
+    void DoWork();
+    /// Pushes a stereo pair + timestamp into the bounded input cache.
+    void CacheImages(const cv::Mat &left_img, const cv::Mat &right_img, TimeType& time);
+    bool ReceiveImage(void);
+    /// Loads intrinsics (K), distortion (D), projection (P), rectification (R)
+    /// and image size from a calibration YAML.
+    static void ReadParams(const std::string& yaml_file, cv::Mat& K, cv::Mat& D, cv::Mat& P, cv::Mat& R, cv::Size& size);
+    void GetDepthParams(CameraParameters& camera_params);
+private:
+    void InitConfig(const std::string& config_file);
+
+    void ReprojectPointsTo3D();
+
+    void StereoMatch();
+private:
+    std::mutex mutex_image_;
+private:
+    ///cache data (bounded by kQueueSize)
+    std::queue<std::pair<cv::Mat,cv::Mat>> image_cache_;
+    std::queue<std::chrono::system_clock::time_point> time_cache_;
+private:
+    // rectification remap tables (left/right, x/y)
+    cv::Mat map_l_x, map_l_y, map_r_x, map_r_y;
+    std::shared_ptr<CameraParameters> camera_ptr_;
+    std::shared_ptr<CoordinateTransform> tf_ptr_;
+    std::shared_ptr<adc::StereoSGM> sgm_ptr_;
+    float max_depth_;
+    int input_type_;
+    int input_bytes_;
+    int output_bytes_;
+
+    // SGM tuning parameters (defaults overridable via config, presumably).
+    int disp_size_ = 64;
+    int output_depth_ = 16;
+    bool subpixel_ = true;
+    int P1_ = 10;
+    int P2_ = 120;
+    float uniqueness_ = 0.95f;
+    int top_row_ = 0;
+
+    const size_t kQueueSize = 10;
+
+    // [sic] "dispartity" is a typo for "disparity" (private, harmless)
+    cv::Mat dispartity_, dispartity_32f_;
+    cv::Mat left_image_, right_image_;
+    std::chrono::system_clock::time_point  current_time_;
+
+    // GPU buffers for SGM input/output
+    std::shared_ptr<device_buffer> d_left_ptr_;
+    std::shared_ptr<device_buffer> d_right_ptr_;
+    std::shared_ptr<device_buffer> d_dispartity_ptr_;
+    PointCloudType point_cloud_;
+    PointColorType point_color_;
+    cv::Mat point_cloud_mat_;
+
+
+    PointColorCallback point_color_callback_;
+    PointCloudCallback point_cloud_callback_;
+    PointAndColorCallback poind_and_color_callback_;
+    PointCloudMatCallback point_cloud_mat_callback_;
+    PointCloudMatAndImageColorCallback point_cloud_mat_and_image_color_callback_;
+};
+
+}
+
+#endif // ! STEREO_VISION_SGM_APP_H
+
+

+ 66 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/cuda_utils.h

@@ -0,0 +1,66 @@
+/**
+ *  \brerf 
+ *  \author pengcheng(pengcheng@adcsoft.cn)
+ *  \date 2020.09.11
+ *  \attention 
+ * 
+*/
+
+#ifndef _COMMON_CUDA_UTILS__H___
+#define _COMMON_CUDA_UTILS__H___
+
+
+#include <assert.h>
+#include "cuda.h"
+#include "cuda_runtime.h"
+#include <cudnn.h>
+#include <iostream>
+
+#ifndef CUDA_CHECK
+
+// Evaluates a CUDA runtime call; on failure prints the error code with
+// file/line to stderr and aborts via assert(0).
+// NOTE(review): assert compiles out under NDEBUG, so release builds only log
+// the error and keep running.
+#define CUDA_CHECK(callstr)                                                                    \
+    {                                                                                          \
+        cudaError_t error_code = callstr;                                                      \
+        if (error_code != cudaSuccess) {                                                       \
+            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__<<std::endl; \
+            assert(0);                                                                         \
+        }                                                                                      \
+    }
+
+#endif
+
+
+// Allocates `mem_size` bytes of page-locked (pinned) host memory via
+// cudaHostAlloc; exits the process on failure.
+// NOTE(review): in debug builds the nullptr check is redundant — CUDA_CHECK
+// already asserts on any cudaHostAlloc error; it only takes effect when
+// NDEBUG disables that assert.
+inline void* SafeHostMalloc(size_t mem_size)
+{
+    void * host_mem;
+    CUDA_CHECK(cudaHostAlloc(&host_mem, mem_size, cudaHostAllocDefault));
+    if(host_mem == nullptr)
+    {
+        std::cerr << "CPU Out of memory" << std::endl;
+        exit(1);
+    }
+    return host_mem;
+}
+
+// Allocates `memSize` bytes of device memory via cudaMalloc; exits the
+// process on failure.
+// NOTE(review): as with SafeHostMalloc, the nullptr check only matters in
+// NDEBUG builds where CUDA_CHECK's assert is compiled out.
+inline void* SafeCudaMalloc(size_t memSize)
+{
+    void* deviceMem;
+    CUDA_CHECK(cudaMalloc((void**)&deviceMem, memSize));
+    if (deviceMem == nullptr)
+    {
+        std::cerr << "GPU Out of memory" << std::endl;
+        exit(1);
+    }
+    return deviceMem;
+}
+
+// Frees device memory previously obtained from cudaMalloc/SafeCudaMalloc.
+// The null guard is defensive; passing nullptr to cudaFree is already a no-op.
+inline void SafeCudaFree(void* device_mem)
+{
+    if(device_mem != nullptr)
+    {
+        CUDA_CHECK(cudaFree(device_mem));
+    }
+}
+
+#endif // !_COMMON_CUDA_UTILS__H_
+

+ 2209 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/impl/cxxopts.hpp

@@ -0,0 +1,2209 @@
+/*
+Copyright (c) 2014, 2015, 2016, 2017 Jarryd Beck
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+#ifndef CXXOPTS_HPP_INCLUDED
+#define CXXOPTS_HPP_INCLUDED
+
+#include <cstring>
+#include <cctype>
+#include <exception>
+#include <iostream>
+#include <limits>
+#include <map>
+#include <memory>
+#include <regex>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#ifdef __cpp_lib_optional
+#include <optional>
+#define CXXOPTS_HAS_OPTIONAL
+#endif
+
+#ifndef CXXOPTS_VECTOR_DELIMITER
+#define CXXOPTS_VECTOR_DELIMITER ','
+#endif
+
+#define CXXOPTS__VERSION_MAJOR 2
+#define CXXOPTS__VERSION_MINOR 2
+#define CXXOPTS__VERSION_PATCH 0
+
+// Vendored third-party library: cxxopts v2.2.0 (MIT licence, see header
+// above). Prefer syncing with upstream over patching this copy locally.
+namespace cxxopts
+{
+  static constexpr struct {
+    uint8_t major, minor, patch;
+  } version = {
+    CXXOPTS__VERSION_MAJOR,
+    CXXOPTS__VERSION_MINOR,
+    CXXOPTS__VERSION_PATCH
+  };
+}
+
+//when we ask cxxopts to use Unicode, help strings are processed using ICU,
+//which results in the correct lengths being computed for strings when they
+//are formatted for the help output
+//it is necessary to make sure that <unicode/unistr.h> can be found by the
+//compiler, and that icu-uc is linked in to the binary.
+
+#ifdef CXXOPTS_USE_UNICODE
+#include <unicode/unistr.h>
+
+namespace cxxopts
+{
+  typedef icu::UnicodeString String;
+
+  inline
+  String
+  toLocalString(std::string s)
+  {
+    return icu::UnicodeString::fromUTF8(std::move(s));
+  }
+
+  class UnicodeStringIterator : public
+    std::iterator<std::forward_iterator_tag, int32_t>
+  {
+    public:
+
+    UnicodeStringIterator(const icu::UnicodeString* string, int32_t pos)
+    : s(string)
+    , i(pos)
+    {
+    }
+
+    value_type
+    operator*() const
+    {
+      return s->char32At(i);
+    }
+
+    bool
+    operator==(const UnicodeStringIterator& rhs) const
+    {
+      return s == rhs.s && i == rhs.i;
+    }
+
+    bool
+    operator!=(const UnicodeStringIterator& rhs) const
+    {
+      return !(*this == rhs);
+    }
+
+    UnicodeStringIterator&
+    operator++()
+    {
+      ++i;
+      return *this;
+    }
+
+    UnicodeStringIterator
+    operator+(int32_t v)
+    {
+      return UnicodeStringIterator(s, i + v);
+    }
+
+    private:
+    const icu::UnicodeString* s;
+    int32_t i;
+  };
+
+  inline
+  String&
+  stringAppend(String&s, String a)
+  {
+    return s.append(std::move(a));
+  }
+
+  inline
+  String&
+  stringAppend(String& s, int n, UChar32 c)
+  {
+    for (int i = 0; i != n; ++i)
+    {
+      s.append(c);
+    }
+
+    return s;
+  }
+
+  template <typename Iterator>
+  String&
+  stringAppend(String& s, Iterator begin, Iterator end)
+  {
+    while (begin != end)
+    {
+      s.append(*begin);
+      ++begin;
+    }
+
+    return s;
+  }
+
+  inline
+  size_t
+  stringLength(const String& s)
+  {
+    return s.length();
+  }
+
+  inline
+  std::string
+  toUTF8String(const String& s)
+  {
+    std::string result;
+    s.toUTF8String(result);
+
+    return result;
+  }
+
+  inline
+  bool
+  empty(const String& s)
+  {
+    return s.isEmpty();
+  }
+}
+
+namespace std
+{
+  inline
+  cxxopts::UnicodeStringIterator
+  begin(const icu::UnicodeString& s)
+  {
+    return cxxopts::UnicodeStringIterator(&s, 0);
+  }
+
+  inline
+  cxxopts::UnicodeStringIterator
+  end(const icu::UnicodeString& s)
+  {
+    return cxxopts::UnicodeStringIterator(&s, s.length());
+  }
+}
+
+//ifdef CXXOPTS_USE_UNICODE
+#else
+
+namespace cxxopts
+{
+  typedef std::string String;
+
+  template <typename T>
+  T
+  toLocalString(T&& t)
+  {
+    return std::forward<T>(t);
+  }
+
+  inline
+  size_t
+  stringLength(const String& s)
+  {
+    return s.length();
+  }
+
+  inline
+  String&
+  stringAppend(String&s, String a)
+  {
+    return s.append(std::move(a));
+  }
+
+  inline
+  String&
+  stringAppend(String& s, size_t n, char c)
+  {
+    return s.append(n, c);
+  }
+
+  template <typename Iterator>
+  String&
+  stringAppend(String& s, Iterator begin, Iterator end)
+  {
+    return s.append(begin, end);
+  }
+
+  template <typename T>
+  std::string
+  toUTF8String(T&& t)
+  {
+    return std::forward<T>(t);
+  }
+
+  inline
+  bool
+  empty(const std::string& s)
+  {
+    return s.empty();
+  }
+}
+
+//ifdef CXXOPTS_USE_UNICODE
+#endif
+
+namespace cxxopts
+{
+  namespace
+  {
+#ifdef _WIN32
+    const std::string LQUOTE("\'");
+    const std::string RQUOTE("\'");
+#else
+    const std::string LQUOTE("‘");
+    const std::string RQUOTE("’");
+#endif
+  }
+
+  class Value : public std::enable_shared_from_this<Value>
+  {
+    public:
+
+    virtual ~Value() = default;
+
+    virtual
+    std::shared_ptr<Value>
+    clone() const = 0;
+
+    virtual void
+    parse(const std::string& text) const = 0;
+
+    virtual void
+    parse() const = 0;
+
+    virtual bool
+    has_default() const = 0;
+
+    virtual bool
+    is_container() const = 0;
+
+    virtual bool
+    has_implicit() const = 0;
+
+    virtual std::string
+    get_default_value() const = 0;
+
+    virtual std::string
+    get_implicit_value() const = 0;
+
+    virtual std::shared_ptr<Value>
+    default_value(const std::string& value) = 0;
+
+    virtual std::shared_ptr<Value>
+    implicit_value(const std::string& value) = 0;
+
+    virtual std::shared_ptr<Value>
+    no_implicit_value() = 0;
+
+    virtual bool
+    is_boolean() const = 0;
+  };
+
+  class OptionException : public std::exception
+  {
+    public:
+    OptionException(const std::string& message)
+    : m_message(message)
+    {
+    }
+
+    virtual const char*
+    what() const noexcept
+    {
+      return m_message.c_str();
+    }
+
+    private:
+    std::string m_message;
+  };
+
+  class OptionSpecException : public OptionException
+  {
+    public:
+
+    OptionSpecException(const std::string& message)
+    : OptionException(message)
+    {
+    }
+  };
+
+  class OptionParseException : public OptionException
+  {
+    public:
+    OptionParseException(const std::string& message)
+    : OptionException(message)
+    {
+    }
+  };
+
+  class option_exists_error : public OptionSpecException
+  {
+    public:
+    option_exists_error(const std::string& option)
+    : OptionSpecException("Option " + LQUOTE + option + RQUOTE + " already exists")
+    {
+    }
+  };
+
+  class invalid_option_format_error : public OptionSpecException
+  {
+    public:
+    invalid_option_format_error(const std::string& format)
+    : OptionSpecException("Invalid option format " + LQUOTE + format + RQUOTE)
+    {
+    }
+  };
+
+  class option_syntax_exception : public OptionParseException {
+    public:
+    option_syntax_exception(const std::string& text)
+    : OptionParseException("Argument " + LQUOTE + text + RQUOTE +
+        " starts with a - but has incorrect syntax")
+    {
+    }
+  };
+
+  class option_not_exists_exception : public OptionParseException
+  {
+    public:
+    option_not_exists_exception(const std::string& option)
+    : OptionParseException("Option " + LQUOTE + option + RQUOTE + " does not exist")
+    {
+    }
+  };
+
+  class missing_argument_exception : public OptionParseException
+  {
+    public:
+    missing_argument_exception(const std::string& option)
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE + " is missing an argument"
+      )
+    {
+    }
+  };
+
+  class option_requires_argument_exception : public OptionParseException
+  {
+    public:
+    option_requires_argument_exception(const std::string& option)
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE + " requires an argument"
+      )
+    {
+    }
+  };
+
+  class option_not_has_argument_exception : public OptionParseException
+  {
+    public:
+    option_not_has_argument_exception
+    (
+      const std::string& option,
+      const std::string& arg
+    )
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE +
+        " does not take an argument, but argument " +
+        LQUOTE + arg + RQUOTE + " given"
+      )
+    {
+    }
+  };
+
+  class option_not_present_exception : public OptionParseException
+  {
+    public:
+    option_not_present_exception(const std::string& option)
+    : OptionParseException("Option " + LQUOTE + option + RQUOTE + " not present")
+    {
+    }
+  };
+
+  class argument_incorrect_type : public OptionParseException
+  {
+    public:
+    argument_incorrect_type
+    (
+      const std::string& arg
+    )
+    : OptionParseException(
+        "Argument " + LQUOTE + arg + RQUOTE + " failed to parse"
+      )
+    {
+    }
+  };
+
+  class option_required_exception : public OptionParseException
+  {
+    public:
+    option_required_exception(const std::string& option)
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE + " is required but not present"
+      )
+    {
+    }
+  };
+
+  // Vendored cxxopts helper: throws T{text} when exceptions are enabled;
+  // under CXXOPTS_NO_EXCEPTIONS it constructs the exception, prints what()
+  // to stderr and aborts instead.
+  template <typename T>
+  void throw_or_mimic(const std::string& text)
+  {
+    static_assert(std::is_base_of<std::exception, T>::value,
+                  "throw_or_mimic only works on std::exception and "
+                  "deriving classes");
+
+#ifndef CXXOPTS_NO_EXCEPTIONS
+    // If CXXOPTS_NO_EXCEPTIONS is not defined, just throw
+    throw T{text};
+#else
+    // Otherwise manually instantiate the exception, print what() to stderr,
+    // and abort
+    T exception{text};
+    std::cerr << exception.what() << std::endl;
+    std::cerr << "Aborting (exceptions disabled)..." << std::endl;
+    std::abort();
+#endif
+  }
+
+  namespace values
+  {
+    namespace
+    {
+      std::basic_regex<char> integer_pattern
+        ("(-)?(0x)?([0-9a-zA-Z]+)|((0x)?0)");
+      std::basic_regex<char> truthy_pattern
+        ("(t|T)(rue)?|1");
+      std::basic_regex<char> falsy_pattern
+        ("(f|F)(alse)?|0");
+    }
+
+    namespace detail
+    {
+      template <typename T, bool B>
+      struct SignedCheck;
+
+      template <typename T>
+      struct SignedCheck<T, true>
+      {
+        template <typename U>
+        void
+        operator()(bool negative, U u, const std::string& text)
+        {
+          if (negative)
+          {
+            if (u > static_cast<U>((std::numeric_limits<T>::min)()))
+            {
+              throw_or_mimic<argument_incorrect_type>(text);
+            }
+          }
+          else
+          {
+            if (u > static_cast<U>((std::numeric_limits<T>::max)()))
+            {
+              throw_or_mimic<argument_incorrect_type>(text);
+            }
+          }
+        }
+      };
+
+      template <typename T>
+      struct SignedCheck<T, false>
+      {
+        template <typename U>
+        void
+        operator()(bool, U, const std::string&) {}
+      };
+
+      template <typename T, typename U>
+      void
+      check_signed_range(bool negative, U value, const std::string& text)
+      {
+        SignedCheck<T, std::numeric_limits<T>::is_signed>()(negative, value, text);
+      }
+    }
+
+    template <typename R, typename T>
+    R
+    checked_negate(T&& t, const std::string&, std::true_type)
+    {
+      // if we got to here, then `t` is a positive number that fits into
+      // `R`. So to avoid MSVC C4146, we first cast it to `R`.
+      // See https://github.com/jarro2783/cxxopts/issues/62 for more details.
+      return -static_cast<R>(t-1)-1;
+    }
+
+    template <typename R, typename T>
+    T
+    checked_negate(T&& t, const std::string& text, std::false_type)
+    {
+      throw_or_mimic<argument_incorrect_type>(text);
+      return t;
+    }
+
+    template <typename T>
+    void
+    integer_parser(const std::string& text, T& value)
+    {
+      std::smatch match;
+      std::regex_match(text, match, integer_pattern);
+
+      if (match.length() == 0)
+      {
+        throw_or_mimic<argument_incorrect_type>(text);
+      }
+
+      if (match.length(4) > 0)
+      {
+        value = 0;
+        return;
+      }
+
+      using US = typename std::make_unsigned<T>::type;
+
+      constexpr bool is_signed = std::numeric_limits<T>::is_signed;
+      const bool negative = match.length(1) > 0;
+      const uint8_t base = match.length(2) > 0 ? 16 : 10;
+
+      auto value_match = match[3];
+
+      US result = 0;
+
+      for (auto iter = value_match.first; iter != value_match.second; ++iter)
+      {
+        US digit = 0;
+
+        if (*iter >= '0' && *iter <= '9')
+        {
+          digit = static_cast<US>(*iter - '0');
+        }
+        else if (base == 16 && *iter >= 'a' && *iter <= 'f')
+        {
+          digit = static_cast<US>(*iter - 'a' + 10);
+        }
+        else if (base == 16 && *iter >= 'A' && *iter <= 'F')
+        {
+          digit = static_cast<US>(*iter - 'A' + 10);
+        }
+        else
+        {
+          throw_or_mimic<argument_incorrect_type>(text);
+        }
+
+        US next = result * base + digit;
+        if (result > next)
+        {
+          throw_or_mimic<argument_incorrect_type>(text);
+        }
+
+        result = next;
+      }
+
+      detail::check_signed_range<T>(negative, result, text);
+
+      if (negative)
+      {
+        value = checked_negate<T>(result,
+          text,
+          std::integral_constant<bool, is_signed>());
+      }
+      else
+      {
+        value = static_cast<T>(result);
+      }
+    }
+
+    template <typename T>
+    void stringstream_parser(const std::string& text, T& value)
+    {
+      std::stringstream in(text);
+      in >> value;
+      if (!in) {
+        throw_or_mimic<argument_incorrect_type>(text);
+      }
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint8_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int8_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint16_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int16_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint32_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int32_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint64_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int64_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, bool& value)
+    {
+      std::smatch result;
+      std::regex_match(text, result, truthy_pattern);
+
+      if (!result.empty())
+      {
+        value = true;
+        return;
+      }
+
+      std::regex_match(text, result, falsy_pattern);
+      if (!result.empty())
+      {
+        value = false;
+        return;
+      }
+
+      throw_or_mimic<argument_incorrect_type>(text);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, std::string& value)
+    {
+      value = text;
+    }
+
+    // The fallback parser. It uses the stringstream parser to parse all types
+    // that have not been overloaded explicitly.  It has to be placed in the
+    // source code before all other more specialized templates.
+    template <typename T>
+    void
+    parse_value(const std::string& text, T& value) {
+      stringstream_parser(text, value);
+    }
+
+    template <typename T>
+    void
+    parse_value(const std::string& text, std::vector<T>& value)
+    {
+      std::stringstream in(text);
+      std::string token;
+      while(in.eof() == false && std::getline(in, token, CXXOPTS_VECTOR_DELIMITER)) {
+        T v;
+        parse_value(token, v);
+        value.emplace_back(std::move(v));
+      }
+    }
+
+#ifdef CXXOPTS_HAS_OPTIONAL
+    template <typename T>
+    void
+    parse_value(const std::string& text, std::optional<T>& value)
+    {
+      T result;
+      parse_value(text, result);
+      value = std::move(result);
+    }
+#endif
+
+    inline
+    void parse_value(const std::string& text, char& c)
+    {
+      if (text.length() != 1)
+      {
+        throw_or_mimic<argument_incorrect_type>(text);
+      }
+
+      c = text[0];
+    }
+
+    template <typename T>
+    struct type_is_container
+    {
+      static constexpr bool value = false;
+    };
+
+    template <typename T>
+    struct type_is_container<std::vector<T>>
+    {
+      static constexpr bool value = true;
+    };
+
+    template <typename T>
+    class abstract_value : public Value
+    {
+      using Self = abstract_value<T>;
+
+      public:
+      abstract_value()
+      : m_result(std::make_shared<T>())
+      , m_store(m_result.get())
+      {
+      }
+
+      abstract_value(T* t)
+      : m_store(t)
+      {
+      }
+
+      virtual ~abstract_value() = default;
+
+      abstract_value(const abstract_value& rhs)
+      {
+        if (rhs.m_result)
+        {
+          m_result = std::make_shared<T>();
+          m_store = m_result.get();
+        }
+        else
+        {
+          m_store = rhs.m_store;
+        }
+
+        m_default = rhs.m_default;
+        m_implicit = rhs.m_implicit;
+        m_default_value = rhs.m_default_value;
+        m_implicit_value = rhs.m_implicit_value;
+      }
+
+      void
+      parse(const std::string& text) const
+      {
+        parse_value(text, *m_store);
+      }
+
+      bool
+      is_container() const
+      {
+        return type_is_container<T>::value;
+      }
+
+      void
+      parse() const
+      {
+        parse_value(m_default_value, *m_store);
+      }
+
+      bool
+      has_default() const
+      {
+        return m_default;
+      }
+
+      bool
+      has_implicit() const
+      {
+        return m_implicit;
+      }
+
+      std::shared_ptr<Value>
+      default_value(const std::string& value)
+      {
+        m_default = true;
+        m_default_value = value;
+        return shared_from_this();
+      }
+
+      std::shared_ptr<Value>
+      implicit_value(const std::string& value)
+      {
+        m_implicit = true;
+        m_implicit_value = value;
+        return shared_from_this();
+      }
+
+      std::shared_ptr<Value>
+      no_implicit_value()
+      {
+        m_implicit = false;
+        return shared_from_this();
+      }
+
+      std::string
+      get_default_value() const
+      {
+        return m_default_value;
+      }
+
+      std::string
+      get_implicit_value() const
+      {
+        return m_implicit_value;
+      }
+
+      bool
+      is_boolean() const
+      {
+        return std::is_same<T, bool>::value;
+      }
+
+      const T&
+      get() const
+      {
+        if (m_store == nullptr)
+        {
+          return *m_result;
+        }
+        else
+        {
+          return *m_store;
+        }
+      }
+
+      protected:
+      std::shared_ptr<T> m_result;
+      T* m_store;
+
+      bool m_default = false;
+      bool m_implicit = false;
+
+      std::string m_default_value;
+      std::string m_implicit_value;
+    };
+
+    template <typename T>
+    class standard_value : public abstract_value<T>
+    {
+      public:
+      using abstract_value<T>::abstract_value;
+
+      std::shared_ptr<Value>
+      clone() const
+      {
+        return std::make_shared<standard_value<T>>(*this);
+      }
+    };
+
+    template <>
+    class standard_value<bool> : public abstract_value<bool>
+    {
+      public:
+      ~standard_value() = default;
+
+      standard_value()
+      {
+        set_default_and_implicit();
+      }
+
+      standard_value(bool* b)
+      : abstract_value(b)
+      {
+        set_default_and_implicit();
+      }
+
+      std::shared_ptr<Value>
+      clone() const
+      {
+        return std::make_shared<standard_value<bool>>(*this);
+      }
+
+      private:
+
+      void
+      set_default_and_implicit()
+      {
+        m_default = true;
+        m_default_value = "false";
+        m_implicit = true;
+        m_implicit_value = "true";
+      }
+    };
+  }
+
+  template <typename T>
+  std::shared_ptr<Value>
+  value()
+  {
+    return std::make_shared<values::standard_value<T>>();
+  }
+
+  template <typename T>
+  std::shared_ptr<Value>
+  value(T& t)
+  {
+    return std::make_shared<values::standard_value<T>>(&t);
+  }
+
+  class OptionAdder;
+
+  class OptionDetails
+  {
+    public:
+    OptionDetails
+    (
+      const std::string& short_,
+      const std::string& long_,
+      const String& desc,
+      std::shared_ptr<const Value> val
+    )
+    : m_short(short_)
+    , m_long(long_)
+    , m_desc(desc)
+    , m_value(val)
+    , m_count(0)
+    {
+    }
+
+    OptionDetails(const OptionDetails& rhs)
+    : m_desc(rhs.m_desc)
+    , m_count(rhs.m_count)
+    {
+      m_value = rhs.m_value->clone();
+    }
+
+    OptionDetails(OptionDetails&& rhs) = default;
+
+    const String&
+    description() const
+    {
+      return m_desc;
+    }
+
+    const Value& value() const {
+        return *m_value;
+    }
+
+    std::shared_ptr<Value>
+    make_storage() const
+    {
+      return m_value->clone();
+    }
+
+    const std::string&
+    short_name() const
+    {
+      return m_short;
+    }
+
+    const std::string&
+    long_name() const
+    {
+      return m_long;
+    }
+
+    private:
+    std::string m_short;
+    std::string m_long;
+    String m_desc;
+    std::shared_ptr<const Value> m_value;
+    int m_count;
+  };
+
+  struct HelpOptionDetails
+  {
+    std::string s;
+    std::string l;
+    String desc;
+    bool has_default;
+    std::string default_value;
+    bool has_implicit;
+    std::string implicit_value;
+    std::string arg_help;
+    bool is_container;
+    bool is_boolean;
+  };
+
+  struct HelpGroupDetails
+  {
+    std::string name;
+    std::string description;
+    std::vector<HelpOptionDetails> options;
+  };
+
+  class OptionValue
+  {
+    public:
+    void
+    parse
+    (
+      std::shared_ptr<const OptionDetails> details,
+      const std::string& text
+    )
+    {
+      ensure_value(details);
+      ++m_count;
+      m_value->parse(text);
+    }
+
+    void
+    parse_default(std::shared_ptr<const OptionDetails> details)
+    {
+      ensure_value(details);
+      m_default = true;
+      m_value->parse();
+    }
+
+    size_t
+    count() const noexcept
+    {
+      return m_count;
+    }
+
+    // TODO: maybe default options should count towards the number of arguments
+    bool
+    has_default() const noexcept
+    {
+      return m_default;
+    }
+
+    template <typename T>
+    const T&
+    as() const
+    {
+      if (m_value == nullptr) {
+        throw_or_mimic<std::domain_error>("No value");
+      }
+
+#ifdef CXXOPTS_NO_RTTI
+      return static_cast<const values::standard_value<T>&>(*m_value).get();
+#else
+      return dynamic_cast<const values::standard_value<T>&>(*m_value).get();
+#endif
+    }
+
+    private:
+    void
+    ensure_value(std::shared_ptr<const OptionDetails> details)
+    {
+      if (m_value == nullptr)
+      {
+        m_value = details->make_storage();
+      }
+    }
+
+    std::shared_ptr<Value> m_value;
+    size_t m_count = 0;
+    bool m_default = false;
+  };
+
+  class KeyValue
+  {
+    public:
+    KeyValue(std::string key_, std::string value_)
+    : m_key(std::move(key_))
+    , m_value(std::move(value_))
+    {
+    }
+
+    const
+    std::string&
+    key() const
+    {
+      return m_key;
+    }
+
+    const
+    std::string&
+    value() const
+    {
+      return m_value;
+    }
+
+    template <typename T>
+    T
+    as() const
+    {
+      T result;
+      values::parse_value(m_value, result);
+      return result;
+    }
+
+    private:
+    std::string m_key;
+    std::string m_value;
+  };
+
+  class ParseResult
+  {
+    public:
+
+    ParseResult(
+      const std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+      >,
+      std::vector<std::string>,
+      bool allow_unrecognised,
+      int&, char**&);
+
+    size_t
+    count(const std::string& o) const
+    {
+      auto iter = m_options->find(o);
+      if (iter == m_options->end())
+      {
+        return 0;
+      }
+
+      auto riter = m_results.find(iter->second);
+
+      return riter->second.count();
+    }
+
+    const OptionValue&
+    operator[](const std::string& option) const
+    {
+      auto iter = m_options->find(option);
+
+      if (iter == m_options->end())
+      {
+        throw_or_mimic<option_not_present_exception>(option);
+      }
+
+      auto riter = m_results.find(iter->second);
+
+      return riter->second;
+    }
+
+    const std::vector<KeyValue>&
+    arguments() const
+    {
+      return m_sequential;
+    }
+
+    private:
+
+    void
+    parse(int& argc, char**& argv);
+
+    void
+    add_to_option(const std::string& option, const std::string& arg);
+
+    bool
+    consume_positional(std::string a);
+
+    void
+    parse_option
+    (
+      std::shared_ptr<OptionDetails> value,
+      const std::string& name,
+      const std::string& arg = ""
+    );
+
+    void
+    parse_default(std::shared_ptr<OptionDetails> details);
+
+    void
+    checked_parse_arg
+    (
+      int argc,
+      char* argv[],
+      int& current,
+      std::shared_ptr<OptionDetails> value,
+      const std::string& name
+    );
+
+    const std::shared_ptr<
+      std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+    > m_options;
+    std::vector<std::string> m_positional;
+    std::vector<std::string>::iterator m_next_positional;
+    std::unordered_set<std::string> m_positional_set;
+    std::unordered_map<std::shared_ptr<OptionDetails>, OptionValue> m_results;
+
+    bool m_allow_unrecognised;
+
+    std::vector<KeyValue> m_sequential;
+  };
+
+  struct Option
+  {
+    Option
+    (
+      const std::string& opts,
+      const std::string& desc,
+      const std::shared_ptr<const Value>& value = ::cxxopts::value<bool>(),
+      const std::string& arg_help = ""
+    )
+    : opts_(opts)
+    , desc_(desc)
+    , value_(value)
+    , arg_help_(arg_help)
+    {
+    }
+
+    std::string opts_;
+    std::string desc_;
+    std::shared_ptr<const Value> value_;
+    std::string arg_help_;
+  };
+
+  class Options
+  {
+    typedef std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+      OptionMap;
+    public:
+
+    Options(std::string program, std::string help_string = "")
+    : m_program(std::move(program))
+    , m_help_string(toLocalString(std::move(help_string)))
+    , m_custom_help("[OPTION...]")
+    , m_positional_help("positional parameters")
+    , m_show_positional(false)
+    , m_allow_unrecognised(false)
+    , m_options(std::make_shared<OptionMap>())
+    , m_next_positional(m_positional.end())
+    {
+    }
+
+    Options&
+    positional_help(std::string help_text)
+    {
+      m_positional_help = std::move(help_text);
+      return *this;
+    }
+
+    Options&
+    custom_help(std::string help_text)
+    {
+      m_custom_help = std::move(help_text);
+      return *this;
+    }
+
+    Options&
+    show_positional_help()
+    {
+      m_show_positional = true;
+      return *this;
+    }
+
+    Options&
+    allow_unrecognised_options()
+    {
+      m_allow_unrecognised = true;
+      return *this;
+    }
+
+    ParseResult
+    parse(int& argc, char**& argv);
+
+    OptionAdder
+    add_options(std::string group = "");
+
+    void
+    add_options
+    (
+      const std::string& group,
+      std::initializer_list<Option> options
+    );
+
+    void
+    add_option
+    (
+      const std::string& group,
+      const Option& option
+    );
+
+    void
+    add_option
+    (
+      const std::string& group,
+      const std::string& s,
+      const std::string& l,
+      std::string desc,
+      std::shared_ptr<const Value> value,
+      std::string arg_help
+    );
+
+    //parse positional arguments into the given option
+    void
+    parse_positional(std::string option);
+
+    void
+    parse_positional(std::vector<std::string> options);
+
+    void
+    parse_positional(std::initializer_list<std::string> options);
+
+    template <typename Iterator>
+    void
+    parse_positional(Iterator begin, Iterator end) {
+      parse_positional(std::vector<std::string>{begin, end});
+    }
+
+    std::string
+    help(const std::vector<std::string>& groups = {}) const;
+
+    const std::vector<std::string>
+    groups() const;
+
+    const HelpGroupDetails&
+    group_help(const std::string& group) const;
+
+    private:
+
+    void
+    add_one_option
+    (
+      const std::string& option,
+      std::shared_ptr<OptionDetails> details
+    );
+
+    String
+    help_one_group(const std::string& group) const;
+
+    void
+    generate_group_help
+    (
+      String& result,
+      const std::vector<std::string>& groups
+    ) const;
+
+    void
+    generate_all_groups_help(String& result) const;
+
+    std::string m_program;
+    String m_help_string;
+    std::string m_custom_help;
+    std::string m_positional_help;
+    bool m_show_positional;
+    bool m_allow_unrecognised;
+
+    std::shared_ptr<OptionMap> m_options;
+    std::vector<std::string> m_positional;
+    std::vector<std::string>::iterator m_next_positional;
+    std::unordered_set<std::string> m_positional_set;
+
+    //mapping from groups to help options
+    std::map<std::string, HelpGroupDetails> m_help;
+  };
+
+  class OptionAdder
+  {
+    public:
+
+    OptionAdder(Options& options, std::string group)
+    : m_options(options), m_group(std::move(group))
+    {
+    }
+
+    OptionAdder&
+    operator()
+    (
+      const std::string& opts,
+      const std::string& desc,
+      std::shared_ptr<const Value> value
+        = ::cxxopts::value<bool>(),
+      std::string arg_help = ""
+    );
+
+    private:
+    Options& m_options;
+    std::string m_group;
+  };
+
+  namespace
+  {
+    constexpr int OPTION_LONGEST = 30;
+    constexpr int OPTION_DESC_GAP = 2;
+
+    std::basic_regex<char> option_matcher
+      ("--([[:alnum:]][-_[:alnum:]]+)(=(.*))?|-([[:alnum:]]+)");
+
+    std::basic_regex<char> option_specifier
+      ("(([[:alnum:]]),)?[ ]*([[:alnum:]][-_[:alnum:]]*)?");
+
+    String
+    format_option
+    (
+      const HelpOptionDetails& o
+    )
+    {
+      auto& s = o.s;
+      auto& l = o.l;
+
+      String result = "  ";
+
+      if (s.size() > 0)
+      {
+        result += "-" + toLocalString(s) + ",";
+      }
+      else
+      {
+        result += "   ";
+      }
+
+      if (l.size() > 0)
+      {
+        result += " --" + toLocalString(l);
+      }
+
+      auto arg = o.arg_help.size() > 0 ? toLocalString(o.arg_help) : "arg";
+
+      if (!o.is_boolean)
+      {
+        if (o.has_implicit)
+        {
+          result += " [=" + arg + "(=" + toLocalString(o.implicit_value) + ")]";
+        }
+        else
+        {
+          result += " " + arg;
+        }
+      }
+
+      return result;
+    }
+
+    String
+    format_description
+    (
+      const HelpOptionDetails& o,
+      size_t start,
+      size_t width
+    )
+    {
+      auto desc = o.desc;
+
+      if (o.has_default && (!o.is_boolean || o.default_value != "false"))
+      {
+        if(o.default_value != "")
+        {
+          desc += toLocalString(" (default: " + o.default_value + ")");
+        }
+        else
+        {
+          desc += toLocalString(" (default: \"\")");
+        }
+      }
+
+      String result;
+
+      auto current = std::begin(desc);
+      auto startLine = current;
+      auto lastSpace = current;
+
+      auto size = size_t{};
+
+      while (current != std::end(desc))
+      {
+        if (*current == ' ')
+        {
+          lastSpace = current;
+        }
+
+        if (*current == '\n')
+        {
+          startLine = current + 1;
+          lastSpace = startLine;
+        }
+        else if (size > width)
+        {
+          if (lastSpace == startLine)
+          {
+            stringAppend(result, startLine, current + 1);
+            stringAppend(result, "\n");
+            stringAppend(result, start, ' ');
+            startLine = current + 1;
+            lastSpace = startLine;
+          }
+          else
+          {
+            stringAppend(result, startLine, lastSpace);
+            stringAppend(result, "\n");
+            stringAppend(result, start, ' ');
+            startLine = lastSpace + 1;
+          }
+          size = 0;
+        }
+        else
+        {
+          ++size;
+        }
+
+        ++current;
+      }
+
+      //append whatever is left
+      stringAppend(result, startLine, current);
+
+      return result;
+    }
+  }
+
+inline
+ParseResult::ParseResult
+(
+  const std::shared_ptr<
+    std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+  > options,
+  std::vector<std::string> positional,
+  bool allow_unrecognised,
+  int& argc, char**& argv
+)
+: m_options(options)
+, m_positional(std::move(positional))
+, m_next_positional(m_positional.begin())
+, m_allow_unrecognised(allow_unrecognised)
+{
+  parse(argc, argv);
+}
+
+inline
+void
+Options::add_options
+(
+  const std::string &group,
+  std::initializer_list<Option> options
+)
+{
+ OptionAdder option_adder(*this, group);
+ for (const auto &option: options)
+ {
+   option_adder(option.opts_, option.desc_, option.value_, option.arg_help_);
+ }
+}
+
+inline
+OptionAdder
+Options::add_options(std::string group)
+{
+  return OptionAdder(*this, std::move(group));
+}
+
+inline
+OptionAdder&
+OptionAdder::operator()
+(
+  const std::string& opts,
+  const std::string& desc,
+  std::shared_ptr<const Value> value,
+  std::string arg_help
+)
+{
+  std::match_results<const char*> result;
+  std::regex_match(opts.c_str(), result, option_specifier);
+
+  if (result.empty())
+  {
+    throw_or_mimic<invalid_option_format_error>(opts);
+  }
+
+  const auto& short_match = result[2];
+  const auto& long_match = result[3];
+
+  if (!short_match.length() && !long_match.length())
+  {
+    throw_or_mimic<invalid_option_format_error>(opts);
+  } else if (long_match.length() == 1 && short_match.length())
+  {
+    throw_or_mimic<invalid_option_format_error>(opts);
+  }
+
+  auto option_names = []
+  (
+    const std::sub_match<const char*>& short_,
+    const std::sub_match<const char*>& long_
+  )
+  {
+    if (long_.length() == 1)
+    {
+      return std::make_tuple(long_.str(), short_.str());
+    }
+    else
+    {
+      return std::make_tuple(short_.str(), long_.str());
+    }
+  }(short_match, long_match);
+
+  m_options.add_option
+  (
+    m_group,
+    std::get<0>(option_names),
+    std::get<1>(option_names),
+    desc,
+    value,
+    std::move(arg_help)
+  );
+
+  return *this;
+}
+
+inline
+void
+ParseResult::parse_default(std::shared_ptr<OptionDetails> details)
+{
+  m_results[details].parse_default(details);
+}
+
+inline
+void
+ParseResult::parse_option
+(
+  std::shared_ptr<OptionDetails> value,
+  const std::string& /*name*/,
+  const std::string& arg
+)
+{
+  auto& result = m_results[value];
+  result.parse(value, arg);
+
+  m_sequential.emplace_back(value->long_name(), arg);
+}
+
+inline
+void
+ParseResult::checked_parse_arg
+(
+  int argc,
+  char* argv[],
+  int& current,
+  std::shared_ptr<OptionDetails> value,
+  const std::string& name
+)
+{
+  if (current + 1 >= argc)
+  {
+    if (value->value().has_implicit())
+    {
+      parse_option(value, name, value->value().get_implicit_value());
+    }
+    else
+    {
+      throw_or_mimic<missing_argument_exception>(name);
+    }
+  }
+  else
+  {
+    if (value->value().has_implicit())
+    {
+      parse_option(value, name, value->value().get_implicit_value());
+    }
+    else
+    {
+      parse_option(value, name, argv[current + 1]);
+      ++current;
+    }
+  }
+}
+
+inline
+void
+ParseResult::add_to_option(const std::string& option, const std::string& arg)
+{
+  auto iter = m_options->find(option);
+
+  if (iter == m_options->end())
+  {
+    throw_or_mimic<option_not_exists_exception>(option);
+  }
+
+  parse_option(iter->second, option, arg);
+}
+
+inline
+bool
+ParseResult::consume_positional(std::string a)
+{
+  while (m_next_positional != m_positional.end())
+  {
+    auto iter = m_options->find(*m_next_positional);
+    if (iter != m_options->end())
+    {
+      auto& result = m_results[iter->second];
+      if (!iter->second->value().is_container())
+      {
+        if (result.count() == 0)
+        {
+          add_to_option(*m_next_positional, a);
+          ++m_next_positional;
+          return true;
+        }
+        else
+        {
+          ++m_next_positional;
+          continue;
+        }
+      }
+      else
+      {
+        add_to_option(*m_next_positional, a);
+        return true;
+      }
+    }
+    else
+    {
+      throw_or_mimic<option_not_exists_exception>(*m_next_positional);
+    }
+  }
+
+  return false;
+}
+
+inline
+void
+Options::parse_positional(std::string option)
+{
+  parse_positional(std::vector<std::string>{std::move(option)});
+}
+
+inline
+void
+Options::parse_positional(std::vector<std::string> options)
+{
+  m_positional = std::move(options);
+  m_next_positional = m_positional.begin();
+
+  m_positional_set.insert(m_positional.begin(), m_positional.end());
+}
+
+inline
+void
+Options::parse_positional(std::initializer_list<std::string> options)
+{
+  parse_positional(std::vector<std::string>(std::move(options)));
+}
+
+inline
+ParseResult
+Options::parse(int& argc, char**& argv)
+{
+  ParseResult result(m_options, m_positional, m_allow_unrecognised, argc, argv);
+  return result;
+}
+
+inline
+void
+ParseResult::parse(int& argc, char**& argv)
+{
+  int current = 1;
+
+  int nextKeep = 1;
+
+  bool consume_remaining = false;
+
+  while (current != argc)
+  {
+    if (strcmp(argv[current], "--") == 0)
+    {
+      consume_remaining = true;
+      ++current;
+      break;
+    }
+
+    std::match_results<const char*> result;
+    std::regex_match(argv[current], result, option_matcher);
+
+    if (result.empty())
+    {
+      //not a flag
+
+      // but if it starts with a `-`, then it's an error
+      if (argv[current][0] == '-' && argv[current][1] != '\0') {
+        if (!m_allow_unrecognised) {
+          throw_or_mimic<option_syntax_exception>(argv[current]);
+        }
+      }
+
+      //if true is returned here then it was consumed, otherwise it is
+      //ignored
+      if (consume_positional(argv[current]))
+      {
+      }
+      else
+      {
+        argv[nextKeep] = argv[current];
+        ++nextKeep;
+      }
+      //if we return from here then it was parsed successfully, so continue
+    }
+    else
+    {
+      //short or long option?
+      if (result[4].length() != 0)
+      {
+        const std::string& s = result[4];
+
+        for (std::size_t i = 0; i != s.size(); ++i)
+        {
+          std::string name(1, s[i]);
+          auto iter = m_options->find(name);
+
+          if (iter == m_options->end())
+          {
+            if (m_allow_unrecognised)
+            {
+              continue;
+            }
+            else
+            {
+              //error
+              throw_or_mimic<option_not_exists_exception>(name);
+            }
+          }
+
+          auto value = iter->second;
+
+          if (i + 1 == s.size())
+          {
+            //it must be the last argument
+            checked_parse_arg(argc, argv, current, value, name);
+          }
+          else if (value->value().has_implicit())
+          {
+            parse_option(value, name, value->value().get_implicit_value());
+          }
+          else
+          {
+            //error
+            throw_or_mimic<option_requires_argument_exception>(name);
+          }
+        }
+      }
+      else if (result[1].length() != 0)
+      {
+        const std::string& name = result[1];
+
+        auto iter = m_options->find(name);
+
+        if (iter == m_options->end())
+        {
+          if (m_allow_unrecognised)
+          {
+            // keep unrecognised options in argument list, skip to next argument
+            argv[nextKeep] = argv[current];
+            ++nextKeep;
+            ++current;
+            continue;
+          }
+          else
+          {
+            //error
+            throw_or_mimic<option_not_exists_exception>(name);
+          }
+        }
+
+        auto opt = iter->second;
+
+        //equals provided for long option?
+        if (result[2].length() != 0)
+        {
+          //parse the option given
+
+          parse_option(opt, name, result[3]);
+        }
+        else
+        {
+          //parse the next argument
+          checked_parse_arg(argc, argv, current, opt, name);
+        }
+      }
+
+    }
+
+    ++current;
+  }
+
+  for (auto& opt : *m_options)
+  {
+    auto& detail = opt.second;
+    auto& value = detail->value();
+
+    auto& store = m_results[detail];
+
+    if(value.has_default() && !store.count() && !store.has_default()){
+      parse_default(detail);
+    }
+  }
+
+  if (consume_remaining)
+  {
+    while (current < argc)
+    {
+      if (!consume_positional(argv[current])) {
+        break;
+      }
+      ++current;
+    }
+
+    //adjust argv for any that couldn't be swallowed
+    while (current != argc) {
+      argv[nextKeep] = argv[current];
+      ++nextKeep;
+      ++current;
+    }
+  }
+
+  argc = nextKeep;
+
+}
+
+inline
+void
+Options::add_option
+(
+  const std::string& group,
+  const Option& option
+)
+{
+    add_options(group, {option});
+}
+
+inline
+void
+Options::add_option
+(
+  const std::string& group,
+  const std::string& s,
+  const std::string& l,
+  std::string desc,
+  std::shared_ptr<const Value> value,
+  std::string arg_help
+)
+{
+  auto stringDesc = toLocalString(std::move(desc));
+  auto option = std::make_shared<OptionDetails>(s, l, stringDesc, value);
+
+  if (s.size() > 0)
+  {
+    add_one_option(s, option);
+  }
+
+  if (l.size() > 0)
+  {
+    add_one_option(l, option);
+  }
+
+  //add the help details
+  auto& options = m_help[group];
+
+  options.options.emplace_back(HelpOptionDetails{s, l, stringDesc,
+      value->has_default(), value->get_default_value(),
+      value->has_implicit(), value->get_implicit_value(),
+      std::move(arg_help),
+      value->is_container(),
+      value->is_boolean()});
+}
+
+inline
+void
+Options::add_one_option
+(
+  const std::string& option,
+  std::shared_ptr<OptionDetails> details
+)
+{
+  auto in = m_options->emplace(option, details);
+
+  if (!in.second)
+  {
+    throw_or_mimic<option_exists_error>(option);
+  }
+}
+
+inline
+String
+Options::help_one_group(const std::string& g) const
+{
+  typedef std::vector<std::pair<String, String>> OptionHelp;
+
+  auto group = m_help.find(g);
+  if (group == m_help.end())
+  {
+    return "";
+  }
+
+  OptionHelp format;
+
+  size_t longest = 0;
+
+  String result;
+
+  if (!g.empty())
+  {
+    result += toLocalString(" " + g + " options:\n");
+  }
+
+  for (const auto& o : group->second.options)
+  {
+    if (m_positional_set.find(o.l) != m_positional_set.end() &&
+        !m_show_positional)
+    {
+      continue;
+    }
+
+    auto s = format_option(o);
+    longest = (std::max)(longest, stringLength(s));
+    format.push_back(std::make_pair(s, String()));
+  }
+
+  longest = (std::min)(longest, static_cast<size_t>(OPTION_LONGEST));
+
+  //widest allowed description
+  auto allowed = size_t{76} - longest - OPTION_DESC_GAP;
+
+  auto fiter = format.begin();
+  for (const auto& o : group->second.options)
+  {
+    if (m_positional_set.find(o.l) != m_positional_set.end() &&
+        !m_show_positional)
+    {
+      continue;
+    }
+
+    auto d = format_description(o, longest + OPTION_DESC_GAP, allowed);
+
+    result += fiter->first;
+    if (stringLength(fiter->first) > longest)
+    {
+      result += '\n';
+      result += toLocalString(std::string(longest + OPTION_DESC_GAP, ' '));
+    }
+    else
+    {
+      result += toLocalString(std::string(longest + OPTION_DESC_GAP -
+        stringLength(fiter->first),
+        ' '));
+    }
+    result += d;
+    result += '\n';
+
+    ++fiter;
+  }
+
+  return result;
+}
+
+inline
+void
+Options::generate_group_help
+(
+  String& result,
+  const std::vector<std::string>& print_groups
+) const
+{
+  for (size_t i = 0; i != print_groups.size(); ++i)
+  {
+    const String& group_help_text = help_one_group(print_groups[i]);
+    if (empty(group_help_text))
+    {
+      continue;
+    }
+    result += group_help_text;
+    if (i < print_groups.size() - 1)
+    {
+      result += '\n';
+    }
+  }
+}
+
+inline
+void
+Options::generate_all_groups_help(String& result) const
+{
+  std::vector<std::string> all_groups;
+  all_groups.reserve(m_help.size());
+
+  for (auto& group : m_help)
+  {
+    all_groups.push_back(group.first);
+  }
+
+  generate_group_help(result, all_groups);
+}
+
+inline
+std::string
+Options::help(const std::vector<std::string>& help_groups) const
+{
+  String result = m_help_string + "\nUsage:\n  " +
+    toLocalString(m_program) + " " + toLocalString(m_custom_help);
+
+  if (m_positional.size() > 0 && m_positional_help.size() > 0) {
+    result += " " + toLocalString(m_positional_help);
+  }
+
+  result += "\n\n";
+
+  if (help_groups.size() == 0)
+  {
+    generate_all_groups_help(result);
+  }
+  else
+  {
+    generate_group_help(result, help_groups);
+  }
+
+  return toUTF8String(result);
+}
+
+inline
+const std::vector<std::string>
+Options::groups() const
+{
+  std::vector<std::string> g;
+
+  std::transform(
+    m_help.begin(),
+    m_help.end(),
+    std::back_inserter(g),
+    [] (const std::map<std::string, HelpGroupDetails>::value_type& pair)
+    {
+      return pair.first;
+    }
+  );
+
+  return g;
+}
+
+inline
+const HelpGroupDetails&
+Options::group_help(const std::string& group) const
+{
+  return m_help.at(group);
+}
+
+}
+
+#endif //CXXOPTS_HPP_INCLUDED

+ 34 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/project_root.h

@@ -0,0 +1,34 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-06-19
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include <string>
+#include <memory>
+namespace adc
+{
+    
+class RootPath
+{
+public:
+    using Ptr = std::shared_ptr<RootPath>;
+    void SetRoot(const std::string &root);
+    std::string GetRoot();
+    static RootPath::Ptr GetInstance();
+    static bool AbsolutePath(const std::string &path);
+    static const std::string GetAbsolutePath(const std::string &path);
+    ~RootPath();  
+    RootPath();
+
+private:
+    std::string root_path_;
+    static RootPath::Ptr instance_ptr_;
+
+};
+
+
+
+}

+ 35 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/time_adcsoft.h

@@ -0,0 +1,35 @@
+/**
+ *  \brief 
+ *  \author pengcheng(pengcheng@adcsoft.cn)
+ *  \date 2019.11.25
+ *  \attention 
+ * 
+*/
+
+#ifndef  ADCSOFT_TIME_ADCSOFT_H__
+#define ADCSOFT_TIME_ADCSOFT_H__
+
+#include <chrono>
+#include <thread>
+
+namespace adc
+{
+
+class Timer
+{
+public:
+    Timer(const float& hz);
+
+    void Start();
+
+    void Stop();
+
+private:
+    std::chrono::duration<float> duration_;
+
+    std::chrono::system_clock::time_point start_;
+
+    std::chrono::duration<float> elapsed_;
+};
+}
+#endif // ! ADCSOFT_TIME_H__

+ 131 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/utils.h

@@ -0,0 +1,131 @@
+/**
+ *  \brief GPU depth back-projection helpers, a depth-image spatial-clustering
+ *         utility (DBSCAN-like, see SpatialClustering) and an OBB extractor.
+ *  \author pengcheng(pengcheng@adcsoft.cn)
+ *  \date 2020.09.01
+ *  \attention 
+ * 
+*/
+
+// Fix: the include guard previously started only AFTER the #include block,
+// so every re-inclusion reparsed all the heavy OpenCV/PCL headers.  The
+// guard now wraps the whole header.
+#ifndef ADC_COMMON_UTILS
+#define ADC_COMMON_UTILS 
+
+#include <float.h>
+#include <opencv2/core.hpp>
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+#include <pcl/features/moment_of_inertia_estimation.h>
+#include <chrono>
+#include <memory>
+#include <string>
+#include <vector>
+#include "cuda_runtime.h"
+#include "common/cuda_utils.h"
+
+namespace adc
+{
+
+namespace common
+{
+
+// Depth-map sub-sampling by (ratio_x, ratio_y).  The *Device function is the
+// kernel-side entry point; the wrapper launches it on 'stream'.
+// NOTE(review): exact sampling semantics live in the .cu file -- confirm there.
+void SampleDownGPUDevice(const float* x, const float* y, const float* z, const int ratio_x, const int ratio_y,
+                const int old_cols, const int new_rows, const int new_cols,
+                float* new_x, float* new_y, float* new_z, cudaStream_t stream);
+
+
+void SampleDownGPU(const float* x, const float* y, const float* z, const int ratio_x, const int ratio_y,
+                const int old_cols, const int new_rows, const int new_cols,
+                float* new_x, float* new_y, float* new_z, cudaStream_t stream);
+
+// Back-project a 16-bit depth image into camera-frame x/y/z using the pinhole
+// intrinsics (inv_fu, inv_fv, u0, v0); values outside [min_depth, max_depth]
+// are rejected.  corret_{a,b,c} are depth-correction polynomial coefficients
+// (default 0 = no correction).
+void ComputeXYZGPUDevice(const short* depth,  const int left_x, const int left_y, 
+    const float inv_fu, const float inv_fv, const float u0, const float v0,
+    const float min_depth, const float max_depth, const int rows, const int cols,
+    float* x, float* y, float* z, cudaStream_t stream,const float corret_a,
+                         const float corret_b, const float corret_c);
+void ComputeXYZGPU(const short* depth,  const int left_x, const int left_y, 
+    const float inv_fu, const float inv_fv, const float u0, const float v0,
+    const float min_depth, const float max_depth, const int rows, const int cols,
+    float* x, float* y, float* z, cudaStream_t stream,
+    const float corret_a=0.0, const float corret_b=0.0, const float corret_c=0.0);
+void TestGPU(float* z);
+
+// Scoped wall-clock profiler: prints 'description_' with the elapsed time in
+// the destructor (presumably only when above 'threshold'; see the .cpp).
+class Perf
+{
+public:
+    Perf(const std::string& desc, float threshold = 0.0);
+    ~Perf();
+private:
+    std::string description_;
+    std::chrono::duration<float> threshold_;
+    std::chrono::system_clock::time_point start_;
+
+};
+
+// Density-based clustering over a depth image.  Scalar is the PCL point type
+// of the produced clusters (e.g. pcl::PointXYZRGB).
+template<typename Scalar>
+class Clustering
+{
+
+public:
+
+    // Back-project 'depth' into per-pixel x/y maps using pinhole intrinsics.
+    void ComputeXY(const cv::Mat& depth, int left_x, int left_y, float32_t inv_fu,
+                    float32_t inv_fv, float32_t u0, float32_t v0, float32_t min_depth,
+                    float32_t max_depth, cv::Mat &x_3d, cv::Mat &y_3d);
+
+    // Squared Euclidean distance between two 3-D points.
+    void ConputeDis(const  std::vector<float32_t> &p1, const std::vector<float32_t> &p2, float32_t &square);
+
+    // Test whether (core_x, core_y) is a core point of the cluster and collect
+    // its in-range neighbours into 'boundary_point'.
+    bool CorePoint(const cv::Mat &depth, const  cv::Mat &x_map, const cv::Mat &y_map,
+                    int core_x, int core_y, int kernel_begin_x, 
+                    int kernel_end_x, int kernel_begin_y, int kernel_end_y,
+                    float32_t max_dis, float32_t min_depth, float32_t max_depth,
+                    std::vector<cv::Point2i> &boundary_point, std::vector<float32_t> &core_point
+                    );
+    bool CorePoint2(const cv::Mat &depth, const  cv::Mat &x_map, const cv::Mat &y_map,
+                int core_x, int core_y, int kernel_begin_x, 
+                int kernel_end_x, int kernel_begin_y, int kernel_end_y,
+                float32_t max_dis, float32_t min_depth, float32_t max_depth,
+                std::vector<cv::Point2i> &boundary_point, std::vector<float32_t> &core_point
+                );
+    // Grow a cluster from a seed boundary, marking pixels in 'flag_visited'.
+    void ExternCluster(const cv::Mat& depth, const cv::Mat& x_map, 
+                    const cv::Mat& y_map, cv::Mat &flag_visited, float32_t min_depth, float32_t max_depth,
+                    float32_t max_dis, size_t min_pts, size_t kernel_size, float32_t full_size,
+                    std::vector<cv::Point2i> &boundary, pcl::PointCloud<Scalar>& cluster
+                    );
+    void ExternCluster2(const cv::Mat& depth, const cv::Mat& x_map, 
+                    const cv::Mat& y_map, cv::Mat &flag_visited, float32_t min_depth, float32_t max_depth,
+                    float32_t max_dis, size_t min_pts, size_t kernel_size, float32_t full_size,
+                    std::vector<cv::Point2i> &boundary, pcl::PointCloud<Scalar>& cluster
+                    );
+
+    // Full clustering pass over a raw depth image (back-projects internally).
+    void SpatialClustering(cv::Mat &depth, size_t kernel_size, size_t min_pts, float32_t max_dis,
+                        int left_x, int left_y, float32_t inv_fu, float32_t inv_fv, float32_t u0, float32_t v0,
+                        float32_t min_depth, float32_t max_depth,
+                        std::vector<pcl::PointCloud<Scalar>>  &clusters);
+
+    // Overload for precomputed x/y/z maps.
+    void SpatialClustering(cv::Mat& x, cv::Mat& y, cv::Mat& z, size_t kernel_size, size_t min_pts, float32_t max_dis,
+                        float32_t min_depth, float32_t max_depth, std::vector<pcl::PointCloud<Scalar>>  &clusters);
+
+
+};
+
+// Oriented-bounding-box extraction: fits a 2-D rotated rect (cv::RotatedRect)
+// in the ground plane plus a min/max z extent to a point cloud.
+class OBBExtrator
+{
+public:
+    OBBExtrator(){};
+    ~OBBExtrator(){};
+    // Returns the 8 box corners and the box dimensions for 'cloud_ptr'.
+    void GetOBB(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_ptr, std::vector<pcl::PointXYZRGB>& bbox_corner,float &length, float& width, float& height);
+
+private:
+    // Flatten the cloud to 2-D points and record the z range.
+    void ConvertPoints(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_ptr);
+
+private:
+    std::vector<cv::Point2f> points_;
+    float32_t max_z_;
+    float32_t min_z_;
+    cv::RotatedRect bbox_;
+
+};
+}
+
+}
+#endif // !ADC_COMMON_UTILS

+ 53 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/common/work.h

@@ -0,0 +1,53 @@
+/**
+* \brief Base class for a worker thread paced by adc::Timer, driven by a
+*        small INIT/RUN/SUSPEND/TERMINATE state machine.
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-05-23
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+// Fix: the guard macro was copy-pasted from the timer header
+// (CMW_APP_TIME_ADCSOFT_H) and did not describe this file.
+#ifndef CMW_APP_WORK_ADCSOFT_H
+#define CMW_APP_WORK_ADCSOFT_H
+
+#include <common/time_adcsoft.h>
+#include <cstdint>
+#include <iostream>
+#include <thread>
+
+namespace adc
+{
+
+class Worker
+{
+
+public:
+    // Lifecycle states held in worker_state_.
+    enum
+    {
+        WS_INIT         = 0x001,
+        WS_RUN          = 0x002,
+        WS_SUSPEND      = 0x003,
+        WS_TERMINATE    = 0x004, 
+    };
+
+public:
+    // hz: frequency at which the main loop paces calls into DoWork().
+    Worker(float hz=20);
+
+    virtual ~Worker();
+
+    // One unit of work supplied by the derived class.
+    // NOTE(review): the loop/state handling lives in the .cpp -- confirm the
+    // exact call pattern there.
+    virtual void DoWork() = 0;
+
+    // Switch the worker into the running state.
+    void Run();
+
+    // Pause work without tearing the thread down.
+    void Suspend();
+
+    // Request shutdown of the worker thread.
+    void Terminate();
+
+protected:
+    // Thread entry point driving the timer/state loop.
+    void MainThread();
+
+protected:
+    std::thread main_thread_;
+    Timer timer_;
+    std::uint16_t worker_state_;
+};
+
+}
+
+#endif // CMW_APP_WORK_ADCSOFT_H

+ 36 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/Hungarian.h

@@ -0,0 +1,36 @@
+//
+// Created by lqx on 20-4-23.
+//
+
+#ifndef TRACK_SORT_HUNGARIAN_H_H
+#define TRACK_SORT_HUNGARIAN_H_H
+
+#include <iostream>
+#include <vector>
+
+// NOTE(review): a using-directive in a header leaks std into every includer;
+// dependent headers (detect_obstacle.h) currently rely on it, so removing it
+// here alone would break them.
+using namespace std;
+
+// Solver for the linear assignment problem (Munkres / Hungarian algorithm),
+// used to match detections to existing Kalman tracks by cost.
+class HungarianAlgorithm
+{
+public:
+    HungarianAlgorithm();
+    ~HungarianAlgorithm();
+    // DistMatrix[i][j]: cost of assigning row i to column j.  On return,
+    // Assignment[i] holds the column matched to row i (presumably -1 when
+    // unmatched -- confirm in the .cpp); returns the total assignment cost.
+    double Solve(vector<vector<double>>& DistMatrix, vector<int>& Assignment);
+
+private:
+    // Internal steps of the Munkres algorithm operating on a flat copy of
+    // the distance matrix; see the .cpp for the step definitions.
+    void assignmentoptimal(int *assignment, double *cost, double *distMatrix, int nOfRows, int nOfColumns);
+    void buildassignmentvector(int *assignment, bool *starMatrix, int nOfRows, int nOfColumns);
+    void computeassignmentcost(int *assignment, double *cost, double *distMatrix, int nOfRows);
+    void step2a(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+                bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+    void step2b(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+                bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+    void step3(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+               bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+    void step4(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+               bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim, int row, int col);
+    void step5(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+               bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+};
+
+#endif //TRACK_SORT_HUNGARIAN_H_H

+ 89 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/KalmanTracker.h

@@ -0,0 +1,89 @@
+//
+// Created by lqx on 20-4-23.
+//
+
+#ifndef TRACK_SORT_KALMANTRACKER_H
+#define TRACK_SORT_KALMANTRACKER_H
+
+///////////////////////////////////////////////////////////////////////////////
+// KalmanTracker.h: KalmanTracker Class Declaration
+
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+// NOTE(review): using-directive kept because dependent headers rely on it.
+using namespace std;
+
+// Fix: type alias instead of an object-like macro -- scoped and type-safe,
+// and every existing use of the name StateType keeps compiling unchanged.
+using StateType = cv::Rect_<float>;
+
+
+// This class represents the internal state of an individual tracked object
+// observed as a bounding box; boxes are parameterized as center x/y, scale
+// and aspect ratio (see get_rect_xysr) and filtered with cv::KalmanFilter.
+class KalmanTracker
+{
+public:
+    KalmanTracker()
+    {
+        init_kf(StateType());
+        m_time_since_update = 0;
+        m_hits = 0;
+        m_hit_streak = 0;
+        m_age = 0;
+        m_id = kf_count;
+        //kf_count++;
+        m_class_id = -1;  // fix: was left indeterminate by this constructor
+        m_prob = 0.f;     // fix: was left indeterminate by this constructor
+    }
+    KalmanTracker(StateType initRect)
+    {
+        init_kf(initRect);
+        m_time_since_update = 0;
+        m_hits = 0;
+        m_hit_streak = 0;
+        m_age = 0;
+        m_id = kf_count;
+        //kf_count++;
+        m_class_id = -1;  // fix: was left indeterminate by this constructor
+        m_prob = 0.f;     // fix: was left indeterminate by this constructor
+    }
+
+    KalmanTracker(StateType initRect, int classId,float prob)
+    {
+        init_kf(initRect);
+        m_time_since_update = 0;
+        m_hits = 0;
+        m_hit_streak = 0;
+        m_age = 0;
+        m_id = kf_count;
+        //kf_count++;
+        m_class_id = classId;
+        m_prob = prob;
+    }
+
+    ~KalmanTracker()
+    {
+        m_history.clear();
+        m_class_history.clear();
+    }
+
+    // Advance the filter one frame and return the predicted box.
+    StateType predict();
+    // Correct the filter with a matched detection.
+    void update(StateType stateMat,int classId, float prob);
+
+    // Current filtered bounding box.
+    StateType get_state();
+    // Convert (center x, center y, scale, aspect ratio) back to a rect.
+    StateType get_rect_xysr(float cx, float cy, float s, float r);
+
+    // Global id counter shared by all trackers.
+    static int kf_count;
+
+    int m_time_since_update;  // frames since the last matched detection
+    int m_hits;               // total number of matches
+    int m_hit_streak;         // consecutive matches
+    int m_age;                // total frames this track has existed
+    int m_id;
+    int m_class_id;
+    std::vector<int> m_class_history;
+    float m_prob;
+
+private:
+    void init_kf(StateType stateMat);
+
+    cv::KalmanFilter kf;
+    cv::Mat measurement;
+
+    std::vector<StateType> m_history;
+};
+
+#endif //TRACK_SORT_KALMANTRACKER_H

+ 32 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/dataReader.h

@@ -0,0 +1,32 @@
+#ifndef _DATA_READER_H_
+#define _DATA_READER_H_
+
+#include <vector>
+#include <list>
+#include <string>
+#include <tuple>   // fix: std::tuple was used below without this include
+
+namespace Tn
+{
+    // Read a plain list of file names, one per line.
+    std::list<std::string> readFileList(const std::string& fileName);
+    
+    // One classification sample: image path plus its integer label.
+    struct Source
+    {
+        std::string fileName;
+        int label;
+    };
+    std::list<Source> readLabelFileList(const std::string& fileName);    
+
+    // Axis-aligned detection box in pixel coordinates with class and score.
+    struct Bbox
+    {
+        int classId;
+        int left;
+        int right;
+        int top;
+        int bot;
+        float score;
+    };
+    //[lst<filename>,lst<bbox_vec>]
+    std::tuple<std::list<std::string>, std::list<std::vector<Bbox>>> readObjectLabelFileList(const std::string& fileName);
+}
+
+#endif

+ 57 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/include/detect_obstacle.h

@@ -0,0 +1,57 @@
+// Fix: guard renamed -- DETECT_TURNSTILE_H was copy-pasted from another
+// project and did not describe this file.
+#ifndef DETECT_OBSTACLE_H
+#define DETECT_OBSTACLE_H
+
+#include "detection_trt7/include/Hungarian.h"
+#include "detection_trt7/include/KalmanTracker.h"
+
+#include <string>
+#include <vector>
+
+namespace td{
+
+// BGR colors used when drawing boxes.
+const int COLOR_MAP[2][3]={{0, 0, 255},{255, 0, 0}};
+
+// SORT tracker tuning: drop a track after 'max_age' missed frames, report it
+// only after 'min_hits' matches, and associate by IOU >= iouThreshold.
+const int max_age = 2;
+const int min_hits = 3;
+const double iouThreshold = 0.5;
+
+struct bbox_t {
+    unsigned int x, y, w, h;       // (x,y) - top-left corner, (w, h) - width & height of bounded box
+    float prob;                    // confidence - probability that the object was found correctly
+    unsigned int obj_id;           // class of object - from range [0, classes-1]
+    unsigned int track_id;         // tracking id for video (0 - untracked, 1 - inf - tracked object)
+    unsigned int frames_counter;   // counter of frames on which the object was detected
+    float x_3d, y_3d, z_3d;        // center of object (in Meters) if ZED 3D Camera is used
+};
+
+// One confirmed track for a single frame.
+typedef struct TrackingBox
+{
+    int frame;
+    int id;
+    int class_id;
+    float prob;
+    cv::Rect_<float> box;
+    std::vector<int> class_history;
+
+}TrackingBox;
+
+// yolo output converted to a DetectBox
+typedef struct DetectBox
+{
+    int class_id;
+    float prob;
+    cv::Rect_<float> box;
+}DetectBox;
+
+//Computes IOU between two bounding boxes
+double GetIOU(cv::Rect_<float> bb_test, cv::Rect_<float> bb_gt);
+// Draw one detection box with its class / track id / confidence labels.
+void DrawBoxes(cv::Mat &frame, std::vector<std::string> classes, int classId, int turnstileId,float conf, int left, int top, int right, int bottom);
+
+// Draw detection results onto a single image.
+void Drawer(cv::Mat &frame, std::vector<bbox_t> outs, std::vector<std::string> classes);
+// Draw tracking results onto a video frame.
+void Drawer(cv::Mat &frame, std::vector<td::TrackingBox> &track_result, std::vector<std::string> &classes);
+// Run one SORT association/update step; returns whether tracking succeeded.
+bool TrackObstacle(int64 frame_count,std::vector<KalmanTracker> &trackers,std::vector<bbox_t> &outs,std::vector<td::TrackingBox> &track_result);
+}
+
+
+#endif // DETECT_OBSTACLE_H

+ 94 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/Utils.h

@@ -0,0 +1,94 @@
+#ifndef __TRT_UTILS_H_
+#define __TRT_UTILS_H_
+
+#include <iostream>
+#include <vector>
+#include <algorithm>
+#include <cudnn.h>
+
+#ifndef CUDA_CHECK
+
+#define CUDA_CHECK(callstr)                                                                    \
+    {                                                                                          \
+        cudaError_t error_code = callstr;                                                      \
+        if (error_code != cudaSuccess) {                                                       \
+            std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
+            assert(0);                                                                         \
+        }                                                                                      \
+    }
+
+#endif
+
+namespace Tn
+{
+    class Profiler : public nvinfer1::IProfiler
+    {
+    public:
+        void printLayerTimes(int itrationsTimes)
+        {
+            float totalTime = 0;
+            for (size_t i = 0; i < mProfile.size(); i++)
+            {
+                printf("%-40.40s %4.3fms\n", mProfile[i].first.c_str(), mProfile[i].second / itrationsTimes);
+                totalTime += mProfile[i].second;
+            }
+            printf("Time over all layers: %4.3f\n", totalTime / itrationsTimes);
+        }
+    private:
+        typedef std::pair<std::string, float> Record;
+        std::vector<Record> mProfile;
+
+        virtual void reportLayerTime(const char* layerName, float ms)
+        {
+            auto record = std::find_if(mProfile.begin(), mProfile.end(), [&](const Record& r){ return r.first == layerName; });
+            if (record == mProfile.end())
+                mProfile.push_back(std::make_pair(layerName, ms));
+            else
+                record->second += ms;
+        }
+    };
+
+    //Logger for TensorRT info/warning/errors
+    class Logger : public nvinfer1::ILogger
+    {
+    public:
+
+        Logger(): Logger(Severity::kWARNING) {}
+
+        Logger(Severity severity): reportableSeverity(severity) {}
+
+        void log(Severity severity, const char* msg) override
+        {
+            // suppress messages with severity enum value greater than the reportable
+            if (severity > reportableSeverity) return;
+
+            switch (severity)
+            {
+                case Severity::kINTERNAL_ERROR: std::cerr << "INTERNAL_ERROR: "; break;
+                case Severity::kERROR: std::cerr << "ERROR: "; break;
+                case Severity::kWARNING: std::cerr << "WARNING: "; break;
+                case Severity::kINFO: std::cerr << "INFO: "; break;
+                default: std::cerr << "UNKNOWN: "; break;
+            }
+            std::cerr << msg << std::endl;
+        }
+
+        Severity reportableSeverity{Severity::kWARNING};
+    };
+
+    template<typename T> 
+    void write(char*& buffer, const T& val)
+    {
+        *reinterpret_cast<T*>(buffer) = val;
+        buffer += sizeof(T);
+    }
+
+    template<typename T> 
+    void read(const char*& buffer, T& val)
+    {
+        val = *reinterpret_cast<const T*>(buffer);
+        buffer += sizeof(T);
+    }
+}
+
+#endif

+ 503 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/logging.h

@@ -0,0 +1,503 @@
+/*
+ * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TENSORRT_LOGGING_H
+#define TENSORRT_LOGGING_H
+
+#include "NvInferRuntimeCommon.h"
+#include <cassert>
+#include <ctime>
+#include <iomanip>
+#include <iostream>
+#include <ostream>
+#include <sstream>
+#include <string>
+
+using Severity = nvinfer1::ILogger::Severity;
+
+// stringbuf that, on sync/destruction, emits its buffered text to mOutput
+// with a timestamp and severity prefix (used by LogStreamConsumer below).
+class LogStreamConsumerBuffer : public std::stringbuf
+{
+public:
+    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mOutput(stream)
+        , mPrefix(prefix)
+        , mShouldLog(shouldLog)
+    {
+    }
+
+    // Fix: the original move constructor initialized only mOutput, leaving
+    // mPrefix empty and mShouldLog *indeterminate* (it is read later in
+    // putOutput()).  All three members are now carried over.
+    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
+        : mOutput(other.mOutput)
+        , mPrefix(other.mPrefix)
+        , mShouldLog(other.mShouldLog)
+    {
+    }
+
+    ~LogStreamConsumerBuffer()
+    {
+        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
+        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
+        // if the pointer to the beginning is not equal to the pointer to the current position,
+        // call putOutput() to log the output to the stream
+        if (pbase() != pptr())
+        {
+            putOutput();
+        }
+    }
+
+    // synchronizes the stream buffer and returns 0 on success
+    // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
+    // resetting the buffer and flushing the stream
+    virtual int sync()
+    {
+        putOutput();
+        return 0;
+    }
+
+    void putOutput()
+    {
+        if (mShouldLog)
+        {
+            // prepend timestamp
+            std::time_t timestamp = std::time(nullptr);
+            tm* tm_local = std::localtime(&timestamp);
+            std::cout << "[";
+            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
+            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
+            // std::stringbuf::str() gets the string contents of the buffer
+            // insert the buffer contents pre-appended by the appropriate prefix into the stream
+            mOutput << mPrefix << str();
+            // set the buffer to empty
+            str("");
+            // flush the stream
+            mOutput.flush();
+        }
+    }
+
+    void setShouldLog(bool shouldLog)
+    {
+        mShouldLog = shouldLog;
+    }
+
+private:
+    std::ostream& mOutput;
+    std::string mPrefix;
+    bool mShouldLog;
+};
+
+//!
+//! \class LogStreamConsumerBase
+//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
+//!
+class LogStreamConsumerBase
+{
+public:
+    LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mBuffer(stream, prefix, shouldLog)
+    {
+    }
+
+protected:
+    // Constructed here (base class) so it exists before the std::ostream
+    // subobject of LogStreamConsumer, which takes its address.
+    LogStreamConsumerBuffer mBuffer;
+};
+
+//!
+//! \class LogStreamConsumer
+//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
+//!  Order of base classes is LogStreamConsumerBase and then std::ostream.
+//!  This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
+//!  in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
+//!  This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
+//!  Please do not change the order of the parent classes.
+//!
+class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
+{
+public:
+    //! \brief Creates a LogStreamConsumer which logs messages with level severity.
+    //!  Reportable severity determines if the messages are severe enough to be logged.
+    LogStreamConsumer(Severity reportableSeverity, Severity severity)
+        : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(severity <= reportableSeverity)
+        , mSeverity(severity)
+    {
+    }
+
+    // Move constructs a fresh base buffer from the other consumer's severity
+    // settings rather than stealing its buffer contents.
+    LogStreamConsumer(LogStreamConsumer&& other)
+        : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(other.mShouldLog)
+        , mSeverity(other.mSeverity)
+    {
+    }
+
+    void setReportableSeverity(Severity reportableSeverity)
+    {
+        mShouldLog = mSeverity <= reportableSeverity;
+        mBuffer.setShouldLog(mShouldLog);
+    }
+
+private:
+    // Errors and worse go to stderr; info and more verbose go to stdout.
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    static std::string severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    bool mShouldLog;
+    Severity mSeverity;
+};
+
+//! \class Logger
+//!
+//! \brief Class which manages logging of TensorRT tools and samples
+//!
+//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
+//! and supports logging two types of messages:
+//!
+//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
+//! - Test pass/fail messages
+//!
+//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
+//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
+//!
+//! In the future, this class could be extended to support dumping test results to a file in some standard format
+//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
+//!
+//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
+//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
+//! library and messages coming from the sample.
+//!
+//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
+//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
+//! object.
+
+class Logger : public nvinfer1::ILogger
+{
+public:
+    Logger(Severity severity = Severity::kWARNING)
+        : mReportableSeverity(severity)
+    {
+    }
+
+    //!
+    //! \enum TestResult
+    //! \brief Represents the state of a given test
+    //!
+    enum class TestResult
+    {
+        kRUNNING, //!< The test is running
+        kPASSED,  //!< The test passed
+        kFAILED,  //!< The test failed
+        kWAIVED   //!< The test was waived
+    };
+
+    //!
+    //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
+    //! \return The nvinfer1::ILogger associated with this Logger
+    //!
+    //! TODO Once all samples are updated to use this method to register the logger with TensorRT,
+    //! we can eliminate the inheritance of Logger from ILogger
+    //!
+    nvinfer1::ILogger& getTRTLogger()
+    {
+        return *this;
+    }
+
+    //!
+    //! \brief Implementation of the nvinfer1::ILogger::log() virtual method
+    //!
+    //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
+    //! inheritance from nvinfer1::ILogger
+    //!
+    void log(Severity severity, const char* msg) override
+    {
+        LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
+    }
+
+    //!
+    //! \brief Method for controlling the verbosity of logging output
+    //!
+    //! \param severity The logger will only emit messages that have severity of this level or higher.
+    //!
+    void setReportableSeverity(Severity severity)
+    {
+        mReportableSeverity = severity;
+    }
+
+    //!
+    //! \brief Opaque handle that holds logging information for a particular test
+    //!
+    //! This object is an opaque handle to information used by the Logger to print test results.
+    //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
+    //! with Logger::reportTest{Start,End}().
+    //!
+    class TestAtom
+    {
+    public:
+        TestAtom(TestAtom&&) = default;
+
+    private:
+        friend class Logger;
+
+        TestAtom(bool started, const std::string& name, const std::string& cmdline)
+            : mStarted(started)
+            , mName(name)
+            , mCmdline(cmdline)
+        {
+        }
+
+        bool mStarted;
+        std::string mName;
+        std::string mCmdline;
+    };
+
+    //!
+    //! \brief Define a test for logging
+    //!
+    //! \param[in] name The name of the test.  This should be a string starting with
+    //!                  "TensorRT" and containing dot-separated strings containing
+    //!                  the characters [A-Za-z0-9_].
+    //!                  For example, "TensorRT.sample_googlenet"
+    //! \param[in] cmdline The command line used to reproduce the test
+    //
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    //!
+    static TestAtom defineTest(const std::string& name, const std::string& cmdline)
+    {
+        return TestAtom(false, name, cmdline);
+    }
+
+    //!
+    //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
+    //!        as input
+    //!
+    //! \param[in] name The name of the test
+    //! \param[in] argc The number of command-line arguments
+    //! \param[in] argv The array of command-line arguments (given as C strings)
+    //!
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
+    {
+        auto cmdline = genCmdlineString(argc, argv);
+        return defineTest(name, cmdline);
+    }
+
+    //!
+    //! \brief Report that a test has started.
+    //!
+    //! \pre reportTestStart() has not been called yet for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has started
+    //!
+    static void reportTestStart(TestAtom& testAtom)
+    {
+        reportTestResult(testAtom, TestResult::kRUNNING);
+        assert(!testAtom.mStarted);
+        testAtom.mStarted = true;
+    }
+
+    //!
+    //! \brief Report that a test has ended.
+    //!
+    //! \pre reportTestStart() has been called for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has ended
+    //! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
+    //!                   TestResult::kFAILED, TestResult::kWAIVED
+    //!
+    static void reportTestEnd(const TestAtom& testAtom, TestResult result)
+    {
+        assert(result != TestResult::kRUNNING);
+        assert(testAtom.mStarted);
+        reportTestResult(testAtom, result);
+    }
+
+    static int reportPass(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kPASSED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportFail(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kFAILED);
+        return EXIT_FAILURE;
+    }
+
+    static int reportWaive(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kWAIVED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportTest(const TestAtom& testAtom, bool pass)
+    {
+        return pass ? reportPass(testAtom) : reportFail(testAtom);
+    }
+
+    Severity getReportableSeverity() const
+    {
+        return mReportableSeverity;
+    }
+
+private:
+    //!
+    //! \brief returns an appropriate string for prefixing a log message with the given severity
+    //!
+    static const char* severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate string for prefixing a test result message with the given result
+    //!
+    static const char* testResultString(TestResult result)
+    {
+        switch (result)
+        {
+        case TestResult::kRUNNING: return "RUNNING";
+        case TestResult::kPASSED: return "PASSED";
+        case TestResult::kFAILED: return "FAILED";
+        case TestResult::kWAIVED: return "WAIVED";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
+    //!
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    //!
+    //! \brief method that implements logging test results
+    //!
+    static void reportTestResult(const TestAtom& testAtom, TestResult result)
+    {
+        severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
+                                         << testAtom.mCmdline << std::endl;
+    }
+
+    //!
+    //! \brief generate a command line string from the given (argc, argv) values
+    //!
+    static std::string genCmdlineString(int argc, char const* const* argv)
+    {
+        std::stringstream ss;
+        for (int i = 0; i < argc; i++)
+        {
+            if (i > 0)
+                ss << " ";
+            ss << argv[i];
+        }
+        return ss.str();
+    }
+
+    // Messages below this severity are suppressed by log().
+    Severity mReportableSeverity;
+};
+
+// NOTE(review): anonymous namespace in a header gives each translation unit
+// its own internal-linkage copies of these helpers; kept as shipped by the
+// TensorRT samples.
+namespace
+{
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
+//!
+//! Example usage:
+//!
+//!     LOG_VERBOSE(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
+//!
+//! Example usage:
+//!
+//!     LOG_INFO(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_INFO(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
+//!
+//! Example usage:
+//!
+//!     LOG_WARN(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_WARN(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
+//!
+//! Example usage:
+//!
+//!     LOG_ERROR(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_ERROR(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
+//         ("fatal" severity)
+//!
+//! Example usage:
+//!
+//!     LOG_FATAL(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_FATAL(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
+}
+
+} // anonymous namespace
+#endif // TENSORRT_LOGGING_H

+ 106 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/mish.h

@@ -0,0 +1,106 @@
+#ifndef _MISH_PLUGIN_H
+#define _MISH_PLUGIN_H
+
+#include <string>
+#include <vector>
+#include "NvInfer.h"
+
+namespace nvinfer1
+{
+    //! Custom TensorRT plugin (IPluginV2IOExt) implementing the Mish
+    //! activation used by YOLOv4. Only the declarations live here; the method
+    //! bodies (and the CUDA kernel driven by forwardGpu) are defined in the
+    //! corresponding source file.
+    class MishPlugin: public IPluginV2IOExt
+    {
+        public:
+            explicit MishPlugin();
+            //! Deserializing constructor: rebuilds the plugin from the bytes
+            //! written by serialize().
+            MishPlugin(const void* data, size_t length);
+
+            ~MishPlugin();
+
+            // Mish is elementwise: one output per input.
+            int getNbOutputs() const override
+            {
+                return 1;
+            }
+
+            Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) override;
+
+            int initialize() override;
+
+            virtual void terminate() override {};
+
+            // No scratch memory needed by the kernel.
+            virtual size_t getWorkspaceSize(int maxBatchSize) const override { return 0;}
+
+            virtual int enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) override;
+
+            virtual size_t getSerializationSize() const override;
+
+            virtual void serialize(void* buffer) const override;
+
+            // Only linear-layout FP32 tensors are accepted at every I/O slot.
+            bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const override {
+                return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
+            }
+
+            const char* getPluginType() const override;
+
+            const char* getPluginVersion() const override;
+
+            void destroy() override;
+
+            IPluginV2IOExt* clone() const override;
+
+            void setPluginNamespace(const char* pluginNamespace) override;
+
+            const char* getPluginNamespace() const override;
+
+            DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const override;
+
+            bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const override;
+
+            bool canBroadcastInputAcrossBatch(int inputIndex) const override;
+
+            void attachToContext(
+                    cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) override;
+
+            void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) override;
+
+            void detachFromContext() override;
+
+            // Presumably the element count of the input tensor, set directly by
+            // external builder code — TODO confirm; an accessor would be safer
+            // than a public data member.
+            int input_size_;
+        private:
+            void forwardGpu(const float *const * inputs, float* output, cudaStream_t stream, int batchSize = 1);
+            // Threads per CUDA block used when launching the kernel.
+            int thread_count_ = 256;
+            const char* mPluginNamespace;
+    };
+
+    //! Factory registered with TensorRT; creates and deserializes MishPlugin
+    //! instances.
+    class MishPluginCreator : public IPluginCreator
+    {
+        public:
+            MishPluginCreator();
+
+            ~MishPluginCreator() override = default;
+
+            const char* getPluginName() const override;
+
+            const char* getPluginVersion() const override;
+
+            const PluginFieldCollection* getFieldNames() override;
+
+            IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) override;
+
+            IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override;
+
+            void setPluginNamespace(const char* libNamespace) override
+            {
+                mNamespace = libNamespace;
+            }
+
+            const char* getPluginNamespace() const override
+            {
+                return mNamespace.c_str();
+            }
+
+        private:
+            std::string mNamespace;
+            static PluginFieldCollection mFC;
+            static std::vector<PluginField> mPluginAttributes;
+    };
+};
+#endif 

+ 70 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/trt_utils.h

@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+
+#ifndef __TRT_UTILS_H__
+#define __TRT_UTILS_H__
+
+#include <set>
+#include <map>
+#include <string>
+#include <vector>
+#include <cassert>
+#include <iostream>
+#include <fstream>
+
+#include "NvInfer.h"
+#include "NvInferPlugin.h"
+
+#define UNUSED(expr) (void)(expr)
+// Ceiling division. The whole expansion is parenthesized so the macro composes
+// safely inside larger expressions: without the outer parentheses,
+// `x * DIVUP(n, d)` parsed as `(x * ((n) + (d) - 1)) / (d)`.
+#define DIVUP(n, d) (((n) + (d) - 1) / (d))
+
+// Generic string / file / tensor helpers shared by the yolo engine builder.
+std::string trim(std::string s);
+float clamp(const float val, const float minVal, const float maxVal);
+bool fileExists(const std::string fileName, bool verbose = true);
+// Loads the darknet weights file for the given network type into a flat array.
+std::vector<float> loadWeights(const std::string weightsFilePath, const std::string& networkType);
+std::string dimsToString(const nvinfer1::Dims d);
+void displayDimType(const nvinfer1::Dims d);
+int getNumChannels(nvinfer1::ITensor* t);
+uint64_t get3DTensorVolume(nvinfer1::Dims inputDims);
+
+// Helper functions to create yolo engine: each adds one darknet cfg block
+// (maxpool / conv / conv+BN+activation / upsample) to the TensorRT network.
+nvinfer1::ILayer* netAddMaxpool(int layerIdx, std::map<std::string, std::string>& block,
+                                nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
+nvinfer1::ILayer* netAddConvLinear(int layerIdx, std::map<std::string, std::string>& block,
+                                   std::vector<float>& weights,
+                                   std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
+                                   int& inputChannels, nvinfer1::ITensor* input,
+                                   nvinfer1::INetworkDefinition* network);
+nvinfer1::ILayer* netAddConvBNActive(int layerIdx, std::map<std::string, std::string>& block,
+                                    std::vector<float>& weights,
+                                    std::vector<nvinfer1::Weights>& trtWeights, int& weightPtr,
+                                    int& inputChannels, nvinfer1::ITensor* input,
+                                    nvinfer1::INetworkDefinition* network);
+nvinfer1::ILayer* netAddUpsample(int layerIdx, std::map<std::string, std::string>& block,
+                                 std::vector<float>& weights,
+                                 std::vector<nvinfer1::Weights>& trtWeights, int& inputChannels,
+                                 nvinfer1::ITensor* input, nvinfer1::INetworkDefinition* network);
+// Pretty-prints one row of the layer-construction summary table.
+void printLayerInfo(std::string layerIndex, std::string layerName, std::string layerInput,
+                    std::string layerOutput, std::string weightPtr);
+
+#endif

+ 164 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/yolo.h

@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _YOLO_H_
+#define _YOLO_H_
+
+#include <stdint.h>
+#include <string>
+#include <vector>
+#include <memory>
+
+#include "NvInfer.h"
+#include "detection_trt7/yolov4Tensorrt/include/trt_utils.h"
+#include "detection_trt7/yolov4Tensorrt/include/yololayer.h"
+#include "detection_trt7/yolov4Tensorrt/include/mish.h"
+
+// Status codes returned by the model parser/builder. Names and doc comments
+// apparently mirror NVIDIA DeepStream's NvDsInferStatus so this code can build
+// without the DeepStream SDK — TODO confirm provenance.
+typedef enum {
+    /** NvDsInferContext operation succeeded. */
+    NVDSINFER_SUCCESS = 0,
+    /** Failed to configure the NvDsInferContext instance possibly due to an
+     *  erroneous initialization property. */
+    NVDSINFER_CONFIG_FAILED,
+    /** Custom Library interface implementation failed. */
+    NVDSINFER_CUSTOM_LIB_FAILED,
+    /** Invalid parameters were supplied. */
+    NVDSINFER_INVALID_PARAMS,
+    /** Output parsing failed. */
+    NVDSINFER_OUTPUT_PARSING_FAILED,
+    /** CUDA error was encountered. */
+    NVDSINFER_CUDA_ERROR,
+    /** TensorRT interface failed. */
+    NVDSINFER_TENSORRT_ERROR,
+    /** Resource error was encountered. */
+    NVDSINFER_RESOURCE_ERROR,
+    /** TRT-IS error was encountered. */
+    NVDSINFER_TRTIS_ERROR,
+    /** Unknown error was encountered. */
+    NVDSINFER_UNKNOWN_ERROR
+} NvDsInferStatus;
+
+// Minimal model-parser interface (mirrors DeepStream's IModelParser); the
+// Yolo class below is its only implementation in this package.
+class IModelParser
+{
+public:
+    IModelParser() = default;
+    /**
+     * Destructor, make sure all external resource would be released here. */
+    virtual ~IModelParser() = default;
+
+    /**
+     * Function interface for parsing custom model and building tensorrt
+     * network.
+     *
+     * @param[in, out] network NvDsInfer will create the @a network and
+     *                 implementation can setup this network layer by layer.
+     * @return NvDsInferStatus indicating if model parsing was successful.
+     */
+    virtual NvDsInferStatus parseModel(
+        nvinfer1::INetworkDefinition& network) = 0;
+
+    /**
+     * Function interface to check if parser can support full-dimensions.
+     */
+    virtual bool hasFullDimsSupported() const = 0;
+
+    /**
+     * Function interface to get the new model name which is to be used for
+     * constructing the serialized engine file path.
+     */
+    virtual const char* getModelName() const = 0;
+};
+
+
+/**
+ * Holds all the file paths required to build a network.
+ */
+struct NetworkInfo
+{
+    std::string networkType;    // network type tag (used for parsing/naming decisions)
+    std::string configFilePath; // path to the darknet .cfg file
+    std::string wtsFilePath;    // path to the weights file
+    std::string deviceType;     // target device selector — TODO confirm accepted values
+    std::string inputBlobName;  // name of the network input tensor
+};
+
+/**
+ * Holds information about an output tensor of the yolo network.
+ */
+struct TensorInfo
+{
+    std::string blobName;        // output tensor name
+    uint stride{0};              // stride of this output head — presumably input/grid ratio, confirm in parser
+    uint gridSize{0};            // grid cells per side for this head
+    uint numClasses{0};          // detection classes
+    uint numBBoxes{0};           // anchors per grid cell
+    uint64_t volume{0};          // total element count of the tensor
+    std::vector<uint> masks;     // anchor mask indices from the cfg
+    std::vector<float> anchors;  // anchor dimensions from the cfg
+    int bindingIndex{-1};        // TensorRT binding slot; -1 until resolved
+    float* hostBuffer{nullptr};  // non-owning host-side output buffer — TODO confirm ownership
+};
+
+// Darknet-cfg-driven YOLO builder: parses the cfg/weights referenced by
+// NetworkInfo and constructs the corresponding TensorRT network/engine.
+class Yolo : public IModelParser {
+public:
+    Yolo(const NetworkInfo& networkInfo);
+    ~Yolo() override;
+    bool hasFullDimsSupported() const override { return false; }
+    // Uses the cfg path as the model name when available, else the type tag.
+    const char* getModelName() const override {
+        return m_ConfigFilePath.empty() ? m_NetworkType.c_str()
+                                        : m_ConfigFilePath.c_str();
+    }
+    NvDsInferStatus parseModel(nvinfer1::INetworkDefinition& network) override;
+
+    // Builds a ready-to-serialize engine using the given builder.
+    nvinfer1::ICudaEngine *createEngine (nvinfer1::IBuilder* builder);
+
+protected:
+    // Values below are copies of NetworkInfo fields plus state filled while
+    // parsing the cfg (see parseConfigBlocks / buildYoloNetwork).
+    const std::string m_NetworkType;
+    const std::string m_ConfigFilePath;
+    const std::string m_WtsFilePath;
+    const std::string m_DeviceType;
+    const std::string m_InputBlobName;
+    const std::string m_OutputBlobName;
+    std::vector<TensorInfo> m_OutputTensors;
+    std::vector<std::map<std::string, std::string>> m_ConfigBlocks;
+    uint m_InputH;
+    uint m_InputW;
+    uint m_InputC;
+    uint64_t m_InputSize;
+
+    // TRT specific members
+    std::vector<nvinfer1::Weights> m_TrtWeights;
+    std::vector<nvinfer1::ITensor*> m_YoloTensor;
+
+    std::vector<YoloKernel> m_YoloKernel;
+
+
+private:
+    NvDsInferStatus buildYoloNetwork(
+        std::vector<float>& weights, nvinfer1::INetworkDefinition& network);
+    std::vector<std::map<std::string, std::string>> parseConfigFile(
+        const std::string cfgFilePath);
+    void parseConfigBlocks();
+    void destroyNetworkUtils();
+};
+
+#endif // _YOLO_H_

+ 52 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/yolodetect.h

@@ -0,0 +1,52 @@
+#ifndef YOLODETECT_H
+#define YOLODETECT_H
+
+#include "opencv2/opencv.hpp"
+#include "NvInfer.h"
+#include "NvInferRuntime.h"
+#include "cuda_runtime_api.h"
+
+#include "detection_trt7/yolov4Tensorrt/include/logging.h"
+#include "detection_trt7/yolov4Tensorrt/include/yolo.h"
+#include "detection_trt7/yolov4Tensorrt/include/trt_utils.h"
+#include "detection_trt7/yolov4Tensorrt/include/yololayer.h"
+#include "detection_trt7/yolov4Tensorrt/include/mish.h"
+
+// NOTE(review): a `using namespace` directive at header scope leaks nvinfer1
+// into every file that includes this header; prefer qualified names.
+using namespace nvinfer1;
+// NOTE(review): REGISTER_TENSORRT_PLUGIN in a header expands once per
+// translation unit that includes it — TODO confirm this header has a single
+// includer, otherwise move registration to a .cpp file.
+REGISTER_TENSORRT_PLUGIN(MishPluginCreator);
+REGISTER_TENSORRT_PLUGIN(YoloPluginCreator);
+
+
+//! End-to-end YOLOv4 detector: builds or loads a serialized TensorRT engine
+//! and runs detection (preprocess -> inference -> NMS) on OpenCV images.
+class YoloDetect
+{
+public:
+    //! @param networkInfo cfg/weights/input-blob paths (see yolo.h)
+    //! @param modelname   presumably the serialized engine file path — confirm in loadModel
+    YoloDetect(NetworkInfo &networkInfo, std::string &modelname):
+        m_networkInfo(networkInfo),m_modelname(modelname)
+    {
+
+    }
+    //! Creates an execution context for the engine (see loadEngine/saveEngine).
+    bool loadModel(IExecutionContext*& context);
+    //! Runs one forward pass; input/output are caller-allocated host buffers.
+    void doInference(IExecutionContext& context,float* input, float* output, int batch_size);
+    //! Full pipeline on one image; fills detect_result after confidence
+    //! filtering (ignore_thresh) and NMS (nms_thresh).
+    bool process(IExecutionContext& context, cv::Mat &image, std::vector<Detection> &detect_result,float ignore_thresh=0.4,float nms_thresh = 0.4);
+    //! Maps a network-space bbox back to a rectangle in the original image.
+    cv::Rect get_rect(cv::Mat& img, float bbox[4],int input_w,int input_h);
+private:
+    bool saveEngine();
+    ICudaEngine* loadEngine(IRuntime& runtime);
+    // Letterbox/resize of the source image to the network input size.
+    cv::Mat preprocess_img(cv::Mat& img,int input_w,int input_h);
+    float iou(float lbox[4], float rbox[4]);
+    // Sort predicate used by nms() — presumably orders by confidence, confirm in .cpp.
+    static bool cmp(Detection& a, Detection& b);
+    void nms(std::vector<Detection>& res, float *output, float ignore_thresh=0.4,float nms_thresh = 0.4);
+
+public:
+    // Network input size and flat output length, filled when the engine loads.
+    int m_input_h;
+    int m_input_w;
+    int m_output_size;
+
+private:
+    NetworkInfo m_networkInfo;
+    std::string m_modelname;
+    Logger gLogger;
+
+};
+#endif // YOLODETECT_H

+ 126 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/detection_trt7/yolov4Tensorrt/include/yololayer.h

@@ -0,0 +1,126 @@
+#ifndef _YOLO_LAYER_H
+#define _YOLO_LAYER_H
+
+#include <assert.h>
+#include <cmath>
+#include <string.h>
+#include <cublas_v2.h>
+#include "NvInfer.h"
+#include "Utils.h"
+#include <iostream>
+#include "NvInferPlugin.h"
+
+// Geometry of one yolo output head, as parsed from the darknet cfg.
+struct YoloKernel
+{
+    int width;
+    int height;
+    int everyYoloAnchors;
+    float anchors[10];   // anchor values for one yolo output head: 3 anchors * 2
+                         // values = 6 in use; the array may be sized larger, the
+                         // exact capacity does not matter
+};
+
+// One decoded detection. alignas(float) keeps the struct layout compatible
+// with the flat float output buffer it is read from.
+struct alignas(float) Detection{
+    //x y w h
+    float bbox[4];
+    float det_confidence;
+    float class_id;
+    float class_confidence;
+};
+
+namespace nvinfer1
+{
+    //! Custom TensorRT plugin (IPluginV2IOExt) that decodes the raw yolo head
+    //! outputs into Detection records; the CUDA side lives in forwardGpu's
+    //! implementation file.
+    class YoloLayerPlugin: public IPluginV2IOExt
+    {
+        public:
+            YoloLayerPlugin(const PluginFieldCollection& fc);
+            //! Deserializing constructor (inverse of serialize()).
+            YoloLayerPlugin(const void* data, size_t length);
+
+            ~YoloLayerPlugin();
+
+            int getNbOutputs() const override
+            {
+                return 1;
+            }
+
+            Dims getOutputDimensions(int index, const Dims* inputs, int nbInputDims) override;
+
+            int initialize() override;
+
+            virtual void terminate() override {};
+
+            virtual size_t getWorkspaceSize(int maxBatchSize) const override { return 0;}
+
+            virtual int enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream) override;
+
+            virtual size_t getSerializationSize() const override;
+
+            virtual void serialize(void* buffer) const override;
+
+            // Only linear-layout FP32 tensors are accepted at every I/O slot.
+            bool supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) const override {
+                return inOut[pos].format == TensorFormat::kLINEAR && inOut[pos].type == DataType::kFLOAT;
+            }
+
+            const char* getPluginType() const override;
+
+            const char* getPluginVersion() const override;
+
+            void destroy() override;
+
+            IPluginV2IOExt* clone() const override;
+
+            void setPluginNamespace(const char* pluginNamespace) override;
+
+            const char* getPluginNamespace() const override;
+
+            DataType getOutputDataType(int index, const nvinfer1::DataType* inputTypes, int nbInputs) const override;
+
+            bool isOutputBroadcastAcrossBatch(int outputIndex, const bool* inputIsBroadcasted, int nbInputs) const override;
+
+            bool canBroadcastInputAcrossBatch(int inputIndex) const override;
+
+            void attachToContext(
+                    cudnnContext* cudnnContext, cublasContext* cublasContext, IGpuAllocator* gpuAllocator) override;
+
+            void configurePlugin(const PluginTensorDesc* in, int nbInput, const PluginTensorDesc* out, int nbOutput) override;
+
+            void detachFromContext() override;
+
+        private:
+            void forwardGpu(const float *const * inputs,float * output, cudaStream_t stream,int batchSize = 1);
+            int mClassCount;        // number of detection classes, read from the cfg file
+            int mInput_w;           // network input width, taken from the cfg
+            int mInput_h;           // input height; because of the upsample layer, width and height must be equal — TODO relax
+            int mNumYoloLayers;     // number of yolo output heads, derived from the cfg; no manual setting needed
+            std::vector<YoloKernel> mYoloKernel;
+
+            float mIgnore_thresh = 0.4;     // confidence threshold, tunable
+            int max_output_box = 1000;      // maximum number of output boxes
+            int mThreadCount = 256;         // threads per block for the CUDA kernel
+            const char* mPluginNamespace;   // plugin namespace string
+
+    };
+    // Derives from IPluginCreator and overrides its virtual interface.
+    class YoloPluginCreator : public IPluginCreator
+    {
+        public:
+            YoloPluginCreator();
+
+            ~YoloPluginCreator() override = default;
+            const char* getPluginName() const override;
+            const char* getPluginVersion() const override;
+            const PluginFieldCollection* getFieldNames() override;
+            // Creates the plugin; called while the network is being built.
+            IPluginV2IOExt* createPlugin(const char* name, const PluginFieldCollection* fc) override;
+            // Deserializes the plugin when a saved TRT engine is loaded.
+            IPluginV2IOExt* deserializePlugin(const char* name, const void* serialData, size_t serialLength) override;
+            void setPluginNamespace(const char* libNamespace) override{
+                mNamespace = libNamespace;
+            }
+            const char* getPluginNamespace() const override{
+                return mNamespace.c_str();
+            }
+        private:
+            std::string mNamespace;
+    };
+};
+
+#endif 

+ 38 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/Hungarian.h

@@ -0,0 +1,38 @@
+///////////////////////////////////////////////////////////////////////////////
+// Hungarian.h: Header file for Class HungarianAlgorithm.
+
+#ifndef LANENET_TENSORFLOW_HUNGARIAN_H
+#define LANENET_TENSORFLOW_HUNGARIAN_H
+
+
+#include <iostream>
+#include <vector>
+
+using namespace std;
+
+
+// Solves the linear assignment problem (Munkres / Hungarian algorithm);
+// used here to match detected lanes to Kalman trackers.
+class HungarianAlgorithm
+{
+public:
+	HungarianAlgorithm();
+	~HungarianAlgorithm();
+	// Fills Assignment[row] with the column matched to each row and returns
+	// the total cost of the optimal assignment over DistMatrix.
+	double Solve(vector<vector<double>>& DistMatrix, vector<int>& Assignment);
+
+private:
+	// Classic Munkres steps operating on flat row-major buffers.
+	void assignmentoptimal(int *assignment, double *cost, double *distMatrix, int nOfRows, int nOfColumns);
+	void buildassignmentvector(int *assignment, bool *starMatrix, int nOfRows, int nOfColumns);
+	void computeassignmentcost(int *assignment, double *cost, double *distMatrix, int nOfRows);
+	void step2a(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+				bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+	void step2b(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+				bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+	void step3(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+			   bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+	void step4(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+			   bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim, int row, int col);
+	void step5(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix,
+			   bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim);
+};
+
+
+#endif

+ 72 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/KalmanTracker.h

@@ -0,0 +1,72 @@
+///////////////////////////////////////////////////////////////////////////////
+// KalmanTracker.h: KalmanTracker Class Declaration
+
+#ifndef LANENET_TENSORFLOW_KALMAN_H
+#define LANENET_TENSORFLOW_KALMAN_H
+
+#include "opencv2/video/tracking.hpp"
+#include "opencv2/highgui/highgui.hpp"
+
+using namespace std;
+using namespace cv;
+
+// NOTE(review): prefer `using StateType = cv::Mat;` — this macro textually
+// rewrites every occurrence of the token StateType in all includers.
+#define StateType Mat
+
+
+// This class represents the internel state of individual tracked objects observed as bounding box.
+// This class represents the internel state of individual tracked objects observed as bounding box.
+// Here StateType is cv::Mat (lane polynomial parameters), not a bounding box —
+// the class was adapted from a SORT-style box tracker.
+class KalmanTracker
+{
+public:
+	KalmanTracker()
+	{
+		init_kf(StateType());
+		m_time_since_update = 0;
+		m_hits = 0;
+		m_hit_streak = 0;
+		m_age = 0;
+		m_id = kf_count;
+		//kf_count++;
+	}
+    // NOTE(review): kf_count++ is commented out in both constructors, so every
+    // tracker receives the same id (the current kf_count value) — confirm this
+    // is intentional for the lane-tracking use case.
+    KalmanTracker(StateType initRect, int min_y, int max_y)
+	{
+		init_kf(initRect);
+		m_time_since_update = 0;
+		m_hits = 0;
+		m_hit_streak = 0;
+		m_age = 0;
+        m_id = kf_count;  //each tracker id=0
+		//kf_count++; 
+        m_min_y = min_y;
+        m_max_y = max_y;
+	}
+
+	~KalmanTracker()
+	{
+		m_history.clear();
+	}
+
+	// Advances the filter one step and returns the predicted state.
+	StateType predict();
+	// Corrects the filter with a new measurement and its vertical extent.
+    void update(StateType stateMat, int min_y, int max_y);
+	
+	StateType get_state();
+
+	static int kf_count;
+
+	int m_time_since_update;  // frames since last update()
+	int m_hits;               // total updates received
+	int m_hit_streak;         // consecutive updates
+	int m_age;                // total predict() calls
+	int m_id;
+        int m_min_y;          // vertical extent of the tracked lane in the image
+        int m_max_y;
+
+private:
+	void init_kf(StateType stateMat);
+
+	cv::KalmanFilter kf;
+	cv::Mat measurement;
+
+	std::vector<StateType> m_history;
+};
+
+#endif

+ 101 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/lanedetect.h

@@ -0,0 +1,101 @@
+#ifndef LANEDETECT_H
+#define LANEDETECT_H
+#include "lane_detection/tensorrt/tensorrt.h"
+#include "lane_detection/Hungarian.h"
+#include "lane_detection/KalmanTracker.h"
+
+using namespace cv;
+namespace ld{
+// NOTE(review): `static const` globals in a header are duplicated per
+// translation unit; `inline constexpr` (C++17) would give one definition.
+// Network input/output geometry for the lane segmentation model.
+static const int N = 1;
+static const int INPUT_C = 3;
+static const int OUTPUT_C_BIN = 6;
+static const int OUTPUT_C_INS = 4;
+static const int INPUT_H = 256;
+static const int INPUT_W = 512;
+// static float mean[INPUT_C] = {0.485, 0.456, 0.406};//rgb
+// static float std[INPUT_C] = {0.229, 0.224, 0.225};
+const int COLOR_MAP[13][3]={{255, 0, 0},{0, 255, 0},{0, 0, 255},{125, 125, 0},{0,125,125},
+                           {125, 0, 125},{50, 100, 50},{100, 50, 100},{50, 50, 100},
+                           {255,255,0},{0,255,255},{255,0,255},{100,100,100}};
+
+const float KEEP_POINT = 0.2;  // points used for clustering = detected points * KEEP_POINT
+const int MAX_NUM_CLUSTERS = 4; // maximum number of lane lines to detect
+const float SORT_VALUE = 0.75;   // for final lane-id ordering: compare the x values at y = SORT_VALUE * HEIGHT
+
+const int RANSAC_ITERATIONS = 100; // maximum RANSAC iterations
+const double RANSAC_MODEL_SIZE = 0.05;
+const int RANSAC_ERROR_THRESHOLD = 20;
+const double RANSAC_INLINERS = 0.5;
+const double STOP_ITERATION = 0.85; // stop iterating once the inlier ratio exceeds this value
+
+const int max_age = 1; // tracking parameters (SORT-style)
+const int min_hits = 3;
+const double distanceThreshold = 100.0;
+
+// One detected lane after fitting/tracking.
+typedef struct lane_info
+{
+    int id;
+    int type;                    // lane type index (taken from the type image — confirm in get_lane_info)
+    int color;
+    Point2f sort_point;          // point used for left-to-right ordering
+    Point2f world_point;
+    vector<Point2f> lane_point;  // sampled image points along the lane
+    Mat para;                    // polynomial fit coefficients
+    float min_y;
+    float max_y;
+    double curvature;
+}lane_info;
+
+// Per-frame tracker output before lane attributes are attached.
+typedef struct tracking_info
+{
+    int frame;
+    int id;
+    int min_y;
+    int max_y;
+    Mat para;
+}tracking_info;
+
+// Sorting / filtering predicates used by the pipeline below.
+bool SortLaneBySampleNum(std::vector<Point2f> &lane1, std::vector<Point2f> &lane2);
+bool SortLaneByx(lane_info &lane1, lane_info &lane2);
+bool SortLaneByxDesc(lane_info &lane1, lane_info &lane2);
+bool SortPointByY(cv::Point2f &point1, cv::Point2f &point2);
+bool SortMatByX(std::vector<Mat> &mat1, std::vector<Mat> &mat2);
+bool isShortLane(vector<Point2f> &point);
+/* remove points set which is small in point num */
+int eliminate_fewer_points(vector<vector<Point2f>> &point);
+// Fisher - Yates shuffle
+template<class bidiiter>
+bidiiter random_unique(bidiiter begin, bidiiter end, size_t num_random);
+
+// Lane segmentation pipeline: inference -> binary/instance masks ->
+// clustering -> polynomial fit -> Kalman tracking (process() is the entry point).
+class LaneNet
+{
+public:
+    int IMAGE_WIDTH=640;
+    int IMAGE_HEIGHT=480;
+public:
+    void doInference(IExecutionContext& context, float* input, float* output_bin, float* output_ins, int batch_size, int input_size, int output_size_bin, int output_size_ins);
+    void morphological_process(Mat &input,float length_threshold, Mat &output);
+    bool get_lane_area(Mat &binary, Mat &instance, vector<vector<float> > &instance_feature,
+                       vector<Point2f> &instance_index);
+    int MaxMinCluster(std::vector<std::vector<float>> sample,float theta,int *clas);
+    void LaneFeatsCluster(std::vector<std::vector<float>> &features,std::vector<Point2f> &coordinates,float theta,
+                                       std::vector<std::vector<Point2f>> &lane_coords);
+    bool polyfit(vector<Point2f> &in_point,int n, Mat &fit);
+    //bool RANSAC_Para(int iterations, int samples_num, int inlier_threshold, double error_threshold,
+                               //vector<Point2f> &in_point,Mat &best_model);
+
+    bool delete_outliers(vector<vector<Point2f>> &src_lane_coords,vector<vector<Point2f>> &dst_lane_coords);
+    bool ransac_point(int iterations, int samples_num, int inlier_threshold,
+                                 double error_threshold, vector<Point2f> &in_point);
+    bool LaneFitImg(std::vector<std::vector<Point2f>> &lane_coords, int n, std::vector<Mat> &lanes_para,
+                    std::vector<std::vector<Point2f>> &lanes_point);
+    void preprocess(float* prob_bin, float*prob_ins,float* binary_p,float* instance_p);
+    bool convertToMat(float* binary_p,float* instance_p,Mat &type_mat,Mat &instance_mat,Mat &instance_mat_uchar);
+    bool track_lanes(bool fit_flag,std::vector<KalmanTracker> &trackers,std::vector<Mat> &lanes_para,
+                     std::vector<std::vector<Point2f>> &lanes_point,
+                     vector<ld::tracking_info> &tracking_result);
+    bool get_lane_info(Mat &type_img,vector<ld::tracking_info> &tracking_result,std::vector<ld::lane_info> &lanes);
+    // Entry point: runs the whole pipeline for one frame.
+    bool process(IExecutionContext& context, float* input, std::vector<KalmanTracker> &trackers,std::vector<lane_info> &lanes);
+};
+}
+#endif // LANEDETECT_H

+ 78 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/lane_detection/tensorrt/tensorrt.h

@@ -0,0 +1,78 @@
+#ifndef TENSORRT_H
+#define TENSORRT_H
+
+#include "common/cuda_utils.h"
+#include "NvInfer.h"
+#include "NvOnnxParser.h"
+#include <numeric>
+#include <cudnn.h>
+#include <thread>
+#include <algorithm>
+#include <assert.h>
+#include <cmath>
+#include <cuda_runtime_api.h>
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <sstream>
+#include <sys/stat.h>
+#include <time.h>
+#include <memory>
+#include <vector>
+#include <opencv2/opencv.hpp>
+
+// NOTE(review): `using namespace` directives at header scope leak into every
+// includer; prefer qualified names in headers.
+using namespace nvinfer1;
+using namespace std;
+namespace ld
+{
+//Logger for TensorRT info/warning/errors
+class Logger : public nvinfer1::ILogger
+{
+public:
+
+    Logger(): Logger(Severity::kWARNING) {}
+
+    Logger(Severity severity): reportableSeverity(severity) {}
+
+    void log(Severity severity, const char* msg) override
+    {
+        // suppress messages with severity enum value greater than the reportable
+        if (severity > reportableSeverity) return;
+
+        switch (severity)
+        {
+            case Severity::kINTERNAL_ERROR: std::cerr << "INTERNAL_ERROR: "; break;
+            case Severity::kERROR: std::cerr << "ERROR: "; break;
+            case Severity::kWARNING: std::cerr << "WARNING: "; break;
+            case Severity::kINFO: std::cerr << "INFO: "; break;
+            default: std::cerr << "UNKNOWN: "; break;
+        }
+        std::cerr << msg << std::endl;
+    }
+
+    Severity reportableSeverity{Severity::kWARNING};
+};
+// Builds a TensorRT engine from an ONNX model (or loads a cached engine) and
+// provides image-to-tensor preprocessing for the lane network.
+class TensorRT
+{
+private:
+    string ONNX_FILEPATH;
+    string ENGINE_FILEPATH;
+    Logger gLogger;
+
+private:
+    ICudaEngine* onnxToEngine(const std::string& modelFile, // name of the onnx model
+                          unsigned int maxBatchSize);
+    bool onnxToTRTModel(const std::string& modelFile, // name of the onnx model
+                    unsigned int maxBatchSize,    // batch size - NB must be at least as large as the batch we want to run with
+                    IHostMemory*& trtModelStream); // output buffer for the TensorRT model
+    void SaveEngine(const nvinfer1::IHostMemory& trtModelStream, const std::string& engine_filepath);
+    ICudaEngine* LoadEngine(IRuntime& runtime, const std::string& engine_filepath);
+    void demo_save_onnx_to_trt(const std::string& onnx_filepath, const std::string& engine_filepath);
+
+public:
+    // Must be called before loadModel(); stores the model/engine file paths.
+    void setPath(string onnx_filepath, string engine_filepath);
+    bool loadModel(IExecutionContext*& context);
+    // Resizes `input` and writes a normalized CHW float tensor into `data`
+    // using the given per-channel mean/std.
+    void loadImg(cv::Mat &input, int re_width, int re_height, float *data, float *mean, float* std);
+};
+}
+#endif // TENSORRT_H

+ 2209 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/common/impl/cxxopts.hpp

@@ -0,0 +1,2209 @@
+/*
+Copyright (c) 2014, 2015, 2016, 2017 Jarryd Beck
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+*/
+
+#ifndef CXXOPTS_HPP_INCLUDED
+#define CXXOPTS_HPP_INCLUDED
+
+#include <cstring>
+#include <cctype>
+#include <exception>
+#include <iostream>
+#include <limits>
+#include <map>
+#include <memory>
+#include <regex>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#ifdef __cpp_lib_optional
+#include <optional>
+#define CXXOPTS_HAS_OPTIONAL
+#endif
+
+#ifndef CXXOPTS_VECTOR_DELIMITER
+#define CXXOPTS_VECTOR_DELIMITER ','
+#endif
+
+#define CXXOPTS__VERSION_MAJOR 2
+#define CXXOPTS__VERSION_MINOR 2
+#define CXXOPTS__VERSION_PATCH 0
+
+namespace cxxopts
+{
+  static constexpr struct {
+    uint8_t major, minor, patch;
+  } version = {
+    CXXOPTS__VERSION_MAJOR,
+    CXXOPTS__VERSION_MINOR,
+    CXXOPTS__VERSION_PATCH
+  };
+}
+
+//when we ask cxxopts to use Unicode, help strings are processed using ICU,
+//which results in the correct lengths being computed for strings when they
+//are formatted for the help output
+//it is necessary to make sure that <unicode/unistr.h> can be found by the
+//compiler, and that icu-uc is linked in to the binary.
+
+#ifdef CXXOPTS_USE_UNICODE
+#include <unicode/unistr.h>
+
+namespace cxxopts
+{
+  typedef icu::UnicodeString String;
+
+  inline
+  String
+  toLocalString(std::string s)
+  {
+    return icu::UnicodeString::fromUTF8(std::move(s));
+  }
+
+  class UnicodeStringIterator : public
+    std::iterator<std::forward_iterator_tag, int32_t>
+  {
+    public:
+
+    UnicodeStringIterator(const icu::UnicodeString* string, int32_t pos)
+    : s(string)
+    , i(pos)
+    {
+    }
+
+    value_type
+    operator*() const
+    {
+      return s->char32At(i);
+    }
+
+    bool
+    operator==(const UnicodeStringIterator& rhs) const
+    {
+      return s == rhs.s && i == rhs.i;
+    }
+
+    bool
+    operator!=(const UnicodeStringIterator& rhs) const
+    {
+      return !(*this == rhs);
+    }
+
+    UnicodeStringIterator&
+    operator++()
+    {
+      ++i;
+      return *this;
+    }
+
+    UnicodeStringIterator
+    operator+(int32_t v)
+    {
+      return UnicodeStringIterator(s, i + v);
+    }
+
+    private:
+    const icu::UnicodeString* s;
+    int32_t i;
+  };
+
+  inline
+  String&
+  stringAppend(String&s, String a)
+  {
+    return s.append(std::move(a));
+  }
+
+  inline
+  String&
+  stringAppend(String& s, int n, UChar32 c)
+  {
+    for (int i = 0; i != n; ++i)
+    {
+      s.append(c);
+    }
+
+    return s;
+  }
+
+  template <typename Iterator>
+  String&
+  stringAppend(String& s, Iterator begin, Iterator end)
+  {
+    while (begin != end)
+    {
+      s.append(*begin);
+      ++begin;
+    }
+
+    return s;
+  }
+
+  inline
+  size_t
+  stringLength(const String& s)
+  {
+    return s.length();
+  }
+
+  inline
+  std::string
+  toUTF8String(const String& s)
+  {
+    std::string result;
+    s.toUTF8String(result);
+
+    return result;
+  }
+
+  inline
+  bool
+  empty(const String& s)
+  {
+    return s.isEmpty();
+  }
+}
+
+namespace std
+{
+  inline
+  cxxopts::UnicodeStringIterator
+  begin(const icu::UnicodeString& s)
+  {
+    return cxxopts::UnicodeStringIterator(&s, 0);
+  }
+
+  inline
+  cxxopts::UnicodeStringIterator
+  end(const icu::UnicodeString& s)
+  {
+    return cxxopts::UnicodeStringIterator(&s, s.length());
+  }
+}
+
+//ifdef CXXOPTS_USE_UNICODE
+#else
+
+namespace cxxopts
+{
+  typedef std::string String;
+
+  template <typename T>
+  T
+  toLocalString(T&& t)
+  {
+    return std::forward<T>(t);
+  }
+
+  inline
+  size_t
+  stringLength(const String& s)
+  {
+    return s.length();
+  }
+
+  inline
+  String&
+  stringAppend(String&s, String a)
+  {
+    return s.append(std::move(a));
+  }
+
+  inline
+  String&
+  stringAppend(String& s, size_t n, char c)
+  {
+    return s.append(n, c);
+  }
+
+  template <typename Iterator>
+  String&
+  stringAppend(String& s, Iterator begin, Iterator end)
+  {
+    return s.append(begin, end);
+  }
+
+  template <typename T>
+  std::string
+  toUTF8String(T&& t)
+  {
+    return std::forward<T>(t);
+  }
+
+  inline
+  bool
+  empty(const std::string& s)
+  {
+    return s.empty();
+  }
+}
+
+//ifdef CXXOPTS_USE_UNICODE
+#endif
+
+namespace cxxopts
+{
+  namespace
+  {
+#ifdef _WIN32
+    const std::string LQUOTE("\'");
+    const std::string RQUOTE("\'");
+#else
+    const std::string LQUOTE("‘");
+    const std::string RQUOTE("’");
+#endif
+  }
+
+  // Abstract interface for a parsed option value (vendored cxxopts).
+  // Concrete implementations (values::standard_value<T>) store the typed
+  // result; enable_shared_from_this lets the fluent setters below return
+  // a shared_ptr to *this.
+  class Value : public std::enable_shared_from_this<Value>
+  {
+    public:
+
+    virtual ~Value() = default;
+
+    // Deep-copy this value holder (used when an option's storage is created).
+    virtual
+    std::shared_ptr<Value>
+    clone() const = 0;
+
+    // Parse `text` into the underlying storage; throws on bad input.
+    virtual void
+    parse(const std::string& text) const = 0;
+
+    // Parse the configured default value into the underlying storage.
+    virtual void
+    parse() const = 0;
+
+    virtual bool
+    has_default() const = 0;
+
+    // True for std::vector<T>-backed values (option may repeat).
+    virtual bool
+    is_container() const = 0;
+
+    virtual bool
+    has_implicit() const = 0;
+
+    virtual std::string
+    get_default_value() const = 0;
+
+    virtual std::string
+    get_implicit_value() const = 0;
+
+    // Fluent setters; each returns shared_from_this() for chaining.
+    virtual std::shared_ptr<Value>
+    default_value(const std::string& value) = 0;
+
+    virtual std::shared_ptr<Value>
+    implicit_value(const std::string& value) = 0;
+
+    virtual std::shared_ptr<Value>
+    no_implicit_value() = 0;
+
+    virtual bool
+    is_boolean() const = 0;
+  };
+
+  // Root of the cxxopts exception hierarchy; carries a preformatted message
+  // returned verbatim from what().
+  class OptionException : public std::exception
+  {
+    public:
+    OptionException(const std::string& message)
+    : m_message(message)
+    {
+    }
+
+    virtual const char*
+    what() const noexcept
+    {
+      return m_message.c_str();
+    }
+
+    private:
+    std::string m_message;
+  };
+
+  class OptionSpecException : public OptionException
+  {
+    public:
+
+    OptionSpecException(const std::string& message)
+    : OptionException(message)
+    {
+    }
+  };
+
+  class OptionParseException : public OptionException
+  {
+    public:
+    OptionParseException(const std::string& message)
+    : OptionException(message)
+    {
+    }
+  };
+
+  class option_exists_error : public OptionSpecException
+  {
+    public:
+    option_exists_error(const std::string& option)
+    : OptionSpecException("Option " + LQUOTE + option + RQUOTE + " already exists")
+    {
+    }
+  };
+
+  class invalid_option_format_error : public OptionSpecException
+  {
+    public:
+    invalid_option_format_error(const std::string& format)
+    : OptionSpecException("Invalid option format " + LQUOTE + format + RQUOTE)
+    {
+    }
+  };
+
+  class option_syntax_exception : public OptionParseException {
+    public:
+    option_syntax_exception(const std::string& text)
+    : OptionParseException("Argument " + LQUOTE + text + RQUOTE +
+        " starts with a - but has incorrect syntax")
+    {
+    }
+  };
+
+  class option_not_exists_exception : public OptionParseException
+  {
+    public:
+    option_not_exists_exception(const std::string& option)
+    : OptionParseException("Option " + LQUOTE + option + RQUOTE + " does not exist")
+    {
+    }
+  };
+
+  class missing_argument_exception : public OptionParseException
+  {
+    public:
+    missing_argument_exception(const std::string& option)
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE + " is missing an argument"
+      )
+    {
+    }
+  };
+
+  class option_requires_argument_exception : public OptionParseException
+  {
+    public:
+    option_requires_argument_exception(const std::string& option)
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE + " requires an argument"
+      )
+    {
+    }
+  };
+
+  class option_not_has_argument_exception : public OptionParseException
+  {
+    public:
+    option_not_has_argument_exception
+    (
+      const std::string& option,
+      const std::string& arg
+    )
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE +
+        " does not take an argument, but argument " +
+        LQUOTE + arg + RQUOTE + " given"
+      )
+    {
+    }
+  };
+
+  class option_not_present_exception : public OptionParseException
+  {
+    public:
+    option_not_present_exception(const std::string& option)
+    : OptionParseException("Option " + LQUOTE + option + RQUOTE + " not present")
+    {
+    }
+  };
+
+  class argument_incorrect_type : public OptionParseException
+  {
+    public:
+    argument_incorrect_type
+    (
+      const std::string& arg
+    )
+    : OptionParseException(
+        "Argument " + LQUOTE + arg + RQUOTE + " failed to parse"
+      )
+    {
+    }
+  };
+
+  class option_required_exception : public OptionParseException
+  {
+    public:
+    option_required_exception(const std::string& option)
+    : OptionParseException(
+        "Option " + LQUOTE + option + RQUOTE + " is required but not present"
+      )
+    {
+    }
+  };
+
+  // Raise error T carrying `text`. With exceptions enabled this simply throws;
+  // when built with CXXOPTS_NO_EXCEPTIONS it instead prints what() to stderr
+  // and aborts, so callers behave the same under -fno-exceptions builds.
+  template <typename T>
+  void throw_or_mimic(const std::string& text)
+  {
+    static_assert(std::is_base_of<std::exception, T>::value,
+                  "throw_or_mimic only works on std::exception and "
+                  "deriving classes");
+
+#ifndef CXXOPTS_NO_EXCEPTIONS
+    // If CXXOPTS_NO_EXCEPTIONS is not defined, just throw
+    throw T{text};
+#else
+    // Otherwise manually instantiate the exception, print what() to stderr,
+    // and abort
+    T exception{text};
+    std::cerr << exception.what() << std::endl;
+    std::cerr << "Aborting (exceptions disabled)..." << std::endl;
+    std::abort();
+#endif
+  }
+
+  namespace values
+  {
+    namespace
+    {
+      std::basic_regex<char> integer_pattern
+        ("(-)?(0x)?([0-9a-zA-Z]+)|((0x)?0)");
+      std::basic_regex<char> truthy_pattern
+        ("(t|T)(rue)?|1");
+      std::basic_regex<char> falsy_pattern
+        ("(f|F)(alse)?|0");
+    }
+
+    namespace detail
+    {
+      template <typename T, bool B>
+      struct SignedCheck;
+
+      template <typename T>
+      struct SignedCheck<T, true>
+      {
+        template <typename U>
+        void
+        operator()(bool negative, U u, const std::string& text)
+        {
+          if (negative)
+          {
+            if (u > static_cast<U>((std::numeric_limits<T>::min)()))
+            {
+              throw_or_mimic<argument_incorrect_type>(text);
+            }
+          }
+          else
+          {
+            if (u > static_cast<U>((std::numeric_limits<T>::max)()))
+            {
+              throw_or_mimic<argument_incorrect_type>(text);
+            }
+          }
+        }
+      };
+
+      template <typename T>
+      struct SignedCheck<T, false>
+      {
+        template <typename U>
+        void
+        operator()(bool, U, const std::string&) {}
+      };
+
+      template <typename T, typename U>
+      void
+      check_signed_range(bool negative, U value, const std::string& text)
+      {
+        SignedCheck<T, std::numeric_limits<T>::is_signed>()(negative, value, text);
+      }
+    }
+
+    template <typename R, typename T>
+    R
+    checked_negate(T&& t, const std::string&, std::true_type)
+    {
+      // if we got to here, then `t` is a positive number that fits into
+      // `R`. So to avoid MSVC C4146, we first cast it to `R`.
+      // See https://github.com/jarro2783/cxxopts/issues/62 for more details.
+      return -static_cast<R>(t-1)-1;
+    }
+
+    template <typename R, typename T>
+    T
+    checked_negate(T&& t, const std::string& text, std::false_type)
+    {
+      throw_or_mimic<argument_incorrect_type>(text);
+      return t;
+    }
+
+    // Parse a decimal or 0x-prefixed hex integer (optionally negative) from
+    // `text` into `value`, with explicit overflow and signed-range checking.
+    // Accumulation is done in the unsigned counterpart US of T; any malformed
+    // digit, overflow, or out-of-range result raises argument_incorrect_type
+    // via throw_or_mimic.
+    template <typename T>
+    void
+    integer_parser(const std::string& text, T& value)
+    {
+      std::smatch match;
+      std::regex_match(text, match, integer_pattern);
+
+      // No match at all: not an integer literal.
+      if (match.length() == 0)
+      {
+        throw_or_mimic<argument_incorrect_type>(text);
+      }
+
+      // Alternative group 4 matched "(0x)?0" — the value is exactly zero.
+      if (match.length(4) > 0)
+      {
+        value = 0;
+        return;
+      }
+
+      using US = typename std::make_unsigned<T>::type;
+
+      constexpr bool is_signed = std::numeric_limits<T>::is_signed;
+      const bool negative = match.length(1) > 0;      // leading '-'
+      const uint8_t base = match.length(2) > 0 ? 16 : 10; // "0x" prefix selects hex
+
+      auto value_match = match[3];
+
+      US result = 0;
+
+      // Accumulate digit by digit; the regex admits any alnum char, so
+      // non-digits for the chosen base are rejected here.
+      for (auto iter = value_match.first; iter != value_match.second; ++iter)
+      {
+        US digit = 0;
+
+        if (*iter >= '0' && *iter <= '9')
+        {
+          digit = static_cast<US>(*iter - '0');
+        }
+        else if (base == 16 && *iter >= 'a' && *iter <= 'f')
+        {
+          digit = static_cast<US>(*iter - 'a' + 10);
+        }
+        else if (base == 16 && *iter >= 'A' && *iter <= 'F')
+        {
+          digit = static_cast<US>(*iter - 'A' + 10);
+        }
+        else
+        {
+          throw_or_mimic<argument_incorrect_type>(text);
+        }
+
+        // Unsigned wrap-around detection: next < result means overflow.
+        US next = result * base + digit;
+        if (result > next)
+        {
+          throw_or_mimic<argument_incorrect_type>(text);
+        }
+
+        result = next;
+      }
+
+      // Reject magnitudes outside T's range (only meaningful for signed T).
+      detail::check_signed_range<T>(negative, result, text);
+
+      if (negative)
+      {
+        // Negate without UB; throws for unsigned T (std::false_type overload).
+        value = checked_negate<T>(result,
+          text,
+          std::integral_constant<bool, is_signed>());
+      }
+      else
+      {
+        value = static_cast<T>(result);
+      }
+    }
+
+    template <typename T>
+    void stringstream_parser(const std::string& text, T& value)
+    {
+      std::stringstream in(text);
+      in >> value;
+      if (!in) {
+        throw_or_mimic<argument_incorrect_type>(text);
+      }
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint8_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int8_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint16_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int16_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint32_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int32_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, uint64_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, int64_t& value)
+    {
+      integer_parser(text, value);
+    }
+
+    // Parse a boolean: "t", "T", "true", "True", "1" => true;
+    // "f", "F", "false", "False", "0" => false (per truthy/falsy patterns).
+    // Anything else raises argument_incorrect_type.
+    inline
+    void
+    parse_value(const std::string& text, bool& value)
+    {
+      std::smatch result;
+      std::regex_match(text, result, truthy_pattern);
+
+      if (!result.empty())
+      {
+        value = true;
+        return;
+      }
+
+      std::regex_match(text, result, falsy_pattern);
+      if (!result.empty())
+      {
+        value = false;
+        return;
+      }
+
+      throw_or_mimic<argument_incorrect_type>(text);
+    }
+
+    inline
+    void
+    parse_value(const std::string& text, std::string& value)
+    {
+      value = text;
+    }
+
+    // The fallback parser. It uses the stringstream parser to parse all types
+    // that have not been overloaded explicitly.  It has to be placed in the
+    // source code before all other more specialized templates.
+    template <typename T>
+    void
+    parse_value(const std::string& text, T& value) {
+      stringstream_parser(text, value);
+    }
+
+    template <typename T>
+    void
+    parse_value(const std::string& text, std::vector<T>& value)
+    {
+      std::stringstream in(text);
+      std::string token;
+      while(in.eof() == false && std::getline(in, token, CXXOPTS_VECTOR_DELIMITER)) {
+        T v;
+        parse_value(token, v);
+        value.emplace_back(std::move(v));
+      }
+    }
+
+#ifdef CXXOPTS_HAS_OPTIONAL
+    template <typename T>
+    void
+    parse_value(const std::string& text, std::optional<T>& value)
+    {
+      T result;
+      parse_value(text, result);
+      value = std::move(result);
+    }
+#endif
+
+    inline
+    void parse_value(const std::string& text, char& c)
+    {
+      if (text.length() != 1)
+      {
+        throw_or_mimic<argument_incorrect_type>(text);
+      }
+
+      c = text[0];
+    }
+
+    template <typename T>
+    struct type_is_container
+    {
+      static constexpr bool value = false;
+    };
+
+    template <typename T>
+    struct type_is_container<std::vector<T>>
+    {
+      static constexpr bool value = true;
+    };
+
+    template <typename T>
+    class abstract_value : public Value
+    {
+      using Self = abstract_value<T>;
+
+      public:
+      abstract_value()
+      : m_result(std::make_shared<T>())
+      , m_store(m_result.get())
+      {
+      }
+
+      abstract_value(T* t)
+      : m_store(t)
+      {
+      }
+
+      virtual ~abstract_value() = default;
+
+      abstract_value(const abstract_value& rhs)
+      {
+        if (rhs.m_result)
+        {
+          m_result = std::make_shared<T>();
+          m_store = m_result.get();
+        }
+        else
+        {
+          m_store = rhs.m_store;
+        }
+
+        m_default = rhs.m_default;
+        m_implicit = rhs.m_implicit;
+        m_default_value = rhs.m_default_value;
+        m_implicit_value = rhs.m_implicit_value;
+      }
+
+      void
+      parse(const std::string& text) const
+      {
+        parse_value(text, *m_store);
+      }
+
+      bool
+      is_container() const
+      {
+        return type_is_container<T>::value;
+      }
+
+      void
+      parse() const
+      {
+        parse_value(m_default_value, *m_store);
+      }
+
+      bool
+      has_default() const
+      {
+        return m_default;
+      }
+
+      bool
+      has_implicit() const
+      {
+        return m_implicit;
+      }
+
+      std::shared_ptr<Value>
+      default_value(const std::string& value)
+      {
+        m_default = true;
+        m_default_value = value;
+        return shared_from_this();
+      }
+
+      std::shared_ptr<Value>
+      implicit_value(const std::string& value)
+      {
+        m_implicit = true;
+        m_implicit_value = value;
+        return shared_from_this();
+      }
+
+      std::shared_ptr<Value>
+      no_implicit_value()
+      {
+        m_implicit = false;
+        return shared_from_this();
+      }
+
+      std::string
+      get_default_value() const
+      {
+        return m_default_value;
+      }
+
+      std::string
+      get_implicit_value() const
+      {
+        return m_implicit_value;
+      }
+
+      bool
+      is_boolean() const
+      {
+        return std::is_same<T, bool>::value;
+      }
+
+      const T&
+      get() const
+      {
+        if (m_store == nullptr)
+        {
+          return *m_result;
+        }
+        else
+        {
+          return *m_store;
+        }
+      }
+
+      protected:
+      std::shared_ptr<T> m_result;
+      T* m_store;
+
+      bool m_default = false;
+      bool m_implicit = false;
+
+      std::string m_default_value;
+      std::string m_implicit_value;
+    };
+
+    template <typename T>
+    class standard_value : public abstract_value<T>
+    {
+      public:
+      using abstract_value<T>::abstract_value;
+
+      std::shared_ptr<Value>
+      clone() const
+      {
+        return std::make_shared<standard_value<T>>(*this);
+      }
+    };
+
+    template <>
+    class standard_value<bool> : public abstract_value<bool>
+    {
+      public:
+      ~standard_value() = default;
+
+      standard_value()
+      {
+        set_default_and_implicit();
+      }
+
+      standard_value(bool* b)
+      : abstract_value(b)
+      {
+        set_default_and_implicit();
+      }
+
+      std::shared_ptr<Value>
+      clone() const
+      {
+        return std::make_shared<standard_value<bool>>(*this);
+      }
+
+      private:
+
+      void
+      set_default_and_implicit()
+      {
+        m_default = true;
+        m_default_value = "false";
+        m_implicit = true;
+        m_implicit_value = "true";
+      }
+    };
+  }
+
+  template <typename T>
+  std::shared_ptr<Value>
+  value()
+  {
+    return std::make_shared<values::standard_value<T>>();
+  }
+
+  template <typename T>
+  std::shared_ptr<Value>
+  value(T& t)
+  {
+    return std::make_shared<values::standard_value<T>>(&t);
+  }
+
+  class OptionAdder;
+
+  class OptionDetails
+  {
+    public:
+    OptionDetails
+    (
+      const std::string& short_,
+      const std::string& long_,
+      const String& desc,
+      std::shared_ptr<const Value> val
+    )
+    : m_short(short_)
+    , m_long(long_)
+    , m_desc(desc)
+    , m_value(val)
+    , m_count(0)
+    {
+    }
+
+    OptionDetails(const OptionDetails& rhs)
+    : m_desc(rhs.m_desc)
+    , m_count(rhs.m_count)
+    {
+      m_value = rhs.m_value->clone();
+    }
+
+    OptionDetails(OptionDetails&& rhs) = default;
+
+    const String&
+    description() const
+    {
+      return m_desc;
+    }
+
+    const Value& value() const {
+        return *m_value;
+    }
+
+    std::shared_ptr<Value>
+    make_storage() const
+    {
+      return m_value->clone();
+    }
+
+    const std::string&
+    short_name() const
+    {
+      return m_short;
+    }
+
+    const std::string&
+    long_name() const
+    {
+      return m_long;
+    }
+
+    private:
+    std::string m_short;
+    std::string m_long;
+    String m_desc;
+    std::shared_ptr<const Value> m_value;
+    int m_count;
+  };
+
+  struct HelpOptionDetails
+  {
+    std::string s;
+    std::string l;
+    String desc;
+    bool has_default;
+    std::string default_value;
+    bool has_implicit;
+    std::string implicit_value;
+    std::string arg_help;
+    bool is_container;
+    bool is_boolean;
+  };
+
+  struct HelpGroupDetails
+  {
+    std::string name;
+    std::string description;
+    std::vector<HelpOptionDetails> options;
+  };
+
+  // Per-option parse state: lazily clones the option's Value storage on first
+  // use, counts occurrences, and exposes the typed result via as<T>().
+  class OptionValue
+  {
+    public:
+    // Parse one occurrence of the option from `text`; bumps the count.
+    void
+    parse
+    (
+      std::shared_ptr<const OptionDetails> details,
+      const std::string& text
+    )
+    {
+      ensure_value(details);
+      ++m_count;
+      m_value->parse(text);
+    }
+
+    // Apply the option's configured default (does not bump the count).
+    void
+    parse_default(std::shared_ptr<const OptionDetails> details)
+    {
+      ensure_value(details);
+      m_default = true;
+      m_value->parse();
+    }
+
+    // Number of times the option appeared on the command line.
+    size_t
+    count() const noexcept
+    {
+      return m_count;
+    }
+
+    // TODO: maybe default options should count towards the number of arguments
+    bool
+    has_default() const noexcept
+    {
+      return m_default;
+    }
+
+    // Typed access to the parsed result. Throws std::domain_error (via
+    // throw_or_mimic) if the option was never parsed. Uses static_cast when
+    // RTTI is disabled, dynamic_cast otherwise.
+    template <typename T>
+    const T&
+    as() const
+    {
+      if (m_value == nullptr) {
+        throw_or_mimic<std::domain_error>("No value");
+      }
+
+#ifdef CXXOPTS_NO_RTTI
+      return static_cast<const values::standard_value<T>&>(*m_value).get();
+#else
+      return dynamic_cast<const values::standard_value<T>&>(*m_value).get();
+#endif
+    }
+
+    private:
+    // Clone the option's storage the first time a value is needed.
+    void
+    ensure_value(std::shared_ptr<const OptionDetails> details)
+    {
+      if (m_value == nullptr)
+      {
+        m_value = details->make_storage();
+      }
+    }
+
+    std::shared_ptr<Value> m_value;  // cloned storage, null until first parse
+    size_t m_count = 0;              // explicit occurrences on the command line
+    bool m_default = false;          // true if the default value was applied
+  };
+
+  class KeyValue
+  {
+    public:
+    KeyValue(std::string key_, std::string value_)
+    : m_key(std::move(key_))
+    , m_value(std::move(value_))
+    {
+    }
+
+    const
+    std::string&
+    key() const
+    {
+      return m_key;
+    }
+
+    const
+    std::string&
+    value() const
+    {
+      return m_value;
+    }
+
+    template <typename T>
+    T
+    as() const
+    {
+      T result;
+      values::parse_value(m_value, result);
+      return result;
+    }
+
+    private:
+    std::string m_key;
+    std::string m_value;
+  };
+
+  class ParseResult
+  {
+    public:
+
+    ParseResult(
+      const std::shared_ptr<
+        std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+      >,
+      std::vector<std::string>,
+      bool allow_unrecognised,
+      int&, char**&);
+
+    size_t
+    count(const std::string& o) const
+    {
+      auto iter = m_options->find(o);
+      if (iter == m_options->end())
+      {
+        return 0;
+      }
+
+      auto riter = m_results.find(iter->second);
+
+      return riter->second.count();
+    }
+
+    const OptionValue&
+    operator[](const std::string& option) const
+    {
+      auto iter = m_options->find(option);
+
+      if (iter == m_options->end())
+      {
+        throw_or_mimic<option_not_present_exception>(option);
+      }
+
+      auto riter = m_results.find(iter->second);
+
+      return riter->second;
+    }
+
+    const std::vector<KeyValue>&
+    arguments() const
+    {
+      return m_sequential;
+    }
+
+    private:
+
+    void
+    parse(int& argc, char**& argv);
+
+    void
+    add_to_option(const std::string& option, const std::string& arg);
+
+    bool
+    consume_positional(std::string a);
+
+    void
+    parse_option
+    (
+      std::shared_ptr<OptionDetails> value,
+      const std::string& name,
+      const std::string& arg = ""
+    );
+
+    void
+    parse_default(std::shared_ptr<OptionDetails> details);
+
+    void
+    checked_parse_arg
+    (
+      int argc,
+      char* argv[],
+      int& current,
+      std::shared_ptr<OptionDetails> value,
+      const std::string& name
+    );
+
+    const std::shared_ptr<
+      std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+    > m_options;
+    std::vector<std::string> m_positional;
+    std::vector<std::string>::iterator m_next_positional;
+    std::unordered_set<std::string> m_positional_set;
+    std::unordered_map<std::shared_ptr<OptionDetails>, OptionValue> m_results;
+
+    bool m_allow_unrecognised;
+
+    std::vector<KeyValue> m_sequential;
+  };
+
+  struct Option
+  {
+    Option
+    (
+      const std::string& opts,
+      const std::string& desc,
+      const std::shared_ptr<const Value>& value = ::cxxopts::value<bool>(),
+      const std::string& arg_help = ""
+    )
+    : opts_(opts)
+    , desc_(desc)
+    , value_(value)
+    , arg_help_(arg_help)
+    {
+    }
+
+    std::string opts_;
+    std::string desc_;
+    std::shared_ptr<const Value> value_;
+    std::string arg_help_;
+  };
+
+  class Options
+  {
+    typedef std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+      OptionMap;
+    public:
+
+    Options(std::string program, std::string help_string = "")
+    : m_program(std::move(program))
+    , m_help_string(toLocalString(std::move(help_string)))
+    , m_custom_help("[OPTION...]")
+    , m_positional_help("positional parameters")
+    , m_show_positional(false)
+    , m_allow_unrecognised(false)
+    , m_options(std::make_shared<OptionMap>())
+    , m_next_positional(m_positional.end())
+    {
+    }
+
+    Options&
+    positional_help(std::string help_text)
+    {
+      m_positional_help = std::move(help_text);
+      return *this;
+    }
+
+    Options&
+    custom_help(std::string help_text)
+    {
+      m_custom_help = std::move(help_text);
+      return *this;
+    }
+
+    Options&
+    show_positional_help()
+    {
+      m_show_positional = true;
+      return *this;
+    }
+
+    Options&
+    allow_unrecognised_options()
+    {
+      m_allow_unrecognised = true;
+      return *this;
+    }
+
+    ParseResult
+    parse(int& argc, char**& argv);
+
+    OptionAdder
+    add_options(std::string group = "");
+
+    void
+    add_options
+    (
+      const std::string& group,
+      std::initializer_list<Option> options
+    );
+
+    void
+    add_option
+    (
+      const std::string& group,
+      const Option& option
+    );
+
+    void
+    add_option
+    (
+      const std::string& group,
+      const std::string& s,
+      const std::string& l,
+      std::string desc,
+      std::shared_ptr<const Value> value,
+      std::string arg_help
+    );
+
+    //parse positional arguments into the given option
+    void
+    parse_positional(std::string option);
+
+    void
+    parse_positional(std::vector<std::string> options);
+
+    void
+    parse_positional(std::initializer_list<std::string> options);
+
+    template <typename Iterator>
+    void
+    parse_positional(Iterator begin, Iterator end) {
+      parse_positional(std::vector<std::string>{begin, end});
+    }
+
+    std::string
+    help(const std::vector<std::string>& groups = {}) const;
+
+    const std::vector<std::string>
+    groups() const;
+
+    const HelpGroupDetails&
+    group_help(const std::string& group) const;
+
+    private:
+
+    void
+    add_one_option
+    (
+      const std::string& option,
+      std::shared_ptr<OptionDetails> details
+    );
+
+    String
+    help_one_group(const std::string& group) const;
+
+    void
+    generate_group_help
+    (
+      String& result,
+      const std::vector<std::string>& groups
+    ) const;
+
+    void
+    generate_all_groups_help(String& result) const;
+
+    std::string m_program;
+    String m_help_string;
+    std::string m_custom_help;
+    std::string m_positional_help;
+    bool m_show_positional;
+    bool m_allow_unrecognised;
+
+    std::shared_ptr<OptionMap> m_options;
+    std::vector<std::string> m_positional;
+    std::vector<std::string>::iterator m_next_positional;
+    std::unordered_set<std::string> m_positional_set;
+
+    //mapping from groups to help options
+    std::map<std::string, HelpGroupDetails> m_help;
+  };
+
+  class OptionAdder
+  {
+    public:
+
+    OptionAdder(Options& options, std::string group)
+    : m_options(options), m_group(std::move(group))
+    {
+    }
+
+    OptionAdder&
+    operator()
+    (
+      const std::string& opts,
+      const std::string& desc,
+      std::shared_ptr<const Value> value
+        = ::cxxopts::value<bool>(),
+      std::string arg_help = ""
+    );
+
+    private:
+    Options& m_options;
+    std::string m_group;
+  };
+
+  namespace
+  {
+    // Layout limits for the generated help text.
+    constexpr int OPTION_LONGEST = 30;
+    constexpr int OPTION_DESC_GAP = 2;
+
+    // Matches one argv token: "--long", "--long=value", or "-abc"
+    // (one or more bundled short options).
+    std::basic_regex<char> option_matcher
+      ("--([[:alnum:]][-_[:alnum:]]+)(=(.*))?|-([[:alnum:]]+)");
+
+    // Matches an option specification string such as "s,long", "long" or "s".
+    std::basic_regex<char> option_specifier
+      ("(([[:alnum:]]),)?[ ]*([[:alnum:]][-_[:alnum:]]*)?");
+
+    // Renders the left-hand help column for one option: "  -s, --long arg".
+    String
+    format_option
+    (
+      const HelpOptionDetails& o
+    )
+    {
+      auto& s = o.s;
+      auto& l = o.l;
+
+      String result = "  ";
+
+      if (s.size() > 0)
+      {
+        result += "-" + toLocalString(s) + ",";
+      }
+      else
+      {
+        // Pad so long names line up whether or not a short name exists.
+        result += "   ";
+      }
+
+      if (l.size() > 0)
+      {
+        result += " --" + toLocalString(l);
+      }
+
+      auto arg = o.arg_help.size() > 0 ? toLocalString(o.arg_help) : "arg";
+
+      // Boolean flags take no argument; otherwise show "[=arg(=implicit)]"
+      // for options with an implicit value, or " arg" for required ones.
+      if (!o.is_boolean)
+      {
+        if (o.has_implicit)
+        {
+          result += " [=" + arg + "(=" + toLocalString(o.implicit_value) + ")]";
+        }
+        else
+        {
+          result += " " + arg;
+        }
+      }
+
+      return result;
+    }
+
+    // Word-wraps an option's description to `width` columns, indenting each
+    // continuation line by `start` spaces. Appends "(default: ...)" unless
+    // the option is a boolean whose default is "false".
+    String
+    format_description
+    (
+      const HelpOptionDetails& o,
+      size_t start,
+      size_t width
+    )
+    {
+      auto desc = o.desc;
+
+      if (o.has_default && (!o.is_boolean || o.default_value != "false"))
+      {
+        if(o.default_value != "")
+        {
+          desc += toLocalString(" (default: " + o.default_value + ")");
+        }
+        else
+        {
+          desc += toLocalString(" (default: \"\")");
+        }
+      }
+
+      String result;
+
+      // Manual wrap: break at the last space seen before exceeding `width`,
+      // or mid-word when a single word is longer than a whole line.
+      auto current = std::begin(desc);
+      auto startLine = current;
+      auto lastSpace = current;
+
+      auto size = size_t{};
+
+      while (current != std::end(desc))
+      {
+        if (*current == ' ')
+        {
+          lastSpace = current;
+        }
+
+        if (*current == '\n')
+        {
+          // Explicit newline in the description resets the line state.
+          startLine = current + 1;
+          lastSpace = startLine;
+        }
+        else if (size > width)
+        {
+          if (lastSpace == startLine)
+          {
+            // No space on this line yet: hard-break inside the word.
+            stringAppend(result, startLine, current + 1);
+            stringAppend(result, "\n");
+            stringAppend(result, start, ' ');
+            startLine = current + 1;
+            lastSpace = startLine;
+          }
+          else
+          {
+            stringAppend(result, startLine, lastSpace);
+            stringAppend(result, "\n");
+            stringAppend(result, start, ' ');
+            startLine = lastSpace + 1;
+          }
+          size = 0;
+        }
+        else
+        {
+          ++size;
+        }
+
+        ++current;
+      }
+
+      //append whatever is left
+      stringAppend(result, startLine, current);
+
+      return result;
+    }
+  }
+
+// Parses argv immediately on construction. `argc`/`argv` are taken by
+// reference: ParseResult::parse() compacts unconsumed arguments to the
+// front of argv and shrinks argc accordingly.
+inline
+ParseResult::ParseResult
+(
+  const std::shared_ptr<
+    std::unordered_map<std::string, std::shared_ptr<OptionDetails>>
+  > options,
+  std::vector<std::string> positional,
+  bool allow_unrecognised,
+  int& argc, char**& argv
+)
+: m_options(options)
+, m_positional(std::move(positional))
+, m_next_positional(m_positional.begin())
+, m_allow_unrecognised(allow_unrecognised)
+{
+  parse(argc, argv);
+}
+
+// Adds a batch of Option descriptors to `group` via a temporary OptionAdder.
+inline
+void
+Options::add_options
+(
+  const std::string &group,
+  std::initializer_list<Option> options
+)
+{
+ OptionAdder option_adder(*this, group);
+ for (const auto &option: options)
+ {
+   option_adder(option.opts_, option.desc_, option.value_, option.arg_help_);
+ }
+}
+
+// Returns an OptionAdder so options can be added in a chained style:
+//   options.add_options("group")("a", ...)("b", ...);
+inline
+OptionAdder
+Options::add_options(std::string group)
+{
+  return OptionAdder(*this, std::move(group));
+}
+
+// Parses the option specification `opts` ("s,long", "long", or "s") and
+// registers it with the owning Options object. A single alphanumeric
+// character supplied as the "long" part is treated as the short name.
+inline
+OptionAdder&
+OptionAdder::operator()
+(
+  const std::string& opts,
+  const std::string& desc,
+  std::shared_ptr<const Value> value,
+  std::string arg_help
+)
+{
+  std::match_results<const char*> result;
+  std::regex_match(opts.c_str(), result, option_specifier);
+
+  if (result.empty())
+  {
+    throw_or_mimic<invalid_option_format_error>(opts);
+  }
+
+  const auto& short_match = result[2];
+  const auto& long_match = result[3];
+
+  if (!short_match.length() && !long_match.length())
+  {
+    throw_or_mimic<invalid_option_format_error>(opts);
+  } else if (long_match.length() == 1 && short_match.length())
+  {
+    // Two single-character names ("a,b") is malformed.
+    throw_or_mimic<invalid_option_format_error>(opts);
+  }
+
+  // Normalise to (short, long): a one-character "long" match is really
+  // the short name, so swap the pair in that case.
+  auto option_names = []
+  (
+    const std::sub_match<const char*>& short_,
+    const std::sub_match<const char*>& long_
+  )
+  {
+    if (long_.length() == 1)
+    {
+      return std::make_tuple(long_.str(), short_.str());
+    }
+    else
+    {
+      return std::make_tuple(short_.str(), long_.str());
+    }
+  }(short_match, long_match);
+
+  m_options.add_option
+  (
+    m_group,
+    std::get<0>(option_names),
+    std::get<1>(option_names),
+    desc,
+    value,
+    std::move(arg_help)
+  );
+
+  return *this;
+}
+
+// Records an option's default value as its parsed result (used for options
+// that never appeared on the command line).
+inline
+void
+ParseResult::parse_default(std::shared_ptr<OptionDetails> details)
+{
+  m_results[details].parse_default(details);
+}
+
+// Stores one parsed value for `value` and appends (long_name, arg) to the
+// in-order sequential list of parsed options.
+inline
+void
+ParseResult::parse_option
+(
+  std::shared_ptr<OptionDetails> value,
+  const std::string& /*name*/,
+  const std::string& arg
+)
+{
+  auto& result = m_results[value];
+  result.parse(value, arg);
+
+  m_sequential.emplace_back(value->long_name(), arg);
+}
+
+// Parses the argument for option `value` (named `name`). Options with an
+// implicit value never consume the next argv token; options without one
+// consume argv[current + 1], or fail when argv is exhausted.
+inline
+void
+ParseResult::checked_parse_arg
+(
+  int argc,
+  char* argv[],
+  int& current,
+  std::shared_ptr<OptionDetails> value,
+  const std::string& name
+)
+{
+  if (current + 1 >= argc)
+  {
+    // No token left to consume.
+    if (value->value().has_implicit())
+    {
+      parse_option(value, name, value->value().get_implicit_value());
+    }
+    else
+    {
+      throw_or_mimic<missing_argument_exception>(name);
+    }
+  }
+  else
+  {
+    if (value->value().has_implicit())
+    {
+      // Implicit value wins; the next token is left for later parsing.
+      parse_option(value, name, value->value().get_implicit_value());
+    }
+    else
+    {
+      parse_option(value, name, argv[current + 1]);
+      ++current;
+    }
+  }
+}
+
+// Looks up `option` by name and feeds it `arg`; throws if the option was
+// never declared.
+inline
+void
+ParseResult::add_to_option(const std::string& option, const std::string& arg)
+{
+  auto iter = m_options->find(option);
+
+  if (iter == m_options->end())
+  {
+    throw_or_mimic<option_not_exists_exception>(option);
+  }
+
+  parse_option(iter->second, option, arg);
+}
+
+// Tries to consume the free argument `a` as the next positional option.
+// Non-container positionals accept at most one value before the cursor
+// advances; a container positional absorbs every remaining free argument.
+// Returns false when no positional slot is left (caller keeps `a` in argv).
+inline
+bool
+ParseResult::consume_positional(std::string a)
+{
+  while (m_next_positional != m_positional.end())
+  {
+    auto iter = m_options->find(*m_next_positional);
+    if (iter != m_options->end())
+    {
+      auto& result = m_results[iter->second];
+      if (!iter->second->value().is_container())
+      {
+        if (result.count() == 0)
+        {
+          add_to_option(*m_next_positional, a);
+          ++m_next_positional;
+          return true;
+        }
+        else
+        {
+          // This positional already has a value; try the next one.
+          ++m_next_positional;
+          continue;
+        }
+      }
+      else
+      {
+        // Container positional: keep appending, never advance the cursor.
+        add_to_option(*m_next_positional, a);
+        return true;
+      }
+    }
+    else
+    {
+      throw_or_mimic<option_not_exists_exception>(*m_next_positional);
+    }
+  }
+
+  return false;
+}
+
+// Declares a single option as the receiver of free (non-flag) arguments.
+inline
+void
+Options::parse_positional(std::string option)
+{
+  parse_positional(std::vector<std::string>{std::move(option)});
+}
+
+// Declares which options receive free arguments, in order.
+inline
+void
+Options::parse_positional(std::vector<std::string> options)
+{
+  m_positional = std::move(options);
+  m_next_positional = m_positional.begin();
+
+  // Remembered so help output can hide positional options by default.
+  m_positional_set.insert(m_positional.begin(), m_positional.end());
+}
+
+// Initializer-list convenience overload.
+inline
+void
+Options::parse_positional(std::initializer_list<std::string> options)
+{
+  parse_positional(std::vector<std::string>(std::move(options)));
+}
+
+// Entry point: parses argv against the declared options. On return,
+// argc/argv contain only the arguments that were not consumed.
+inline
+ParseResult
+Options::parse(int& argc, char**& argv)
+{
+  ParseResult result(m_options, m_positional, m_allow_unrecognised, argc, argv);
+  return result;
+}
+
+// Core argv scanner. Walks argv once; recognised options are parsed, free
+// arguments feed positional options, and anything left over is compacted
+// to the front of argv with argc shrunk to match.
+inline
+void
+ParseResult::parse(int& argc, char**& argv)
+{
+  int current = 1;
+
+  // Next writable argv slot for arguments we do not consume.
+  int nextKeep = 1;
+
+  bool consume_remaining = false;
+
+  while (current != argc)
+  {
+    // A bare "--" ends option parsing; everything after it is positional.
+    if (strcmp(argv[current], "--") == 0)
+    {
+      consume_remaining = true;
+      ++current;
+      break;
+    }
+
+    std::match_results<const char*> result;
+    std::regex_match(argv[current], result, option_matcher);
+
+    if (result.empty())
+    {
+      //not a flag
+
+      // but if it starts with a `-`, then it's an error
+      if (argv[current][0] == '-' && argv[current][1] != '\0') {
+        if (!m_allow_unrecognised) {
+          throw_or_mimic<option_syntax_exception>(argv[current]);
+        }
+      }
+
+      //if true is returned here then it was consumed, otherwise it is
+      //ignored
+      if (consume_positional(argv[current]))
+      {
+      }
+      else
+      {
+        // Not consumed: keep it in the compacted argv.
+        argv[nextKeep] = argv[current];
+        ++nextKeep;
+      }
+      //if we return from here then it was parsed successfully, so continue
+    }
+    else
+    {
+      //short or long option?
+      if (result[4].length() != 0)
+      {
+        // Bundled short options, e.g. "-abc".
+        const std::string& s = result[4];
+
+        for (std::size_t i = 0; i != s.size(); ++i)
+        {
+          std::string name(1, s[i]);
+          auto iter = m_options->find(name);
+
+          if (iter == m_options->end())
+          {
+            if (m_allow_unrecognised)
+            {
+              continue;
+            }
+            else
+            {
+              //error
+              throw_or_mimic<option_not_exists_exception>(name);
+            }
+          }
+
+          auto value = iter->second;
+
+          if (i + 1 == s.size())
+          {
+            //it must be the last argument
+            checked_parse_arg(argc, argv, current, value, name);
+          }
+          else if (value->value().has_implicit())
+          {
+            parse_option(value, name, value->value().get_implicit_value());
+          }
+          else
+          {
+            //error
+            throw_or_mimic<option_requires_argument_exception>(name);
+          }
+        }
+      }
+      else if (result[1].length() != 0)
+      {
+        // Long option, optionally with "=value" captured in result[3].
+        const std::string& name = result[1];
+
+        auto iter = m_options->find(name);
+
+        if (iter == m_options->end())
+        {
+          if (m_allow_unrecognised)
+          {
+            // keep unrecognised options in argument list, skip to next argument
+            argv[nextKeep] = argv[current];
+            ++nextKeep;
+            ++current;
+            continue;
+          }
+          else
+          {
+            //error
+            throw_or_mimic<option_not_exists_exception>(name);
+          }
+        }
+
+        auto opt = iter->second;
+
+        //equals provided for long option?
+        if (result[2].length() != 0)
+        {
+          //parse the option given
+
+          parse_option(opt, name, result[3]);
+        }
+        else
+        {
+          //parse the next argument
+          checked_parse_arg(argc, argv, current, opt, name);
+        }
+      }
+
+    }
+
+    ++current;
+  }
+
+  // Fill in defaults for options that were never seen on the command line.
+  for (auto& opt : *m_options)
+  {
+    auto& detail = opt.second;
+    auto& value = detail->value();
+
+    auto& store = m_results[detail];
+
+    if(value.has_default() && !store.count() && !store.has_default()){
+      parse_default(detail);
+    }
+  }
+
+  // Everything after "--" goes to positional options until they run out.
+  if (consume_remaining)
+  {
+    while (current < argc)
+    {
+      if (!consume_positional(argv[current])) {
+        break;
+      }
+      ++current;
+    }
+
+    //adjust argv for any that couldn't be swallowed
+    while (current != argc) {
+      argv[nextKeep] = argv[current];
+      ++nextKeep;
+      ++current;
+    }
+  }
+
+  argc = nextKeep;
+
+}
+
+// Convenience wrapper: adds a single Option descriptor to `group`.
+inline
+void
+Options::add_option
+(
+  const std::string& group,
+  const Option& option
+)
+{
+    add_options(group, {option});
+}
+
+// Registers an option under its short (`s`) and/or long (`l`) name and
+// records its help metadata in `group`.
+inline
+void
+Options::add_option
+(
+  const std::string& group,
+  const std::string& s,
+  const std::string& l,
+  std::string desc,
+  std::shared_ptr<const Value> value,
+  std::string arg_help
+)
+{
+  auto stringDesc = toLocalString(std::move(desc));
+  // One shared OptionDetails serves both spellings of the option.
+  auto option = std::make_shared<OptionDetails>(s, l, stringDesc, value);
+
+  if (s.size() > 0)
+  {
+    add_one_option(s, option);
+  }
+
+  if (l.size() > 0)
+  {
+    add_one_option(l, option);
+  }
+
+  //add the help details
+  auto& options = m_help[group];
+
+  options.options.emplace_back(HelpOptionDetails{s, l, stringDesc,
+      value->has_default(), value->get_default_value(),
+      value->has_implicit(), value->get_implicit_value(),
+      std::move(arg_help),
+      value->is_container(),
+      value->is_boolean()});
+}
+
+// Inserts one name -> details mapping; a duplicate name is an error.
+inline
+void
+Options::add_one_option
+(
+  const std::string& option,
+  std::shared_ptr<OptionDetails> details
+)
+{
+  auto in = m_options->emplace(option, details);
+
+  if (!in.second)
+  {
+    throw_or_mimic<option_exists_error>(option);
+  }
+}
+
+// Formats the help text for one named group as a two-column layout of
+// option spellings and word-wrapped descriptions.
+inline
+String
+Options::help_one_group(const std::string& g) const
+{
+  typedef std::vector<std::pair<String, String>> OptionHelp;
+
+  auto group = m_help.find(g);
+  if (group == m_help.end())
+  {
+    return "";
+  }
+
+  OptionHelp format;
+
+  size_t longest = 0;
+
+  String result;
+
+  if (!g.empty())
+  {
+    result += toLocalString(" " + g + " options:\n");
+  }
+
+  // First pass: render the left column and measure its widest entry.
+  for (const auto& o : group->second.options)
+  {
+    // Positional options are hidden unless m_show_positional is set.
+    if (m_positional_set.find(o.l) != m_positional_set.end() &&
+        !m_show_positional)
+    {
+      continue;
+    }
+
+    auto s = format_option(o);
+    longest = (std::max)(longest, stringLength(s));
+    format.push_back(std::make_pair(s, String()));
+  }
+
+  // Cap the description column so very long option names don't push it out.
+  longest = (std::min)(longest, static_cast<size_t>(OPTION_LONGEST));
+
+  //widest allowed description
+  auto allowed = size_t{76} - longest - OPTION_DESC_GAP;
+
+  // Second pass: attach wrapped descriptions, padding (or line-breaking)
+  // so all descriptions start in a common column.
+  auto fiter = format.begin();
+  for (const auto& o : group->second.options)
+  {
+    if (m_positional_set.find(o.l) != m_positional_set.end() &&
+        !m_show_positional)
+    {
+      continue;
+    }
+
+    auto d = format_description(o, longest + OPTION_DESC_GAP, allowed);
+
+    result += fiter->first;
+    if (stringLength(fiter->first) > longest)
+    {
+      result += '\n';
+      result += toLocalString(std::string(longest + OPTION_DESC_GAP, ' '));
+    }
+    else
+    {
+      result += toLocalString(std::string(longest + OPTION_DESC_GAP -
+        stringLength(fiter->first),
+        ' '));
+    }
+    result += d;
+    result += '\n';
+
+    ++fiter;
+  }
+
+  return result;
+}
+
+// Appends the help for each group in `print_groups` (skipping groups that
+// render empty), separated by blank lines.
+inline
+void
+Options::generate_group_help
+(
+  String& result,
+  const std::vector<std::string>& print_groups
+) const
+{
+  for (size_t i = 0; i != print_groups.size(); ++i)
+  {
+    const String& group_help_text = help_one_group(print_groups[i]);
+    if (empty(group_help_text))
+    {
+      continue;
+    }
+    result += group_help_text;
+    if (i < print_groups.size() - 1)
+    {
+      result += '\n';
+    }
+  }
+}
+
+// Appends help for every registered group, in map (sorted) order.
+inline
+void
+Options::generate_all_groups_help(String& result) const
+{
+  std::vector<std::string> all_groups;
+  all_groups.reserve(m_help.size());
+
+  for (auto& group : m_help)
+  {
+    all_groups.push_back(group.first);
+  }
+
+  generate_group_help(result, all_groups);
+}
+
+// Builds the complete help text: banner, usage line, then either the
+// requested groups or all of them.
+inline
+std::string
+Options::help(const std::vector<std::string>& help_groups) const
+{
+  String result = m_help_string + "\nUsage:\n  " +
+    toLocalString(m_program) + " " + toLocalString(m_custom_help);
+
+  if (m_positional.size() > 0 && m_positional_help.size() > 0) {
+    result += " " + toLocalString(m_positional_help);
+  }
+
+  result += "\n\n";
+
+  if (help_groups.size() == 0)
+  {
+    generate_all_groups_help(result);
+  }
+  else
+  {
+    generate_group_help(result, help_groups);
+  }
+
+  return toUTF8String(result);
+}
+
+// Returns the names of all registered help groups.
+inline
+const std::vector<std::string>
+Options::groups() const
+{
+  std::vector<std::string> g;
+
+  std::transform(
+    m_help.begin(),
+    m_help.end(),
+    std::back_inserter(g),
+    [] (const std::map<std::string, HelpGroupDetails>::value_type& pair)
+    {
+      return pair.first;
+    }
+  );
+
+  return g;
+}
+
+// Returns the help metadata for one group; throws std::out_of_range when
+// the group does not exist (std::map::at semantics).
+inline
+const HelpGroupDetails&
+Options::group_help(const std::string& group) const
+{
+  return m_help.at(group);
+}
+
+}
+
+#endif //CXXOPTS_HPP_INCLUDED

+ 45 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/census_transform.hpp

@@ -0,0 +1,45 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_CENSUS_TRANSFORM_HPP
+#define SGM_CENSUS_TRANSFORM_HPP
+
+#include "stereo_vision/impl/device_buffer.hpp"
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+
+// Applies a census transform to an input image on the GPU, producing one
+// 32-bit feature descriptor (feature_type) per pixel in an internal
+// device buffer exposed via get_output().
+template <typename T>
+class CensusTransform {
+
+public:
+	using input_type = T;
+
+private:
+	// Device-side buffer holding one descriptor per pixel.
+	DeviceBuffer<feature_type> m_feature_buffer;
+
+public:
+	CensusTransform();
+
+	// Device pointer to the transformed features (valid after enqueue()).
+	const feature_type *get_output() const {
+		return m_feature_buffer.data();
+	}
+	
+	// Asynchronously runs the transform on `src` (device memory, `pitch`
+	// elements per row) into the internal buffer, on `stream`.
+	void enqueue(
+		const input_type *src,
+		int width,
+		int height,
+		int pitch,
+		cudaStream_t stream);
+
+};
+
+}
+
+#endif

+ 104 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/device_buffer.hpp

@@ -0,0 +1,104 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#ifndef SGM_DEVICE_BUFFER_HPP
+#define SGM_DEVICE_BUFFER_HPP
+
+#include <cstddef>
+
+#include "stereo_vision/internal.h"
+
+namespace adc {
+
+// Move-only RAII wrapper around a cudaMalloc'd array of `value_type`.
+template <typename T>
+class DeviceBuffer {
+
+public:
+	using value_type = T;
+
+private:
+	value_type *m_data;  // device pointer (nullptr when empty)
+	size_t m_size;       // element count of the current allocation
+
+public:
+	DeviceBuffer()
+		: m_data(nullptr)
+		, m_size(0)
+	{ }
+
+	// Allocates room for n elements up front.
+	explicit DeviceBuffer(size_t n)
+		: m_data(nullptr)
+		, m_size(0)
+	{
+		allocate(n);
+	}
+
+	DeviceBuffer(const DeviceBuffer&) = delete;
+
+	DeviceBuffer(DeviceBuffer&& obj) noexcept
+		: m_data(obj.m_data)
+		, m_size(obj.m_size)
+	{
+		obj.m_data = nullptr;
+		obj.m_size = 0;
+	}
+
+	~DeviceBuffer(){
+		destroy();
+	}
+
+
+	// Ensures capacity for n elements. An existing allocation that is
+	// already large enough is reused (m_size keeps the larger value).
+	void allocate(size_t n){
+		if(m_data && m_size >= n)
+			return;
+
+		destroy();
+		CudaSafeCall(cudaMalloc(reinterpret_cast<void **>(&m_data), sizeof(value_type) * n));
+		m_size = n;
+	}
+
+	// Frees the device allocation; safe to call repeatedly.
+	void destroy(){
+		if(m_data)
+			CudaSafeCall(cudaFree(m_data));
+
+		m_data = nullptr;
+		m_size = 0;
+	}
+
+	// Zero-fills the entire allocation.
+	void fillZero(){
+		CudaSafeCall(cudaMemset(m_data, 0, sizeof(value_type) * m_size));
+	}
+
+	DeviceBuffer& operator=(const DeviceBuffer&) = delete;
+
+	// Move assignment. Fix: release the currently held allocation before
+	// stealing the other buffer's pointer (the original overwrote m_data
+	// and leaked the old device memory), and guard against self-move.
+	DeviceBuffer& operator=(DeviceBuffer&& obj) noexcept {
+		if(this != &obj){
+			destroy();
+			m_data = obj.m_data;
+			m_size = obj.m_size;
+			obj.m_data = nullptr;
+			obj.m_size = 0;
+		}
+		return *this;
+	}
+
+
+	size_t size() const {
+		return m_size;
+	}
+
+	const value_type *data() const {
+		return m_data;
+	}
+
+	value_type *data(){
+		return m_data;
+	}
+
+};
+
+}
+
+#endif

+ 45 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/horizontal_path_aggregation.hpp

@@ -0,0 +1,45 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_HORIZONTAL_PATH_AGGREGATION_HPP
+#define SGM_HORIZONTAL_PATH_AGGREGATION_HPP
+
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+namespace path_aggregation {
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_left2right_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_right2left_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+}
+}
+
+#endif

+ 69 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/oblique_path_aggregation.hpp

@@ -0,0 +1,69 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_OBLIQUE_PATH_AGGREGATION_HPP
+#define SGM_OBLIQUE_PATH_AGGREGATION_HPP
+
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+namespace path_aggregation {
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_upleft2downright_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_upright2downleft_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_downright2upleft_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_downleft2upright_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+}
+}
+
+#endif

+ 52 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/path_aggregation.hpp

@@ -0,0 +1,52 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_PATH_AGGREGATION_HPP
+#define SGM_PATH_AGGREGATION_HPP
+
+#include <stereo_vision/sgm.h>
+#include "stereo_vision/impl/device_buffer.hpp"
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+
+// Runs SGM cost aggregation along up to MAX_NUM_PATHS scan directions,
+// each on its own CUDA stream, accumulating into one device cost buffer.
+template <size_t MAX_DISPARITY>
+class PathAggregation {
+
+private:
+	static const unsigned int MAX_NUM_PATHS = 8;
+
+	// Aggregated matching costs for all paths (device memory).
+	DeviceBuffer<cost_type> m_cost_buffer;
+	// One stream/event pair per aggregation direction.
+	cudaStream_t m_streams[MAX_NUM_PATHS];
+	cudaEvent_t m_events[MAX_NUM_PATHS];
+	
+public:
+	PathAggregation();
+	~PathAggregation();
+
+	// Device pointer to the aggregated costs (valid after enqueue()).
+	const cost_type *get_output() const {
+		return m_cost_buffer.data();
+	}
+
+	// Enqueues aggregation of the census features `left`/`right` on `stream`.
+	// p1/p2 are the SGM smoothness penalties; path_type selects the set of
+	// directions (presumably 4- vs 8-path — confirm against sgm.h).
+	void enqueue(
+		const feature_type *left,
+		const feature_type *right,
+		int width,
+		int height,
+		PathType path_type,
+		unsigned int p1,
+		unsigned int p2,
+		int min_disp,
+		cudaStream_t stream);
+
+};
+
+}
+
+#endif

+ 98 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/path_aggregation_common.hpp

@@ -0,0 +1,98 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_PATH_AGGREGATION_COMMON_HPP
+#define SGM_PATH_AGGREGATION_COMMON_HPP
+
+#include <cstdint>
+#include <cuda.h>
+#include "stereo_vision/impl/utility.hpp"
+
+namespace adc {
+namespace path_aggregation {
+
+// Per-thread state for the SGM dynamic-programming recursion along one
+// aggregation path. Each thread owns DP_BLOCK_SIZE consecutive disparity
+// costs; boundary values of neighbouring lanes are exchanged with warp
+// shuffles (SUBGROUP_SIZE lanes cooperate on one pixel).
+template <
+	unsigned int DP_BLOCK_SIZE,
+	unsigned int SUBGROUP_SIZE>
+struct DynamicProgramming {
+	static_assert(
+		DP_BLOCK_SIZE >= 2,
+		"DP_BLOCK_SIZE must be greater than or equal to 2");
+	static_assert(
+		(SUBGROUP_SIZE & (SUBGROUP_SIZE - 1)) == 0,	
+		"SUBGROUP_SIZE must be a power of 2");
+
+	// Minimum cost of the previous pixel, subtracted each step to keep
+	// the dp[] values small.
+	uint32_t last_min;
+	uint32_t dp[DP_BLOCK_SIZE];
+
+	__device__ DynamicProgramming()
+		: last_min(0)
+	{
+		for(unsigned int i = 0; i < DP_BLOCK_SIZE; ++i){ dp[i] = 0; }
+	}
+
+	// One recursion step: for each disparity k,
+	//   dp[k] = local_costs[k] + min(dp[k] - last_min,
+	//                                dp[k±1] - last_min + p1, p2).
+	// First/last elements fetch the adjacent lane's boundary value via
+	// shuffle; `mask` is the active-lane mask for the *_sync intrinsics.
+	__device__ void update(
+		uint32_t *local_costs, uint32_t p1, uint32_t p2, uint32_t mask)
+	{
+		const unsigned int lane_id = threadIdx.x % SUBGROUP_SIZE;
+
+		const auto dp0 = dp[0];
+		uint32_t lazy_out = 0, local_min = 0;
+		{
+			// k == 0: left neighbour lives in the previous lane.
+			const unsigned int k = 0;
+#if CUDA_VERSION >= 9000
+			const uint32_t prev =
+				__shfl_up_sync(mask, dp[DP_BLOCK_SIZE - 1], 1);
+#else
+			const uint32_t prev = __shfl_up(dp[DP_BLOCK_SIZE - 1], 1);
+#endif
+			uint32_t out = min(dp[k] - last_min, p2);
+			if(lane_id != 0){ out = min(out, prev - last_min + p1); }
+			out = min(out, dp[k + 1] - last_min + p1);
+			// dp[k] is still needed as dp[k-1] next iteration, so the
+			// write is deferred via lazy_out.
+			lazy_out = local_min = out + local_costs[k];
+		}
+		for(unsigned int k = 1; k + 1 < DP_BLOCK_SIZE; ++k){
+			uint32_t out = min(dp[k] - last_min, p2);
+			out = min(out, dp[k - 1] - last_min + p1);
+			out = min(out, dp[k + 1] - last_min + p1);
+			dp[k - 1] = lazy_out;
+			lazy_out = out + local_costs[k];
+			local_min = min(local_min, lazy_out);
+		}
+		{
+			// k == DP_BLOCK_SIZE-1: right neighbour lives in the next lane.
+			const unsigned int k = DP_BLOCK_SIZE - 1;
+#if CUDA_VERSION >= 9000
+			const uint32_t next = __shfl_down_sync(mask, dp0, 1);
+#else
+			const uint32_t next = __shfl_down(dp0, 1);
+#endif
+			uint32_t out = min(dp[k] - last_min, p2);
+			out = min(out, dp[k - 1] - last_min + p1);
+			if(lane_id + 1 != SUBGROUP_SIZE){
+				out = min(out, next - last_min + p1);
+			}
+			dp[k - 1] = lazy_out;
+			dp[k] = out + local_costs[k];
+			local_min = min(local_min, dp[k]);
+		}
+		// Minimum over the whole subgroup, shared by all cooperating lanes.
+		last_min = subgroup_min<SUBGROUP_SIZE>(local_min, mask);
+	}
+};
+
+// Bitmask with the low SIZE bits set, for use with *_sync warp intrinsics.
+template <unsigned int SIZE>
+__device__ unsigned int generate_mask()
+{
+	static_assert(SIZE <= 32, "SIZE must be less than or equal to 32");
+	// Computed in 64 bits so SIZE == 32 does not overflow the shift.
+	const auto low_bits = (1ull << SIZE) - 1u;
+	return static_cast<unsigned int>(low_bits);
+}
+
+}
+}
+
+#endif

+ 62 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/sgm.hpp

@@ -0,0 +1,62 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_SGM_HPP
+#define SGM_SGM_HPP
+
+#include <memory>
+#include <cstdint>
+#include <stereo_vision/sgm.h>
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+
+// Pimpl facade over the CUDA semi-global-matching implementation.
+// Computes left and right disparity maps from a rectified stereo pair.
+template <typename T, size_t MAX_DISPARITY>
+class SemiGlobalMatching {
+
+public:
+	using input_type = T;
+	using output_type = adc::output_type;
+
+private:
+	// Implementation detail hidden in the .cu file.
+	class Impl;
+	std::unique_ptr<Impl> m_impl;
+
+public:
+	SemiGlobalMatching();
+	~SemiGlobalMatching();
+
+	// Blocking variant: computes both disparity maps into dest_left /
+	// dest_right (device memory; `src_pitch`/`dst_pitch` in elements).
+	// NOTE(review): exact synchronisation semantics live in the
+	// implementation — confirm there.
+	void execute(
+		output_type *dest_left,
+		output_type *dest_right,
+		const input_type *src_left,
+		const input_type *src_right,
+		int width,
+		int height,
+		int src_pitch,
+		int dst_pitch,
+		const StereoSGM::Parameters& param);
+
+	// Asynchronous variant on a caller-provided CUDA stream.
+	void enqueue(
+		output_type *dest_left,
+		output_type *dest_right,
+		const input_type *src_left,
+		const input_type *src_right,
+		int width,
+		int height,
+		int src_pitch,
+		int dst_pitch,
+		const StereoSGM::Parameters& param,
+		cudaStream_t stream);
+
+};
+
+}
+
+#endif

+ 24 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/types.hpp

@@ -0,0 +1,24 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_TYPES_HPP
+#define SGM_TYPES_HPP
+
+#include <cstdint>
+
+namespace adc {
+
+using feature_type = uint32_t;
+using cost_type = uint8_t;
+using cost_sum_type = uint16_t;
+using output_type = uint16_t;
+
+}
+
+#endif

+ 241 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/utility.hpp

@@ -0,0 +1,241 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_UTILITY_HPP
+#define SGM_UTILITY_HPP
+
+#include <cuda.h>
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+
+static constexpr unsigned int WARP_SIZE = 32u;
+static constexpr output_type INVALID_DISP = static_cast<output_type>(-1);
+
+namespace detail {
+	// Warp-shuffle butterfly reductions, recursing on STEP (halved each
+	// level) until it reaches 1. The CUDA_VERSION branches select the
+	// *_sync intrinsics on CUDA 9+ and the legacy shuffles otherwise.
+	template <typename T, unsigned int GROUP_SIZE, unsigned int STEP>
+	struct subgroup_min_impl {
+		static __device__ T call(T x, uint32_t mask){
+#if CUDA_VERSION >= 9000
+			x = min(x, __shfl_xor_sync(mask, x, STEP / 2, GROUP_SIZE));
+#else
+			x = min(x, __shfl_xor(x, STEP / 2, GROUP_SIZE));
+#endif
+			return subgroup_min_impl<T, GROUP_SIZE, STEP / 2>::call(x, mask);
+		}
+	};
+	// Recursion terminator: a subgroup of width 1 is already reduced.
+	template <typename T, unsigned int GROUP_SIZE>
+	struct subgroup_min_impl<T, GROUP_SIZE, 1u> {
+		static __device__ T call(T x, uint32_t){
+			return x;
+		}
+	};
+	template <unsigned int GROUP_SIZE, unsigned int STEP>
+	struct subgroup_and_impl {
+		static __device__ bool call(bool x, uint32_t mask){
+#if CUDA_VERSION >= 9000
+			x &= __shfl_xor_sync(mask, x, STEP / 2, GROUP_SIZE);
+#else
+			x &= __shfl_xor(x, STEP / 2, GROUP_SIZE);
+#endif
+			return subgroup_and_impl<GROUP_SIZE, STEP / 2>::call(x, mask);
+		}
+	};
+	template <unsigned int GROUP_SIZE>
+	struct subgroup_and_impl<GROUP_SIZE, 1u> {
+		static __device__ bool call(bool x, uint32_t){
+			return x;
+		}
+	};
+}
+
+// Minimum of x across a GROUP_SIZE-wide subgroup of the warp.
+template <unsigned int GROUP_SIZE, typename T>
+__device__ inline T subgroup_min(T x, uint32_t mask){
+	return detail::subgroup_min_impl<T, GROUP_SIZE, GROUP_SIZE>::call(x, mask);
+}
+
+// Logical AND of x across a GROUP_SIZE-wide subgroup of the warp.
+template <unsigned int GROUP_SIZE>
+__device__ inline bool subgroup_and(bool x, uint32_t mask){
+	return detail::subgroup_and_impl<GROUP_SIZE, GROUP_SIZE>::call(x, mask);
+}
+
+// Reinterpreting load/store used for vectorized memory access.
+// NOTE(review): relies on `p` being suitably aligned for T — callers
+// are expected to guarantee that.
+template <typename T, typename S>
+__device__ inline T load_as(const S *p){
+	return *reinterpret_cast<const T *>(p);
+}
+
+template <typename T, typename S>
+__device__ inline void store_as(S *p, const T& x){
+	*reinterpret_cast<T *>(p) = x;
+}
+
+
+// Packs four integer lane values into one uint32 (one byte per lane,
+// values truncated to 8 bits).
+template <typename T>
+__device__ inline uint32_t pack_uint8x4(T x, T y, T z, T w){
+	uchar4 uint8x4;
+	uint8x4.x = static_cast<uint8_t>(x);
+	uint8x4.y = static_cast<uint8_t>(y);
+	uint8x4.z = static_cast<uint8_t>(z);
+	uint8x4.w = static_cast<uint8_t>(w);
+	return load_as<uint32_t>(&uint8x4);
+}
+
+
+// Loads N consecutive uint8s into N uint32 lanes using the widest
+// available vector load (uchar2/uchar4/uint2/uint4).
+template <unsigned int N>
+__device__ inline void load_uint8_vector(uint32_t *dest, const uint8_t *ptr);
+
+template <>
+__device__ inline void load_uint8_vector<1u>(uint32_t *dest, const uint8_t *ptr){
+	dest[0] = static_cast<uint32_t>(ptr[0]);
+}
+
+template <>
+__device__ inline void load_uint8_vector<2u>(uint32_t *dest, const uint8_t *ptr){
+	const auto uint8x2 = load_as<uchar2>(ptr);
+	dest[0] = uint8x2.x; dest[1] = uint8x2.y;
+}
+
+template <>
+__device__ inline void load_uint8_vector<4u>(uint32_t *dest, const uint8_t *ptr){
+	const auto uint8x4 = load_as<uchar4>(ptr);
+	dest[0] = uint8x4.x; dest[1] = uint8x4.y; dest[2] = uint8x4.z; dest[3] = uint8x4.w;
+}
+
+// Wider variants load 8/16 bytes at once and delegate to the 4-wide case.
+template <>
+__device__ inline void load_uint8_vector<8u>(uint32_t *dest, const uint8_t *ptr){
+	const auto uint32x2 = load_as<uint2>(ptr);
+	load_uint8_vector<4u>(dest + 0, reinterpret_cast<const uint8_t *>(&uint32x2.x));
+	load_uint8_vector<4u>(dest + 4, reinterpret_cast<const uint8_t *>(&uint32x2.y));
+}
+
+template <>
+__device__ inline void load_uint8_vector<16u>(uint32_t *dest, const uint8_t *ptr){
+	const auto uint32x4 = load_as<uint4>(ptr);
+	load_uint8_vector<4u>(dest +  0, reinterpret_cast<const uint8_t *>(&uint32x4.x));
+	load_uint8_vector<4u>(dest +  4, reinterpret_cast<const uint8_t *>(&uint32x4.y));
+	load_uint8_vector<4u>(dest +  8, reinterpret_cast<const uint8_t *>(&uint32x4.z));
+	load_uint8_vector<4u>(dest + 12, reinterpret_cast<const uint8_t *>(&uint32x4.w));
+}
+
+
+// Stores N uint32 lanes as N consecutive uint8s (values truncated to 8 bits).
+template <unsigned int N>
+__device__ inline void store_uint8_vector(uint8_t *dest, const uint32_t *ptr);
+
+template <>
+__device__ inline void store_uint8_vector<1u>(uint8_t *dest, const uint32_t *ptr){
+	dest[0] = static_cast<uint8_t>(ptr[0]);
+}
+
+// Stores ptr[0..1] as two consecutive uint8s with a single uchar2 write.
+// Bug fix: the y component previously duplicated ptr[0] instead of reading
+// ptr[1], corrupting every odd output byte (compare the uint16 variant,
+// which correctly uses ptr[1]).
+template <>
+__device__ inline void store_uint8_vector<2u>(uint8_t *dest, const uint32_t *ptr){
+	uchar2 uint8x2;
+	uint8x2.x = static_cast<uint8_t>(ptr[0]);
+	uint8x2.y = static_cast<uint8_t>(ptr[1]);
+	store_as<uchar2>(dest, uint8x2);
+}
+
+// Wider stores: pack lanes into uint32 words and write 4/8/16 bytes at once.
+template <>
+__device__ inline void store_uint8_vector<4u>(uint8_t *dest, const uint32_t *ptr){
+	store_as<uint32_t>(dest, pack_uint8x4(ptr[0], ptr[1], ptr[2], ptr[3]));
+}
+
+template <>
+__device__ inline void store_uint8_vector<8u>(uint8_t *dest, const uint32_t *ptr){
+	uint2 uint32x2;
+	uint32x2.x = pack_uint8x4(ptr[0], ptr[1], ptr[2], ptr[3]);
+	uint32x2.y = pack_uint8x4(ptr[4], ptr[5], ptr[6], ptr[7]);
+	store_as<uint2>(dest, uint32x2);
+}
+
+template <>
+__device__ inline void store_uint8_vector<16u>(uint8_t *dest, const uint32_t *ptr){
+	uint4 uint32x4;
+	uint32x4.x = pack_uint8x4(ptr[ 0], ptr[ 1], ptr[ 2], ptr[ 3]);
+	uint32x4.y = pack_uint8x4(ptr[ 4], ptr[ 5], ptr[ 6], ptr[ 7]);
+	uint32x4.z = pack_uint8x4(ptr[ 8], ptr[ 9], ptr[10], ptr[11]);
+	uint32x4.w = pack_uint8x4(ptr[12], ptr[13], ptr[14], ptr[15]);
+	store_as<uint4>(dest, uint32x4);
+}
+
+
+// Loads N consecutive uint16s into N uint32 lanes using the widest
+// available vector load (ushort2/ushort4/uint4).
+template <unsigned int N>
+__device__ inline void load_uint16_vector(uint32_t *dest, const uint16_t *ptr);
+
+template <>
+__device__ inline void load_uint16_vector<1u>(uint32_t *dest, const uint16_t *ptr){
+	dest[0] = static_cast<uint32_t>(ptr[0]);
+}
+
+template <>
+__device__ inline void load_uint16_vector<2u>(uint32_t *dest, const uint16_t *ptr){
+	const auto uint16x2 = load_as<ushort2>(ptr);
+	dest[0] = uint16x2.x; dest[1] = uint16x2.y;
+}
+
+template <>
+__device__ inline void load_uint16_vector<4u>(uint32_t *dest, const uint16_t *ptr){
+	const auto uint16x4 = load_as<ushort4>(ptr);
+	dest[0] = uint16x4.x; dest[1] = uint16x4.y; dest[2] = uint16x4.z; dest[3] = uint16x4.w;
+}
+
+// 8-wide: one 16-byte load split into four 2-wide unpacks.
+template <>
+__device__ inline void load_uint16_vector<8u>(uint32_t *dest, const uint16_t *ptr){
+	const auto uint32x4 = load_as<uint4>(ptr);
+	load_uint16_vector<2u>(dest + 0, reinterpret_cast<const uint16_t *>(&uint32x4.x));
+	load_uint16_vector<2u>(dest + 2, reinterpret_cast<const uint16_t *>(&uint32x4.y));
+	load_uint16_vector<2u>(dest + 4, reinterpret_cast<const uint16_t *>(&uint32x4.z));
+	load_uint16_vector<2u>(dest + 6, reinterpret_cast<const uint16_t *>(&uint32x4.w));
+}
+
+
+// Stores N uint32 lanes as N consecutive uint16s (values truncated to
+// 16 bits), using the widest available vector store.
+template <unsigned int N>
+__device__ inline void store_uint16_vector(uint16_t *dest, const uint32_t *ptr);
+
+template <>
+__device__ inline void store_uint16_vector<1u>(uint16_t *dest, const uint32_t *ptr){
+	dest[0] = static_cast<uint16_t>(ptr[0]);
+}
+
+template <>
+__device__ inline void store_uint16_vector<2u>(uint16_t *dest, const uint32_t *ptr){
+	ushort2 uint16x2;
+	uint16x2.x = static_cast<uint16_t>(ptr[0]);
+	uint16x2.y = static_cast<uint16_t>(ptr[1]);
+	store_as<ushort2>(dest, uint16x2);
+}
+
+template <>
+__device__ inline void store_uint16_vector<4u>(uint16_t *dest, const uint32_t *ptr){
+	ushort4 uint16x4;
+	uint16x4.x = static_cast<uint16_t>(ptr[0]);
+	uint16x4.y = static_cast<uint16_t>(ptr[1]);
+	uint16x4.z = static_cast<uint16_t>(ptr[2]);
+	uint16x4.w = static_cast<uint16_t>(ptr[3]);
+	store_as<ushort4>(dest, uint16x4);
+}
+
+// 8-wide: pack pairs into a uint4 and issue one 16-byte store.
+template <>
+__device__ inline void store_uint16_vector<8u>(uint16_t *dest, const uint32_t *ptr){
+	uint4 uint32x4;
+	store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.x), &ptr[0]);
+	store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.y), &ptr[2]);
+	store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.z), &ptr[4]);
+	store_uint16_vector<2u>(reinterpret_cast<uint16_t *>(&uint32x4.w), &ptr[6]);
+	store_as<uint4>(dest, uint32x4);
+}
+
+template <>
+__device__ inline void store_uint16_vector<16u>(uint16_t *dest, const uint32_t *ptr){
+	store_uint16_vector<8u>(dest + 0, ptr + 0);
+	store_uint16_vector<8u>(dest + 8, ptr + 8);
+}
+}
+
+#endif

+ 45 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/vertical_path_aggregation.hpp

@@ -0,0 +1,45 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+
+#ifndef SGM_VERTICAL_PATH_AGGREGATION_HPP
+#define SGM_VERTICAL_PATH_AGGREGATION_HPP
+
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+namespace path_aggregation {
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_up2down_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+template <unsigned int MAX_DISPARITY>
+void enqueue_aggregate_down2up_path(
+	cost_type *dest,
+	const feature_type *left,
+	const feature_type *right,
+	int width,
+	int height,
+	unsigned int p1,
+	unsigned int p2,
+	int min_disp,
+	cudaStream_t stream);
+
+}
+}
+
+#endif

+ 62 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/impl/winner_takes_all.hpp

@@ -0,0 +1,62 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#ifndef SGM_WINNER_TAKES_ALL_HPP
+#define SGM_WINNER_TAKES_ALL_HPP
+
+#include <stereo_vision/sgm.h>
+#include "stereo_vision/impl/device_buffer.hpp"
+#include "stereo_vision/impl/types.hpp"
+
+namespace adc {
+
+template <size_t MAX_DISPARITY>
+class WinnerTakesAll {
+
+private:
+	DeviceBuffer<output_type> m_left_buffer;
+	DeviceBuffer<output_type> m_right_buffer;
+
+public:
+	WinnerTakesAll();
+
+	const output_type *get_left_output() const {
+		return m_left_buffer.data();
+	}
+
+	const output_type *get_right_output() const {
+		return m_right_buffer.data();
+	}
+
+	void enqueue(
+		const cost_type *src,
+		int width,
+		int height,
+		int pitch,
+		float uniqueness,
+		bool subpixel,
+		PathType path_type,
+		cudaStream_t stream);
+
+	void enqueue(
+		output_type *left,
+		output_type *right,
+		const cost_type *src,
+		int width,
+		int height,
+		int pitch,
+		float uniqueness,
+		bool subpixel,
+		PathType path_type,
+		cudaStream_t stream);
+
+};
+
+}
+
+#endif

+ 48 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/internal.h

@@ -0,0 +1,48 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#pragma once
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <assert.h>
+
+#include <stdexcept>
+
+#include <cuda_runtime.h>
+#include <cuda_runtime_api.h>
+
+#define CudaSafeCall(error) adc::details::cuda_safe_call(error, __FILE__, __LINE__)
+
+#define CudaKernelCheck() CudaSafeCall(cudaGetLastError())
+
+namespace adc {
+	namespace details {
+
+		void median_filter(const uint8_t* d_src, uint8_t* d_dst, int width, int height, int pitch);
+		void median_filter(const uint16_t* d_src, uint16_t* d_dst, int width, int height, int pitch);
+
+		void check_consistency(uint8_t* d_left_disp, const uint8_t* d_right_disp, const void* d_src_left, int width, int height, int depth_bits, int src_pitch, int dst_pitch, bool subpixel, int LR_max_diff);
+		void check_consistency(uint16_t* d_left_disp, const uint16_t* d_right_disp, const void* d_src_left, int width, int height, int depth_bits, int src_pitch, int dst_pitch, bool subpixel, int LR_max_diff);
+
+		void correct_disparity_range(uint16_t* d_disp, int width, int height, int pitch, bool subpixel, int min_disp);
+
+		void cast_16bit_8bit_array(const uint16_t* arr16bits, uint8_t* arr8bits, int num_elements);
+		void cast_8bit_16bit_array(const uint8_t* arr8bits, uint16_t* arr16bits, int num_elements);
+
+		inline void cuda_safe_call(cudaError error, const char *file, const int line)
+		{
+			if (error != cudaSuccess) {
+				fprintf(stderr, "cuda error %s : %d %s\n", file, line, cudaGetErrorString(error));
+				exit(-1);
+			}
+		}
+
+	}
+}

+ 171 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm.h

@@ -0,0 +1,171 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#pragma once
+
+/**
+* @mainpage stereo-sgm
+* See adc::StereoSGM
+*/
+
+/**
+* @file sgm.h
+* stereo-sgm main header
+*/
+
+#include "stereo_vision/sgm_config.h"
+
+#if defined(SGM_SHARED)
+	#if defined(WIN32) || defined(_WIN32)
+		#if defined sgm_EXPORTS
+			#define SGM_API __declspec(dllexport)
+		#else
+			#define SGM_API __declspec(dllimport)
+		#endif
+	#else
+		#define SGM_API __attribute__((visibility("default")))
+	#endif
+#else
+	#define SGM_API
+#endif
+
+namespace adc {
+	struct CudaStereoSGMResources;
+
+	/**
+	* @enum DST_TYPE
+	* Indicates input/output pointer type.
+	*/
+	enum EXECUTE_INOUT {
+		EXECUTE_INOUT_HOST2HOST = (0 << 1) | 0,
+		EXECUTE_INOUT_HOST2CUDA = (1 << 1) | 0,
+		EXECUTE_INOUT_CUDA2HOST = (0 << 1) | 1,
+		EXECUTE_INOUT_CUDA2CUDA = (1 << 1) | 1,
+	};
+
+	/**
+	* Indicates number of scanlines which will be used.
+	*/
+	enum class PathType {
+		SCAN_4PATH, //!< Horizontal and vertical paths.
+		SCAN_8PATH  //!< Horizontal, vertical and oblique paths.
+	};
+
+	/**
+	* @brief StereoSGM class
+	*/
+	class StereoSGM {
+	public:
+		static const int SUBPIXEL_SHIFT = 4;
+		static const int SUBPIXEL_SCALE = (1 << SUBPIXEL_SHIFT);
+
+		/**
+		* @brief Available options for StereoSGM
+		*/
+		struct Parameters
+		{
+			int P1;
+			int P2;
+			float uniqueness;
+			bool subpixel;
+			PathType path_type;
+			int min_disp;
+			int LR_max_diff;
+
+			/**
+			* @param P1 Penalty on the disparity change by plus or minus 1 between neighbor pixels.
+			* @param P2 Penalty on the disparity change by more than 1 between neighbor pixels.
+			* @param uniqueness Margin in ratio by which the best cost function value should be at least second one.
+			* @param subpixel Disparity value has 4 fractional bits if subpixel option is enabled.
+			* @param path_type Number of scanlines used in cost aggregation.
+			* @param min_disp Minimum possible disparity value.
+			* @param LR_max_diff Acceptable difference pixels which is used in LR check consistency. LR check consistency will be disabled if this value is set to negative.
+			*/
+			Parameters(int P1 = 10, int P2 = 120, float uniqueness = 0.95f, bool subpixel = false, int min_disp = 0, int LR_max_diff = 1, PathType path_type = PathType::SCAN_8PATH)
+				: P1(P1)
+				, P2(P2)
+				, uniqueness(uniqueness)
+				, subpixel(subpixel)
+				, path_type(path_type)
+				, min_disp(min_disp)
+				, LR_max_diff(LR_max_diff)
+			{ }
+		};
+
+		/**
+		* @param width Processed image's width.
+		* @param height Processed image's height.
+		* @param disparity_size It must be 64, 128 or 256.
+		* @param input_depth_bits Processed image's bits per pixel. It must be 8 or 16.
+		* @param output_depth_bits Disparity image's bits per pixel. It must be 8 or 16.
+		* @param inout_type Specify input/output pointer type. See adc::EXECUTE_INOUT.
+		* @attention
+		* output_depth_bits must be set to 16 when subpixel is enabled.
+		*/
+		SGM_API StereoSGM(int width, int height, int disparity_size, int input_depth_bits, int output_depth_bits, 
+			EXECUTE_INOUT inout_type, const Parameters& param = Parameters());
+
+		/**
+		* @param width Processed image's width.
+		* @param height Processed image's height.
+		* @param disparity_size It must be 64, 128 or 256.
+		* @param input_depth_bits Processed image's bits per pixel. It must be 8 or 16.
+		* @param output_depth_bits Disparity image's bits per pixel. It must be 8 or 16.
+		* @param src_pitch Source image's pitch (pixels).
+		* @param dst_pitch Destination image's pitch (pixels).
+		* @param inout_type Specify input/output pointer type. See adc::EXECUTE_INOUT.
+		* @attention
+		* output_depth_bits must be set to 16 when subpixel is enabled.
+		*/
+		SGM_API StereoSGM(int width, int height, int disparity_size, int input_depth_bits, int output_depth_bits, int src_pitch, int dst_pitch,
+			EXECUTE_INOUT inout_type, const Parameters& param = Parameters());
+
+		SGM_API virtual ~StereoSGM();
+
+		/**
+		* Execute stereo semi global matching.
+		* @param left_pixels  A pointer stored input left image.
+		* @param right_pixels A pointer stored input right image.
+		* @param dst          Output pointer. User must allocate enough memory.
+		* @attention
+		* You need to allocate dst memory at least width x height x sizeof(element_type) bytes.
+		* The element_type is uint8_t for output_depth_bits == 8 and uint16_t for output_depth_bits == 16.
+		* Note that dst element value would be multiplied StereoSGM::SUBPIXEL_SCALE if subpixel option was enabled.
+		* Value of Invalid disparity is equal to return value of `get_invalid_disparity` member function.
+		*/
+		SGM_API void execute(const void* left_pixels, const void* right_pixels, void* dst);
+
+		/**
+		* Generate invalid disparity value from Parameter::min_disp and Parameter::subpixel
+		* @attention
+		* Cast properly if you receive disparity value as `unsigned` type.
+		* See sample/movie for an example of this.
+		*/
+		SGM_API int get_invalid_disparity() const;
+
+	private:
+		StereoSGM(const StereoSGM&);
+		StereoSGM& operator=(const StereoSGM&);
+
+		void cuda_resource_allocate();
+
+		CudaStereoSGMResources* cu_res_;
+
+		int width_;
+		int height_;
+		int disparity_size_;
+		int input_depth_bits_;
+		int output_depth_bits_;
+		int src_pitch_;
+		int dst_pitch_;
+		EXECUTE_INOUT inout_type_;
+		Parameters param_;
+	};
+}
+
+#include "sgm_wrapper.h"

+ 9 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm_config.h

@@ -0,0 +1,9 @@
+#ifndef __SGM_CONFIG_H__
+#define __SGM_CONFIG_H__
+
+#define SGM_SHARED
+
+
+/* #undef BUILD_OPENCV_WRAPPER */
+
+#endif 

+ 9 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm_config.h.in

@@ -0,0 +1,9 @@
+#ifndef __SGM_CONFIG_H__
+#define __SGM_CONFIG_H__
+
+#cmakedefine SGM_SHARED
+
+
+#cmakedefine BUILD_OPENCV_WRAPPER
+
+#endif 

+ 80 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/include/stereo_vision/sgm_wrapper.h

@@ -0,0 +1,80 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com), phone:17600128901
+* \date 
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+#ifndef __LIBSGM_WRAPPER_H__
+#define __LIBSGM_WRAPPER_H__
+
+#include "stereo_vision/sgm.h"
+#include <memory>
+#ifdef BUILD_OPENCV_WRAPPER
+#include <opencv2/core/cuda.hpp>
+#endif
+
+namespace adc {
+	/**
+	 * @brief SGMWrapper class which is wrapper for adc::StereoSGM.
+	 */
+	class SGMWrapper {
+	public:
+		/**
+		 * @param numDisparity Maximum disparity minus minimum disparity.
+		 * @param P1 Penalty on the disparity change by plus or minus 1 between neighbor pixels.
+		 * @param P2 Penalty on the disparity change by more than 1 between neighbor pixels.
+		 * @param uniquenessRatio Margin in ratio by which the best cost function value should be at least second one.
+		 * @param subpixel Disparity value has 4 fractional bits if subpixel option is enabled.
+		 * @param pathType Number of scanlines used in cost aggregation.
+		 * @param minDisparity Minimum possible disparity value.
+		 * @param lrMaxDiff Acceptable difference pixels which is used in LR check consistency. LR check consistency will be disabled if this value is set to negative.
+		 */
+		SGM_API SGMWrapper(int numDisparity = 128, int P1 = 10, int P2 = 120, float uniquenessRatio = 0.95f,
+				bool subpixel = false, PathType pathType = PathType::SCAN_8PATH, int minDisparity = 0, int lrMaxDiff = 1);
+		SGM_API ~SGMWrapper();
+
+		SGM_API int getNumDisparities() const;
+		SGM_API int getP1() const;
+		SGM_API int getP2() const;
+		SGM_API float getUniquenessRatio() const;
+		SGM_API bool hasSubpixel() const;
+		SGM_API PathType getPathType() const;
+		SGM_API int getMinDisparity() const;
+		SGM_API int getLrMaxDiff() const;
+		SGM_API int getInvalidDisparity() const;
+
+#ifdef BUILD_OPENCV_WRAPPER
+		/**
+		 * Execute stereo semi global matching via wrapper class.
+		 * @param I1        Input left image.  Image's type is must be CV_8U or CV_16U
+		 * @param I2        Input right image.  Image's size and type must be same with I1.
+		 * @param disparity Output image.  Its memory will be allocated automatically dependent on input image size.
+		 * @attention
+		 * type of output image `disparity` is CV_16S.
+		 * Note that disparity element value would be multiplied StereoSGM::SUBPIXEL_SCALE if subpixel option was enabled.
+		 */
+		SGM_API void execute(const cv::cuda::GpuMat& I1, const cv::cuda::GpuMat& I2, cv::cuda::GpuMat& disparity);
+
+		/**
+		 * Execute stereo semi global matching via wrapper class.
+		 * @param I1        Input left image.  Image's type is must be CV_8U or CV_16U.
+		 * @param I2        Input right image.  Image's size and type must be same with I1.
+		 * @param disparity Output image.  Its memory will be allocated automatically dependent on input image size.
+		 * @attention
+		 * type of output image `disparity` is CV_16S.
+		 * Note that disparity element value would be multiplied StereoSGM::SUBPIXEL_SCALE if subpixel option was enabled.
+		 */
+		SGM_API void execute(const cv::Mat& I1, const cv::Mat& I2, cv::Mat& disparity);
+#endif // BUILD_OPENCV_WRAPPER
+
+	private:
+		struct Creator;
+		std::unique_ptr<adc::StereoSGM> sgm_;
+		int numDisparity_;
+		adc::StereoSGM::Parameters param_;
+		std::unique_ptr<Creator> prev_;
+	};
+}
+
+#endif // __LIBSGM_WRAPPER_H__

+ 12 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/detection.launch

@@ -0,0 +1,12 @@
+<launch>
+    <arg name="param_root" default="$(find front_vision)/params" />
+    <node name="detection_2d_node" pkg="front_vision" type="detection_trt7_node" output="screen">
+        <param name="binocular_topic" value="/camera/binocular" />
+        <param name="root_path" value="$(arg param_root)" />
+        <param name="detection_config" value="obstacle_detection_trt7.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="left_camera_params_filename" value="left.yaml" />
+        <param name="publis_origin_img" value="true" />
+        <param name="origin_imgage_topic" value="/detection/image" />
+    </node>
+</launch>

+ 12 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/detection_depth_fusion.launch

@@ -0,0 +1,12 @@
+<launch>
+    <arg name="param_root" default="$(find front_vision)/params" />
+    <node name="detection_depth_fusion_node" pkg="front_vision" type="detection_depth_fusion_node" output="screen">
+        <param name="detection_bbox2d_topic" value="/detection/bbox2d" />
+        <param name="stereo_depth_topic" value="/stereo/depth" />
+        <param name="detection_image_topic" value="/detection/image" />
+        <param name="vis_detection" value="true" />
+    </node>
+    <node name="tf_fusion_map" pkg="tf" type="static_transform_publisher" args='0.0 0.0 0.0 0.0 0.0 0.0 map fusion 100' />
+    <node name="tf_mark_map" pkg="tf" type="static_transform_publisher" args='0.0 0.0 0.0 0.0 0.0 0.0 map /obs_vis 100' />
+
+</launch>

+ 12 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/detection_trt7.launch

@@ -0,0 +1,12 @@
+<launch>
+    <arg name="param_root" default="$(find front_vision)/params" />
+    <node name="detection_2d_node" pkg="front_vision" type="detection_trt7_node" output="screen">
+        <param name="binocular_topic" value="/camera/binocular" />
+        <param name="root_path" value="$(arg param_root)" />
+        <param name="detection_config" value="obstacle_detection_trt7.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="left_camera_params_filename" value="left.yaml" />
+        <param name="publis_origin_img" value="true" />
+        <param name="origin_imgage_topic" value="/detection/image" />
+    </node>
+</launch>

+ 13 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/lane_depth_fusion.launch

@@ -0,0 +1,13 @@
+<launch>
+    <arg name="param_root" default="$(find front_vision)/params" />
+    <node name="lane_depth_fusion_node" pkg="front_vision" type="lane_depth_fusion_node" output="screen">
+        <param name="detection_lanes2d_topic" value="/detection/lanes2d" />
+	<param name="detection_lanes3d_topic" value="/detection/lanes3d" />
+	<param name="lanes3d_point_topic" value="/detection/lanes3d_point" />
+        <param name="stereo_depth_topic" value="/stereo/depth" />
+        <param name="detection_image_topic" value="/detection/image_lane" />
+        <param name="vis_detection" value="true" />
+	<param name="vis_lanes3d_point" value="true" />
+    </node>
+    <node name="tf_lane_fusion_map" pkg="tf" type="static_transform_publisher" args='0.0 0.0 0.0 0.0 0.0 0.0 map lanes3d_point 100' />
+</launch>

+ 13 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/lane_detection.launch

@@ -0,0 +1,13 @@
+<launch>
+    <arg name="param_root" default="$(find front_vision)/params" />
+    <node name="lane_detection_node" pkg="front_vision" type="lane_detection_node" output="screen">
+        <param name="binocular_topic" value="/camera/binocular" />
+        <param name="root_path" value="$(arg param_root)" />
+        <param name="detection_config" value="lane_detection.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="left_camera_params_filename" value="left.yaml" />
+        <param name="publis_origin_img" value="true" />
+        <param name="publis_lanes_sharememory" value="false" />
+        <param name="origin_imgage_topic" value="/detection/image_lane" />
+    </node>
+</launch>

+ 59 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/smart_eys.launch

@@ -0,0 +1,59 @@
+<launch>
+    <arg name="param_root" default="$(find front_vision)/params" />
+
+    <node name="binocular_node" pkg="binocular_camera" type="binocular_camera_node" output="screen">
+        <param name="left_camera_topic" value="/camera/left" />
+        <param name="right_camera_topic" value="/camera/right" />
+        <param name="pub_rate" value="60.0" />
+        <param name="vis" value="false" />
+        <param name="split_publish" value="flase" />
+     </node>
+
+     <node name="stereo_match_node" pkg="front_vision" type="stereo_node" output="screen">
+        <param name="binocular_camera_topic" value="/camera/binocular" />
+        <param name="stereo_depth_topic" value="/stereo/depth" />
+        <param name="root_path" value="$(arg param_root)" />
+        <param name="steroe_config" value="stereo.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="send_vis" value="true" />
+        <param name="points_vis_topic" value="/stereo/point_cloud" />
+        <param name="max_depth" value="15.0" />
+        <param name="image_width" value="640" />
+        <param name="image_height" value="480" />
+    </node>
+
+    <node name="detection_2d_node" pkg="front_vision" type="detection_trt7_node" output="screen">
+        <param name="binocular_topic" value="/camera/binocular" />
+        <param name="root_path" value="$(arg param_root)" />
+        <param name="detection_config" value="obstacle_detection_trt7.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="left_camera_params_filename" value="left.yaml" />
+        <param name="publis_origin_img" value="true" />
+        <param name="origin_imgage_topic" value="/detection/image" />
+    </node>
+
+
+    <node name="lane_detection_node" pkg="front_vision" type="lane_detection_node" output="screen">
+        <param name="binocular_topic" value="/camera/binocular" />
+        <param name="root_path" value="$(arg param_root)" />
+        <param name="detection_config" value="lane_detection.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="left_camera_params_filename" value="left.yaml" />
+        <param name="publis_origin_img" value="true" />
+        <param name="origin_imgage_topic" value="/detection/image_lane" />
+    </node>
+
+
+    <node name="detection_depth_fusion_node" pkg="front_vision" type="detection_depth_fusion_node" output="screen">
+        <param name="detection_bbox2d_topic" value="/detection/bbox2d" />
+        <param name="stereo_depth_topic" value="/stereo/depth" />
+        <param name="detection_image_topic" value="/detection/image" />
+        <param name="vis_detection" value="true" />
+    </node>
+
+
+    <node name="tf_fusion_map" pkg="tf" type="static_transform_publisher" args='0.0 0.0 0.0 0.0 0.0 0.0 map fusion 100' />
+    <node name="tf_mark_map" pkg="tf" type="static_transform_publisher" args='0.0 0.0 0.0 0.0 0.0 0.0 map /obs_vis 100' />
+    <node name="tf_stereo_map" pkg="tf" type="static_transform_publisher" args='0.0 0.0 0.0 0.0 0.0 0.0 map stereo_camera  100' />
+
+</launch>

+ 17 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/launch/stereo_match.launch

@@ -0,0 +1,17 @@
+<launch>
+    <arg name="param_root" default="$(find front_vision)/params" />
+    <node name="stereo_match_node" pkg="front_vision" type="stereo_node" output="screen">
+        <param name="binocular_camera_topic" value="/camera/binocular" />
+        <param name="stereo_depth_topic" value="/stereo/depth" />
+        <param name="root_path" value="$(arg param_root)" />
+        <param name="steroe_config" value="stereo.yaml" />
+        <param name="hz" value="30.0" />
+        <param name="send_vis" value="true" />
+        <param name="points_vis_topic" value="/stereo/point_cloud" />
+        <param name="max_depth" value="25.0" />
+        <param name="image_width" value="640" />
+        <param name="image_height" value="480" />
+    </node>
+    
+    <node name="tf_stereo_map" pkg="tf" type="static_transform_publisher" args='0.0 0.0 0.0 0.0 0.0 0.0 map stereo_camera  100' />
+</launch>

+ 80 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/package.xml

@@ -0,0 +1,80 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>front_vision</name>
+  <version>0.0.0</version>
+  <description>The front_vision package</description>
+
+  <!-- One maintainer tag required, multiple allowed, one person per tag -->
+  <!-- Example:  -->
+  <!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
+  <maintainer email="nvidia@todo.todo">nvidia</maintainer>
+
+
+  <!-- One license tag required, multiple allowed, one license per tag -->
+  <!-- Commonly used license strings: -->
+  <!--   BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
+  <license>TODO</license>
+
+
+  <!-- Url tags are optional, but multiple are allowed, one per tag -->
+  <!-- Optional attribute type can be: website, bugtracker, or repository -->
+  <!-- Example: -->
+  <!-- <url type="website">http://wiki.ros.org/front_vision</url> -->
+
+
+  <!-- Author tags are optional, multiple are allowed, one per tag -->
+  <!-- Authors do not have to be maintainers, but could be -->
+  <!-- Example: -->
+  <!-- <author email="jane.doe@example.com">Jane Doe</author> -->
+
+
+  <!-- The *depend tags are used to specify dependencies -->
+  <!-- Dependencies can be catkin packages or system dependencies -->
+  <!-- Examples: -->
+  <!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
+  <!--   <depend>roscpp</depend> -->
+  <!--   Note that this is equivalent to the following: -->
+  <!--   <build_depend>roscpp</build_depend> -->
+  <!--   <exec_depend>roscpp</exec_depend> -->
+  <!-- Use build_depend for packages you need at compile time: -->
+  <!--   <build_depend>message_generation</build_depend> -->
+  <!-- Use build_export_depend for packages you need in order to build against this package: -->
+  <!--   <build_export_depend>message_generation</build_export_depend> -->
+  <!-- Use buildtool_depend for build tool packages: -->
+  <!--   <buildtool_depend>catkin</buildtool_depend> -->
+  <!-- Use exec_depend for packages you need at runtime: -->
+  <!--   <exec_depend>message_runtime</exec_depend> -->
+  <!-- Use test_depend for packages you need only for testing: -->
+  <!--   <test_depend>gtest</test_depend> -->
+  <!-- Use doc_depend for packages you need only for building documentation: -->
+  <!--   <doc_depend>doxygen</doc_depend> -->
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>cv_bridge</build_depend>
+  <build_depend>image_transport</build_depend>
+  <build_depend>roscpp</build_depend>
+  <build_depend>pcl_ros</build_depend>
+  <build_depend>sensor_msgs</build_depend>
+  <build_depend>std_msgs</build_depend>
+  <build_depend>adc_msg</build_depend>
+  <build_export_depend>cv_bridge</build_export_depend>
+  <build_export_depend>image_transport</build_export_depend>
+  <build_export_depend>roscpp</build_export_depend>
+  <build_export_depend>pcl_ros</build_export_depend>
+  <build_export_depend>sensor_msgs</build_export_depend>
+  <build_export_depend>std_msgs</build_export_depend>
+  <build_export_depend>adc_msg</build_export_depend>
+  <exec_depend>cv_bridge</exec_depend>
+  <exec_depend>image_transport</exec_depend>
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>pcl_ros</exec_depend>
+  <exec_depend>sensor_msgs</exec_depend>
+  <exec_depend>std_msgs</exec_depend>
+  <exec_depend>adc_msg</exec_depend>
+
+
+  <!-- The export tag contains other, unspecified, tags -->
+  <export>
+    <!-- Other tools can request additional information be placed here -->
+
+  </export>
+</package>

+ 80 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/coco.names

@@ -0,0 +1,80 @@
+person
+bicycle
+car
+motorbike
+aeroplane
+bus
+train
+truck
+boat
+traffic light
+fire hydrant
+stop sign
+parking meter
+bench
+bird
+cat
+dog
+horse
+sheep
+cow
+elephant
+bear
+zebra
+giraffe
+backpack
+umbrella
+handbag
+tie
+suitcase
+frisbee
+skis
+snowboard
+sports ball
+kite
+baseball bat
+baseball glove
+skateboard
+surfboard
+tennis racket
+bottle
+wine glass
+cup
+fork
+knife
+spoon
+bowl
+banana
+apple
+sandwich
+orange
+broccoli
+carrot
+hot dog
+pizza
+donut
+cake
+chair
+sofa
+pottedplant
+bed
+diningtable
+toilet
+tvmonitor
+laptop
+mouse
+remote
+keyboard
+cell phone
+microwave
+oven
+toaster
+sink
+refrigerator
+book
+clock
+vase
+scissors
+teddy bear
+hair drier
+toothbrush

+ 20 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/front_vision.yaml

@@ -0,0 +1,20 @@
+obstacle_detector_config: "obstacle_detection_trt7.yaml"
+mark_ground_detector_config: "" #"mark_ground_detection.yaml"  
+cctsdb_detector_config: "" #"cctsdb_detection.yaml" 
+stereo_vison_config: "" # "stereo.yaml"
+left_camera_params: "left.yaml"
+used_loop_fetch: true
+mono: false
+crop_top: 200
+crop_bottom: 570
+hz: 30.0
+vis_mark_ground: false
+vis_pointcloud: true
+vis_obstacle: false
+fusion_depth_and_obstacle: false
+topic_obstcle: "vision_obstacles"
+topic_mark_ground: "vision_mark_ground"
+topic_cctsdb: "vision_cctsdb"
+video_path: "/home/nvidia/Desktop/out.avi"
+save_origin_video: /home/nvidia/workspace/front_vision/video/o_202000508_test.avi
+save_rect_video: /home/nvidia/workspace/front_vision/video/20200508_test.avi

+ 5 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/lane_detection.yaml

@@ -0,0 +1,5 @@
+onnx_path: lanenet.onnx
+engine_path: lanenet.trt
+width: "512"
+hight: "256"
+

+ 29 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/left.yaml

@@ -0,0 +1,29 @@
+image_width: 640 #1920
+image_height: 480 #1080
+camera_name: narrow_stereo/left
+camera_matrix:
+  rows: 3
+  cols: 3
+  data: [ 9.9101061824493968e+02, 0., 3.2240947662064639e+02, 0.,
+       1.3204614936918008e+03, 2.1347486819969237e+02, 0., 0., 1. ]
+distortion_model: plumb_bob
+distortion_coefficients:
+  rows: 1
+  cols: 5
+  data:  [ -2.6197231183458036e-01, -2.2243349973001195e-01,
+       5.8119933644220419e-04, 2.9601287085331803e-03,
+       1.0858928904574114e+00 ]
+rectification_matrix:
+  rows: 3
+  cols: 3
+  data:  [ 9.9872169358702434e-01, 1.1853577793757102e-02,
+       4.9137271517180309e-02, -1.1986936051938402e-02,
+       9.9992522522184113e-01, 2.4201919628875092e-03,
+       -4.9104909354895344e-02, -3.0061035474238970e-03,
+       9.9878910247294428e-01 ]
+projection_matrix:
+  rows: 3
+  cols: 4
+  data:  [ 1.3395593177507055e+03, 0., 2.5359675598144531e+02, 0., 0.,
+       1.3395593177507055e+03, 2.1260866546630859e+02, 0., 0., 0., 1.,
+       0. ]

+ 11 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/obstacle_detection_trt7.yaml

@@ -0,0 +1,11 @@
+class_name_path: "coco.names"
+network_type: "yolov4"
+config_path: "yolov4.cfg"
+weights_path: "yolov4.weights"
+device_type: "kGPU"
+input_blob_name: "data"
+engine_path: "yolov4.engine"
+nms: "0.4"
+ignore: "0.4"
+track_flag: "false"
+

+ 30 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/right.yaml

@@ -0,0 +1,30 @@
+image_width: 640 #1920
+image_height: 480 #1080
+camera_name: narrow_stereo/right
+camera_matrix:
+  rows: 3
+  cols: 3
+  data: [ 9.9101061824493968e+02, 0., 3.2270462356042435e+02, 0.,
+       1.3204614936918008e+03, 2.1354570585549320e+02, 0., 0., 1. ]
+distortion_model: plumb_bob
+distortion_coefficients:
+  rows: 1
+  cols: 5
+  data: [ -1.4032765745160575e-01, -3.6297664760940944e+00,
+       -9.6464055480704905e-04, 2.2417493626967905e-03,
+       2.4812514199331563e+01 ]
+rectification_matrix:
+  rows: 3
+  cols: 3
+  data: [ 9.9875544752495782e-01, 1.5770579101557526e-02,
+       4.7316433445713187e-02, -1.5673667081344166e-02,
+       9.9987423557256783e-01, -2.4185116076583229e-03,
+       -4.7348624050169347e-02, 1.6738796177464019e-03,
+       9.9887702242447285e-01 ]
+projection_matrix:
+  rows: 3
+  cols: 4
+  data: [ 1.3395593177507055e+03, 0., 2.5359675598144531e+02,
+       -1.7335197416076241e+02, 0., 1.3395593177507055e+03,
+       2.1260866546630859e+02, 0., 0., 0., 1., 0. ]
+

+ 19 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/stereo.yaml

@@ -0,0 +1,19 @@
+disp_size: 256
+output_depth: 16
+subpixel: true
+p1: 80
+p2: 100
+input_type: 8
+uniqueness: 0.95f
+image_height: 480
+image_width: 640
+left_camera_yaml: "left.yaml"
+right_camera_yaml: "right.yaml"
+camera_height: 8
+camera_tilt: 0.0
+camera_pitch: -10.0
+camera_yaw: 0.0
+top_row: 0
+max_depth: 25.0
+LR_max_diff: 5
+min_disp: 1

+ 1158 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/params/yolov4.cfg

@@ -0,0 +1,1158 @@
+[net]
+batch=64
+subdivisions=8
+# Training
+#width=512
+#height=512
+width=416
+height=416
+channels=3
+momentum=0.949
+decay=0.0005
+angle=0
+saturation = 1.5
+exposure = 1.5
+hue=.1
+
+learning_rate=0.0013
+burn_in=1000
+max_batches = 500500
+policy=steps
+steps=400000,450000
+scales=.1,.1
+
+#cutmix=1
+mosaic=1
+
+#:104x104 54:52x52 85:26x26 104:13x13 for 416
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=3
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=32
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-7
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=64
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-10
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-28
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+# Downsample
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=3
+stride=2
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -2
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=3
+stride=1
+pad=1
+activation=mish
+
+[shortcut]
+from=-3
+activation=linear
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=mish
+
+[route]
+layers = -1,-16
+
+[convolutional]
+batch_normalize=1
+filters=1024
+size=1
+stride=1
+pad=1
+activation=mish
+
+##########################
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+### SPP ###
+[maxpool]
+stride=1
+size=5
+
+[route]
+layers=-2
+
+[maxpool]
+stride=1
+size=9
+
+[route]
+layers=-4
+
+[maxpool]
+stride=1
+size=13
+
+[route]
+layers=-1,-3,-5,-6
+### End SPP ###
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = 85
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[upsample]
+stride=2
+
+[route]
+layers = 54
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[route]
+layers = -1, -3
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=128
+size=1
+stride=1
+pad=1
+activation=leaky
+
+##########################
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=256
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 0,1,2
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+scale_x_y = 1.2
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+max_delta=5
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=256
+activation=leaky
+
+[route]
+layers = -1, -16
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=256
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=512
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 3,4,5
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+scale_x_y = 1.1
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+max_delta=5
+
+
+[route]
+layers = -4
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=2
+pad=1
+filters=512
+activation=leaky
+
+[route]
+layers = -1, -37
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+filters=512
+size=1
+stride=1
+pad=1
+activation=leaky
+
+[convolutional]
+batch_normalize=1
+size=3
+stride=1
+pad=1
+filters=1024
+activation=leaky
+
+[convolutional]
+size=1
+stride=1
+pad=1
+filters=255
+activation=linear
+
+
+[yolo]
+mask = 6,7,8
+anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+classes=80
+num=9
+jitter=.3
+ignore_thresh = .7
+truth_thresh = 1
+random=1
+scale_x_y = 1.05
+iou_thresh=0.213
+cls_normalizer=1.0
+iou_normalizer=0.07
+iou_loss=ciou
+nms_kind=greedynms
+beta_nms=0.6
+max_delta=5
+

+ 28 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/center_net.proto

@@ -0,0 +1,28 @@
+/**
+* \brief 
+* \author pengcheng (yslrpch@126.com)
+* \date 2020-05-30
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+ syntax = "proto2";
+ import "tensor_rt.proto";
+ package tensorrt_inference;
+ 
+ message CenterNetConfig
+ {
+     optional TensorRTConfig tensor_rt_config= 1;
+     optional float score_threshold = 2;
+     optional int32 image_height = 3;
+     optional int32 image_width = 4;
+     optional int32 down_ratio = 5;
+     optional float mean_1 = 6;
+     optional float mean_2 = 7;
+     optional float mean_3 = 8;
+     optional float std_1 = 9;
+     optional float std_2 = 10;
+     optional float std_3 = 11;
+ }
+ 
+ 

+ 33 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/lanearray.proto

@@ -0,0 +1,33 @@
+/**
+* \brief 
+* \author liqingxia (liqingxia@adcsoft.cn)
+* \date 2021-06-28
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+syntax = "proto2";
+package iv.vision;
+message Lane{
+  optional double a=1 [default = 0];  //车道线轨迹曲线 三次项系数
+  optional double b=2;  //车道线轨迹曲线 二次项系数
+  optional double c=3;  //车道线轨迹曲线 一次项系数
+  optional double d=4;  //车道线轨迹曲线偏置项系数 
+};
+message Point2f{
+  optional double x=1 [default = 0];  //x坐标
+  optional double y=2 [default = 0];  //y坐标
+};
+message Line{
+  optional int32 index = 1; //编号
+  optional int32 type = 2 [default = 0]; //类型,1 实线 2 虚线
+  optional int32 color = 3 [default = 0]; //颜色, 1 白色 2 黄色
+  optional Point2f worldpoint = 4;  //用于构建地图点的世界坐标
+  repeated Point2f linepoint = 5;  //每条车道线上的点,用于画检测结果mask图。
+  optional double curvature = 6 [default = 0]; //曲率
+  optional Lane lane= 7; //轨迹方程
+};
+message Linearray{
+  repeated Line line = 1;  //车道线输出array
+  optional int64 timestamp =2; //时间戳
+};

+ 87 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/obstacles.proto

@@ -0,0 +1,87 @@
+/**
+* \brief 
+* \author pengcheng (yslrpch@126.com)
+* \date 2020-06-28
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+ syntax = "proto2";
+ package iv.vision;
+
+ message Point3D
+ {
+     optional double x = 1;
+     optional double y = 2;
+     optional double z = 3;
+ }
+
+ message Bbox3D
+ {
+     optional string category = 1;
+     optional uint64 class_id = 2;
+     /*
+     `-------------->x
+      |    (top_left_x, top_left_y)
+      |     *------------------------------
+      |     |                             |
+      |     |                             |
+      v     |                             |
+      y     |                             |
+            |                             |
+            ------------------------------*
+                                       (bottom_right_x, bottom_right_y)
+     */
+     optional uint64 top_left_x = 3;
+     optional uint64 top_left_y = 4;
+     optional uint64 bottom_right_x = 5;
+     optional uint64 bottom_right_y = 6;
+     /* 
+                 p5 --------------p6
+                   /|            /|
+                  / |           / |
+                 /  |          /  | 
+              p7 ---|----------p8 |
+                |   |         |   |
+                |   |         |   |
+                |   |    .    |   |
+                |   |    c    |   |
+                |   |         |   |
+                |   |         |   |
+                | p1----------|---p2
+                |  /          |  /
+                | /           | /
+                |/            |/
+              p3 -------------p4
+
+            / z
+           /
+          /
+         /
+        -------------------------->x
+        |
+        |
+        |
+        |
+        |
+        v
+        y 
+     */
+
+     optional Point3D p1 = 7;
+     optional Point3D p2 = 8;
+     optional Point3D p3 = 9;
+     optional Point3D p4 = 10;
+     optional Point3D p5 = 11;
+     optional Point3D p6 = 12;
+     optional Point3D p7 = 13;
+     optional Point3D p8 = 14;
+     optional Point3D c = 15;
+
+ }
+
+ message ObstacleInfo
+ {
+     repeated Bbox3D bbox_3d = 1;
+     optional double time = 2;
+ }

+ 48 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/proto/tensor_rt.proto

@@ -0,0 +1,48 @@
+/**
+* \brief 
+* \author pengcheng (yslrpch@126.com)
+* \date 2020-05-30
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+syntax = "proto2";
+package tensorrt_inference;
+
+message TensorConfig {
+    optional string name = 1;
+    optional uint64 channel = 2;
+    optional uint64 height = 3;
+    optional uint64 width = 4;
+    optional uint64 is_output = 5;
+}
+
+message CalibrationConfig {
+    optional string file = 1;
+    optional bool read_calibration_table_only = 2;
+    optional string calibration_list = 3;
+    optional uint64 first_batch = 4;
+    optional uint64 batch_size = 5;
+    optional uint64 num_batches = 6;
+    optional uint64 height = 7;
+    optional uint64 width = 8;
+    optional float scale = 9;
+    optional string mean = 10;
+    optional string dev = 11;
+    optional float padding_with =12;
+}
+
+message TensorRTConfig {
+    optional string network_name = 1;
+    optional uint64 gpu_id = 2;
+    optional string deploy_file = 3;
+    optional string model_file = 4;
+    optional uint64 batch_size = 5;
+    optional uint64 tensor_type = 6;
+    optional uint64 work_space = 7;
+    //repeated TensorConfig tensor_config = 8;
+    optional bool serialize_model = 9;
+    optional string serialize_model_name = 10;
+    optional CalibrationConfig calibration_config = 11;
+
+}

+ 28 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/CMakeLists.txt

@@ -0,0 +1,28 @@
+set(LIB_NAME cmw_app)
+
+find_package(OpenCV  REQUIRED)
+find_package(CUDA REQUIRED)
+# Collect every source file in this directory into the library.
+aux_source_directory(. DIR_LIB_SRCS)
+include_directories(
+    ${PROJECT_SOURCE_DIR}/include
+    # Fixed: CMake variables are case-sensitive; find_package(OpenCV) defines
+    # OpenCV_INCLUDE_DIRS — the previous ${Opencv_INCLUDE_DIRS} expanded empty.
+    ${OpenCV_INCLUDE_DIRS}
+    ${CUDA_INCLUDE_DIRS}
+    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/src/common>
+    $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/src/common>
+    $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}>
+)
+
+add_library(${LIB_NAME} SHARED ${DIR_LIB_SRCS})
+
+set_target_properties (${LIB_NAME} PROPERTIES CLEAN_DIRECT_OUTPUT 1)
+set_target_properties (${LIB_NAME} PROPERTIES VERSION 1.0 SOVERSION 1)
+target_link_libraries(${LIB_NAME} ${OpenCV_LIBS}   ${CUDA_LIBRARIES} sgm detection_trt7 lane_detection common)
+
+# Install shared library under the fixed staging prefix used by this project.
+set(CMAKE_STAGING_PREFIX /opt/adc)
+install(TARGETS ${LIB_NAME}
+  ARCHIVE DESTINATION ${CMAKE_STAGING_PREFIX}/lib
+  LIBRARY DESTINATION ${CMAKE_STAGING_PREFIX}/lib
+ # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+)
+
+

+ 181 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/detection_trt7_app.cpp

@@ -0,0 +1,181 @@
+/**
+* \brief 
+* \author liqingxia (liqingxia@adcsoft.cn)
+* \date 2021-06-18
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include "cmw_app/detection_trt7_app.h"
+#include "common/project_root.h"
+
+using namespace Tn;
+using namespace std;
+
+namespace adc{
+// Periodic worker that runs YOLOv4 obstacle detection through TensorRT 7.
+// `config_file` is a YAML file (see params/obstacle_detection_trt7.yaml),
+// resolved relative to the project root inside InitConfig(); `hz` is the
+// polling frequency handed to the Worker base class.
+// NOTE(review): InitConfig() catches and only logs exceptions, so the object
+// can come out of this constructor with no engine loaded — confirm callers
+// tolerate that.
+DetectionTrt7App::DetectionTrt7App(const std::string &config_file, const float hz)  : Worker(hz)
+{
+    InitConfig(config_file);
+    detection_callback_ = NULL;
+}
+
+// Registers the function invoked after each successful inference with the
+// detection boxes, capture timestamp, source image and class-name table.
+// Not synchronized: set it before the worker starts running.
+void DetectionTrt7App::SetDetectionCallback(DetectionCallback callback)
+{
+    detection_callback_ = callback;
+}
+
+// Reads a class-name file (one label per line, e.g. coco.names) and returns
+// the labels in file order, so a class id can be used as an index into the
+// returned vector.
+// NOTE(review): the only open-failure check is assert(), which is compiled
+// out under NDEBUG — a missing file then silently produces an empty list.
+vector<string> DetectionTrt7App::GetNamesFromFile(string filePathAndName)
+{
+    vector<string> retVec;
+    ifstream cocoName;
+    cocoName.open(filePathAndName.data());
+    assert(cocoName.is_open());
+    string s;
+    while (getline(cocoName,s))
+    {
+        retVec.push_back(s);
+    }
+    cocoName.close();
+    return retVec;
+}
+// Loads all detector settings from the YAML config, fills the network
+// description, reads the class names and loads the TensorRT engine.
+// Every path value in the file is resolved against the project root via
+// RootPath::GetAbsolutePath().
+// NOTE(review): any failure (bad YAML, missing key, engine load error) is
+// caught here and only printed — the object is then left partially
+// initialized with no readiness flag for callers to check.
+void DetectionTrt7App::InitConfig(const std::string &config_file)
+{
+    try
+    {
+        std::string config_file_abs = adc::RootPath::GetAbsolutePath(config_file);
+        YAML::Node node = YAML::LoadFile(config_file_abs)
+        std::string class_name_path = adc::RootPath::GetAbsolutePath( node["class_name_path"].as<std::string>());
+        std::string network_type = node["network_type"].as<std::string>();
+        std::string config_path = adc::RootPath::GetAbsolutePath( node["config_path"].as<std::string>());
+        std::string weights_path = adc::RootPath::GetAbsolutePath( node["weights_path"].as<std::string>());
+        std::string device_type = node["device_type"].as<std::string>();
+        std::string input_blob_name = node["input_blob_name"].as<std::string>();        
+        // nms: IoU threshold for non-maximum suppression; ignore: minimum
+        // detection confidence; track_flag: enable the tracking path in
+        // Inference().
+        nms_ = node["nms"].as<float>();
+        ignore_ = node["ignore"].as<float>();
+        track_ = node["track_flag"].as<bool>();
+        network_info_.networkType = network_type;
+        network_info_.configFilePath = config_path;
+        network_info_.wtsFilePath = weights_path;
+        network_info_.deviceType = device_type;
+        network_info_.inputBlobName = input_blob_name;
+        engine_path_ = adc::RootPath::GetAbsolutePath(node["engine_path"].as<std::string>());
+        outputs_ = std::make_shared<std::list<vector<Tn::Bbox>>>();
+        image_count_ = 0;
+        class_name_ = GetNamesFromFile(class_name_path);
+        Yolo_detect_.reset(new YoloDetect(network_info_,engine_path_));
+        // GPU id is hard-coded to device 0 here even though the YAML carries a
+        // device_type entry — NOTE(review): confirm this is intentional.
+        cudaSetDevice(0);
+        yolo_context_ = {nullptr};
+        Yolo_detect_->loadModel(yolo_context_);
+    }
+    catch(const std::exception& e)
+    {
+	std::cerr << e.what() << '\n';
+    }
+}
+
+// Producer side of the image queue: pushes a frame plus its capture time and
+// drops the oldest entries so at most kQueueSize frames are buffered.
+// NOTE(review): cv::Mat is pushed by header copy, which shares the underlying
+// pixel buffer — callers must not reuse/overwrite that buffer afterwards.
+void DetectionTrt7App::CacheImages(const cv::Mat &left_img, std::chrono::system_clock::time_point &time)
+{
+    std::lock_guard<std::mutex> mutex_cache_image(mutex_image_);
+
+    image_cache_.push(left_img);
+    time_cache_.push(time);
+    while (image_cache_.size() > kQueueSize)
+    {
+        image_cache_.pop();
+        time_cache_.pop();
+    }
+}
+
+// Consumer side of the image queue: pops the oldest frame into left_image_
+// (deep-copied via clone()) and its timestamp into current_time_.
+// Returns false when the queue is empty.
+bool DetectionTrt7App::ReceiveImage(void)
+{
+    std::lock_guard<std::mutex> mutex_cache_image(mutex_image_);
+    if(image_cache_.empty())
+    {
+        return false;
+    }
+    left_image_ = image_cache_.front().clone();
+    current_time_ = time_cache_.front();
+    image_cache_.pop();
+    time_cache_.pop();
+    return true;
+}
+// Runs one YOLOv4 inference on left_image_ and publishes the result list in
+// outputs_ (cleared each frame, then one vector<Bbox> for this frame).
+// When track_ is enabled the raw detections are first fed through
+// td::TrackObstacle (multi-frame tracking) and the tracked boxes are emitted
+// instead of the raw ones.
+void DetectionTrt7App::Inference()
+{
+    image_count_++;
+    std::vector<Detection> detect_results;
+    std::vector<Bbox> boxes;
+    // Detections come back in network input space; confidence below ignore_
+    // and NMS overlap above nms_ are filtered inside process().
+    Yolo_detect_->process(*yolo_context_,left_image_,detect_results,ignore_,nms_);
+    if(track_)
+    {
+        // Convert detections to the tracker's bbox_t (x, y, w, h in image
+        // coordinates) before handing them to the tracker.
+        std::vector<td::bbox_t> outs;
+        td::bbox_t result;
+        for (size_t i = 0; i < detect_results.size(); i++) {
+            cv::Rect r = Yolo_detect_->get_rect(left_image_, detect_results[i].bbox,Yolo_detect_->m_input_w,Yolo_detect_->m_input_h);
+            result.x = r.x;
+            result.y = r.y;
+            result.w = r.width;
+            result.h = r.height;
+            result.prob = detect_results[i].det_confidence;
+            result.obj_id = detect_results[i].class_id;
+            outs.push_back(result);
+        }
+        std::vector<td::TrackingBox>track_result;
+        //tracking 
+        // NOTE(review): the success flag returned by TrackObstacle is ignored;
+        // on failure track_result is presumably empty — confirm that is the
+        // intended behavior.
+        bool track_flag = td::TrackObstacle(image_count_,trackers_,outs,track_result);
+        for(auto& item : track_result)
+        {
+            // Bbox field order: {classId, left, right, top, bot, score}.
+            Bbox bbox =
+                    {
+                            item.class_id,   //classId
+                            item.box.x, //left
+                            item.box.x+item.box.width, //right
+                            item.box.y, //top
+                            item.box.y+item.box.height, //bot
+                            item.prob       //score
+                    };
+            boxes.push_back(bbox);
+        }
+    }
+    else
+    {
+        // No tracking: map every raw detection straight to an output Bbox.
+        for(auto& item : detect_results)
+        {
+            cv::Rect r = Yolo_detect_->get_rect(left_image_, item.bbox,Yolo_detect_->m_input_w,Yolo_detect_->m_input_h);
+            Bbox bbox =
+                    {
+                            item.class_id,   //classId
+                            r.x, //left
+                            r.x+r.width, //right
+                            r.y, //top
+                            r.y+r.height, //bot
+                            item.det_confidence       //score
+                    };
+            boxes.push_back(bbox);
+        }
+    }
+    // Publish: outputs_ always holds exactly the latest frame's boxes.
+    outputs_->clear();
+    outputs_->emplace_back(boxes);
+}
+// One Worker cycle: fetch a frame, run inference, deliver the result through
+// the registered callback. If no frame is available the cycle is skipped;
+// after 50 consecutive empty polls a warning is printed (on every further
+// empty poll until a frame arrives).
+// NOTE(review): `count` is function-static, i.e. shared by all instances and
+// not thread-safe — fine for a single worker, verify there is only one.
+void DetectionTrt7App::DoWork()
+{
+    static int count = 0;
+    if(!ReceiveImage())
+    {
+        if(count>50)
+        {
+            std::cout<<"can not get images"<<std::endl;
+        }
+        count ++;
+        return;
+    }
+    count = 0;
+    
+    Inference();
+
+
+    if(NULL != detection_callback_)
+    {
+        detection_callback_(outputs_, current_time_,left_image_,class_name_);
+    }
+}
+}

+ 114 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/lane_detection_app.cpp

@@ -0,0 +1,114 @@
+//
+// Created by tian on 2020/5/24.
+//
+#include "cmw_app/lane_detection_app.h"
+#include "common/project_root.h"
+
+using namespace std;
+using namespace cv;
+
+namespace adc{
+// Periodic worker that runs LaneNet lane detection through TensorRT.
+// `config_file` is a YAML file resolved relative to the project root; `hz` is
+// the polling frequency handed to the Worker base class. The TensorRT
+// execution context starts null and is created inside InitConfig().
+LaneDetectionApp::LaneDetectionApp(const std::string &config_file, const float hz)  : Worker(hz),lane_context(nullptr)
+{
+    InitConfig(config_file);
+    detection_callback_ = NULL;
+}
+// Releases the TensorRT execution context (TensorRT 7 uses explicit
+// destroy() rather than delete).
+LaneDetectionApp::~LaneDetectionApp()
+{
+    if(nullptr != lane_context)
+        lane_context->destroy();
+}
+// Registers the function invoked after each inference with the detected
+// lanes, capture timestamp and source image. Not synchronized: set it before
+// the worker starts running.
+void LaneDetectionApp::SetDetectionCallback(DetectionCallback callback)
+{
+    detection_callback_ = callback;
+}
+// Reads the lane-detection YAML config (onnx/engine paths plus input size,
+// defaulting to 512x256) and loads the LaneNet TensorRT model.
+// NOTE(review): the YAML key is spelled "hight" — kept as-is because existing
+// config files use that spelling.
+// NOTE(review): like the detector, failures are caught and only printed,
+// leaving the object partially initialized.
+void LaneDetectionApp::InitConfig(const std::string &config_file)
+{
+    try
+    {
+        std::string config_file_abs = adc::RootPath::GetAbsolutePath(config_file);
+        YAML::Node node = YAML::LoadFile(config_file_abs);
+        std::string onnx_path = adc::RootPath::GetAbsolutePath(node["onnx_path"].as<std::string>());
+        std::string engine_path = adc::RootPath::GetAbsolutePath(node["engine_path"].as<std::string>());
+        width_ = node["width"].as<int>(512);
+        hight_ = node["hight"].as<int>(256);
+        lanenet.reset(new ld::LaneNet);
+        lane_trt.reset(new ld::TensorRT);
+        lane_trt->setPath(onnx_path,engine_path);
+        ///// Load the LaneNet model and initialize the execution context /////
+        lane_trt->loadModel(lane_context);
+        outputs_ = std::make_shared<std::list<vector<ld::lane_info>>>();
+    }
+    catch(const std::exception& e)
+    {
+        std::cerr << e.what() << '\n';
+    }
+}
+// Runs one LaneNet inference: resizes left_image_ to the network input size,
+// normalizes/transposes it into a CHW float buffer, then publishes the lane
+// list in outputs_ (cleared each frame, one vector per frame).
+// NOTE(review): `data` is INPUT_C*INPUT_H*INPUT_W floats on the stack — for
+// e.g. 3x256x512 that is ~1.5 MB, which can overflow smaller worker-thread
+// stacks; consider a heap buffer.
+void LaneDetectionApp::Inference()
+{
+    const int INPUT_SIZE= ld::INPUT_C*ld::INPUT_H*ld::INPUT_W;
+    float data[INPUT_SIZE];
+    Mat image;
+    cv::resize(left_image_, image,cv::Size(ld::INPUT_W,ld::INPUT_H));
+     // normalize+transpose
+    lane_trt->loadImg(image,ld::INPUT_W,ld::INPUT_H,(float*)data, _mean, _std);
+    vector<ld::lane_info> lanes;
+    //frame_count ++;
+    outputs_->clear();
+    lanenet->process(*lane_context, data,trackers,lanes);
+    outputs_->emplace_back(lanes);
+}
+// Producer side of the image queue: pushes a frame plus its capture time and
+// drops the oldest entries so at most kQueueSize frames are buffered.
+// NOTE(review): cv::Mat is pushed by header copy (shared pixel buffer) —
+// callers must not reuse/overwrite that buffer afterwards.
+void LaneDetectionApp::CacheImages(const cv::Mat &left_img, std::chrono::system_clock::time_point &time)
+{
+    std::lock_guard<std::mutex> mutex_cache_image(mutex_image_);
+
+    image_cache_.push(left_img);
+    time_cache_.push(time);
+    while (image_cache_.size() > kQueueSize)
+    {
+        image_cache_.pop();
+        time_cache_.pop();
+    }
+}
+
+// Consumer side of the image queue: pops the oldest frame into left_image_
+// (deep-copied via clone()) and its timestamp into current_time_.
+// Returns false when the queue is empty.
+bool LaneDetectionApp::ReceiveImage(void)
+{
+    std::lock_guard<std::mutex> mutex_cache_image(mutex_image_);
+    if(image_cache_.empty())
+    {
+        return false;
+    }
+    left_image_ = image_cache_.front().clone();
+    current_time_ = time_cache_.front();
+    image_cache_.pop();
+    time_cache_.pop();
+    return true;
+}
+// One Worker cycle: fetch a frame, run lane inference, deliver the result
+// through the registered callback. Skips the cycle when no frame is queued;
+// after 50 consecutive empty polls a warning is printed on each further
+// empty poll.
+// NOTE(review): `count` is function-static — shared by all instances and not
+// thread-safe; fine only while a single worker instance exists.
+void LaneDetectionApp::DoWork()
+{
+    static int count = 0;
+    if(!ReceiveImage())
+    {
+        if(count>50)
+        {
+            std::cout<<"can not get images"<<std::endl;
+        }
+        count ++;
+        return;
+    }
+    count = 0;
+
+    Inference();
+
+
+    if(NULL != detection_callback_)
+    {
+        detection_callback_(outputs_, current_time_,left_image_);
+    }
+}
+
+// Definition of the static frame counter. The only use visible in this file
+// (the increment in Inference()) is commented out.
+int LaneDetectionApp::frame_count = 0;
+}
+
+
+

+ 301 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/cmw_app/sgm_app.cpp

@@ -0,0 +1,301 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-05-20
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+#include <iostream>
+#include "cmw_app/sgm_app.h"
+#include <yaml-cpp/yaml.h>
+#include <opencv2/calib3d/calib3d.hpp>
+#include <opencv2/opencv.hpp>
+#include <chrono>
+#include "common/project_root.h"
+
+namespace adc
+{
+
+StereoSgmApp::StereoSgmApp(const std::string &config_file, const float hz)  : Worker(hz)
+{
+    InitConfig(config_file);
+    point_color_callback_ = NULL;
+    point_cloud_callback_ = NULL;
+    poind_and_color_callback_ = NULL;
+    point_cloud_mat_callback_ = NULL;
+    point_cloud_mat_and_image_color_callback_ = NULL;
+}
+
// Register the callback that receives the raw 3D point cloud each cycle.
void StereoSgmApp::SetPointCloudCallback(PointCloudCallback callback)
{
    point_cloud_callback_ = callback;
}

// Register the callback that receives the per-point BGR colors each cycle.
void StereoSgmApp::SetPointColorCallback(PointColorCallback callback)
{
    point_color_callback_ = callback;
}

// Register the callback that receives the dense XYZ map together with the
// rectified left color image.
void StereoSgmApp::SetPointCloudMatAndImageColorCallback(PointCloudMatAndImageColorCallback callback)
{
    point_cloud_mat_and_image_color_callback_ = callback;
}

// Register the callback that receives points and colors together.
// (Note: "Adn" in the method name is a historical typo kept for ABI/API
// compatibility.)
void StereoSgmApp::SetPointAdnColorCallback(PointAndColorCallback callback)
{
    poind_and_color_callback_ = callback;
}

// Register the callback that receives only the dense XYZ map (CV_32FC3).
void StereoSgmApp::SetPointCloudMatCallback(PointCloudMatCallback callback)
{
    point_cloud_mat_callback_ = callback;
}
// Loads the SGM configuration YAML, allocates host/device buffers sized for
// the configured image, constructs the StereoSGM engine, builds the stereo
// rectification maps from both cameras' calibration files, and prepares the
// image->world coordinate transform.
// NOTE(review): any exception is only logged to stderr, leaving the object
// partially initialized — confirm callers can tolerate that.
void StereoSgmApp::InitConfig(const std::string &config_file)
{
    try
    {
        std::string yaml_file_abs_path = adc::RootPath::GetAbsolutePath(config_file);
        YAML::Node node = YAML::LoadFile(yaml_file_abs_path);

        // SGM engine parameters; the as<T>(default) form falls back to the
        // given default when the key is missing from the YAML.
        disp_size_ = node["disp_size"].as<int>(64);
        output_depth_ = node["output_depth"].as<int>(16);
        subpixel_ = node["subpixel"].as<bool>(true);
        P1_ = node["p1"].as<int>(10);
        P2_ = node["p2"].as<int>(120);
        input_type_ = node["input_type"].as<int>(8);
        uniqueness_ = node["uniqueness"].as<float>(0.95f);
        int image_height = node["image_height"].as<int>();
        int image_width = node["image_width"].as<int>();
        std::string left_camera_params = adc::RootPath::GetAbsolutePath(node["left_camera_yaml"].as<std::string>());
        std::string right_camera_params = adc::RootPath::GetAbsolutePath(node["right_camera_yaml"].as<std::string>());

        // Camera mounting geometry (metres / radians presumably — confirm
        // units against CoordinateTransform).
        float camera_height = node["camera_height"].as<float>(1.65);
        float camera_tilt = node["camera_tilt"].as<float>(0.0);
        float  camera_yaw = node["camera_yaw"].as<float>(0.0);
        float camera_pitch = node["camera_pitch"].as<float>(0.0);
        int min_disp = node["min_disp"].as<int>(0);
        int LR_max_diff = node["LR_max_diff"].as<int>(1);
        top_row_ = node["top_row"].as<int>();
        max_depth_ = node["max_depth"].as<float>();
        // Dense XYZ output map; -1000 is the "never written" sentinel.
        point_cloud_mat_ = cv::Mat(image_height, image_width, CV_32FC3, cv::Scalar::all(-1000));
        // input_type_ / output_depth_ are bits per pixel, hence the /8.
        input_bytes_ = input_type_ * image_height * image_width / 8;
        output_bytes_ = output_depth_ * image_height * image_width / 8;
        dispartity_ = cv::Mat(image_height, image_width, CV_16S);
        d_left_ptr_ = std::make_shared<device_buffer>(input_bytes_);
        d_right_ptr_ = std::make_shared<device_buffer>(input_bytes_);
        d_dispartity_ptr_ = std::make_shared<device_buffer>(output_bytes_);
        /**
			* @param P1 Penalty on the disparity change by plus or minus 1 between nieghbor pixels.
			* @param P2 Penalty on the disparity change by more than 1 between neighbor pixels.
			* @param uniqueness Margin in ratio by which the best cost function value should be at least second one.
			* @param subpixel Disparity value has 4 fractional bits if subpixel option is enabled.
			* @param path_type Number of scanlines used in cost aggregation.
			* @param min_disp Minimum possible disparity value.
			* @param LR_max_diff Acceptable difference pixels which is used in LR check consistency. LR check consistency will be disabled if this value is set to negative.
			*
			Parameters(int P1 = 10, int P2 = 120, float uniqueness = 0.95f, bool subpixel = false, int min_disp = 0, int LR_max_diff = 1,, PathType path_type = PathType::SCAN_8PAT)
			
         */
        adc::StereoSGM::Parameters param(P1_, P2_, uniqueness_, subpixel_, min_disp, LR_max_diff);
        sgm_ptr_ = std::make_shared<adc::StereoSGM>(image_width, image_height, disp_size_, input_type_, output_depth_,
                adc::EXECUTE_INOUT_CUDA2CUDA, param);

        // Build rectification maps for both cameras from their calibration.
        cv::Mat K_l, D_l, R_l, P_l, K_r, D_r, R_r, P_r;
        cv::Size size_l, size_r;
        StereoSgmApp::ReadParams(left_camera_params, K_l, D_l, P_l, R_l, size_l);
        StereoSgmApp::ReadParams(right_camera_params, K_r, D_r, P_r, R_r, size_r);
        cv::initUndistortRectifyMap(K_l, D_l, R_l, P_l, size_l, CV_32FC1, map_l_x, map_l_y);
        cv::initUndistortRectifyMap(K_r, D_r, R_r, P_r, size_r, CV_32FC1, map_r_x, map_r_y);
       
        camera_ptr_ = std::make_shared<CameraParameters>();
        camera_ptr_->fu = P_l.at<double>(0,0);
        camera_ptr_->fv = P_l.at<double>(1,1);
        camera_ptr_->u0 = P_l.at<double>(0,2);
        camera_ptr_->v0 = P_l.at<double>(1,2);
        // Baseline recovered from the right projection matrix: Tx = -fx * B,
        // so B = -P_r(0,3) / P_r(0,0).
        camera_ptr_->baseline = -P_r.at<double>(0,3) / P_r.at<double>(0,0);
        camera_ptr_->height = camera_height;
        camera_ptr_->tilt = camera_tilt;
        camera_ptr_->yaw = camera_yaw;
        camera_ptr_->pitch = camera_pitch;
        tf_ptr_ = std::make_shared<CoordinateTransform>(*camera_ptr_);
    }
    catch(const std::exception& e)
    {
        std::cerr << e.what() << '\n';
    }
}
+void StereoSgmApp::StereoSgmApp::GetDepthParams(CameraParameters& camera_params)
+{
+    camera_params.fu = camera_ptr_->fu;
+    camera_params.fv = camera_ptr_->fv;
+    camera_params.u0 = camera_ptr_->u0;
+    camera_params.v0 = camera_ptr_->v0;
+    camera_params.baseline = camera_ptr_->baseline;
+    camera_params.height = camera_ptr_->height;
+    camera_params.tilt = camera_ptr_->tilt;
+    camera_params.yaw = camera_ptr_->yaw;
+    camera_params.pitch = camera_ptr_->pitch;
+}
+
+void StereoSgmApp::ReadParams(const std::string& yaml_file, cv::Mat& K, cv::Mat& D, cv::Mat& P, cv::Mat& R, cv::Size& size)
+{
+     try{
+        YAML::Node node = YAML::LoadFile(yaml_file);
+        int height = node["image_height"].as<int>();
+        int width = node["image_width"].as<int>();
+        size = cv::Size2i(width, height);
+        std::vector<double> K_v= node["camera_matrix"]["data"].as<std::vector<double>>();
+        std::vector<double> P_v = node["projection_matrix"]["data"].as<std::vector<double>>();
+        std::vector<double> D_v = node["distortion_coefficients"]["data"].as<std::vector<double>>();
+        std::vector<double> R_v = node["rectification_matrix"]["data"].as<std::vector<double>>();
+
+        cv::Mat _K(K_v);
+        cv::Mat _P(P_v);
+        cv::Mat _D(D_v);
+        cv::Mat _R(R_v);
+        K = _K.reshape(3,3).clone();
+        P = _P.reshape(4,3).clone();
+        D = _D.reshape(5, 1).clone();
+        R = _R.reshape(3,3).clone();
+        }
+        catch(const std::exception& e)
+        {
+            std::cerr << e.what() << '\n'<<std::endl;;
+            // ROS_ERROR(e.what());
+            exit(1);
+        }
+}
+
+void StereoSgmApp::CacheImages(const cv::Mat &left_img, const cv::Mat &right_img, std::chrono::system_clock::time_point &time)
+{
+    std::lock_guard<std::mutex> mutex_cache_image(mutex_image_);
+    
+    image_cache_.push(std::make_pair(left_img, right_img));
+    time_cache_.push(time);
+    while (image_cache_.size() > kQueueSize)
+    {
+        image_cache_.pop();
+        time_cache_.pop();
+    }
+}
+
+bool StereoSgmApp::ReceiveImage(void)
+{
+    std::lock_guard<std::mutex> mutex_cache_image(mutex_image_);
+    if(image_cache_.empty())
+    {
+        return false;
+    }
+    left_image_ = image_cache_.front().first.clone();
+    right_image_ = image_cache_.front().second.clone();
+    current_time_ = time_cache_.front();
+    image_cache_.pop();
+    time_cache_.pop();
+    return true;
+}
+
+void StereoSgmApp::StereoMatch()
+{
+    cv::Mat left_rect, right_rect;
+    cv::remap(left_image_, left_image_, map_l_x, map_l_y, cv::INTER_LINEAR);
+    cv::remap(right_image_, right_image_, map_r_x, map_r_y, cv::INTER_LINEAR);
+    cv::cvtColor(left_image_, left_rect, cv::COLOR_BGR2GRAY);
+    cv::cvtColor(right_image_, right_rect, cv::COLOR_BGR2GRAY);
+    cudaMemcpy(d_left_ptr_->data, left_rect.data, input_bytes_, cudaMemcpyHostToDevice);
+    cudaMemcpy(d_right_ptr_->data, right_rect.data, input_bytes_, cudaMemcpyHostToDevice);
+    sgm_ptr_->execute(d_left_ptr_->data, d_right_ptr_->data, d_dispartity_ptr_->data);
+    cudaMemcpy(dispartity_.data, d_dispartity_ptr_->data, output_bytes_, cudaMemcpyDeviceToHost);
+    dispartity_.convertTo(dispartity_32f_, CV_32F, subpixel_ ? 1. / adc::StereoSGM::SUBPIXEL_SCALE : 1.0);
+}
+
// Converts the float disparity map into (a) a sparse colored point cloud
// (point_cloud_ / point_color_) and (b) the dense per-pixel XYZ map
// point_cloud_mat_, starting at image row top_row_.
// NOTE(review): rows above top_row_ of point_cloud_mat_ are never rewritten
// here, so they keep the -1000 init sentinel (or a previous frame's values)
// — confirm downstream consumers expect that.
void StereoSgmApp::ReprojectPointsTo3D()
{
    CV_Assert(dispartity_32f_.type() == CV_32F);
    point_cloud_.clear();
    point_color_.clear();
    // Upper bound: one point per pixel.
    point_cloud_.reserve(dispartity_32f_.rows * dispartity_32f_.cols);
    point_color_.reserve(dispartity_32f_.rows * dispartity_32f_.cols);

    for (int y = top_row_; y < dispartity_32f_.rows ; y++)
	{
        // Raw row pointer into the CV_32FC3 map: 3 floats (x,y,z) per pixel.
        float* cpm_ptr = point_cloud_mat_.ptr<float>(y);
		for (int x = 0; x < dispartity_32f_.cols; x++)
		{
			const float d = dispartity_32f_.at<float>(y, x);
			if (d > 0)
			{
				cv::Point3f temp = tf_ptr_->imageToWorld(cv::Point(x, y), d);
				if(temp.z < max_depth_)
                {
                    // Color sampled from the rectified left image.
                    cv::Vec3b color = left_image_.at<cv::Vec3b>(y,x);
					point_cloud_.push_back(temp);
                    point_color_.push_back(color);
                    cpm_ptr[x*3] = temp.x;
                    cpm_ptr[x*3 + 1] = temp.y;
                    cpm_ptr[x*3 + 2] = temp.z;

                }
                else
                {
                    // Too far: zero out the dense map, skip the point cloud.
                    cpm_ptr[x*3] = 0.;
                    cpm_ptr[x*3 + 1] = 0.;
                    cpm_ptr[x*3 + 2] = 0.;
                }
			}
            else
            {
                // No valid disparity at this pixel.
                cpm_ptr[x*3] = 0.;
                cpm_ptr[x*3 + 1] = 0.;
                cpm_ptr[x*3 + 2] = 0.;
            }
		}
	}

}
+
+void StereoSgmApp::DoWork()
+{
+    static int count = 0;
+    if(!ReceiveImage())
+    {
+        if(count > 50)
+        {
+            std::cout<<"can not get images"<<std::endl; 
+        }
+        count ++;
+        return;
+    }
+    count = 0;
+   // const auto t1 = std::chrono::system_clock::now();
+    StereoMatch();
+    //const auto t2 = std::chrono::system_clock::now();
+   // const auto duration = std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
+    ReprojectPointsTo3D();
+    //const auto fps = 1e6 / duration;
+    //std::cout<<"match fps: "<<fps<<std::endl;
+    //std::cout<<"generate points: "<<point_cloud_.size()<<std::endl;
+    if(NULL != point_color_callback_)
+    {
+        point_color_callback_(point_color_, current_time_);
+    }
+    if(NULL != point_cloud_callback_)
+    {
+        point_cloud_callback_(point_cloud_, current_time_);
+    }
+    if(NULL != poind_and_color_callback_)
+    {
+        poind_and_color_callback_(point_cloud_,  point_color_, current_time_);
+    }
+    if(NULL != point_cloud_mat_callback_)
+    {
+        // cv::imshow("left", );
+        point_cloud_mat_callback_(point_cloud_mat_, current_time_);
+    }
+    if(NULL != point_cloud_mat_and_image_color_callback_)
+    {
+        point_cloud_mat_and_image_color_callback_(point_cloud_mat_, left_image_, current_time_);
+    }
+}
+
+
+}

+ 78 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/CMakeLists.txt

@@ -0,0 +1,78 @@
cmake_minimum_required(VERSION 3.10)

set(LIB_NAME common)


find_package(PCL  REQUIRED)

if(NOT PCL_FOUND)
    # FATAL_ERROR aborts configuration; plain "ERROR" is not a message mode
    # and was previously printed as part of the message text.
    # (These branches are belt-and-braces: REQUIRED already fails hard.)
    message(FATAL_ERROR "Not found PCL")
endif()
find_package(CUDA REQUIRED)

find_path(TENSORRT_INCLUDE_DIR NvInfer.h
        HINTS ${TENSORRT_ROOT} ${CUDA_TOOLKIT_ROOT_DIR}
        PATH_SUFFIXES include/)
message(STATUS "Found TensorRT headers at ${TENSORRT_INCLUDE_DIR}")
find_library(TENSORRT_LIBRARY_INFER nvinfer
        HINTS ${TENSORRT_ROOT} ${TENSORRT_BUILD} ${CUDA_TOOLKIT_ROOT_DIR}
        PATH_SUFFIXES lib lib64 lib/x64)
message(STATUS "Found TensorRT libs ${TENSORRT_LIBRARY_INFER}")

find_package(OpenCV REQUIRED)
# NOTE(review): the conventional variable is OpenCV_LIB_DIR; the
# OpenCV_LIBRARIES_DIRS spelling is normally empty.  Harmless, but verify
# if link errors appear.
link_directories(${OpenCV_LIBRARIES_DIRS})

find_package(Boost REQUIRED COMPONENTS
filesystem 
system
thread
)
if(NOT Boost_FOUND)
    message(FATAL_ERROR "Not found Boost")
endif()

find_package(Protobuf REQUIRED)
if(NOT Protobuf_FOUND)
    message(FATAL_ERROR "Not found Protobuf")
endif()
# NOTE(review): PROTOBUF_LIBRARIES holds library *files*, not directories;
# link_directories() silently ignores them.  Kept because the libraries are
# also passed to target_link_libraries() below, which is what actually links.
link_directories(${PROTOBUF_LIBRARIES})

set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}  -Wall -Ofast")
list(APPEND CUDA_NVCC_FLAGS "-D_FORCE_INLINES -Xcompiler -fPIC ")
set(CUDA_ARCH "-gencode;arch=compute_75,code=sm_75" )


file(GLOB CPP_SRC *.cpp)
#---------------------generated code-------------------------------#
file(GLOB PROTO_FILES ${PROJECT_SOURCE_DIR}/proto/*.proto)
PROTOBUF_GENERATE_CPP(PROTO_SRCS PROTO_HDRS ${PROTO_FILES})
SET_PROPERTY(SOURCE ${PROTO_SRCS} PROPERTY COMPILE_FLAGS "-Wno-error=unused-parameter")

# (A second, duplicate append of "-Wall -Ofast" to CMAKE_CXX_FLAGS was
# removed here — the flags were being added twice.)

include_directories(
                    ${PCL_INCLUDE_DIRS}
                    ${TENSORRT_INCLUDE_DIR}
                    ${OpenCV_INCLUDE_DIRS}
                    ${PROTOBUF_INCLUDE_DIR}
                    ${PROJECT_SOURCE_DIR}/include
                    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}/src/common>
                    $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}/src/common>
                    $<BUILD_INTERFACE:${PROJECT_BINARY_DIR}>
                    )

add_library(${LIB_NAME} SHARED
            ${CPP_SRC}
            ${TER_CPP_SRC}
            ${PROTO_SRCS}
            ${PROTO_HDRS}
            )
target_link_libraries(${LIB_NAME}
                      ${PCL_LIBRARIES}
                      ${TENSORRT_LIBRARY_INFER}
                      ${OpenCV_LIBS}
                      ${Boost_LIBRARIES}
                      ${PROTOBUF_LIBRARIES}
                      common_cuda_part
                      )

+ 19 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/cuda/CMakeLists.txt

@@ -0,0 +1,19 @@
cmake_minimum_required(VERSION 3.10)

# CUDA-only part of the common library: kernels from the *.cu files here.
set(LIB_NAME common_cuda_part)
find_package(CUDA REQUIRED)
file(GLOB CPP_SRC *.cpp)
file(GLOB CU_SRC *.cu)
# NOTE(review): compute_72/sm_72 here differs from compute_75/sm_75 in the
# parent common/CMakeLists.txt — confirm which GPU generation is targeted.
set(CUDA_ARCH "-gencode;arch=compute_72,code=sm_72")
set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} ${CUDA_ARCH}")

include_directories(
                    ${CUDA_INCLUDE_DIRS}
                    ${PCL_INCLUDE_DIRS}
                    )
                    #../onnx-tensorrt)
CUDA_ADD_LIBRARY(${LIB_NAME} ${CU_SRC} SHARED)
target_link_libraries(${LIB_NAME})
+

+ 148 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/cuda/utils.cu

@@ -0,0 +1,148 @@
/**
 *  \brief 
 *  \author pengcheng(pengcheng@adcsoft.cn)
 *  \date 2020.09.10
 *  \attention 
 * 
*/
+
+#include <cuda_runtime_api.h>
+#include "cuda_runtime.h"
+#include <string.h>
+#include <stdio.h>
+#include <iostream>
+namespace adc
+{
+namespace common
+{ 
+
+#define IVAIL_VALUE  -10 
+#define THREAD_SIZE  8 
+
// Debug kernel: writes the constant 210.0 into a row-major buffer so the
// host (TestGPU) can verify that launches and device memory work.
// NOTE(review): the 640 row stride is hard-coded to match TestGPU()'s
// 640x480 launch — there is no bounds check on z.
__global__ void TEST(float* z)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;

// // if(row >= rows || col >= cols) return;
// printf("gpu result is:%f\n",10);

int index_id = row * 640 + col;
z[index_id] = 210.0;

}
+
// Per-pixel back-projection kernel: converts a 16-bit depth map (scaled by
// 1/1000 — presumably millimetres; confirm with the sensor driver) into
// metric X/Y/Z maps via the pinhole model.  A quadratic depth correction
// (corret_a + corret_b*z + corret_c*z^2) is applied first; pixels outside
// [min_depth, max_depth] are marked with IVAIL_VALUE in all three maps.
// (left_x, left_y) offsets pixel coordinates when the depth map is a crop
// of the full image.
__global__ void ComputeXYZ(const short* depth,  const int left_x, const int left_y, 
            const float inv_fu, const float inv_fv, const float u0, const float v0,
            const float min_depth, const float max_depth, const int rows, const int cols,
            float* z, float* x, float* y,const float corret_a, const float corret_b, const float corret_c)
{
   int col = blockIdx.x * blockDim.x + threadIdx.x;
   int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Grid may overhang the image; discard out-of-range threads.
    if(row >= rows || col >= cols) return;

    int index_id = row * cols + col;
    float z_f = static_cast<float>(depth[index_id]) / 1000.0;
    //correct
    z_f = z_f + corret_a + corret_b*z_f +  corret_c*z_f*z_f;

    if(z_f > max_depth || z_f < min_depth)
    {
        z[index_id] = IVAIL_VALUE;
        x[index_id] = IVAIL_VALUE;
        y[index_id] = IVAIL_VALUE;
    }
    else
    {
        z[index_id] = z_f;
        float x_f = inv_fu * (col + left_x - u0) * z_f;
        float y_f = inv_fv * (row + left_y - v0) * z_f;
        x[index_id] = x_f;
        y[index_id] = y_f;
    }
   
}
// Nearest-neighbour decimation kernel: output pixel (row, col) copies input
// pixel (row*ratio_y, col*ratio_x) from each of the x/y/z maps.
__global__ void SampleDown(const float* x, const float* y, const float* z, const int ratio_x, const int ratio_y,
                           const int old_cols, const int new_rows, const int new_cols,
                           float* new_x, float* new_y, float* new_z)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Grid may overhang the output image; discard out-of-range threads.
    if(col >= new_cols || row >= new_rows) return;

    int old_col_id = col * ratio_x;
    int old_row_id = row * ratio_y;

    int new_index_id = row * new_cols + col;
    int old_index_id = old_row_id * old_cols + old_col_id;
 
    new_x[new_index_id] = x[old_index_id];
    new_y[new_index_id] = y[old_index_id];
    new_z[new_index_id] = z[old_index_id];
}
+
+
/// Host-side launcher for the SampleDown kernel: decimates the x/y/z maps
/// by (ratio_x, ratio_y) on the GPU, writing new_rows x new_cols outputs.
void SampleDownGPUDevice(const float* x, const float* y, const float* z, const int ratio_x, const int ratio_y,
                        const int old_cols, const int new_rows, const int new_cols,
                        float* new_x, float* new_y, float* new_z, cudaStream_t stream)
{
    // A zero grid dimension is an invalid launch configuration.
    if (new_cols <= 0 || new_rows <= 0) return;

    // BUG FIX: clamp the block shape to the image, then size the grid by
    // ceiling division.  The previous quotient/remainder juggling launched
    // ZERO blocks whenever a dimension satisfied threads == dimension
    // (e.g. new_cols == 12 gave threads_x == 12 and blocks_x == 1 - 1 == 0),
    // so small images were never processed.
    unsigned int threads_x = new_cols < THREAD_SIZE ? new_cols : THREAD_SIZE;
    unsigned int threads_y = new_rows < THREAD_SIZE ? new_rows : THREAD_SIZE;
    unsigned int blocks_x = (new_cols + threads_x - 1) / threads_x;
    unsigned int blocks_y = (new_rows + threads_y - 1) / threads_y;

    dim3 grids = {blocks_x, blocks_y};
    dim3 threads = {threads_x, threads_y};
    SampleDown<<<grids, threads, 0, stream>>>(x, y, z, ratio_x, ratio_y, old_cols, new_rows, new_cols, new_x, new_y, new_z);
}
+
/// Host-side launcher for the ComputeXYZ kernel: back-projects a rows x cols
/// depth crop (offset by left_x/left_y in the full image) into metric
/// X/Y/Z maps, applying the quadratic depth correction coefficients.
void ComputeXYZGPUDevice(const short* depth,  const int left_x, const int left_y, 
                        const float inv_fu, const float inv_fv, const float u0, const float v0,
                        const float min_depth, const float max_depth, const int rows, const int cols,
                        float* x, float* y, float* z, cudaStream_t stream,
                        const float corret_a, const float corret_b, const float corret_c)
{
    // A zero grid dimension is an invalid launch configuration.
    if (rows <= 0 || cols <= 0) return;

    // BUG FIX: clamp the block shape to the image, then size the grid by
    // ceiling division.  The previous quotient/remainder logic launched
    // ZERO blocks whenever a dimension satisfied threads == dimension
    // (e.g. cols == 12 gave threads_x == 12 and blocks_x == 1 - 1 == 0).
    unsigned int threads_x = cols < THREAD_SIZE ? cols : THREAD_SIZE;
    unsigned int threads_y = rows < THREAD_SIZE ? rows : THREAD_SIZE;
    unsigned int blocks_x = (cols + threads_x - 1) / threads_x;
    unsigned int blocks_y = (rows + threads_y - 1) / threads_y;

    dim3 grids = {blocks_x, blocks_y};
    dim3 threads = {threads_x, threads_y};
    ComputeXYZ<<<grids, threads, 0, stream>>>(depth, left_x, left_y, inv_fu, inv_fv, u0, v0,                         
                            min_depth, max_depth, rows, cols,
                            z, x, y,corret_a,corret_b,corret_c);
}
/// Debug launcher: runs the TEST kernel over a fixed 640x480 grid so the
/// caller can verify kernel launches reach device memory (z filled with
/// 210.0).  Buffer z must hold at least 640*480 floats.
void TestGPU(float* z)
{
    const int cols = 640;
    const int rows = 480;
    // Same ceiling-division grid math as the other launchers, so all three
    // stay consistent; for 640x480 this yields the identical 80x60 grid the
    // old quotient/remainder formula produced, but is correct for any size.
    unsigned int threads_x = cols < THREAD_SIZE ? cols : THREAD_SIZE;
    unsigned int threads_y = rows < THREAD_SIZE ? rows : THREAD_SIZE;
    unsigned int blocks_x = (cols + threads_x - 1) / threads_x;
    unsigned int blocks_y = (rows + threads_y - 1) / threads_y;

    dim3 grids(blocks_x, blocks_y);
    dim3 threads(threads_x, threads_y);
    TEST<<<grids, threads>>>(z);
}
+
+}
+}

+ 71 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/project_root.cpp

@@ -0,0 +1,71 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-06-19
+* \attention Copyright©ADC Technology(tianjin)Co.Ltd
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+
+#include "common/project_root.h"
+#include <stdlib.h>
+#include <iostream>
+
+namespace adc
+{
+
+RootPath::RootPath()
+{
+    char* root = getenv("ADC_ROOT");
+    if(NULL != root)
+    {
+        root_path_ = root;
+    }
+    else
+    {
+        root_path_ = "/opt/adc/";
+    }
+}
+
// Nothing to release: root_path_ is a std::string that cleans up itself.
RootPath::~RootPath()
{
}

// Overrides the project root at runtime (takes precedence over the value
// resolved from ADC_ROOT in the constructor).
void RootPath::SetRoot(const std::string &root)
{
    root_path_ = root;
}

// Returns a copy of the current project root directory.
std::string RootPath::GetRoot()
{
    return root_path_;
}
+
+
+RootPath::Ptr RootPath::GetInstance()
+{
+    if(!instance_ptr_)
+    {
+        instance_ptr_ = std::make_shared<RootPath>();
+    }
+    return instance_ptr_;
+}
+
+RootPath::Ptr RootPath::instance_ptr_ = nullptr;
+bool RootPath::AbsolutePath(const  std::string &path)
+{
+    return (path[0] == '/')&& (path[0] == '.');
+}
+
+const std::string RootPath::GetAbsolutePath(const std::string &path)
+{
+    if(AbsolutePath(path))
+    {
+        return path;
+    }
+    else
+    {
+        return RootPath::GetInstance()->GetRoot() + "/" + path;
+    }
+}
+
+}

+ 37 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/time_adcsoft.cpp

@@ -0,0 +1,37 @@
/**
 *  \brief 
 *  \author pengcheng(pengcheng@adcsoft.cn)
 *  \date 2019.11.25
 *  \attention 
 * 
 */
+
+#include "common/time_adcsoft.h"
+
+namespace adc
+{
+
// Fixed-rate loop timer: converts the requested frequency (Hz) into a
// period; start_/elapsed_ stay value-initialized until Start()/Stop().
Timer::Timer(const float& hz): 
               duration_ {std::chrono::duration<float>(1.0 / hz)},
               start_ {},
               elapsed_ {}
{

}
+
// Marks the beginning of a work cycle.
void Timer::Start()
{
    start_ = std::chrono::system_clock::now();
}

// Sleeps away whatever remains of the fixed period since Start(), so that
// a Start()/Stop() pair paces the caller at the configured rate.  If the
// work already took longer than one period, returns immediately.
void Timer::Stop()
{
    elapsed_ = std::chrono::system_clock::now() - start_;

    if( elapsed_ < duration_)
    {
        std::this_thread::sleep_for(duration_ - elapsed_);
    }
}
+
+}

+ 533 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/utils.cpp

@@ -0,0 +1,533 @@
/**
 *  \brief 
 *  \author pengcheng(pengcheng@adcsoft.cn)
 *  \date 2020.09.01
 *  \attention 
 * 
*/
+
+#include "common/utils.h"
+#include<algorithm>
+#include <queue>
+#include<pcl/point_types.h>
+#include <opencv2/opencv.hpp>
+namespace adc
+{
+
+namespace common
+{
+
+
+
// Thin host-side wrapper that forwards to the CUDA launcher
// ComputeXYZGPUDevice (defined in cuda/utils.cu): back-projects a depth
// crop into metric X/Y/Z maps with a quadratic depth correction.
void ComputeXYZGPU(const short* depth,  const int left_x, const int left_y, 
                        const float inv_fu, const float inv_fv, const float u0, const float v0,
                        const float min_depth, const float max_depth, const int rows, const int cols,
                        float* x, float* y, float* z, cudaStream_t stream,
                        const float corret_a, const float corret_b, const float corret_c)
{
    ComputeXYZGPUDevice(depth,  left_x, left_y, inv_fu, inv_fv, u0, v0,min_depth, max_depth,
                        rows, cols, x, y, z, stream,corret_a,corret_b,corret_c);
}
+
// Thin host-side wrapper that forwards to the CUDA launcher
// SampleDownGPUDevice (defined in cuda/utils.cu): nearest-neighbour
// decimation of the x/y/z maps by (ratio_x, ratio_y).
void SampleDownGPU(const float* x, const float* y, const float* z, const int ratio_x, const int ratio_y,
    const int old_cols, const int new_rows, const int new_cols,
    float* new_x, float* new_y, float* new_z, cudaStream_t stream)
{
    SampleDownGPUDevice(x, y, z, ratio_x, ratio_y,
    old_cols, new_rows, new_cols,
    new_x, new_y, new_z, stream);
}
+
// Scoped wall-clock profiler: remembers the construction time and, when the
// object is destroyed, prints `desc` plus the elapsed seconds if the
// elapsed time exceeded `threshold`.
Perf::Perf(const std::string& desc, float threshold) :
    description_(desc),
    threshold_(threshold),
    start_(std::chrono::system_clock::now())
{}
Perf::~Perf()
{
    std::chrono::duration<float> elapsed = std::chrono::system_clock::now() - start_;
    if(elapsed > threshold_)
    {
        std::cout<<description_<<" cost: "<<elapsed.count()<<" s"<<std::endl;
    }
}
+
+template<typename Scalar>
+void Clustering<Scalar>::ComputeXY(const cv::Mat& depth, int left_x, int left_y, float32_t inv_fu,
+                float32_t inv_fv, float32_t u0, float32_t v0, float32_t min_depth,
+                float32_t max_depth, cv::Mat &x_3d, cv::Mat &y_3d)
+{
+    for(int y = 0; y < depth.rows; y++)
+    {
+        for(int x = 0; x < depth.cols; x++)
+        {
+            float z_f = static_cast<float>(depth.at<short>(y,x)) / 1000.0;
+            if(z_f > max_depth || z_f < min_depth) continue;
+            float x_f = inv_fu * (x + left_x - u0) * z_f;
+            float y_f = inv_fv * (y + left_y - v0) * z_f;
+            x_3d.ptr<float>(y)[x] = x_f;
+            y_3d.ptr<float>(y)[x] = y_f;
+        }
+    }
+}
+
+template<typename Scalar>
+void Clustering<Scalar>::ConputeDis(const  std::vector<float32_t> &p1, const std::vector<float32_t> &p2, float32_t &square)
+{
+    if(p1.size() != p2.size())
+    {
+        square = 3.4e+38;
+    }
+    square = 0.;
+    for(size_t index = 0; index < p1.size(); index++)
+    {
+        square += pow(p1[index] - p2[index], 2.0);
+    }
+}
+
// DBSCAN-style core-point test on a 16-bit depth map (values scaled by
// 1/1000): returns false if the candidate (core_x, core_y) has no valid
// depth; otherwise appends its (x, y, z) to `core_point` and fills
// `boundary_point` with every pixel in the kernel window whose squared 3D
// distance to the core is below `max_dis` (a SQUARED threshold — see
// ConputeDis).
// NOTE(review): core_point is appended to, not cleared — callers must pass
// a fresh vector.
template<typename Scalar>
bool Clustering<Scalar>::CorePoint(const cv::Mat &depth, const  cv::Mat &x_map, const cv::Mat &y_map,
                int core_x, int core_y, int kernel_begin_x, 
                int kernel_end_x, int kernel_begin_y, int kernel_end_y,
                float32_t max_dis, float32_t min_depth, float32_t max_depth,
                std::vector<cv::Point2i> &boundary_point, std::vector<float32_t> &core_point
                )
{
    boundary_point.clear();
    float32_t z_f_c = static_cast<float32_t>(depth.at<short>(core_y, core_x)) / 1000.0;
    if(z_f_c > max_depth  || z_f_c < min_depth) return false;
    float32_t x_f_c = x_map.at<float>(core_y, core_x);
    float32_t y_f_c = y_map.at<float>(core_y, core_x);
    core_point.push_back(x_f_c);
    core_point.push_back(y_f_c);
    core_point.push_back(z_f_c);
    // Scan the (inclusive) kernel window around the core pixel.
    for(int x = kernel_begin_x; x <= kernel_end_x; x++)
    {
        for(int y = kernel_begin_y; y <= kernel_end_y; y++)
        {
            float32_t z_f_m = static_cast<float32_t>(depth.at<short>(y, x)) / 1000.0;
            if (z_f_m > max_depth || z_f_m < min_depth) 
            {
                continue;
            }

            float32_t x_f_m = x_map.at<float>(y, x);
            float32_t y_f_m = y_map.at<float>(y, x);
            std::vector<float32_t> match_point;
            match_point.push_back(x_f_m);
            match_point.push_back(y_f_m);
            match_point.push_back(z_f_m);

            // dis receives the SQUARED distance between core and candidate.
            float32_t dis = 3.4e+38;
            ConputeDis(core_point, match_point, dis);
            if(dis < max_dis)
            {
                cv::Point2i pt;
                pt.x = x;
                pt.y = y;
                boundary_point.push_back(pt);
            }
        }
    }

    return true;

}
+
// Variant of CorePoint for a CV_32F depth map already in metres (no /1000
// scaling).  Candidates with non-positive depth are additionally skipped.
// As with CorePoint, `max_dis` is compared against a SQUARED distance and
// `core_point` is appended to, not cleared.
template<typename Scalar>
bool Clustering<Scalar>::CorePoint2(const cv::Mat &depth, const  cv::Mat &x_map, const cv::Mat &y_map,
                int core_x, int core_y, int kernel_begin_x, 
                int kernel_end_x, int kernel_begin_y, int kernel_end_y,
                float32_t max_dis, float32_t min_depth, float32_t max_depth,
                std::vector<cv::Point2i> &boundary_point, std::vector<float32_t> &core_point
                )
{
    boundary_point.clear();
    float32_t z_f_c = depth.at<float>(core_y, core_x);
    if(z_f_c > max_depth  || z_f_c < min_depth) return false;
    float32_t x_f_c = x_map.at<float>(core_y, core_x);
    float32_t y_f_c = y_map.at<float>(core_y, core_x);
    core_point.push_back(x_f_c);
    core_point.push_back(y_f_c);
    core_point.push_back(z_f_c);
    // Scan the (inclusive) kernel window around the core pixel.
    for(int x = kernel_begin_x; x <= kernel_end_x; x++)
    {
        for(int y = kernel_begin_y; y <= kernel_end_y; y++)
        {
            float32_t z_f_m = depth.at<float>(y, x);
            if (z_f_m > max_depth || z_f_m < min_depth || z_f_m <=0) 
            {
                continue;
            }

            float32_t x_f_m = x_map.at<float>(y, x);
            float32_t y_f_m = y_map.at<float>(y, x);
            std::vector<float32_t> match_point;
            match_point.push_back(x_f_m);
            match_point.push_back(y_f_m);
            match_point.push_back(z_f_m);

            // dis receives the SQUARED distance between core and candidate.
            float32_t dis = 3.4e+38;
            ConputeDis(core_point, match_point, dis);
            if(dis < max_dis)
            {
                cv::Point2i pt;
                pt.x = x;
                pt.y = y;
                boundary_point.push_back(pt);
            }
        }
    }

    return true;

}
+
// Region-growing (DBSCAN-like) expansion: starting from the seed pixels in
// `boundary`, repeatedly pops an unvisited pixel, adds its 3D point to
// `cluster`, and — if it qualifies as a core point with at least
// real_min_pts neighbours (min_pts scaled by how much of the kernel window
// fits inside the image) — enqueues its unvisited neighbours for further
// expansion.  flag_visited (uchar, 1 = visited) is shared across calls so
// clusters do not overlap.  Depth is a 16-bit map scaled by 1/1000.
template<typename Scalar>
void Clustering<Scalar>::ExternCluster(const cv::Mat& depth, const cv::Mat& x_map, 
                const cv::Mat& y_map, cv::Mat &flag_visited, float32_t min_depth, float32_t max_depth,
                float32_t max_dis, size_t min_pts, size_t kernel_size, float32_t full_size,
                std::vector<cv::Point2i> &boundary, pcl::PointCloud<Scalar>& cluster
                )
{
    // Seed the work queue with the not-yet-visited boundary pixels.
    std::queue<cv::Point2i> boundary_contain;
    for(auto pt : boundary)
    {
        uchar flag = flag_visited.at<uchar>(pt.y, pt.x);
        if(flag != 1)  boundary_contain.push(pt);
    }
    if(boundary_contain.size() == 0) return;
    int half_kernel_size = static_cast<int>(kernel_size / 2);

    {
    while (boundary_contain.size() > 0)
    {
        cv::Point2i current_pt = boundary_contain.front();
        boundary_contain.pop();
        // A pixel may have been enqueued more than once; skip if visited.
        if(1 == (int)flag_visited.at<uchar>(current_pt.y,current_pt.x)) continue;
        float32_t z_f = static_cast<float32_t>(depth.at<short>(current_pt.y, current_pt.x))/1000.0;
        float32_t x_f = x_map.at<float>(current_pt.y, current_pt.x);
        float32_t y_f = y_map.at<float>(current_pt.y, current_pt.x);
        Scalar point;
        point.x = x_f;
        point.y = y_f;
        point.z = z_f;
        flag_visited.at<uchar>(current_pt.y,current_pt.x) = 1;
        cluster.push_back(point);

        // Kernel window clamped to the image borders.
        int kernel_begin_x = std::max(current_pt.x - half_kernel_size, 0);
        int kernel_end_x = std::min(current_pt.x + half_kernel_size, depth.cols -1);
        int kernel_begin_y = std::max(current_pt.y - half_kernel_size, 0);
        int kernel_end_y = std::min(current_pt.y + half_kernel_size, depth.rows -1);

        // Scale the neighbour-count requirement by the fraction of the
        // full kernel that actually lies inside the image.
        float32_t kernel_height = static_cast<float32_t>(kernel_end_y - kernel_begin_y + 1);
        float32_t kernel_width = static_cast<float32_t>(kernel_end_x - kernel_begin_x + 1);
        float32_t real_size = kernel_height * kernel_width;
        float32_t real_min_pts = (real_size / full_size) * min_pts;
        std::vector<float32_t> core_point;
        std::vector<cv::Point2i> boundary_points;

        bool valid = false;
        {
        valid = CorePoint(depth, x_map, y_map,  current_pt.x,  current_pt.y, kernel_begin_x, 
                                kernel_end_x, kernel_begin_y, kernel_end_y, max_dis, 
                                min_depth, max_depth, boundary_points, core_point);
        }
        (void) core_point;
        if(!valid) continue;
        // Not dense enough to expand from here: point kept, no growth.
        if(boundary_points.size() < real_min_pts) continue;
        for(auto pt : boundary_points)
        {
            uchar flag = flag_visited.at<uchar>(pt.y, pt.x);
            if(1 == (int)flag) 
            {
                continue;
            }
            else
            {
                boundary_contain.push(pt);
            }
        }
    } 
    }
       
}
+
+// BFS-style region growing over a CV_32F depth map (metres): pops seed pixels
+// from `boundary`, back-projects each into a 3-D point using the precomputed
+// x/y maps, and keeps expanding through dense ("core") neighbourhoods until
+// the frontier is exhausted.  Differs from ExternCluster in reading float
+// depth directly (no /1000 short-to-metre scaling) and rejecting
+// out-of-range depths before insertion.
+template<typename Scalar>
+void Clustering<Scalar>::ExternCluster2(const cv::Mat& depth, const cv::Mat& x_map, 
+                const cv::Mat& y_map, cv::Mat &flag_visited, float32_t min_depth, float32_t max_depth,
+                float32_t max_dis, size_t min_pts, size_t kernel_size, float32_t full_size,
+                std::vector<cv::Point2i> &boundary, pcl::PointCloud<Scalar>& cluster
+                )
+{
+    // Seed the work queue with the not-yet-visited boundary pixels.
+    std::queue<cv::Point2i> boundary_contain;
+    for(auto pt : boundary)
+    {
+        uchar flag = flag_visited.at<uchar>(pt.y, pt.x);
+        if(flag != 1)  boundary_contain.push(pt);
+    }
+    if(boundary_contain.size() == 0) return;
+    int half_kernel_size = static_cast<int>(kernel_size / 2);
+    {
+    while (boundary_contain.size() > 0)
+    {
+        cv::Point2i current_pt = boundary_contain.front();
+        boundary_contain.pop();
+        // A pixel may have been queued more than once before being visited.
+        if(1 == (int)flag_visited.at<uchar>(current_pt.y,current_pt.x)) continue;
+        float32_t z_f = depth.at<float>(current_pt.y, current_pt.x);
+        float32_t x_f = x_map.at<float>(current_pt.y, current_pt.x);
+        float32_t y_f = y_map.at<float>(current_pt.y, current_pt.x);
+        Scalar point;
+        point.x = x_f;
+        point.y = y_f;
+        point.z = z_f;
+        // Mark visited *before* the range check so invalid pixels are never re-queued.
+        flag_visited.at<uchar>(current_pt.y,current_pt.x) = 1;
+        if(z_f > max_depth || z_f < min_depth || z_f <= 0) continue;
+        cluster.push_back(point);
+        // Clamp the neighbourhood kernel to the image border (inclusive bounds).
+        int kernel_begin_x = std::max(current_pt.x - half_kernel_size, 0);
+        int kernel_end_x = std::min(current_pt.x + half_kernel_size, depth.cols -1);
+        int kernel_begin_y = std::max(current_pt.y - half_kernel_size, 0);
+        int kernel_end_y = std::min(current_pt.y + half_kernel_size, depth.rows -1);
+        // Scale the density threshold down for kernels truncated at the border.
+        float32_t kernel_height = static_cast<float32_t>(kernel_end_y - kernel_begin_y + 1);
+        float32_t kernel_width = static_cast<float32_t>(kernel_end_x - kernel_begin_x + 1);
+        float32_t real_size = kernel_height * kernel_width;
+        float32_t real_min_pts = (real_size / full_size) * min_pts;
+        std::vector<float32_t> core_point;
+        std::vector<cv::Point2i> boundary_points;
+        bool valid = false;
+        {
+        valid = CorePoint2(depth, x_map, y_map,  current_pt.x,  current_pt.y, kernel_begin_x, 
+                                kernel_end_x, kernel_begin_y, kernel_end_y, max_dis, 
+                                min_depth, max_depth, boundary_points, core_point);
+        }//Perf
+        (void) core_point;
+        // Only core points (dense enough neighbourhoods) may extend the frontier.
+        if(!valid) continue;
+        if(boundary_points.size() < real_min_pts) continue;
+        for(auto pt : boundary_points)
+        {
+            uchar flag = flag_visited.at<uchar>(pt.y, pt.x);
+            if(1 == (int)flag) 
+            {
+                continue;
+            }
+            else
+            {
+                boundary_contain.push(pt);
+            }
+        }
+    } 
+    }//Perf
+       
+}
+
+/**
+* \brief DBSCAN-style spatial clustering over a short-typed depth image.
+* Every unvisited pixel is probed as a seed via CorePoint; seeds with a dense
+* enough neighbourhood are grown into 3-D clusters by ExternCluster.
+* The x/y back-projection maps are precomputed once by ComputeXY.
+*/
+template<typename Scalar>
+void Clustering<Scalar>::SpatialClustering(cv::Mat &depth, size_t kernel_size, size_t min_pts, float32_t max_dis,
+                    int left_x, int left_y, float32_t inv_fu, float32_t inv_fv, float32_t u0, float32_t v0,
+                    float32_t min_depth, float32_t max_depth,
+                    std::vector<pcl::PointCloud<Scalar>>  &clusters)
+{
+    cv::Mat flag_visited = cv::Mat::zeros(depth.size(), CV_8UC1);
+    // 3000.0 marks "no valid back-projection"; ComputeXY overwrites in-range pixels.
+    cv::Mat x_map = cv::Mat(depth.size(), CV_32FC1, cv::Scalar(3000.0));
+    cv::Mat y_map = cv::Mat(depth.size(), CV_32FC1, cv::Scalar(3000.0));
+    ComputeXY(depth, left_x, left_y, inv_fu,
+              inv_fv, u0, v0, min_depth,
+              max_depth, x_map, y_map);
+    clusters.clear();
+    int half_kernel_size = static_cast<int>(kernel_size / 2);
+    float32_t full_size = static_cast<float32_t>(kernel_size * kernel_size);
+
+    {
+    // std::string s = "row: " + std::to_string(depth.rows) + ", col: " + std::to_string(depth.cols)+ " ";
+    // Perf a(s, 0.0);
+    for(int row = 0; row < depth.rows; row++)
+    {
+        int kernel_begin_y = std::max(row - half_kernel_size, 0);
+        int kernel_end_y = std::min(row + half_kernel_size , depth.rows -1);
+        float32_t kernel_height = static_cast<float32_t>(kernel_end_y - kernel_begin_y + 1);
+        for(int col = 0; col < depth.cols; col++)
+        {
+            uchar visited = flag_visited.at<uchar>(row, col);
+            if(visited == 1) continue;
+
+            int kernel_begin_x = std::max(col - half_kernel_size, 0);
+            // BUGFIX: clamp to the last valid column (cols - 1), matching the
+            // row clamp above and ExternCluster; the previous bound of
+            // depth.cols let CorePoint touch one column past the image edge.
+            int kernel_end_x = std::min(col + half_kernel_size, depth.cols -1);
+            float32_t kernel_width = static_cast<float32_t>(kernel_end_x - kernel_begin_x + 1);
+
+            // Shrink the density threshold proportionally for border-clipped kernels.
+            float32_t real_size = kernel_height * kernel_width;
+            float32_t real_min_pts = (real_size / full_size) * min_pts;
+
+            //Select seed point
+            std::vector<cv::Point2i> boundary_points;
+            std::vector<float32_t> core_point;
+            bool valid = CorePoint(depth, x_map, y_map, col, row, kernel_begin_x, 
+                                kernel_end_x, kernel_begin_y, kernel_end_y, max_dis, 
+                                min_depth, max_depth, boundary_points, core_point);
+            flag_visited.ptr<uchar>(row)[col] = 1;
+            if(!valid)  continue;
+            if(boundary_points.size() < real_min_pts)   continue;
+            // Dense seed: start a cluster from its core-point coordinates and grow it.
+            pcl::PointCloud<Scalar> cluster;
+            Scalar point;
+            point.x = core_point[0];
+            point.y = core_point[1];
+            point.z = core_point[2];
+            cluster.push_back(point);
+            ExternCluster(depth, x_map, y_map, flag_visited, min_depth, max_depth, max_dis, min_pts, kernel_size, full_size, boundary_points, cluster);
+            clusters.push_back(cluster);
+        }
+    }
+    }//Perf
+}
+
+/**
+* \brief DBSCAN-style spatial clustering over precomputed float x/y/z maps.
+* Same scan-and-grow strategy as the depth-image overload, but uses
+* CorePoint2/ExternCluster2, which read float depth directly.
+*/
+template<typename Scalar>
+void Clustering<Scalar>::SpatialClustering(cv::Mat& x, cv::Mat& y, cv::Mat& z, size_t kernel_size, size_t min_pts, float32_t max_dis,
+                        float32_t min_depth, float32_t max_depth, std::vector<pcl::PointCloud<Scalar>>  &clusters)
+{
+    cv::Mat flag_visited = cv::Mat::zeros(x.size(), CV_8UC1);
+    clusters.clear();
+    int half_kernel_size = static_cast<int>(kernel_size / 2);
+    float32_t full_size = static_cast<float32_t>(kernel_size * kernel_size);
+
+    {//for Perf
+
+    for(int row = 0; row < x.rows; row++)
+    {
+        int kernel_begin_y = std::max(row - half_kernel_size, 0);
+        int kernel_end_y = std::min(row + half_kernel_size , x.rows -1);
+        float32_t kernel_height = static_cast<float32_t>(kernel_end_y - kernel_begin_y + 1);
+        for(int col = 0; col < x.cols; col++)
+        {
+            uchar visited = flag_visited.at<uchar>(row, col);
+            if(visited == 1) continue;
+
+            int kernel_begin_x = std::max(col - half_kernel_size, 0);
+            // BUGFIX: clamp to the last valid column (cols - 1), matching the
+            // row clamp above and ExternCluster2; the previous bound of
+            // x.cols let CorePoint2 touch one column past the image edge.
+            int kernel_end_x = std::min(col + half_kernel_size, x.cols -1);
+            float32_t kernel_width = static_cast<float32_t>(kernel_end_x - kernel_begin_x + 1);
+
+            // Shrink the density threshold proportionally for border-clipped kernels.
+            float32_t real_size = kernel_height * kernel_width;
+            float32_t real_min_pts = (real_size / full_size) * min_pts;
+
+            //Select seed point
+            std::vector<cv::Point2i> boundary_points;
+            std::vector<float32_t> core_point;
+            bool valid = CorePoint2(z, x, y, col, row, kernel_begin_x, 
+                                kernel_end_x, kernel_begin_y, kernel_end_y, max_dis, 
+                                min_depth, max_depth, boundary_points, core_point);
+            flag_visited.ptr<uchar>(row)[col] = 1;
+            if(!valid)  continue;
+            if(boundary_points.size() < real_min_pts)   continue;
+            // Dense seed: start a cluster from its core-point coordinates and grow it.
+            pcl::PointCloud<Scalar> cluster;
+            Scalar point;
+            point.x = core_point[0];
+            point.y = core_point[1];
+            point.z = core_point[2];
+            cluster.push_back(point);
+            ExternCluster2(z, x, y, flag_visited, min_depth, max_depth, max_dis, min_pts, kernel_size, full_size, boundary_points, cluster);
+            clusters.push_back(cluster);
+        }
+    }
+    }
+}
+// Explicit instantiations for the point types used by the detection pipeline.
+template class Clustering<pcl::PointXYZ>;
+template class Clustering<pcl::PointXYZRGB>;
+
+/// Flatten the cloud into 2-D points for cv::minAreaRect while tracking the
+/// vertical (camera-Y) extent.  Axes are remapped into the ground plane:
+/// plane X := point.z (forward), plane Y := point.x (lateral).
+void OBBExtrator::ConvertPoints(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_ptr)
+{
+    max_z_ = -999999;
+    min_z_ =  99999;
+    points_.clear();
+    for(const auto& cloud_pt : cloud_ptr->points)
+    {
+        if(cloud_pt.y > max_z_) max_z_ = cloud_pt.y;
+        if(cloud_pt.y < min_z_) min_z_ = cloud_pt.y;
+        points_.emplace_back(cloud_pt.z, cloud_pt.x);
+    }
+}
+
+/// Fit an oriented bounding box around the cluster: a minimum-area rectangle
+/// in the (z, x) ground plane, extruded over the vertical (y) extent found by
+/// ConvertPoints.  Appends the 8 box corners (bottom face at min_z_ first,
+/// then top face at max_z_, in rectangle-corner order) and outputs
+/// length >= width plus height.
+void OBBExtrator::GetOBB(pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud_ptr, std::vector<pcl::PointXYZRGB>& bbox_corner,float &length, float& width, float& height)
+{
+    ConvertPoints(cloud_ptr);
+    bbox_ = cv::minAreaRect(points_);
+    cv::Point2f rect[4];
+    bbox_.points(rect);
+
+    // Edge lengths of the fitted rectangle; normalise so that length >= width.
+    length = sqrt(pow(rect[0].x - rect[1].x, 2.0) + pow(rect[0].y - rect[1].y, 2.0));
+    width = sqrt(pow(rect[1].x - rect[2].x, 2.0) + pow(rect[1].y - rect[2].y, 2.0));
+    if(width > length)
+    {
+        float longer_edge = width;
+        width = length;
+        length = longer_edge;
+    }
+    height = max_z_ - min_z_;
+
+    // Emit the bottom face, then the top face; undo the ConvertPoints axis
+    // remap (plane X -> point.z, plane Y -> point.x) for each corner.
+    for(float y_level : {min_z_, max_z_})
+    {
+        for(int corner = 0; corner < 4; corner++)
+        {
+            pcl::PointXYZRGB box_pt;
+            box_pt.z = rect[corner].x;
+            box_pt.x = rect[corner].y;
+            box_pt.y = y_level;
+            bbox_corner.push_back(box_pt);
+        }
+    }
+}
+
+
+}
+
+}

+ 65 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/common/work.cpp

@@ -0,0 +1,65 @@
+/**
+* \brief 
+* \author pengcheng (pengcheng@yslrpch@126.com)
+* \date 2020-05-23
+* \attention Copyright © ADC Technology (Tianjin) Co., Ltd.
+* \attention Refer to COPYRIGHT.txt for complete terms of copyright notice
+*/
+#include <common/work.h>
+
+namespace adc
+{
+
+// Launches the worker loop immediately; the thread idles in WS_INIT until Run().
+// NOTE(review): the thread starts from the base-class constructor, so only the
+// WS_INIT guard in MainThread keeps DoWork() from running before a derived
+// object is fully constructed — confirm Run() is never called mid-construction.
+Worker::Worker(float hz):
+        timer_(hz),
+        worker_state_(WS_INIT)
+{
+    main_thread_ = std::thread(&Worker::MainThread, this);
+}
+// Signals termination and blocks until the loop thread exits.
+// NOTE(review): worker_state_ is written here and read in MainThread from
+// another thread; if it is not std::atomic in work.h this is a data race.
+Worker::~Worker()
+{
+    worker_state_ = WS_TERMINATE;
+    main_thread_.join();
+}
+
+// Enables DoWork() execution in the loop thread.
+void Worker::Run()
+{
+    worker_state_ = WS_RUN;
+}
+
+// Pauses the loop: DoWork() is skipped until Run() is called again.
+void Worker::Suspend()
+{
+    worker_state_ = WS_SUSPEND;
+}
+
+// Requests a permanent stop; MainThread breaks out of its loop on the next pass.
+void Worker::Terminate()
+{
+    worker_state_ = WS_TERMINATE;
+}
+
+// Loop body of the worker thread: dispatch on worker_state_ each iteration,
+// paced by timer_ (presumably enforcing the hz passed to the constructor —
+// Timer semantics are not visible here).
+void Worker::MainThread()
+{
+    while (true)
+    {
+        timer_.Start();
+        if (WS_RUN == worker_state_)
+        {
+            DoWork();
+        }
+        else if (WS_INIT == worker_state_ || WS_SUSPEND == worker_state_)
+        {
+            // NOTE(review): `continue` skips timer_.Stop(); if Stop() is what
+            // sleeps off the remainder of the period, an idle/suspended worker
+            // busy-spins here — confirm against the Timer implementation.
+            continue;
+        }
+        else if (WS_TERMINATE == worker_state_)
+        {
+            break;
+        }
+        else
+        {
+            // Unreachable unless worker_state_ is corrupted.
+            std::cerr<< "worker state is absolutely wrong."<<std::endl;
+        }
+
+        timer_.Stop();
+    }
+}
+}

+ 36 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/detection_trt7/CMakeLists.txt

@@ -0,0 +1,36 @@
+set(LIB_NAME detection_trt7)
+
+# OpenCV and CUDA are required by the detector sources.
+find_package(OpenCV REQUIRED)
+find_package(CUDA REQUIRED)
+aux_source_directory(. DIR_LIB_SRCS)
+include_directories(
+    ${PROJECT_SOURCE_DIR}/include
+    # Fixed variable case: FindOpenCV exports OpenCV_INCLUDE_DIRS; the previous
+    # ${Opencv_INCLUDE_DIRS} expanded to nothing.
+    ${OpenCV_INCLUDE_DIRS}
+    ${CUDA_INCLUDE_DIRS}  
+    
+)
+
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Ofast -Wfatal-errors")
+
+#add wrapper
+set(TRT_WRAPPER_DIR ${CMAKE_CURRENT_SOURCE_DIR}/yolov4Tensorrt/)
+add_subdirectory(${TRT_WRAPPER_DIR})
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/../../../include/detection_trt7/yolov4Tensorrt)
+
+# OpenCV was already located above; the duplicate FIND_PACKAGE/include block
+# has been removed.  Library path hint kept for out-of-prefix OpenCV installs.
+link_directories(${OpenCV_LIBRARIES_DIRS})
+
+add_library(${LIB_NAME} SHARED ${DIR_LIB_SRCS})
+
+target_link_libraries(${LIB_NAME} ${OpenCV_LIBS} TrtYoloNet  ${CUDA_LIBRARIES})
+
+set(CMAKE_STAGING_PREFIX /opt/adc)
+install(TARGETS ${LIB_NAME}
+  ARCHIVE DESTINATION ${CMAKE_STAGING_PREFIX}/lib
+  LIBRARY DESTINATION ${CMAKE_STAGING_PREFIX}/lib
+ # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+)
+

+ 398 - 0
src/detection/front_vision_ros_trt7/front_vison_ros/src/front_vision/src/detection_trt7/Hungarian.cpp

@@ -0,0 +1,398 @@
+//
+// Created by lqx on 20-4-23.
+//
+///////////////////////////////////////////////////////////////////////////////
+// Hungarian.cpp: Implementation file for Class HungarianAlgorithm.
+//
+// This is a C++ wrapper with slight modification of a Hungarian algorithm implementation by Markus Buehren.
+// The original implementation is a few mex-functions for use in MATLAB, found here:
+// http://www.mathworks.com/matlabcentral/fileexchange/6543-functions-for-the-rectangular-assignment-problem
+//
+// Both this code and the original code are published under the BSD license.
+// by Cong Ma, 2016
+//
+
+#include <math.h>
+#include <cfloat>
+#include "detection_trt7/include/Hungarian.h"
+
+
+// Trivial construction/destruction: all working storage is allocated per
+// Solve() call, so the object itself holds no state.
+HungarianAlgorithm::HungarianAlgorithm(){}
+HungarianAlgorithm::~HungarianAlgorithm(){}
+
+
+//********************************************************//
+// A single function wrapper for solving assignment problem.
+//********************************************************//
+// Solve the rectangular assignment problem for the given cost matrix.
+// DistMatrix[i][j] is the cost of assigning row i to column j; on return,
+// Assignment[i] holds the column assigned to row i (-1 if unassigned) and the
+// total cost is returned.
+// Fixed: raw new[]/delete[] replaced with vectors (exception-safe, no leak on
+// throw), and an empty matrix no longer indexes DistMatrix[0] (UB).
+double HungarianAlgorithm::Solve(vector<vector<double>>& DistMatrix, vector<int>& Assignment)
+{
+    unsigned int nRows = DistMatrix.size();
+    Assignment.clear();
+    if (nRows == 0 || DistMatrix[0].empty())
+        return 0.0;  // nothing to assign
+    unsigned int nCols = DistMatrix[0].size();
+
+    // Fill in distMatrixIn. Mind the index is "i + nRows * j": the MxN cost
+    // matrix is flattened column-major (MATLAB layout), as expected by
+    // assignmentoptimal (i.e. [1 2; 3 4] is stored as [1 3 2 4]).
+    vector<double> distMatrixIn(nRows * nCols);
+    vector<int> assignment(nRows, -1);
+    double cost = 0.0;
+    for (unsigned int i = 0; i < nRows; i++)
+        for (unsigned int j = 0; j < nCols; j++)
+            distMatrixIn[i + nRows * j] = DistMatrix[i][j];
+
+    // call solving function
+    assignmentoptimal(assignment.data(), &cost, distMatrixIn.data(), nRows, nCols);
+
+    Assignment.assign(assignment.begin(), assignment.end());
+    return cost;
+}
+
+
+//********************************************************//
+// Solve optimal solution for assignment problem using Munkres algorithm, also known as Hungarian Algorithm.
+//********************************************************//
+// Munkres (Hungarian) solver core.  Expects distMatrixIn flattened
+// column-major ("row + nOfRows*col").  Performs the classic reduction
+// (subtract row/column minima), initial zero starring, then iterates through
+// step2b/step3/step4/step5 until every row (or column, whichever is smaller)
+// has a starred zero.  Writes the per-row column index into `assignment`
+// (-1 when unassigned) and the total original cost into *cost.
+void HungarianAlgorithm::assignmentoptimal(int *assignment, double *cost, double *distMatrixIn, int nOfRows, int nOfColumns)
+{
+    double *distMatrix, *distMatrixTemp, *distMatrixEnd, *columnEnd, value, minValue;
+    bool *coveredColumns, *coveredRows, *starMatrix, *newStarMatrix, *primeMatrix;
+    int nOfElements, minDim, row, col;
+
+    /* initialization */
+    *cost = 0;
+    for (row = 0; row<nOfRows; row++)
+        assignment[row] = -1;
+
+    /* generate working copy of distance Matrix */
+    /* check if all matrix elements are positive */
+    nOfElements = nOfRows * nOfColumns;
+    distMatrix = (double *)malloc(nOfElements * sizeof(double));
+    distMatrixEnd = distMatrix + nOfElements;
+
+    for (row = 0; row<nOfElements; row++)
+    {
+        value = distMatrixIn[row];
+        // Negative costs violate the algorithm's precondition; warn but proceed.
+        if (value < 0)
+            cerr << "All matrix elements have to be non-negative." << endl;
+        distMatrix[row] = value;
+    }
+
+
+    /* memory allocation */
+    coveredColumns = (bool *)calloc(nOfColumns, sizeof(bool));
+    coveredRows = (bool *)calloc(nOfRows, sizeof(bool));
+    starMatrix = (bool *)calloc(nOfElements, sizeof(bool));
+    primeMatrix = (bool *)calloc(nOfElements, sizeof(bool));
+    newStarMatrix = (bool *)calloc(nOfElements, sizeof(bool)); /* used in step4 */
+
+    /* preliminary steps */
+    if (nOfRows <= nOfColumns)
+    {
+        minDim = nOfRows;
+
+        for (row = 0; row<nOfRows; row++)
+        {
+            /* find the smallest element in the row */
+            /* (walk the row with stride nOfRows through the column-major buffer) */
+            distMatrixTemp = distMatrix + row;
+            minValue = *distMatrixTemp;
+            distMatrixTemp += nOfRows;
+            while (distMatrixTemp < distMatrixEnd)
+            {
+                value = *distMatrixTemp;
+                if (value < minValue)
+                    minValue = value;
+                distMatrixTemp += nOfRows;
+            }
+
+            /* subtract the smallest element from each element of the row */
+            distMatrixTemp = distMatrix + row;
+            while (distMatrixTemp < distMatrixEnd)
+            {
+                *distMatrixTemp -= minValue;
+                distMatrixTemp += nOfRows;
+            }
+        }
+
+        /* Steps 1 and 2a */
+        /* star each zero whose column is not yet covered */
+        for (row = 0; row<nOfRows; row++)
+            for (col = 0; col<nOfColumns; col++)
+                if (fabs(distMatrix[row + nOfRows*col]) < DBL_EPSILON)
+                    if (!coveredColumns[col])
+                    {
+                        starMatrix[row + nOfRows*col] = true;
+                        coveredColumns[col] = true;
+                        break;
+                    }
+    }
+    else /* if(nOfRows > nOfColumns) */
+    {
+        minDim = nOfColumns;
+
+        for (col = 0; col<nOfColumns; col++)
+        {
+            /* find the smallest element in the column */
+            distMatrixTemp = distMatrix + nOfRows*col;
+            columnEnd = distMatrixTemp + nOfRows;
+
+            minValue = *distMatrixTemp++;
+            while (distMatrixTemp < columnEnd)
+            {
+                value = *distMatrixTemp++;
+                if (value < minValue)
+                    minValue = value;
+            }
+
+            /* subtract the smallest element from each element of the column */
+            distMatrixTemp = distMatrix + nOfRows*col;
+            while (distMatrixTemp < columnEnd)
+                *distMatrixTemp++ -= minValue;
+        }
+
+        /* Steps 1 and 2a */
+        /* star each zero in an uncovered row; coveredRows is reset afterwards */
+        for (col = 0; col<nOfColumns; col++)
+            for (row = 0; row<nOfRows; row++)
+                if (fabs(distMatrix[row + nOfRows*col]) < DBL_EPSILON)
+                    if (!coveredRows[row])
+                    {
+                        starMatrix[row + nOfRows*col] = true;
+                        coveredColumns[col] = true;
+                        coveredRows[row] = true;
+                        break;
+                    }
+        for (row = 0; row<nOfRows; row++)
+            coveredRows[row] = false;
+
+    }
+
+    /* move to step 2b */
+    step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+
+    /* compute cost and remove invalid assignments */
+    computeassignmentcost(assignment, cost, distMatrixIn, nOfRows);
+
+    /* free allocated memory */
+    free(distMatrix);
+    free(coveredColumns);
+    free(coveredRows);
+    free(starMatrix);
+    free(primeMatrix);
+    free(newStarMatrix);
+
+    return;
+}
+
+/********************************************************/
+/* Extract the final assignment: each row's starred zero gives its column. */
+void HungarianAlgorithm::buildassignmentvector(int *assignment, bool *starMatrix, int nOfRows, int nOfColumns)
+{
+    for (int row = 0; row < nOfRows; row++)
+    {
+        for (int col = 0; col < nOfColumns; col++)
+        {
+            if (!starMatrix[row + nOfRows*col])
+                continue;
+#ifdef ONE_INDEXING
+            assignment[row] = col + 1; /* MATLAB-Indexing */
+#else
+            assignment[row] = col;
+#endif
+            break;  /* at most one star per row */
+        }
+    }
+}
+
+/********************************************************/
+/* Sum the original (unreduced) costs of all valid assignments. */
+void HungarianAlgorithm::computeassignmentcost(int *assignment, double *cost, double *distMatrix, int nOfRows)
+{
+    for (int row = 0; row < nOfRows; row++)
+    {
+        const int assignedCol = assignment[row];
+        if (assignedCol < 0)
+            continue;  /* row left unassigned */
+        *cost += distMatrix[row + nOfRows*assignedCol];
+    }
+}
+
+/********************************************************/
+/* Cover every column containing a starred zero, then continue with step 2b. */
+void HungarianAlgorithm::step2a(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    for (int col = 0; col < nOfColumns; col++)
+    {
+        const bool *column = starMatrix + nOfRows*col;
+        for (int row = 0; row < nOfRows; row++)
+        {
+            if (column[row])
+            {
+                coveredColumns[col] = true;
+                break;
+            }
+        }
+    }
+
+    /* move to step 2b */
+    step2b(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+
+/********************************************************/
+/* Termination check: if minDim columns are covered, the solution is complete
+ * and the assignment is extracted; otherwise keep reducing via step 3. */
+void HungarianAlgorithm::step2b(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    /* count covered columns */
+    int nOfCoveredColumns = 0;
+    for (int col = 0; col < nOfColumns; col++)
+    {
+        if (coveredColumns[col])
+            nOfCoveredColumns++;
+    }
+
+    if (nOfCoveredColumns == minDim)
+    {
+        /* algorithm finished */
+        buildassignmentvector(assignment, starMatrix, nOfRows, nOfColumns);
+        return;
+    }
+
+    /* move to step 3 */
+    step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+
+/********************************************************/
+// Prime uncovered zeros.  For each uncovered zero: if its row has no starred
+// zero, an augmenting path starts there (go to step 4); otherwise cover the
+// row, uncover the star's column, and rescan.  When no uncovered zeros remain,
+// fall through to step 5 to create new zeros.
+void HungarianAlgorithm::step3(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    bool zerosFound;
+    int row, col, starCol;
+
+    zerosFound = true;
+    while (zerosFound)
+    {
+        zerosFound = false;
+        for (col = 0; col<nOfColumns; col++)
+            if (!coveredColumns[col])
+                for (row = 0; row<nOfRows; row++)
+                    if ((!coveredRows[row]) && (fabs(distMatrix[row + nOfRows*col]) < DBL_EPSILON))
+                    {
+                        /* prime zero */
+                        primeMatrix[row + nOfRows*col] = true;
+
+                        /* find starred zero in current row */
+                        for (starCol = 0; starCol<nOfColumns; starCol++)
+                            if (starMatrix[row + nOfRows*starCol])
+                                break;
+
+                        if (starCol == nOfColumns) /* no starred zero found */
+                        {
+                            /* move to step 4 */
+                            step4(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim, row, col);
+                            return;
+                        }
+                        else
+                        {
+                            coveredRows[row] = true;
+                            coveredColumns[starCol] = false;
+                            zerosFound = true;
+                            break;
+                        }
+                    }
+    }
+
+    /* move to step 5 */
+    step5(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+
+/********************************************************/
+// Augmenting path starting at the primed zero (row, col): alternately unstar
+// the star in the current column and star the prime in that star's row, until
+// a column without a star is reached.  The updated starring replaces the old
+// one, all primes and row covers are cleared, then control returns to step 2a.
+void HungarianAlgorithm::step4(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim, int row, int col)
+{
+    int n, starRow, starCol, primeRow, primeCol;
+    int nOfElements = nOfRows*nOfColumns;
+
+    /* generate temporary copy of starMatrix */
+    for (n = 0; n<nOfElements; n++)
+        newStarMatrix[n] = starMatrix[n];
+
+    /* star current zero */
+    newStarMatrix[row + nOfRows*col] = true;
+
+    /* find starred zero in current column */
+    starCol = col;
+    for (starRow = 0; starRow<nOfRows; starRow++)
+        if (starMatrix[starRow + nOfRows*starCol])
+            break;
+
+    /* walk the alternating star/prime path; ends when a column has no star */
+    while (starRow<nOfRows)
+    {
+        /* unstar the starred zero */
+        newStarMatrix[starRow + nOfRows*starCol] = false;
+
+        /* find primed zero in current row */
+        primeRow = starRow;
+        for (primeCol = 0; primeCol<nOfColumns; primeCol++)
+            if (primeMatrix[primeRow + nOfRows*primeCol])
+                break;
+
+        /* star the primed zero */
+        newStarMatrix[primeRow + nOfRows*primeCol] = true;
+
+        /* find starred zero in current column */
+        starCol = primeCol;
+        for (starRow = 0; starRow<nOfRows; starRow++)
+            if (starMatrix[starRow + nOfRows*starCol])
+                break;
+    }
+
+    /* use temporary copy as new starMatrix */
+    /* delete all primes, uncover all rows */
+    for (n = 0; n<nOfElements; n++)
+    {
+        primeMatrix[n] = false;
+        starMatrix[n] = newStarMatrix[n];
+    }
+    for (n = 0; n<nOfRows; n++)
+        coveredRows[n] = false;
+
+    /* move to step 2a */
+    step2a(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+
+/********************************************************/
+/* Reduce the matrix to create new zeros: find the smallest uncovered element
+ * h, add it to every covered row, subtract it from every uncovered column,
+ * then resume priming in step 3. */
+void HungarianAlgorithm::step5(int *assignment, double *distMatrix, bool *starMatrix, bool *newStarMatrix, bool *primeMatrix, bool *coveredColumns, bool *coveredRows, int nOfRows, int nOfColumns, int minDim)
+{
+    /* find smallest uncovered element h */
+    double h = DBL_MAX;
+    for (int row = 0; row < nOfRows; row++)
+    {
+        if (coveredRows[row])
+            continue;
+        for (int col = 0; col < nOfColumns; col++)
+        {
+            if (coveredColumns[col])
+                continue;
+            const double value = distMatrix[row + nOfRows*col];
+            if (value < h)
+                h = value;
+        }
+    }
+
+    /* add h to each covered row */
+    for (int row = 0; row < nOfRows; row++)
+        if (coveredRows[row])
+            for (int col = 0; col < nOfColumns; col++)
+                distMatrix[row + nOfRows*col] += h;
+
+    /* subtract h from each uncovered column */
+    for (int col = 0; col < nOfColumns; col++)
+        if (!coveredColumns[col])
+            for (int row = 0; row < nOfRows; row++)
+                distMatrix[row + nOfRows*col] -= h;
+
+    /* move to step 3 */
+    step3(assignment, distMatrix, starMatrix, newStarMatrix, primeMatrix, coveredColumns, coveredRows, nOfRows, nOfColumns, minDim);
+}
+

部分文件因为文件数量过多而无法显示