
centerPoint viewer_lidar_obj adciv_save

zhengke 1 year ago
parent commit ad8f69d4b4
100 files changed with 25449 additions and 0 deletions
  1. 168 0
      src/detection/CenterPoint-master/.clang-format
  2. 10 0
      src/detection/CenterPoint-master/.gitignore
  3. 104 0
      src/detection/CenterPoint-master/CMakeLists.txt
  4. 170 0
      src/detection/CenterPoint-master/CenterPoint.pro
  5. 21 0
      src/detection/CenterPoint-master/LICENSE
  6. 199 0
      src/detection/CenterPoint-master/README.md
  7. BIN
      src/detection/CenterPoint-master/build-qt/CenterPoint
  8. BIN
      src/detection/CenterPoint-master/doc/computation_graph.png
  9. BIN
      src/detection/CenterPoint-master/doc/entropy_track.gif
  10. BIN
      src/detection/CenterPoint-master/doc/explicit_track.gif
  11. BIN
      src/detection/CenterPoint-master/doc/fp_det.gif
  12. BIN
      src/detection/CenterPoint-master/doc/fp_track.gif
  13. 1 0
      src/detection/CenterPoint-master/google912b2d6b509a62f4.html
  14. 539 0
      src/detection/CenterPoint-master/include/BatchStream.h
  15. 149 0
      src/detection/CenterPoint-master/include/EntropyCalibrator.h
  16. 223 0
      src/detection/CenterPoint-master/include/centerpoint.h
  17. 147 0
      src/detection/CenterPoint-master/include/common/ErrorRecorder.h
  18. 217 0
      src/detection/CenterPoint-master/include/common/argsParser.h
  19. 467 0
      src/detection/CenterPoint-master/include/common/buffers.h
  20. 971 0
      src/detection/CenterPoint-master/include/common/common.h
  21. 116 0
      src/detection/CenterPoint-master/include/common/dumpTFWts.py
  22. 247 0
      src/detection/CenterPoint-master/include/common/getOptions.cpp
  23. 127 0
      src/detection/CenterPoint-master/include/common/getOptions.h
  24. 4302 0
      src/detection/CenterPoint-master/include/common/half.h
  25. 45 0
      src/detection/CenterPoint-master/include/common/logger.cpp
  26. 36 0
      src/detection/CenterPoint-master/include/common/logger.h
  27. 514 0
      src/detection/CenterPoint-master/include/common/logging.h
  28. 153 0
      src/detection/CenterPoint-master/include/common/parserOnnxConfig.h
  29. 70 0
      src/detection/CenterPoint-master/include/common/safeCommon.h
  30. 347 0
      src/detection/CenterPoint-master/include/common/sampleConfig.h
  31. 389 0
      src/detection/CenterPoint-master/include/common/sampleDevice.h
  32. 1205 0
      src/detection/CenterPoint-master/include/common/sampleEngines.cpp
  33. 136 0
      src/detection/CenterPoint-master/include/common/sampleEngines.h
  34. 702 0
      src/detection/CenterPoint-master/include/common/sampleInference.cpp
  35. 59 0
      src/detection/CenterPoint-master/include/common/sampleInference.h
  36. 1549 0
      src/detection/CenterPoint-master/include/common/sampleOptions.cpp
  37. 268 0
      src/detection/CenterPoint-master/include/common/sampleOptions.h
  38. 415 0
      src/detection/CenterPoint-master/include/common/sampleReporting.cpp
  39. 221 0
      src/detection/CenterPoint-master/include/common/sampleReporting.h
  40. 587 0
      src/detection/CenterPoint-master/include/common/sampleUtils.h
  41. 568 0
      src/detection/CenterPoint-master/include/common/windows/getopt.c
  42. 107 0
      src/detection/CenterPoint-master/include/common/windows/getopt.h
  43. 119 0
      src/detection/CenterPoint-master/include/config.h
  44. 6 0
      src/detection/CenterPoint-master/include/iou3d_nms.h
  45. 57 0
      src/detection/CenterPoint-master/include/postprocess.h
  46. 42 0
      src/detection/CenterPoint-master/include/preprocess.h
  47. 58 0
      src/detection/CenterPoint-master/include/scatter_cuda.h
  48. 27 0
      src/detection/CenterPoint-master/include/utils.h
  49. 0 0
      src/detection/CenterPoint-master/lidars/0a0d6b8c2e884134a3b48df43d54c36a.bin.txt
  50. BIN
      src/detection/CenterPoint-master/lidars/seq_0_frame_100.bin
  51. BIN
      src/detection/CenterPoint-master/lidars/seq_0_frame_101.bin
  52. 7 0
      src/detection/CenterPoint-master/lidars/test.py
  53. BIN
      src/detection/CenterPoint-master/models/pfe_baseline32000.onnx
  54. BIN
      src/detection/CenterPoint-master/models/pfe_fp.engine
  55. BIN
      src/detection/CenterPoint-master/models/rpn_baseline.onnx
  56. BIN
      src/detection/CenterPoint-master/models/rpn_fp.engine
  57. 543 0
      src/detection/CenterPoint-master/requirements.txt
  58. 500 0
      src/detection/CenterPoint-master/results1/seq_0_frame_100.bin.txt
  59. 500 0
      src/detection/CenterPoint-master/results1/seq_0_frame_101.bin.txt
  60. 500 0
      src/detection/CenterPoint-master/results2/seq_0_frame_100.bin.txt
  61. 500 0
      src/detection/CenterPoint-master/results2/seq_0_frame_101.bin.txt
  62. 8 0
      src/detection/CenterPoint-master/run-new.sh
  63. 8 0
      src/detection/CenterPoint-master/run.sh
  64. 403 0
      src/detection/CenterPoint-master/src/centerpoint.cpp
  65. 734 0
      src/detection/CenterPoint-master/src/iou3d_nms_kernel.cu
  66. 471 0
      src/detection/CenterPoint-master/src/main.cpp
  67. 319 0
      src/detection/CenterPoint-master/src/postprocess.cpp
  68. 211 0
      src/detection/CenterPoint-master/src/preprocess.cpp
  69. 262 0
      src/detection/CenterPoint-master/src/preprocess.cu
  70. 191 0
      src/detection/CenterPoint-master/src/samplecenterpoint.cpp
  71. 56 0
      src/detection/CenterPoint-master/src/scatter_cuda.cu
  72. 5 0
      src/detection/CenterPoint-master/tools/catkin_ws/catkin_make.sh
  73. 166 0
      src/detection/CenterPoint-master/tools/catkin_ws/default.rviz
  74. 1 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/.built_by
  75. 1 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/.catkin
  76. 2 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/.rosinstall
  77. 304 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/_setup_util.py
  78. 0 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/cmake.lock
  79. 16 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/env.sh
  80. 123 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/FrameGraph.h
  81. 174 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/FrameGraphRequest.h
  82. 196 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/FrameGraphResponse.h
  83. 259 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/tfMessage.h
  84. 123 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/FrameGraph.h
  85. 174 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/FrameGraphRequest.h
  86. 196 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/FrameGraphResponse.h
  87. 384 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformAction.h
  88. 283 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformActionFeedback.h
  89. 269 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformActionGoal.h
  90. 338 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformActionResult.h
  91. 175 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformFeedback.h
  92. 262 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformGoal.h
  93. 280 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformResult.h
  94. 259 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/TF2Error.h
  95. 259 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/TFMessage.h
  96. 8 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/local_setup.bash
  97. 9 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/local_setup.sh
  98. 8 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/local_setup.zsh
  99. 8 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/setup.bash
  100. 96 0
      src/detection/CenterPoint-master/tools/catkin_ws/devel/setup.sh

+ 168 - 0
src/detection/CenterPoint-master/.clang-format

@@ -0,0 +1,168 @@
+---
+Language:        Cpp
+# BasedOnStyle:  Google
+AccessModifierOffset: -4
+AlignAfterOpenBracket: Align
+AlignConsecutiveMacros: false
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Right
+AlignOperands:   true
+AlignTrailingComments: true
+AllowAllArgumentsOnNextLine: true
+AllowAllConstructorInitializersOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortBlocksOnASingleLine: Never
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: Empty
+AllowShortLambdasOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: WithoutElse
+AllowShortLoopsOnASingleLine: true
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: Yes
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+  AfterCaseLabel:  false
+  AfterClass:      false
+  AfterControlStatement: false
+  AfterEnum:       false
+  AfterFunction:   false
+  AfterNamespace:  false
+  AfterObjCDeclaration: false
+  AfterStruct:     false
+  AfterUnion:      false
+  AfterExternBlock: false
+  BeforeCatch:     false
+  BeforeElse:      false
+  IndentBraces:    false
+  SplitEmptyFunction: true
+  SplitEmptyRecord: true
+  SplitEmptyNamespace: true
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Attach
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: BeforeColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit:     80
+CommentPragmas:  '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DeriveLineEnding: true
+DerivePointerAlignment: false
+DisableFormat:   false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+ForEachMacros:
+  - foreach
+  - Q_FOREACH
+  - BOOST_FOREACH
+IncludeBlocks:   Regroup
+IncludeCategories:
+  - Regex:           '^<ext/.*\.h>'
+    Priority:        2
+    SortPriority:    0
+  - Regex:           '^<.*\.h>'
+    Priority:        1
+    SortPriority:    0
+  - Regex:           '^<.*'
+    Priority:        2
+    SortPriority:    0
+  - Regex:           '.*'
+    Priority:        3
+    SortPriority:    0
+IncludeIsMainRegex: '([-_](test|unittest))?$'
+IncludeIsMainSourceRegex: ''
+IndentCaseLabels: true
+IndentGotoLabels: true
+IndentPPDirectives: None
+IndentWidth:     4
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd:   ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Never
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 1
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PointerAlignment: Right
+RawStringFormats:
+  - Language:        Cpp
+    Delimiters:
+      - cc
+      - CC
+      - cpp
+      - Cpp
+      - CPP
+      - 'c++'
+      - 'C++'
+    CanonicalDelimiter: ''
+    BasedOnStyle:    google
+  - Language:        TextProto
+    Delimiters:
+      - pb
+      - PB
+      - proto
+      - PROTO
+    EnclosingFunctions:
+      - EqualsProto
+      - EquivToProto
+      - PARSE_PARTIAL_TEXT_PROTO
+      - PARSE_TEST_PROTO
+      - PARSE_TEXT_PROTO
+      - ParseTextOrDie
+      - ParseTextProtoOrDie
+    CanonicalDelimiter: ''
+    BasedOnStyle:    google
+ReflowComments:  true
+SortIncludes:    true
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterLogicalNot: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles:  false
+SpacesInConditionalStatement: false
+SpacesInContainerLiterals: true
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+SpaceBeforeSquareBrackets: false
+Standard:        Auto
+StatementMacros:
+  - Q_UNUSED
+  - QT_REQUIRE_VERSION
+TabWidth:        8
+UseCRLF:         false
+UseTab:          Never
+...
+

+ 10 - 0
src/detection/CenterPoint-master/.gitignore

@@ -0,0 +1,10 @@
+build
+tools/__pycache__
+tools/tmp_models
+tools/.ipynb_checkpoints
+tools/catkin_ws/src/waymo_track/src/.ipynb_checkpoints
+tools/catkin_ws/build
+news
+olds
+results/*
+run0.sh

+ 104 - 0
src/detection/CenterPoint-master/CMakeLists.txt

@@ -0,0 +1,104 @@
+cmake_minimum_required(VERSION 2.8.3)
+project(centerpoint)
+
+
+include_directories(
+    ${PROJECT_SOURCE_DIR}/include
+)
+
+set(SAMPLES_COMMON_SOURCES
+${PROJECT_SOURCE_DIR}/include/common/logger.cpp
+)
+
+
+# pcl and boost related !
+find_package(Boost COMPONENTS program_options REQUIRED )
+include_directories(${Boost_INCLUDE_DIRS})
+link_directories(${Boost_LIBRARY_DIRS})
+ 
+find_package(PCL REQUIRED)
+include_directories(${PCL_INCLUDE_DIRS})
+link_directories(${PCL_LIBRARY_DIRS})
+add_definitions(${PCL_DEFINITIONS})
+
+
+
+# set flags for CUDA availability
+option(CUDA_AVAIL "CUDA available" OFF)
+find_package(CUDA)
+message("CUDA dir paths", ${CUDA_LIBRARIES})
+
+include_directories( 
+  ${PROJECT_SOURCE_DIR}/include/common
+  ${PROJECT_SOURCE_DIR}/include
+  ${CUDA_INCLUDE_DIRS}
+  )
+
+message("CUDA is available!")
+message("CUDA Libs: ${CUDA_LIBRARIES}")
+message("CUDA Headers: ${CUDA_INCLUDE_DIRS}")
+set(CUDA_AVAIL ON)
+
+
+# set flags for TensorRT availability
+option(TRT_AVAIL "TensorRT available" OFF)
+# try to find the tensorRT modules
+find_library(NVINFER NAMES nvinfer)
+find_library(NVPARSERS NAMES nvparsers)
+find_library(NVONNXPARSERS NAMES nvonnxparser)
+
+if(NVINFER AND NVPARSERS AND NVONNXPARSERS)
+  message("TensorRT is available!")
+  message("NVINFER: ${NVINFER}")
+  message("NVPARSERS: ${NVPARSERS}")
+  message("NVONNXPARSERS: ${NVONNXPARSERS}")
+  set(TRT_AVAIL ON)
+else()
+  message("TensorRT is NOT Available")
+  set(TRT_AVAIL OFF)
+endif()
+
+# set flags for CUDNN availability
+option(CUDNN_AVAIL "CUDNN available" OFF)
+# try to find the CUDNN module
+find_library(CUDNN_LIBRARY
+  NAMES libcudnn.so${__cudnn_ver_suffix} libcudnn${__cudnn_ver_suffix}.dylib ${__cudnn_lib_win_name}
+  PATHS $ENV{LD_LIBRARY_PATH} ${__libpath_cudart} ${CUDNN_ROOT_DIR} ${PC_CUDNN_LIBRARY_DIRS} ${CMAKE_INSTALL_PREFIX}
+  PATH_SUFFIXES lib lib64 bin
+  DOC "CUDNN library."
+)
+
+if(CUDNN_LIBRARY)
+  message("CUDNN is available!")
+  message("CUDNN_LIBRARY: ${CUDNN_LIBRARY}")
+  set(CUDNN_AVAIL ON)
+else()
+  message("CUDNN is NOT Available")
+  set(CUDNN_AVAIL OFF)
+endif()
+
+
+file(GLOB PointPillarLibs ${PROJECT_SOURCE_DIR}/src/*cpp)
+add_library(pointpillars SHARED ${PointPillarLibs})
+
+file(GLOB PointPillarCU ${PROJECT_SOURCE_DIR}/src/*cu)
+cuda_add_library(pointpillarsCU SHARED ${PointPillarCU})
+
+set(CUDA_LIB_PATH /usr/local/cuda/lib64/)
+
+MESSAGE(${PROJECT_SOURCE_DIR})
+add_executable(centerpoint ${PROJECT_SOURCE_DIR}/src/samplecenterpoint.cpp ${SAMPLES_COMMON_SOURCES})
+
+target_link_libraries(centerpoint 
+                         ${CUDA_LIBRARY} 
+                         ${CUDA_RUNTIME_LIBRARY} 
+                         ${PCL_LIBRARIES}  
+                         ${Boost_LIBRARIES}
+                         nvonnxparser
+                         nvinfer
+                         pointpillars
+                         ${CUDA_LIB_PATH}libcudart.so
+                         pointpillarsCU
+                         )
+
+

+ 170 - 0
src/detection/CenterPoint-master/CenterPoint.pro

@@ -0,0 +1,170 @@
+QT -= gui
+
+CONFIG += c++14 console
+CONFIG -= app_bundle
+
+QMAKE_CXXFLAGS += -std=gnu++17
+QMAKE_LFLAGS += -no-pie  -Wl,--no-as-needed
+# The following define makes your compiler emit warnings if you use
+# any feature of Qt which as been marked deprecated (the exact warnings
+# depend on your compiler). Please consult the documentation of the
+# deprecated API in order to know how to port your code away from it.
+DEFINES += QT_DEPRECATED_WARNINGS
+#DEFINES += DEBUG_SHOW
+# You can also make your code fail to compile if you use deprecated APIs.
+# In order to do so, uncomment the following line.
+# You can also select to disable deprecated APIs only up to a certain version of Qt.
+#DEFINES += QT_DISABLE_DEPRECATED_BEFORE=0x060000    # disables all the APIs deprecated before Qt 6.0.0
+TARGET = CenterPoint
+TEMPLATE = app
+
+SOURCES += src/main.cpp \
+    src/centerpoint.cpp \
+    src/postprocess.cpp \
+    src/preprocess.cpp \
+    include/common/logger.cpp \
+    ../../include/msgtype/object.pb.cc \
+    ../../include/msgtype/objectarray.pb.cc \
+
+CUDA_SOURCES +=  \
+    src/iou3d_nms_kernel.cu \
+    src/preprocess.cu \
+    src/scatter_cuda.cu
+
+DISTFILES += \
+    src/iou3d_nms_kernel.cu \
+    src/preprocess.cu \
+    src/scatter_cuda.cu
+
+
+INCLUDEPATH+= /home/nvidia/modularization/src/detection/CenterPoint-master/include \
+              /home/nvidia/modularization/src/detection/CenterPoint-master/include/common
+
+HEADERS += \
+    include/config.h \
+    ../../include/msgtype/object.pb.h \
+    ../../include/msgtype/objectarray.pb.h
+
+
+CUDA_SDK = "/usr/local/cuda/"   # CUDA SDK path
+
+CUDA_DIR = "/usr/local/cuda/"            # CUDA toolkit path
+
+SYSTEM_NAME = linux         # host system: 'Win32', 'x64', or 'Win64'
+
+SYSTEM_TYPE = 64           # OS word size: '32' or '64'
+
+CUDA_ARCH = sm_72         # CUDA architecture, for example 'compute_10', 'compute_11', 'sm_10'
+
+NVCC_OPTIONS = --use_fast_math --compiler-options "-fPIC"
+
+
+# include paths
+
+INCLUDEPATH += $$CUDA_DIR/include
+#INCLUDEPATH += /usr/local/cuda-10.0/targets/aarch64-linux/include/crt
+
+# library directories
+
+QMAKE_LIBDIR += $$CUDA_DIR/lib/
+
+CUDA_OBJECTS_DIR = ./
+
+# The following library conflicts with something in Cuda
+
+#QMAKE_LFLAGS_RELEASE = /NODEFAULTLIB:msvcrt.lib
+
+#QMAKE_LFLAGS_DEBUG   = /NODEFAULTLIB:msvcrtd.lib
+
+# Add the necessary libraries
+
+CUDA_LIBS =  cudart cufft
+
+# The following makes sure all path names (which often include spaces) are put between quotation marks
+
+CUDA_INC = $$join(INCLUDEPATH,'" -I"','-I"','"')
+
+NVCC_LIBS = $$join(CUDA_LIBS,' -l','-l', '')
+
+#LIBS += $$join(CUDA_LIBS,'.so ', '', '.so')
+
+# Configuration of the Cuda compiler
+
+CONFIG(debug, debug|release) {
+
+    # Debug mode
+
+    cuda_d.input = CUDA_SOURCES
+
+    cuda_d.output = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.o
+
+    cuda_d.commands = $$CUDA_DIR/bin/nvcc -D_DEBUG $$NVCC_OPTIONS $$CUDA_INC $$NVCC_LIBS --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
+
+    cuda_d.dependency_type = TYPE_C
+
+    QMAKE_EXTRA_COMPILERS += cuda_d
+
+}
+
+else {
+
+    # Release mode
+
+    cuda.input = CUDA_SOURCES
+
+    cuda.output = $$CUDA_OBJECTS_DIR/${QMAKE_FILE_BASE}_cuda.o
+
+    cuda.commands = $$CUDA_DIR/bin/nvcc $$NVCC_OPTIONS $$CUDA_INC $$NVCC_LIBS --machine $$SYSTEM_TYPE -arch=$$CUDA_ARCH -O3 -c -o ${QMAKE_FILE_OUT} ${QMAKE_FILE_NAME}
+
+    cuda.dependency_type = TYPE_C
+
+    QMAKE_EXTRA_COMPILERS += cuda
+
+}
+
+
+LIBS += -L/usr/local/cuda-10.2/targets/aarch64-linux/lib
+
+LIBS += -lcudart -lcufft -lyaml-cpp
+
+#LIBS += -L/home/adc/soft/cudnn-10.2-linux-x64-v7.6.5.32/cuda/lib64 -lcudnn
+
+LIBS +=  -lnvinfer -lnvonnxparser -lnvcaffe_parser
+
+#LIBS += -L/home/nvidia/git/libtorch_gpu-1.6.0-linux-aarch64/lib -ltorch_cuda  -ltorch -lc10 -ltorch_cpu
+
+unix:INCLUDEPATH += /usr/include/eigen3
+unix:INCLUDEPATH += /usr/include/pcl-1.7
+unix:INCLUDEPATH += /usr/include/pcl-1.8
+
+!include(../../../include/common.pri ) {
+    error( "Couldn't find the common.pri file!" )
+}
+
+!include(../../../include/ivprotobuf.pri ) {
+    error( "Couldn't find the ivprotobuf.pri file!" )
+}
+
+LIBS += -lboost_system
+
+unix:LIBS +=  -lpcl_common\
+        -lpcl_features\
+        -lpcl_filters\
+        -lpcl_io\
+        -lpcl_io_ply\
+        -lpcl_kdtree\
+        -lpcl_keypoints\
+        -lpcl_octree\
+        -lpcl_outofcore\
+        -lpcl_people\
+        -lpcl_recognition\
+        -lpcl_registration\
+        -lpcl_sample_consensus\
+        -lpcl_search\
+        -lpcl_segmentation\
+        -lpcl_surface\
+        -lpcl_tracking\
+        -lpcl_visualization
+
+INCLUDEPATH += /usr/include/opencv4/
+LIBS += /usr/lib/aarch64-linux-gnu/libopencv*.so

+ 21 - 0
src/detection/CenterPoint-master/LICENSE

@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 Abraham423
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 199 - 0
src/detection/CenterPoint-master/README.md

@@ -0,0 +1,199 @@
+# CenterPoint: A LiDAR Object Detection & Tracking Project Implemented with TensorRT
+
+This project implements CenterPoint with TensorRT. CenterPoint is a 3D object detection model that detects objects as center points in the bird's-eye view.
+The code follows the original [project](https://github.com/tianweiy/CenterPoint.git).
+
+Inference is run on the [Waymo Open Dataset](https://waymo.com/intl/en_us/dataset-download-terms).
+
+
+# Setup
+
+The project has been tested on *Ubuntu 18.04* and *Ubuntu 20.04*.
+It mainly relies on TensorRT and CUDA as third-party packages, with the following versions:
+
+*TensorRT: 8.0.1.6*
+
+*CUDA: 11.3*
+
+Baseline ONNX models trained with [this config](https://github.com/tianweiy/CenterPoint/blob/master/configs/waymo/pp/waymo_centerpoint_pp_two_pfn_stride1_3x.py) are provided in `models`. If you want to export your own models, we assume you have the [CenterPoint](https://github.com/tianweiy/CenterPoint.git) project installed; you can then set up a local `det3d` environment:
+
+```
+cd /PATH/TO/centerpoint/tools 
+bash setup3.sh
+```
+
+# Preparation
+
+### Export ONNX models
+To export your own models, run:
+```
+python3 export_onnx.py \
+--config waymo_centerpoint_pp_two_pfn_stride1_3x.py \
+--ckpt your_model.pth \
+--pfe_save_path pfe.onnx \
+--rpn_save_path rpn.onnx
+```
+Here we extract two pure neural-network models, `pfe` and `rpn`, from the whole computation graph. Splitting the graph makes it easier for TensorRT to optimize each inference engine,
+and we use CUDA kernels to connect the two engines.
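+
+As a rough illustration, the following is a minimal CPU sketch of the step that connects the two engines: scattering the `pfe` pillar features back onto the dense BEV canvas that `rpn` consumes. The array layout, names, and sizes here are illustrative assumptions, not the exact interface of `src/scatter_cuda.cu`, which performs this scatter on the GPU.
+```
+#include <vector>
+
+// Scatter per-pillar features [numPillars x C] into a dense BEV canvas
+// [C x H x W]; coords[i] is the flattened BEV cell index of pillar i.
+std::vector<float> scatterToBEV(const std::vector<float>& pillarFeatures,
+                                const std::vector<int>& coords,
+                                int numPillars, int C, int H, int W) {
+    std::vector<float> canvas(C * H * W, 0.0f);
+    for (int i = 0; i < numPillars; ++i) {
+        const int cell = coords[i];          // y * W + x of this pillar
+        if (cell < 0) continue;              // empty / padded pillar slot
+        for (int c = 0; c < C; ++c)
+            canvas[c * H * W + cell] = pillarFeatures[i * C + c];
+    }
+    return canvas;                           // input canvas for the rpn engine
+}
+```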
+
+###  Generate TensorRT serialized engines
+You can create TensorRT engines directly from the ONNX models and skip this step; however, loading previously serialized engine files is faster on subsequent runs.
+
+You can run 
+```
+python3 create_engine.py \
+--config waymo_centerpoint_pp_two_pfn_stride1_3x.py \
+--pfe_onnx_path pfe.onnx \
+--rpn_onnx_path rpn.onnx \
+--pfe_engine_path pfe_fp.engine \
+--rpn_engine_path rpn_fp.engine
+```
+By default this will generate fp16-engine files.
+
+### Work with int8
+
+According to [Nvidia](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/#working-with-int8), there are two ways to quantize: explicit and implicit quantization.
+
+For explicit quantization, go to [TensorRT](https://github.com/NVIDIA/TensorRT)/bin and run
+`./trtexec --onnx=model.onnx --int8 --saveEngine=model.engine`
+*(you will need to compile TensorRT from source)*
+
+For implicit quantization, you first need to generate calibration files. We assume you have downloaded the Waymo open set and converted it to the expected data format according to [this guide](https://github.com/tianweiy/CenterPoint/blob/master/docs/WAYMO.md):
+
+```
+python3 generate_calib_data.py \
+--config waymo_centerpoint_pp_two_pfn_stride1_3x.py \
+--ckpt your_model.pth \
+--calib_file_path your_calib_files
+```
+
+Then run the provided script:
+```
+python3 create_engine.py \
+--config waymo_centerpoint_pp_two_pfn_stride1_3x.py \
+--pfe_onnx_path pfe.onnx \
+--rpn_onnx_path rpn.onnx \
+--pfe_engine_path pfe_quant.engine \
+--rpn_engine_path rpn_quant.engine \
+--quant \
+--calib_file_path your_calib_files \
+--calib_batch_size 10
+```
+
+# Run inference
+
+After preparation, build the TensorRT project by executing the following commands:
+
+```
+cd /PATH/TO/centerpoint
+mkdir build && cd build
+cmake .. && make
+```
+If you want to create the engines from the ONNX files, run inference with:
+```
+./build/centerpoint \
+--pfeOnnxPath=models/pfe_baseline32000.onnx \
+--rpnOnnxPath=models/rpn_baseline.onnx \
+--savePath=results \
+--filePath=/PATH/TO/DATA \
+--fp16
+```
+Or load the engine files directly:
+```
+./build/centerpoint \
+--pfeEnginePath=pfe_fp.engine \
+--rpnEnginePath=rpn_fp.engine \
+--savePath=results \
+--filePath=/PATH/TO/DATA \
+--loadEngine
+```
+where `filePath` refers to input bin files generated by `tools/generate_input_data.py`.
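+
+As a quick sanity check of the input format, here is a minimal C++ sketch that loads one of these bin files, assuming each point is stored as a fixed number of consecutive float32 values. The point dimension is defined by `POINT_DIM` in `include/config.h`; the value 5 below is only an illustrative assumption.
+```
+#include <cstdio>
+#include <fstream>
+#include <string>
+#include <vector>
+
+// Read a whole .bin point-cloud file into a flat float buffer.
+std::vector<float> loadBin(const std::string& path, int pointDim, int* numPoints) {
+    std::ifstream file(path, std::ios::binary | std::ios::ate);
+    if (!file) return {};                        // missing file -> empty cloud
+    const std::streamsize bytes = file.tellg();  // file size in bytes
+    file.seekg(0, std::ios::beg);
+    std::vector<float> points(bytes / sizeof(float));
+    file.read(reinterpret_cast<char*>(points.data()), bytes);
+    *numPoints = static_cast<int>(points.size()) / pointDim;
+    return points;
+}
+
+int main() {
+    int n = 0;
+    std::vector<float> pts = loadBin("lidars/seq_0_frame_100.bin", 5, &n);
+    std::printf("loaded %d points with %zu floats\n", n, pts.size());
+    return 0;
+}
+```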
+
+You can also download seq1 directly to quickly test your model; see this Baidu Cloud Disk link:
+```
+link:https://pan.baidu.com/s/1Ua9F3eFflA9Gckpa9U-1Eg  passwd:08s6
+```
+
+# Computation Speed 
+Acceleration is our main goal, so we do most of the computation (including pre- and post-processing) on the GPU.
+The table below gives the average computation time (in milliseconds) of every module, measured on an RTX 3080 over all 39987 Waymo validation samples.
+||Preprocess|PfeInfer|VoxelAssign|RpnInfer|Postprocess|
+|---|---|---|---|---|---|
+|fp32+gpupre+gpupost|1.73|8.47|0.36|25.0|2.01|
+|fp16+gpupre+gpupost|1.61|5.88|0.17|6.89|2.37|
+|fp16+cpupre+gpupost|9.2|6.14|0.42|7.14|2.10|
+|int8(minmax)+gpupre+gpupost|1.61|8.23|0.17|5.25|3.21|
+|int8(entropy)+gpupre+gpupost|1.41|7.45|0.17|4.65|2.11|
+|int8(explicit)+gpupre+gpupost|2.2|8.0|0.17|8.18|2.59|
+
+Note that `fp16` or `int8` layers may be mixed with `fp32` ones; we have no control over which tensors end up as int8, fp16, or fp32, since that choice is made by TensorRT.
+We can see that fp16 mode runs much faster than fp32 mode, and that GPU preprocessing is much faster than CPU preprocessing: in CUDA each point is handled by its own thread, while on the CPU points are processed in a sequential for loop (see the sketch below).
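+
+To make the contrast concrete, below is a simplified sequential (CPU-style) pillar assignment loop; the names and the max-points cap are illustrative, not the exact code in `src/preprocess.cpp`. In `src/preprocess.cu` the body of this loop is executed by one GPU thread per point instead.
+```
+#include <vector>
+
+// Sequential pillar assignment: visit every point in file order and copy it
+// into the next free slot of its pillar until the pillar is full.
+void assignPointsCPU(const std::vector<float>& points, int pointDim,
+                     const std::vector<int>& pillarOfPoint,  // pillar id per point
+                     int maxPointsPerPillar,
+                     std::vector<int>& pillarPointCount,     // one counter per pillar
+                     std::vector<float>& pillarBuffer) {     // [pillar][slot][pointDim]
+    const int numPoints = static_cast<int>(points.size()) / pointDim;
+    for (int i = 0; i < numPoints; ++i) {                    // one point per iteration
+        const int p = pillarOfPoint[i];
+        const int slot = pillarPointCount[p];
+        if (slot >= maxPointsPerPillar) continue;            // pillar full: drop the point
+        ++pillarPointCount[p];
+        const int dst = (p * maxPointsPerPillar + slot) * pointDim;
+        for (int d = 0; d < pointDim; ++d)
+            pillarBuffer[dst + d] = points[i * pointDim + d];
+    }
+    // On the GPU, the slot is claimed with an atomic add by one thread per
+    // point, so which points survive in an over-full pillar is effectively a
+    // random subsample rather than "the first N in file order".
+}
+```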
+
+# Metrics
+To compute evaluation metrics, run `cd tools && python3 waymo_eval.py --cpp_output --save_path ../results`. We set the score threshold to 0.2 and the 2D IoU thresholds to [0.7, 0.5] for vehicle and pedestrian, following the [Waymo open set challenge](https://waymo.com/open/challenges/2021/real-time-3d-prediction). The evaluation results are shown below:
+
+||Vehicle_level2/mAP|Vehicle_level2/mAPH|Vehicle_level2 Recall@0.95|Pedestrian_level2/mAP|Pedestrian_level2/mAPH|Pedestrian_level2 Recall@0.95|
+|---|---|---|---|---|---|---|
+|fp32+cpupre+cpupost|0.7814|0.7240|0.3966|0.6837|0.5668|0.3739|
+|fp32+gpupre+gpupost|0.8039|0.7947|0.5731|0.6723|0.5588|0.2310|
+|fp16+gpupre+gpupost|0.8038|0.7945|0.5730|0.6671|0.5541|0.2301|
+|int8+gpupre+gpupost|0.5827|0.5615|0.4061|0.0634|0.0456|0.0|
+
+
+Below are *the old metrics*, computed with 3D IoU thresholds of [0.5, 0.5] for vehicle and pedestrian; the conclusions are the same.
+
+||Vehicle_level2/mAP|Vehicle_level2/mAPH|Vehicle_level2 Recall@0.95|Pedestrian_level2/mAP|Pedestrian_level2/mAPH|Pedestrian_level2 Recall@0.95|
+|---|---|---|---|---|---|---|
+|torchModel|0.6019|0.5027|0.0241|0.5545|0.5377|0.0547|
+|fp32+cpupre+gpupost|0.6019|0.5027|0.0241|0.5546|0.5378|0.0548|
+|fp16+cpupre+gpupost|0.6024|0.5030|0.0240|0.5545|0.5378|0.0542|
+|fp16+gpupre+gpupost|0.6207|0.5173|0.2327|0.5788|0.5624|0.2984|
+|int8(minmax)+gpupre+gpupost|0.3470|0.2889|0.0|0.3222|0.3065|0.0|
+|int8(entropy)+gpupre+gpupost|0.1396|0.1049|0.0014|0.1550|0.1452|0.0008|
+|int8(explicit)+gpupre+gpupost|0.4642|0.3823|0.0288|0.4248|0.4112|0.0201|
+
+
+
+From the above metrics, we can see that:
+1. The fp16 model performs almost as well as the fp32 model while running much faster.
+2. The float model with cpupre+gpupost achieves the same result as the original torch model, because they preprocess points the same way, while GPU preprocessing performs better. Since points processed by parallel threads are orderless, a voxel that contains more points than the given threshold is subsampled without bias (see the preprocessing sketch in the Computation Speed section). [Detailed reason here](https://github.com/tianweiy/CenterPoint/issues/243)
+3. int8 mode cannot match the float modes, and implicit calibration performs worse than explicit calibration. We used 1000 samples for calibration with a batch size of 10, so the calibration set may simply be insufficient. Note, however, that the explicitly calibrated model runs slower than the implicit ones.
+
+# Online Tracking and Visualization
+We use rviz to visualize the perception results. To set up the catkin workspace, go to `tools/catkin_ws` and run
+```
+bash catkin_make.sh
+source devel/setup.bash
+```
+You need to configure your file paths in `tools/catkin_ws/src/waymo_track/src/waymo_track.py`.
+Then open two more terminals: run `roscore` in one and `rviz` in the other to show the results (you can directly load `default.rviz` in `catkin_ws`). In your original terminal, go to `catkin_ws` and run `rosrun waymo_track waymo_track.py`; you should see the detection or tracking results in the rviz window.
+
+
+Detection & tracking results are shown below:
+#### float detection & tracking
+<img src='doc/fp_det.gif' style=' width:400px;height:270 px'/> <img src='doc/fp_track.gif' style=' width:400px;height:270 px'/>  
+
+#### implicit quantization & explicit quantization (tracking)
+
+<img src='doc/entropy_track.gif' style=' width:400px;height:270 px'/> <img src='doc/explicit_track.gif' style=' width:400px;height:270 px'/>
+
+
+# What has been done?
+To further understand the detailed computation graph of CenterPoint, please refer to the following picture.
+![graph](doc/computation_graph.png)
+
+# Acknowledgements
+This project reuses code from:
+
+
+[CenterPoint](https://github.com/tianweiy/CenterPoint)
+
+[TensorRT](https://github.com/NVIDIA/TensorRT/tree/master)
+
+[CenterPoint-PointPillars ](https://github.com/CarkusL/CenterPoint)
+
+[SORT](https://github.com/abewley/sort)
+
+# Contact
+Hao Wang, christian.wong423@gmail.com

BIN
src/detection/CenterPoint-master/build-qt/CenterPoint


BIN
src/detection/CenterPoint-master/doc/computation_graph.png


BIN
src/detection/CenterPoint-master/doc/entropy_track.gif


BIN
src/detection/CenterPoint-master/doc/explicit_track.gif


BIN
src/detection/CenterPoint-master/doc/fp_det.gif


BIN
src/detection/CenterPoint-master/doc/fp_track.gif


+ 1 - 0
src/detection/CenterPoint-master/google912b2d6b509a62f4.html

@@ -0,0 +1 @@
+google-site-verification: google912b2d6b509a62f4.html

+ 539 - 0
src/detection/CenterPoint-master/include/BatchStream.h

@@ -0,0 +1,539 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef BATCH_STREAM_H
+#define BATCH_STREAM_H
+
+#include "NvInfer.h"
+#include "common.h"
+#include "logger.h"
+#include <algorithm>
+#include <stdio.h>
+#include <vector>
+#include "config.h"
+// #include "utils.h"
+
+
+class IBatchStream
+{
+public:
+    virtual void reset(int firstBatch) = 0;
+    virtual bool next() = 0;
+    virtual void skip(int skipCount) = 0;
+    virtual float* getBatch() = 0;
+    // virtual float* getLabels() = 0;
+    virtual int getBatchesRead() const = 0;
+    virtual int getBatchSize() const = 0;
+    virtual nvinfer1::Dims getDims() const = 0;
+};
+
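+// PFEBatchStream streams calibration batches for the pfe engine: each batch is
+// one fixed-size pillar-feature tensor of MAX_PILLARS x MAX_PIONT_IN_PILLARS x
+// FEATURE_NUM floats read from a pre-dumped binary calibration file. Note that
+// the .npy and .bin paths below are hard-coded to the author's machine and must
+// be adapted before int8 calibration can run elsewhere.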
+class PFEBatchStream : public IBatchStream
+{
+public:
+    PFEBatchStream(const std::string directory)
+        : mBatchSize{1} // we only execute batch size to be 1
+        , mDims{3, {MAX_PILLARS, MAX_PIONT_IN_PILLARS, FEATURE_NUM}} //!< We already know the dimensions of Voxels 
+    {
+            // initialize  mData
+            int elementNum = MAX_PILLARS * MAX_PIONT_IN_PILLARS * FEATURE_NUM;
+            // mData = static_cast<float*>(malloc(  elementNum* sizeof(float)));
+            // if(mData.data() == nullptr){
+            //     sample::gLogError << "PFE Calib Input Data Malloc Memory Failed! Size: " << elementNum << std::endl;
+            //     return ;
+            // }
+            // memset(mData, 0 , elementNum * sizeof(float));
+            // get file paths
+            // filePaths = glob(directory);
+            filePaths = {
+            "/mnt/data/waymo_opensets/val/calibrations/seq_0_frame_0_pfe_input.npy",
+            "/mnt/data/waymo_opensets/val/calibrations/seq_0_frame_1_pfe_input.npy",
+            "/mnt/data/waymo_opensets/val/calibrations/seq_0_frame_2_pfe_input.npy",
+            "/mnt/data/waymo_opensets/val/calibrations/seq_0_frame_3_pfe_input.npy",
+            "/mnt/data/waymo_opensets/val/calibrations/seq_0_frame_4_pfe_input.npy"
+            };
+            std::string filename_ = "/home/wanghao/Desktop/projects/CenterPoint/tensorrt/data/calibdata5.bin";
+            readDataFile(filename_);
+            mMaxBatches = filePaths.size() / mBatchSize;
+        // readDataFile(locateFile(dataFile, directories));
+        // readLabelsFile(locateFile(labelsFile, directories));
+    }
+
+    ~PFEBatchStream() 
+    {
+        // free(mData);
+    }
+
+    void reset(int firstBatch) override
+    {
+        mBatchCount = firstBatch;
+    }
+
+    bool next() override
+    {
+        if (mBatchCount >= mMaxBatches)
+        {
+            return false;
+        }
+        ++mBatchCount;
+        return true;
+    }
+
+    void skip(int skipCount) override
+    {
+        mBatchCount += skipCount;
+    }
+
+    float* getBatch() override
+    {
+        // std::string cur_file_path = filePaths[mBatchCount];
+        // readDataFile(cur_file_path);
+        // return mData;
+        int stride = mBatchCount * mBatchSize * samplesCommon::volume(mDims);
+        return mData.data() + stride;
+        
+    }
+
+    // float* getLabels() override
+    // {
+    //     return mData;
+    // }
+
+    int getBatchesRead() const override
+    {
+        return mBatchCount;
+    }
+
+    int getBatchSize() const override
+    {
+        return mBatchSize;
+    }
+
+    nvinfer1::Dims getDims() const override
+    {
+        return Dims{3, {mDims.d[0], mDims.d[1], mDims.d[2]}};
+    }
+
+private:
+
+    void readDataFile(std::string& filename)
+    {
+        // open the file:
+        std::streampos fileSize;
+        std::ifstream file(filename, std::ios::binary);
+        
+        if (!file) {
+            sample::gLogError << "[Error] Open file " << filename << " failed" << std::endl;
+            return ;
+        }
+        // get its size:
+        file.seekg(0, std::ios::end);
+        fileSize = file.tellg();
+        int elementNum = MAX_PILLARS * MAX_PIONT_IN_PILLARS * FEATURE_NUM;
+        file.seekg(0, std::ios::beg);
+        // assert(fileSize / sizeof(float)  == elementNum && "calibration data size needs to be consistance with params defined by config.h !");
+        // read the data:
+        // void* rawData=nullptr;
+        // rawData = malloc(fileSize);
+        // file.read((char*) rawData, fileSize);
+        // file.close();
+        // mData = static_cast<float*>(rawData);
+        // free(rawData);
+        mData.resize(elementNum * 5);
+        // std::vector<float> rawData(5 * elementNum);
+        // file.read(reinterpret_cast<char*>(rawData.data()), numElements * sizeof(float) * 5);
+        file.read(reinterpret_cast<char*>(mData.data()), elementNum * sizeof(float) * 5);
+        // std::transform(
+        //     rawData.begin(), rawData.end(), mData.begin(), [](uint8_t val) { return static_cast<float>(val) / 255.f; });
+        std::cout << "file size " << fileSize/sizeof(float) << std::endl;
+
+    }
+
+    int mBatchSize{0};
+    int mBatchCount{0}; //!< The batch that will be read on the next invocation of next()
+    int mMaxBatches{0};
+    Dims mDims{};
+    std::vector<std::string> filePaths;
+    // float* mData;
+    std::vector<float> mData = {};
+
+};
+
+
+
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+
+
+
+
+
+class MNISTBatchStream : public IBatchStream
+{
+public:
+    MNISTBatchStream(int batchSize, int maxBatches, const std::string& dataFile, const std::string& labelsFile,
+        const std::vector<std::string>& directories)
+        : mBatchSize{batchSize}
+        , mMaxBatches{maxBatches}
+        , mDims{3, {1, 28, 28}} //!< We already know the dimensions of MNIST images.
+    {
+        readDataFile(locateFile(dataFile, directories));
+        readLabelsFile(locateFile(labelsFile, directories));
+    }
+
+    void reset(int firstBatch) override
+    {
+        mBatchCount = firstBatch;
+    }
+
+    bool next() override
+    {
+        if (mBatchCount >= mMaxBatches)
+        {
+            return false;
+        }
+        ++mBatchCount;
+        return true;
+    }
+
+    void skip(int skipCount) override
+    {
+        mBatchCount += skipCount;
+    }
+
+    float* getBatch() override
+    {
+        return mData.data() + (mBatchCount * mBatchSize * samplesCommon::volume(mDims));
+    }
+
+    // float* getLabels() override
+    // {
+    //     return mLabels.data() + (mBatchCount * mBatchSize);
+    // }
+
+    int getBatchesRead() const override
+    {
+        return mBatchCount;
+    }
+
+    int getBatchSize() const override
+    {
+        return mBatchSize;
+    }
+
+    nvinfer1::Dims getDims() const override
+    {
+        return Dims{4, {mBatchSize, mDims.d[0], mDims.d[1], mDims.d[2]}};
+    }
+
+private:
+    void readDataFile(const std::string& dataFilePath)
+    {
+        std::ifstream file{dataFilePath.c_str(), std::ios::binary};
+
+        int magicNumber, numImages, imageH, imageW;
+        file.read(reinterpret_cast<char*>(&magicNumber), sizeof(magicNumber));
+        // All values in the MNIST files are big endian.
+        magicNumber = samplesCommon::swapEndianness(magicNumber);
+        ASSERT(magicNumber == 2051 && "Magic Number does not match the expected value for an MNIST image set");
+
+        // Read number of images and dimensions
+        file.read(reinterpret_cast<char*>(&numImages), sizeof(numImages));
+        file.read(reinterpret_cast<char*>(&imageH), sizeof(imageH));
+        file.read(reinterpret_cast<char*>(&imageW), sizeof(imageW));
+
+        numImages = samplesCommon::swapEndianness(numImages);
+        imageH = samplesCommon::swapEndianness(imageH);
+        imageW = samplesCommon::swapEndianness(imageW);
+
+        // The MNIST data is made up of unsigned bytes, so we need to cast to float and normalize.
+        int numElements = numImages * imageH * imageW;
+        std::vector<uint8_t> rawData(numElements);
+        file.read(reinterpret_cast<char*>(rawData.data()), numElements * sizeof(uint8_t));
+        mData.resize(numElements);
+        std::transform(
+            rawData.begin(), rawData.end(), mData.begin(), [](uint8_t val) { return static_cast<float>(val) / 255.f; });
+    }
+
+    void readLabelsFile(const std::string& labelsFilePath)
+    {
+        std::ifstream file{labelsFilePath.c_str(), std::ios::binary};
+        int magicNumber, numImages;
+        file.read(reinterpret_cast<char*>(&magicNumber), sizeof(magicNumber));
+        // All values in the MNIST files are big endian.
+        magicNumber = samplesCommon::swapEndianness(magicNumber);
+        ASSERT(magicNumber == 2049 && "Magic Number does not match the expected value for an MNIST labels file");
+
+        file.read(reinterpret_cast<char*>(&numImages), sizeof(numImages));
+        numImages = samplesCommon::swapEndianness(numImages);
+
+        std::vector<uint8_t> rawLabels(numImages);
+        file.read(reinterpret_cast<char*>(rawLabels.data()), numImages * sizeof(uint8_t));
+        mLabels.resize(numImages);
+        std::transform(
+            rawLabels.begin(), rawLabels.end(), mLabels.begin(), [](uint8_t val) { return static_cast<float>(val); });
+    }
+
+    int mBatchSize{0};
+    int mBatchCount{0}; //!< The batch that will be read on the next invocation of next()
+    int mMaxBatches{0};
+    Dims mDims{};
+    std::vector<float> mData{};
+    std::vector<float> mLabels{};
+};
+
+
+
+
+
+
+class BatchStream : public IBatchStream
+{
+public:
+    BatchStream(
+        int batchSize, int maxBatches, std::string prefix, std::string suffix, std::vector<std::string> directories)
+        : mBatchSize(batchSize)
+        , mMaxBatches(maxBatches)
+        , mPrefix(prefix)
+        , mSuffix(suffix)
+        , mDataDir(directories)
+    {
+        FILE* file = fopen(locateFile(mPrefix + std::string("0") + mSuffix, mDataDir).c_str(), "rb");
+        ASSERT(file != nullptr);
+        int d[4];
+        size_t readSize = fread(d, sizeof(int), 4, file);
+        ASSERT(readSize == 4);
+        mDims.nbDims = 4;  // The number of dimensions.
+        mDims.d[0] = d[0]; // Batch Size
+        mDims.d[1] = d[1]; // Channels
+        mDims.d[2] = d[2]; // Height
+        mDims.d[3] = d[3]; // Width
+        ASSERT(mDims.d[0] > 0 && mDims.d[1] > 0 && mDims.d[2] > 0 && mDims.d[3] > 0);
+        fclose(file);
+
+        mImageSize = mDims.d[1] * mDims.d[2] * mDims.d[3];
+        mBatch.resize(mBatchSize * mImageSize, 0);
+        mLabels.resize(mBatchSize, 0);
+        mFileBatch.resize(mDims.d[0] * mImageSize, 0);
+        mFileLabels.resize(mDims.d[0], 0);
+        reset(0);
+    }
+
+    BatchStream(int batchSize, int maxBatches, std::string prefix, std::vector<std::string> directories)
+        : BatchStream(batchSize, maxBatches, prefix, ".batch", directories)
+    {
+    }
+
+    BatchStream(
+        int batchSize, int maxBatches, nvinfer1::Dims dims, std::string listFile, std::vector<std::string> directories)
+        : mBatchSize(batchSize)
+        , mMaxBatches(maxBatches)
+        , mDims(dims)
+        , mListFile(listFile)
+        , mDataDir(directories)
+    {
+        mImageSize = mDims.d[1] * mDims.d[2] * mDims.d[3];
+        mBatch.resize(mBatchSize * mImageSize, 0);
+        mLabels.resize(mBatchSize, 0);
+        mFileBatch.resize(mDims.d[0] * mImageSize, 0);
+        mFileLabels.resize(mDims.d[0], 0);
+        reset(0);
+    }
+
+    // Resets data members
+    void reset(int firstBatch) override
+    {
+        mBatchCount = 0;
+        mFileCount = 0;
+        mFileBatchPos = mDims.d[0];
+        skip(firstBatch);
+    }
+
+    // Advance to next batch and return true, or return false if there is no batch left.
+    bool next() override
+    {
+        if (mBatchCount == mMaxBatches)
+        {
+            return false;
+        }
+
+        for (int csize = 1, batchPos = 0; batchPos < mBatchSize; batchPos += csize, mFileBatchPos += csize)
+        {
+            ASSERT(mFileBatchPos > 0 && mFileBatchPos <= mDims.d[0]);
+            if (mFileBatchPos == mDims.d[0] && !update())
+            {
+                return false;
+            }
+
+            // copy the smaller of: elements left to fulfill the request, or elements left in the file buffer.
+            csize = std::min(mBatchSize - batchPos, mDims.d[0] - mFileBatchPos);
+            std::copy_n(
+                getFileBatch() + mFileBatchPos * mImageSize, csize * mImageSize, getBatch() + batchPos * mImageSize);
+            // std::copy_n(getFileLabels() + mFileBatchPos, csize, getLabels() + batchPos);
+        }
+        mBatchCount++;
+        return true;
+    }
+
+    // Skips the batches
+    void skip(int skipCount) override
+    {
+        if (mBatchSize >= mDims.d[0] && mBatchSize % mDims.d[0] == 0 && mFileBatchPos == mDims.d[0])
+        {
+            mFileCount += skipCount * mBatchSize / mDims.d[0];
+            return;
+        }
+
+        int x = mBatchCount;
+        for (int i = 0; i < skipCount; i++)
+        {
+            next();
+        }
+        mBatchCount = x;
+    }
+
+    float* getBatch() override
+    {
+        return mBatch.data();
+    }
+
+    // float* getLabels() override
+    // {
+    //     return mLabels.data();
+    // }
+
+    int getBatchesRead() const override
+    {
+        return mBatchCount;
+    }
+
+    int getBatchSize() const override
+    {
+        return mBatchSize;
+    }
+
+    nvinfer1::Dims getDims() const override
+    {
+        return mDims;
+    }
+
+private:
+    float* getFileBatch()
+    {
+        return mFileBatch.data();
+    }
+
+    float* getFileLabels()
+    {
+        return mFileLabels.data();
+    }
+
+    bool update()
+    {
+        if (mListFile.empty())
+        {
+            std::string inputFileName = locateFile(mPrefix + std::to_string(mFileCount++) + mSuffix, mDataDir);
+            FILE* file = fopen(inputFileName.c_str(), "rb");
+            if (!file)
+            {
+                return false;
+            }
+
+            int d[4];
+            size_t readSize = fread(d, sizeof(int), 4, file);
+            ASSERT(readSize == 4);
+            ASSERT(mDims.d[0] == d[0] && mDims.d[1] == d[1] && mDims.d[2] == d[2] && mDims.d[3] == d[3]);
+            size_t readInputCount = fread(getFileBatch(), sizeof(float), mDims.d[0] * mImageSize, file);
+            ASSERT(readInputCount == size_t(mDims.d[0] * mImageSize));
+            size_t readLabelCount = fread(getFileLabels(), sizeof(float), mDims.d[0], file);
+            ASSERT(readLabelCount == 0 || readLabelCount == size_t(mDims.d[0]));
+
+            fclose(file);
+        }
+        else
+        {
+            std::vector<std::string> fNames;
+            std::ifstream file(locateFile(mListFile, mDataDir), std::ios::binary);
+            if (!file)
+            {
+                return false;
+            }
+
+            sample::gLogInfo << "Batch #" << mFileCount << std::endl;
+            file.seekg(((mBatchCount * mBatchSize)) * 7);
+
+            for (int i = 1; i <= mBatchSize; i++)
+            {
+                std::string sName;
+                std::getline(file, sName);
+                sName = sName + ".ppm";
+                sample::gLogInfo << "Calibrating with file " << sName << std::endl;
+                fNames.emplace_back(sName);
+            }
+
+            mFileCount++;
+
+            const int imageC = 3;
+            const int imageH = 300;
+            const int imageW = 300;
+            std::vector<samplesCommon::PPM<imageC, imageH, imageW>> ppms(fNames.size());
+            for (uint32_t i = 0; i < fNames.size(); ++i)
+            {
+                readPPMFile(locateFile(fNames[i], mDataDir), ppms[i]);
+            }
+
+            std::vector<float> data(samplesCommon::volume(mDims));
+            const float scale = 2.0 / 255.0;
+            const float bias = 1.0;
+            long int volChl = mDims.d[2] * mDims.d[3];
+
+            // Normalize input data
+            for (int i = 0, volImg = mDims.d[1] * mDims.d[2] * mDims.d[3]; i < mBatchSize; ++i)
+            {
+                for (int c = 0; c < mDims.d[1]; ++c)
+                {
+                    for (int j = 0; j < volChl; ++j)
+                    {
+                        data[i * volImg + c * volChl + j] = scale * float(ppms[i].buffer[j * mDims.d[1] + c]) - bias;
+                    }
+                }
+            }
+
+            std::copy_n(data.data(), mDims.d[0] * mImageSize, getFileBatch());
+        }
+
+        mFileBatchPos = 0;
+        return true;
+    }
+
+    int mBatchSize{0};
+    int mMaxBatches{0};
+    int mBatchCount{0};
+    int mFileCount{0};
+    int mFileBatchPos{0};
+    int mImageSize{0};
+    std::vector<float> mBatch;         //!< Data for the batch
+    std::vector<float> mLabels;        //!< Labels for the batch
+    std::vector<float> mFileBatch;     //!< List of image files
+    std::vector<float> mFileLabels;    //!< List of label files
+    std::string mPrefix;               //!< Batch file name prefix
+    std::string mSuffix;               //!< Batch file name suffix
+    nvinfer1::Dims mDims;              //!< Input dimensions
+    std::string mListFile;             //!< File name of the list of image names
+    std::vector<std::string> mDataDir; //!< Directories where the files can be found
+};
+
+#endif

+ 149 - 0
src/detection/CenterPoint-master/include/EntropyCalibrator.h

@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ENTROPY_CALIBRATOR_H
+#define ENTROPY_CALIBRATOR_H
+
+#include "BatchStream.h"
+#include "NvInfer.h"
+
+//! \class EntropyCalibratorImpl
+//!
+//! \brief Implements common functionality for Entropy calibrators.
+//!
+template <typename TBatchStream>
+class EntropyCalibratorImpl
+{
+public:
+    EntropyCalibratorImpl(
+        TBatchStream stream, int firstBatch, std::string networkName, const char* inputBlobName, bool readCache = false)
+        : mStream{stream}
+        // , mCalibrationTableName("CalibrationTable" + networkName)
+        , mCalibrationTableName(networkName)
+        , mInputBlobName(inputBlobName)
+        , mReadCache(readCache)
+    {
+        nvinfer1::Dims dims = mStream.getDims();
+        mInputCount = samplesCommon::volume(dims);
+        CHECK(cudaMalloc(&mDeviceInput, mInputCount * sizeof(float)));
+        mStream.reset(firstBatch);
+                std::cout << "calibrator init "<<mStream.getBatchesRead() << std::endl;
+    }
+
+    virtual ~EntropyCalibratorImpl()
+    {
+        CHECK(cudaFree(mDeviceInput));
+    }
+
+    int getBatchSize() const noexcept
+    {
+        std::cout << "calibrator getbatch size "<<mStream.getBatchesRead() << std::endl;
+        return mStream.getBatchSize();
+    }
+
+    bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept
+    {
+        std::cout << "calibrator reading batch "<<mStream.getBatchesRead() << std::endl;
+        if (!mStream.next())
+        {
+            return false;
+        }
+
+        float* tmp =  mStream.getBatch();
+        // for (int i=0 ;i < 20 ; i++) {
+        //     std::cout << tmp[i] << ", " ;
+        // }
+        // std::cout << std::endl;
+
+        CHECK(cudaMemcpy(mDeviceInput,tmp, mInputCount * sizeof(float), cudaMemcpyHostToDevice));
+        // ASSERT(!strcmp(names[0], mInputBlobName));
+        bindings[0] = mDeviceInput;
+        return true;
+    }
+
+    const void* readCalibrationCache(size_t& length) noexcept
+    {
+        mCalibrationCache.clear();
+        std::ifstream input(mCalibrationTableName, std::ios::binary);
+        input >> std::noskipws;
+        if (mReadCache && input.good())
+        {
+            std::copy(std::istream_iterator<char>(input), std::istream_iterator<char>(),
+                std::back_inserter(mCalibrationCache));
+        }
+        length = mCalibrationCache.size();
+        return length ? mCalibrationCache.data() : nullptr;
+        //         std::cout << "calibrator reading  "<<mStream.getBatchesRead() << std::endl;
+        // return nullptr;
+    }
+
+    void writeCalibrationCache(const void* cache, size_t length) noexcept
+    {
+        std::ofstream output(mCalibrationTableName, std::ios::binary);
+                std::cout << "calibrator writting  "<<mStream.getBatchesRead() << std::endl;
+        output.write(reinterpret_cast<const char*>(cache), length);
+    }
+
+private:
+    TBatchStream mStream;
+    size_t mInputCount;
+    std::string mCalibrationTableName;
+    const char* mInputBlobName;
+    bool mReadCache{true};
+    void* mDeviceInput{nullptr};
+    std::vector<char> mCalibrationCache;
+};
+
+//! \class Int8EntropyCalibrator2
+//!
+//! \brief Implements Entropy calibrator 2.
+//!  CalibrationAlgoType is kENTROPY_CALIBRATION_2.
+//!
+template <typename TBatchStream>
+class Int8EntropyCalibrator2 : public IInt8EntropyCalibrator2
+{
+public:
+    Int8EntropyCalibrator2(
+        TBatchStream stream, int firstBatch, const std::string networkName, const char* inputBlobName, bool readCache = false)
+        : mImpl(stream, firstBatch, networkName, inputBlobName, readCache)
+    {
+    }
+
+    int getBatchSize() const noexcept override
+    {
+        return mImpl.getBatchSize();
+    }
+
+    bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept override
+    {
+        return mImpl.getBatch(bindings, names, nbBindings);
+    }
+
+    const void* readCalibrationCache(size_t& length) noexcept override
+    {
+        return mImpl.readCalibrationCache(length);
+    }
+
+    void writeCalibrationCache(const void* cache, size_t length) noexcept override
+    {
+        mImpl.writeCalibrationCache(cache, length);
+    }
+
+private:
+    EntropyCalibratorImpl<TBatchStream> mImpl;
+};
+
+#endif // ENTROPY_CALIBRATOR_H

+ 223 - 0
src/detection/CenterPoint-master/include/centerpoint.h

@@ -0,0 +1,223 @@
+#include "argsParser.h"
+#include "buffers.h"
+#include "common.h"
+#include "logger.h"
+#include "parserOnnxConfig.h"
+#include "NvInfer.h"
+#include <cuda_runtime_api.h>
+#include <cstdlib>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <sys/time.h>
+#include <chrono>
+#include "preprocess.h"
+#include "postprocess.h"
+#include "scatter_cuda.h"
+
+// below  head files are defined in TensorRt/samples/common
+#include "EntropyCalibrator.h"
+#include "BatchStream.h"
+
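+// DIVUP(m, n): integer ceiling division, e.g. DIVUP(10, 4) == 3.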
+#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
+
+
+
+struct Params{
+    std::string pfeOnnxFilePath = "";
+    std::string rpnOnnxFilePath = "";
+    std::string pfeSerializedEnginePath = "";
+    std::string rpnSerializedEnginePath = "";
+
+    // Input Output Names
+    std::vector<std::string> pfeInputTensorNames;
+    std::vector<std::string> rpnInputTensorNames;
+    std::vector<std::string> pfeOutputTensorNames;
+    std::map<std::string, std::vector<std::string>> rpnOutputTensorNames;
+
+    // Input Output Paths
+    std::string savePath ;
+    std::vector<std::string>  filePaths;
+    
+    // Attrs
+    int dlaCore = -1;
+    bool fp16 = false;
+    bool int8 = false;
+    bool load_engine = false;
+    int batch_size = 1;
+};
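+
+// A minimal sketch of how a Params instance might be filled before constructing the
+// detector (the file names below are placeholders, not files shipped with the repo):
+//
+//     Params params;
+//     params.pfeOnnxFilePath = "pfe.onnx";
+//     params.rpnOnnxFilePath = "rpn.onnx";
+//     params.pfeSerializedEnginePath = "pfe.engine";
+//     params.rpnSerializedEnginePath = "rpn.engine";
+//     params.savePath = "results/";
+//     params.fp16 = true;
+//     params.load_engine = false;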
+
+class CenterPoint
+{
+    template <typename T>
+    using SampleUniquePtr = std::unique_ptr<T, samplesCommon::InferDeleter>;
+
+public:
+    CenterPoint(const Params params)
+        : mParams(params)
+        ,BATCH_SIZE_(params.batch_size)
+        , mEngine(nullptr)
+        ,mEngineRPN(nullptr)
+    {
+
+        // ScatterCuda(const int NUM_THREADS, const int MAX_NUM_PILLARS, const int GRID_X_SIZE, const int GRID_Y_SIZE)
+        scatter_cuda_ptr_.reset(new ScatterCuda(PFE_OUTPUT_DIM, PFE_OUTPUT_DIM, BEV_W, BEV_H));
+        // Allocate and zero-initialize global device memory for the raw points and pillar indices.
+
+        GPU_CHECK(cudaMalloc((void**)&dev_points_, MAX_POINTS * POINT_DIM * sizeof(float)));
+        GPU_CHECK(cudaMemset(dev_points_,0, MAX_POINTS * POINT_DIM * sizeof(float)));
+
+        GPU_CHECK(cudaMalloc((void**)&dev_indices_,MAX_PILLARS * sizeof(int)));
+        GPU_CHECK(cudaMemset(dev_indices_,0,MAX_PILLARS * sizeof(int)));
+
+        /**
+         * @brief Allocate and zero-initialize device buffers used by preprocessing.
+         */
+        GPU_CHECK(cudaMalloc((void**)& p_bev_idx_, MAX_POINTS * sizeof(int)));
+        GPU_CHECK(cudaMalloc((void**)& p_point_num_assigned_, MAX_POINTS * sizeof(int)));
+        GPU_CHECK(cudaMalloc((void**)& p_mask_, MAX_POINTS * sizeof(bool)));
+        GPU_CHECK(cudaMalloc((void**)& bev_voxel_idx_, BEV_H * BEV_W * sizeof(int)));
+
+        GPU_CHECK(cudaMemset(p_bev_idx_, 0, MAX_POINTS * sizeof(int)));
+        GPU_CHECK(cudaMemset(p_point_num_assigned_, 0, MAX_POINTS * sizeof(int)));
+        GPU_CHECK(cudaMemset(p_mask_, 0, MAX_POINTS * sizeof(bool)));
+        GPU_CHECK(cudaMemset(bev_voxel_idx_, 0, BEV_H * BEV_W * sizeof(int)));
+
+        GPU_CHECK(cudaMalloc((void**)&v_point_sum_, MAX_PILLARS * 3 *sizeof(float)));
+        GPU_CHECK(cudaMalloc((void**)&v_range_, MAX_PILLARS * sizeof(int)));
+        GPU_CHECK(cudaMalloc((void**)&v_point_num_, MAX_PILLARS * sizeof(int)));
+
+
+        GPU_CHECK(cudaMemset(v_range_,0, MAX_PILLARS * sizeof(int)));
+        GPU_CHECK(cudaMemset(v_point_sum_, 0, MAX_PILLARS * 3 * sizeof(float)));
+
+        /**
+         * @brief Allocate and zero-initialize buffers used by postprocessing (score indexing and NMS).
+         */
+        GPU_CHECK(cudaMalloc((void**)&dev_score_idx_, OUTPUT_W * OUTPUT_H * sizeof(int)));
+        GPU_CHECK(cudaMemset(dev_score_idx_, -1 , OUTPUT_W * OUTPUT_H * sizeof(int)));
+
+        GPU_CHECK(cudaMallocHost((void**)& mask_cpu, INPUT_NMS_MAX_SIZE * DIVUP (INPUT_NMS_MAX_SIZE ,THREADS_PER_BLOCK_NMS) * sizeof(unsigned long long)));
+        GPU_CHECK(cudaMemset(mask_cpu, 0 ,  INPUT_NMS_MAX_SIZE * DIVUP (INPUT_NMS_MAX_SIZE ,THREADS_PER_BLOCK_NMS) * sizeof(unsigned long long)));
+
+        GPU_CHECK(cudaMallocHost((void**)& remv_cpu, THREADS_PER_BLOCK_NMS * sizeof(unsigned long long)));
+        GPU_CHECK(cudaMemset(remv_cpu, 0 ,  THREADS_PER_BLOCK_NMS  * sizeof(unsigned long long)));
+
+        GPU_CHECK(cudaMallocHost((void**)&host_score_idx_, OUTPUT_W * OUTPUT_H  * sizeof(int)));
+        GPU_CHECK(cudaMemset(host_score_idx_, -1, OUTPUT_W * OUTPUT_H  * sizeof(int)));
+
+        GPU_CHECK(cudaMallocHost((void**)&host_keep_data_, INPUT_NMS_MAX_SIZE * sizeof(long)));
+        GPU_CHECK(cudaMemset(host_keep_data_, -1, INPUT_NMS_MAX_SIZE * sizeof(long)));
+
+        GPU_CHECK(cudaMallocHost((void**)&host_boxes_, OUTPUT_NMS_MAX_SIZE * 9 * sizeof(float)));
+        GPU_CHECK(cudaMemset(host_boxes_, 0 ,  OUTPUT_NMS_MAX_SIZE * 9 * sizeof(float)));
+
+        GPU_CHECK(cudaMallocHost((void**)&host_label_, OUTPUT_NMS_MAX_SIZE * sizeof(int)));
+        GPU_CHECK(cudaMemset(host_label_, -1, OUTPUT_NMS_MAX_SIZE * sizeof(int)));
+
+    }
+
+    ~CenterPoint()
+    {
+        std::cout << "Freeing CenterPoint device and host buffers.\n";
+
+        // Free device buffers.
+        GPU_CHECK(cudaFree(dev_indices_));
+        GPU_CHECK(cudaFree(dev_points_));
+        GPU_CHECK(cudaFree(dev_score_idx_));
+
+        // Free device buffers used by preprocessing.
+        GPU_CHECK(cudaFree(p_bev_idx_));
+        GPU_CHECK(cudaFree(p_point_num_assigned_));
+        GPU_CHECK(cudaFree(p_mask_));
+        GPU_CHECK(cudaFree(bev_voxel_idx_)); // H * W
+        GPU_CHECK(cudaFree(v_point_sum_));
+        GPU_CHECK(cudaFree(v_range_));
+        GPU_CHECK(cudaFree(v_point_num_));
+
+        // Free pinned host buffers used by postprocessing.
+        GPU_CHECK(cudaFreeHost(host_keep_data_));
+        GPU_CHECK(cudaFreeHost(host_boxes_));
+        GPU_CHECK(cudaFreeHost(host_label_));
+        GPU_CHECK(cudaFreeHost(host_score_idx_));
+        GPU_CHECK(cudaFreeHost(remv_cpu));
+        GPU_CHECK(cudaFreeHost(mask_cpu));
+
+        // The engines are held in shared_ptr members, so no explicit destroy() is needed here.
+        // std::cout << "Free PFE Engine  .\n";
+        // mEngine->destroy();
+        // std::cout << "Free RPN Engine  .\n";
+        // mEngineRPN->destroy();
+    }
+
+    std::shared_ptr<nvinfer1::ICudaEngine>  build( std::string onnxFilePath,std::string saveEnginePath);
+    std::shared_ptr<nvinfer1::ICudaEngine>  buildFromSerializedEngine(std::string serializedEngineFile);
+    bool infer(float* lidarpoints,int pointsnum,std::vector<Box>& predResult);
+    bool engineInitlization();
+    bool testinfer();
+    
+
+
+private:
+    // device pointers 
+    float* dev_scattered_feature_;
+    float* dev_points_ ;
+    int* dev_indices_;
+    int* dev_score_idx_;
+    long* dev_keep_data_;
+    SampleUniquePtr<ScatterCuda> scatter_cuda_ptr_;
+
+    // device pointers for preprocess
+    int* p_bev_idx_; 
+    int* p_point_num_assigned_;
+    bool* p_mask_;
+    int* bev_voxel_idx_; // H * W
+    float* v_point_sum_;
+    int* v_range_;
+    int* v_point_num_;
+    
+
+    // host  variables for post process
+    long* host_keep_data_;
+    float* host_boxes_;
+    int* host_label_;
+    int* host_score_idx_;
+    unsigned long long* mask_cpu;
+    unsigned long long* remv_cpu;
+
+    Params mParams;
+    int BATCH_SIZE_ = 1;
+    nvinfer1::Dims mInputDims;  //!< The dimensions of the input to the network.
+    nvinfer1::Dims mOutputDims; //!< The dimensions of the output to the network.
+    int mNumber{0};             //!< Carried over from the TensorRT ONNX MNIST sample
+    std::shared_ptr<nvinfer1::ICudaEngine> mEngine; //!< The TensorRT engine used to run the network
+    std::shared_ptr<nvinfer1::ICudaEngine> mEngineRPN;
+
+    samplesCommon::BufferManager * mbuffers;
+    samplesCommon::BufferManager * mbuffersRPN;
+
+    SampleUniquePtr<nvinfer1::IExecutionContext> mContext;
+    SampleUniquePtr<nvinfer1::IExecutionContext> mContextRPN;
+
+    //!
+    //! \brief Parses an ONNX model and creates a TensorRT network
+    //!
+    bool constructNetwork(SampleUniquePtr<nvinfer1::IBuilder>& builder,
+        SampleUniquePtr<nvinfer1::INetworkDefinition>& network, SampleUniquePtr<nvinfer1::IBuilderConfig>& config,
+        SampleUniquePtr<nvonnxparser::IParser>& parser,
+       std::string onnxFilePath);
+    //!
+    //! \brief Reads the input point cloud and stores the result in a managed buffer
+    //!
+    bool processInput(void*& points, std::string& pointFilePath, int& pointNum);
+    //!
+    //! \brief Saves the predicted boxes for one input file to savePath
+    //!
+    void saveOutput(std::vector<Box>& predResult, std::string& inputFileName, std::string savePath);
+};
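+
+// A minimal end-to-end sketch, assuming `params` is a filled Params and `points` is a
+// host float array of numPoints * POINT_DIM values (see infer() above); engineInitlization()
+// presumably builds or deserializes the PFE/RPN engines depending on params.load_engine:
+//
+//     CenterPoint detector(params);
+//     detector.engineInitlization();
+//     std::vector<Box> detections;
+//     detector.infer(points, numPoints, detections);
+//     // `detections` now holds the predicted 3D boxes.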
+
+
+
+

+ 147 - 0
src/detection/CenterPoint-master/include/common/ErrorRecorder.h

@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ERROR_RECORDER_H
+#define ERROR_RECORDER_H
+#include "NvInferRuntime.h"
+#include "logger.h"
+#include <atomic>
+#include <cstdint>
+#include <exception>
+#include <mutex>
+#include <vector>
+#if NV_IS_SAFETY
+#include <iostream>
+#endif
+using namespace nvinfer1;
+//!
+//! A simple implementation of the IErrorRecorder interface for
+//! use by samples. This interface also can be used as a reference
+//! implementation.
+//! The sample Error recorder is based on a vector that pairs the error
+//! code and the error string into a single element. It also uses
+//! standard mutexes and atomics in order to make sure that the code
+//! works in a multi-threaded environment.
+//! SampleErrorRecorder is not intended for use in automotive safety
+//! environments.
+//!
+class SampleErrorRecorder : public IErrorRecorder
+{
+    using errorPair = std::pair<ErrorCode, std::string>;
+    using errorStack = std::vector<errorPair>;
+
+public:
+    SampleErrorRecorder() = default;
+
+    virtual ~SampleErrorRecorder() noexcept {}
+    int32_t getNbErrors() const noexcept final
+    {
+        return mErrorStack.size();
+    }
+    ErrorCode getErrorCode(int32_t errorIdx) const noexcept final
+    {
+        return invalidIndexCheck(errorIdx) ? ErrorCode::kINVALID_ARGUMENT : (*this)[errorIdx].first;
+    };
+    IErrorRecorder::ErrorDesc getErrorDesc(int32_t errorIdx) const noexcept final
+    {
+        return invalidIndexCheck(errorIdx) ? "errorIdx out of range." : (*this)[errorIdx].second.c_str();
+    }
+    // This class can never overflow since we have dynamic resize via std::vector usage.
+    bool hasOverflowed() const noexcept final
+    {
+        return false;
+    }
+
+    // Empty the errorStack.
+    void clear() noexcept final
+    {
+        try
+        {
+            // grab a lock so that there is no addition while clearing.
+            std::lock_guard<std::mutex> guard(mStackLock);
+            mErrorStack.clear();
+        }
+        catch (const std::exception& e)
+        {
+#if NV_IS_SAFETY
+            std::cerr << "Internal Error: " << e.what() << std::endl;
+#else
+            getLogger()->log(ILogger::Severity::kINTERNAL_ERROR, e.what());
+#endif
+        }
+    };
+
+    //! Simple helper function that returns true if the error stack is empty.
+    bool empty() const noexcept
+    {
+        return mErrorStack.empty();
+    }
+
+    bool reportError(ErrorCode val, IErrorRecorder::ErrorDesc desc) noexcept final
+    {
+        try
+        {
+            std::lock_guard<std::mutex> guard(mStackLock);
+            sample::gLogError << "Error[" << static_cast<int32_t>(val) << "]: " << desc << std::endl;
+            mErrorStack.push_back(errorPair(val, desc));
+        }
+        catch (const std::exception& e)
+        {
+#if NV_IS_SAFETY
+            std::cerr << "Internal Error: " << e.what() << std::endl;
+#else
+            getLogger()->log(ILogger::Severity::kINTERNAL_ERROR, e.what());
+#endif
+        }
+        // All errors are considered fatal.
+        return true;
+    }
+
+    // Atomically increment or decrement the ref counter.
+    IErrorRecorder::RefCount incRefCount() noexcept final
+    {
+        return ++mRefCount;
+    }
+    IErrorRecorder::RefCount decRefCount() noexcept final
+    {
+        return --mRefCount;
+    }
+
+private:
+    // Simple helper functions.
+    const errorPair& operator[](size_t index) const noexcept
+    {
+        return mErrorStack[index];
+    }
+
+    bool invalidIndexCheck(int32_t index) const noexcept
+    {
+        // By converting signed to unsigned, we only need a single check since
+        // negative numbers turn into large positive greater than the size.
+        size_t sIndex = index;
+        return sIndex >= mErrorStack.size();
+    }
+    // Mutex to hold when locking mErrorStack.
+    std::mutex mStackLock;
+
+    // Reference count of the class. Destruction of the class when mRefCount
+    // is not zero causes undefined behavior.
+    std::atomic<int32_t> mRefCount{0};
+
+    // The error stack that holds the errors recorded by TensorRT.
+    errorStack mErrorStack;
+};     // class SampleErrorRecorder
+#endif // ERROR_RECORDER_H

+ 217 - 0
src/detection/CenterPoint-master/include/common/argsParser.h

@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TENSORRT_ARGS_PARSER_H
+#define TENSORRT_ARGS_PARSER_H
+
+#include <string>
+#include <vector>
+#ifdef _MSC_VER
+#include "..\common\windows\getopt.h"
+#else
+#include <getopt.h>
+#endif
+#include <iostream>
+
+namespace samplesCommon
+{
+
+//!
+//! \brief The SampleParams structure groups the basic parameters required by
+//!        all sample networks.
+//!
+struct SampleParams
+{
+    int32_t batchSize{1};              //!< Number of inputs in a batch
+    int32_t dlaCore{-1};               //!< Specify the DLA core to run network on.
+    bool int8{false};                  //!< Allow running the network in Int8 mode.
+    bool fp16{false};                  //!< Allow running the network in FP16 mode.
+    std::vector<std::string> dataDirs; //!< Directory paths where sample data files are stored
+    std::vector<std::string> inputTensorNames;
+    std::vector<std::string> outputTensorNames;
+};
+
+//!
+//! \brief The CaffeSampleParams structure groups the additional parameters required by
+//!         networks that use caffe
+//!
+struct CaffeSampleParams : public SampleParams
+{
+    std::string prototxtFileName; //!< Filename of prototxt design file of a network
+    std::string weightsFileName;  //!< Filename of trained weights file of a network
+    std::string meanFileName;     //!< Filename of mean file of a network
+};
+
+//!
+//! \brief The OnnxSampleParams structure groups the additional parameters required by
+//!         networks that use ONNX
+//!
+struct OnnxSampleParams : public SampleParams
+{
+    std::string onnxFileName; //!< Filename of ONNX file of a network
+};
+
+//!
+//! \brief The UffSampleParams structure groups the additional parameters required by
+//!         networks that use Uff
+//!
+struct UffSampleParams : public SampleParams
+{
+    std::string uffFileName; //!< Filename of uff file of a network
+};
+
+//!
+//! \brief Struct to maintain command-line arguments.
+//!
+struct Args
+{
+    bool runInInt8{false};
+    bool runInFp16{false};
+    bool help{false};
+    int32_t useDLACore{-1};
+    int32_t batch{1};
+    std::vector<std::string> dataDirs;
+    std::string saveEngine;
+    // std::string loadEngine;
+    bool loadEngine{false};
+    bool useILoop{false};
+    std::string pfeOnnxPath, rpnOnnxPath, pfeEnginePath, rpnEnginePath, savePath, filePath;
+
+};
+
+//!
+//! \brief Populates the Args struct with the provided command-line parameters.
+//!
+//! \throw invalid_argument if any of the arguments are not valid
+//!
+//! \return boolean If return value is true, execution can continue, otherwise program should exit
+//!
+inline bool parseArgs(Args& args, int32_t argc, char* argv[])
+{
+    while (1)
+    {
+        int32_t arg;
+        static struct option long_options[] = {{"help", no_argument, 0, 'h'}, {"datadir", required_argument, 0, 'd'},
+            {"int8", no_argument, 0, 'i'}, {"fp16", no_argument, 0, 'f'}, {"useILoop", no_argument, 0, 'l'},
+            {"saveEngine", required_argument, 0, 's'}, {"loadEngine", no_argument, 0, 'o'},
+            {"useDLACore", required_argument, 0, 'c'}, {"batch", required_argument, 0, 'b'},
+
+            {"pfeOnnxPath", required_argument, 0, 'u'},
+            {"rpnOnnxPath", required_argument, 0, 'v'},
+            {"pfeEnginePath", required_argument, 0, 'w'},
+            {"rpnEnginePath", required_argument, 0, 'x'},
+            {"savePath", required_argument, 0, 'y'},
+            {"filePath", required_argument, 0, 'z'},
+            
+             {nullptr, 0, nullptr, 0}};
+        int32_t option_index = 0;
+        arg = getopt_long(argc, argv, "hd:iu", long_options, &option_index);
+        if (arg == -1)
+        {
+            break;
+        }
+
+        switch (arg)
+        {
+        case 'h': args.help = true; return true;
+        ///////////////////////////////////////////////////////For CenterPoint/////////////////////////////////////////////////////////
+        case 'u':
+            if (optarg)
+            {
+                args.pfeOnnxPath = optarg;
+            }
+            break;
+
+        case 'v':
+            if (optarg)
+            {
+                args.rpnOnnxPath = optarg;
+            }
+            break;
+        case 'w':
+            if (optarg)
+            {
+                args.pfeEnginePath = optarg;
+            }
+            break;
+        case 'x':
+            if (optarg)
+            {
+                args.rpnEnginePath = optarg;
+            }
+            break;
+        case 'y':
+            if (optarg)
+            {
+                args.savePath = optarg;
+            }
+            break;
+
+        case 'z':
+            if (optarg)
+            {
+                args.filePath = optarg;
+            }
+            break;
+        ///////////////////////////////////////////////////////End For CenterPoint/////////////////////////////////////////////////////////
+
+        case 'd':
+            if (optarg)
+            {
+                args.dataDirs.push_back(optarg);
+            }
+            else
+            {
+                std::cerr << "ERROR: --datadir requires option argument" << std::endl;
+                return false;
+            }
+            break;
+        case 's':
+            if (optarg)
+            {
+                args.saveEngine = optarg;
+            }
+            break;
+        // case 'o':
+        //     if (optarg)
+        //     {
+        //         args.loadEngine = optarg;
+        //     }
+        //     break;
+        case 'o': args.loadEngine = true; break;
+        case 'i': args.runInInt8 = true; break;
+        case 'f': args.runInFp16 = true; break;
+        case 'l': args.useILoop = true; break;
+        case 'c':
+            if (optarg)
+            {
+                args.useDLACore = std::stoi(optarg);
+            }
+            break;
+        case 'b':
+            if (optarg)
+            {
+                args.batch = std::stoi(optarg);
+            }
+            break;
+        default: return false;
+        }
+    }
+    return true;
+}
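+
+//!
+//! A short usage sketch: parseArgs() is normally called once at the top of main(), e.g.
+//!
+//! \code
+//!     samplesCommon::Args args;
+//!     if (!samplesCommon::parseArgs(args, argc, argv) || args.help)
+//!     {
+//!         return 1; // print usage / bail out
+//!     }
+//!     // args.pfeOnnxPath, args.rpnEnginePath, args.savePath, ... are now populated.
+//! \endcode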
+
+} // namespace samplesCommon
+
+#endif // TENSORRT_ARGS_PARSER_H

+ 467 - 0
src/detection/CenterPoint-master/include/common/buffers.h

@@ -0,0 +1,467 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef TENSORRT_BUFFERS_H
+#define TENSORRT_BUFFERS_H
+
+#include "NvInfer.h"
+#include "common.h"
+#include "half.h"
+#include <cassert>
+#include <cuda_runtime_api.h>
+#include <iostream>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <numeric>
+#include <string>
+#include <vector>
+
+namespace samplesCommon
+{
+
+//!
+//! \brief  The GenericBuffer class is a templated class for buffers.
+//!
+//! \details This templated RAII (Resource Acquisition Is Initialization) class handles the allocation,
+//!          deallocation, querying of buffers on both the device and the host.
+//!          It can handle data of arbitrary types because it stores byte buffers.
+//!          The template parameters AllocFunc and FreeFunc are used for the
+//!          allocation and deallocation of the buffer.
+//!          AllocFunc must be a functor that takes in (void** ptr, size_t size)
+//!          and returns bool. ptr is a pointer to where the allocated buffer address should be stored.
+//!          size is the amount of memory in bytes to allocate.
+//!          The boolean indicates whether or not the memory allocation was successful.
+//!          FreeFunc must be a functor that takes in (void* ptr) and returns void.
+//!          ptr is the allocated buffer address. It must work with nullptr input.
+//!
+template <typename AllocFunc, typename FreeFunc>
+class GenericBuffer
+{
+public:
+    //!
+    //! \brief Construct an empty buffer.
+    //!
+    GenericBuffer(nvinfer1::DataType type = nvinfer1::DataType::kFLOAT)
+        : mSize(0)
+        , mCapacity(0)
+        , mType(type)
+        , mBuffer(nullptr)
+    {
+    }
+
+    //!
+    //! \brief Construct a buffer with the specified allocation size in bytes.
+    //!
+    GenericBuffer(size_t size, nvinfer1::DataType type)
+        : mSize(size)
+        , mCapacity(size)
+        , mType(type)
+    {
+        if (!allocFn(&mBuffer, this->nbBytes()))
+        {
+            throw std::bad_alloc();
+        }
+    }
+
+    GenericBuffer(GenericBuffer&& buf)
+        : mSize(buf.mSize)
+        , mCapacity(buf.mCapacity)
+        , mType(buf.mType)
+        , mBuffer(buf.mBuffer)
+    {
+        buf.mSize = 0;
+        buf.mCapacity = 0;
+        buf.mType = nvinfer1::DataType::kFLOAT;
+        buf.mBuffer = nullptr;
+    }
+
+    GenericBuffer& operator=(GenericBuffer&& buf)
+    {
+        if (this != &buf)
+        {
+            freeFn(mBuffer);
+            mSize = buf.mSize;
+            mCapacity = buf.mCapacity;
+            mType = buf.mType;
+            mBuffer = buf.mBuffer;
+            // Reset buf.
+            buf.mSize = 0;
+            buf.mCapacity = 0;
+            buf.mBuffer = nullptr;
+        }
+        return *this;
+    }
+
+    //!
+    //! \brief Returns pointer to underlying array.
+    //!
+    void* data()
+    {
+        return mBuffer;
+    }
+
+    //!
+    //! \brief Returns pointer to underlying array.
+    //!
+    const void* data() const
+    {
+        return mBuffer;
+    }
+
+    //!
+    //! \brief Returns the size (in number of elements) of the buffer.
+    //!
+    size_t size() const
+    {
+        return mSize;
+    }
+
+    //!
+    //! \brief Returns the size (in bytes) of the buffer.
+    //!
+    size_t nbBytes() const
+    {
+        return this->size() * samplesCommon::getElementSize(mType);
+    }
+
+    //!
+    //! \brief Resizes the buffer. This is a no-op if the new size is smaller than or equal to the current capacity.
+    //!
+    void resize(size_t newSize)
+    {
+        mSize = newSize;
+        if (mCapacity < newSize)
+        {
+            freeFn(mBuffer);
+            if (!allocFn(&mBuffer, this->nbBytes()))
+            {
+                throw std::bad_alloc{};
+            }
+            mCapacity = newSize;
+        }
+    }
+
+    //!
+    //! \brief Overload of resize that accepts Dims
+    //!
+    void resize(const nvinfer1::Dims& dims)
+    {
+        return this->resize(samplesCommon::volume(dims));
+    }
+
+    ~GenericBuffer()
+    {
+        freeFn(mBuffer);
+    }
+
+private:
+    size_t mSize{0}, mCapacity{0};
+    nvinfer1::DataType mType;
+    void* mBuffer;
+    AllocFunc allocFn;
+    FreeFunc freeFn;
+};
+
+class DeviceAllocator
+{
+public:
+    bool operator()(void** ptr, size_t size) const
+    {
+        return cudaMalloc(ptr, size) == cudaSuccess;
+    }
+};
+
+class DeviceFree
+{
+public:
+    void operator()(void* ptr) const
+    {
+        cudaFree(ptr);
+    }
+};
+
+class HostAllocator
+{
+public:
+    bool operator()(void** ptr, size_t size) const
+    {
+        *ptr = malloc(size);
+        return *ptr != nullptr;
+    }
+};
+
+class HostFree
+{
+public:
+    void operator()(void* ptr) const
+    {
+        free(ptr);
+    }
+};
+
+using DeviceBuffer = GenericBuffer<DeviceAllocator, DeviceFree>;
+using HostBuffer = GenericBuffer<HostAllocator, HostFree>;
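+
+// A small sketch of an extra allocator/free pair satisfying the AllocFunc/FreeFunc
+// contract documented on GenericBuffer, here for page-locked (pinned) host memory.
+// This is an illustration only; the samples do not define it:
+//
+//     class PinnedHostAllocator
+//     {
+//     public:
+//         bool operator()(void** ptr, size_t size) const
+//         {
+//             return cudaMallocHost(ptr, size) == cudaSuccess;
+//         }
+//     };
+//
+//     class PinnedHostFree
+//     {
+//     public:
+//         void operator()(void* ptr) const
+//         {
+//             if (ptr)
+//             {
+//                 cudaFreeHost(ptr);
+//             }
+//         }
+//     };
+//
+//     using PinnedHostBuffer = GenericBuffer<PinnedHostAllocator, PinnedHostFree>;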
+
+//!
+//! \brief  The ManagedBuffer class groups together a pair of corresponding device and host buffers.
+//!
+class ManagedBuffer
+{
+public:
+    DeviceBuffer deviceBuffer;
+    HostBuffer hostBuffer;
+};
+
+
+//!
+//! \brief  The BufferManager class handles host and device buffer allocation and deallocation.
+//!
+//! \details This RAII class handles host and device buffer allocation and deallocation,
+//!          memcpy between host and device buffers to aid with inference,
+//!          and debugging dumps to validate inference. The BufferManager class is meant to be
+//!          used to simplify buffer management and any interactions between buffers and the engine.
+//!
+class BufferManager
+{
+public:
+    static const size_t kINVALID_SIZE_VALUE = ~size_t(0);
+
+    //!
+    //! \brief Create a BufferManager for handling buffer interactions with engine.
+    //!
+    BufferManager(std::shared_ptr<nvinfer1::ICudaEngine> engine, const int batchSize = 0,
+        const nvinfer1::IExecutionContext* context = nullptr)
+        : mEngine(engine)
+        , mBatchSize(batchSize)
+    {
+        // Full Dims implies no batch size.
+        assert(engine->hasImplicitBatchDimension() || mBatchSize == 0);
+        // Create host and device buffers
+        for (int i = 0; i < mEngine->getNbBindings(); i++)
+        {
+            auto dims = context ? context->getBindingDimensions(i) : mEngine->getBindingDimensions(i);
+            size_t vol = context || !mBatchSize ? 1 : static_cast<size_t>(mBatchSize);
+            nvinfer1::DataType type = mEngine->getBindingDataType(i);
+            int vecDim = mEngine->getBindingVectorizedDim(i);
+            if (-1 != vecDim) // i.e., 0 != lgScalarsPerVector
+            {
+                int scalarsPerVec = mEngine->getBindingComponentsPerElement(i);
+                dims.d[vecDim] = divUp(dims.d[vecDim], scalarsPerVec);
+                vol *= scalarsPerVec;
+            }
+            vol *= samplesCommon::volume(dims);
+            std::unique_ptr<ManagedBuffer> manBuf{new ManagedBuffer()};
+            manBuf->deviceBuffer = DeviceBuffer(vol, type);
+            manBuf->hostBuffer = HostBuffer(vol, type);
+            mDeviceBindings.emplace_back(manBuf->deviceBuffer.data());
+            mManagedBuffers.emplace_back(std::move(manBuf));
+        }
+    }
+
+    //!
+    //! \brief Returns a vector of device buffers that you can use directly as
+    //!        bindings for the execute and enqueue methods of IExecutionContext.
+    //!
+    std::vector<void*>& getDeviceBindings()
+    {
+        return mDeviceBindings;
+    }
+
+    //!
+    //! \brief Returns a vector of device buffers.
+    //!
+    const std::vector<void*>& getDeviceBindings() const
+    {
+        return mDeviceBindings;
+    }
+
+    //!
+    //! \brief Returns the device buffer corresponding to tensorName.
+    //!        Returns nullptr if no such tensor can be found.
+    //!
+    void* getDeviceBuffer(const std::string& tensorName) const
+    {
+        return getBuffer(false, tensorName);
+    }
+
+    //!
+    //! \brief Returns the host buffer corresponding to tensorName.
+    //!        Returns nullptr if no such tensor can be found.
+    //!
+    void* getHostBuffer(const std::string& tensorName) const
+    {
+        return getBuffer(true, tensorName);
+    }
+
+    //!
+    //! \brief Returns the size of the host and device buffers that correspond to tensorName.
+    //!        Returns kINVALID_SIZE_VALUE if no such tensor can be found.
+    //!
+    size_t size(const std::string& tensorName) const
+    {
+        int index = mEngine->getBindingIndex(tensorName.c_str());
+        if (index == -1)
+            return kINVALID_SIZE_VALUE;
+        return mManagedBuffers[index]->hostBuffer.nbBytes();
+    }
+
+    //!
+    //! \brief Dump host buffer with specified tensorName to ostream.
+    //!        Prints error message to std::ostream if no such tensor can be found.
+    //!
+    void dumpBuffer(std::ostream& os, const std::string& tensorName)
+    {
+        int index = mEngine->getBindingIndex(tensorName.c_str());
+        if (index == -1)
+        {
+            os << "Invalid tensor name" << std::endl;
+            return;
+        }
+        void* buf = mManagedBuffers[index]->hostBuffer.data();
+        size_t bufSize = mManagedBuffers[index]->hostBuffer.nbBytes();
+        nvinfer1::Dims bufDims = mEngine->getBindingDimensions(index);
+        size_t rowCount = static_cast<size_t>(bufDims.nbDims > 0 ? bufDims.d[bufDims.nbDims - 1] : mBatchSize);
+        int leadDim = mBatchSize;
+        int* trailDims = bufDims.d;
+        int nbDims = bufDims.nbDims;
+
+        // Fix explicit Dimension networks
+        if (!leadDim && nbDims > 0)
+        {
+            leadDim = bufDims.d[0];
+            ++trailDims;
+            --nbDims;
+        }
+
+        os << "[" << leadDim;
+        for (int i = 0; i < nbDims; i++)
+            os << ", " << trailDims[i];
+        os << "]" << std::endl;
+        switch (mEngine->getBindingDataType(index))
+        {
+        case nvinfer1::DataType::kINT32: print<int32_t>(os, buf, bufSize, rowCount); break;
+        case nvinfer1::DataType::kFLOAT: print<float>(os, buf, bufSize, rowCount); break;
+        case nvinfer1::DataType::kHALF: print<half_float::half>(os, buf, bufSize, rowCount); break;
+        case nvinfer1::DataType::kINT8: assert(0 && "Int8 network-level input and output is not supported"); break;
+        case nvinfer1::DataType::kBOOL: assert(0 && "Bool network-level input and output are not supported"); break;
+        }
+    }
+
+    //!
+    //! \brief Templated print function that dumps buffers of arbitrary type to std::ostream.
+    //!        rowCount parameter controls how many elements are on each line.
+    //!        A rowCount of 1 means that there is only 1 element on each line.
+    //!
+    template <typename T>
+    void print(std::ostream& os, void* buf, size_t bufSize, size_t rowCount)
+    {
+        assert(rowCount != 0);
+        assert(bufSize % sizeof(T) == 0);
+        T* typedBuf = static_cast<T*>(buf);
+        size_t numItems = bufSize / sizeof(T);
+        for (int i = 0; i < static_cast<int>(numItems); i++)
+        {
+            // Handle rowCount == 1 case
+            if (rowCount == 1 && i != static_cast<int>(numItems) - 1)
+                os << typedBuf[i] << std::endl;
+            else if (rowCount == 1)
+                os << typedBuf[i];
+            // Handle rowCount > 1 case
+            else if (i % rowCount == 0)
+                os << typedBuf[i];
+            else if (i % rowCount == rowCount - 1)
+                os << " " << typedBuf[i] << std::endl;
+            else
+                os << " " << typedBuf[i];
+        }
+    }
+
+    //!
+    //! \brief Copy the contents of input host buffers to input device buffers synchronously.
+    //!
+    void copyInputToDevice()
+    {
+        memcpyBuffers(true, false, false);
+    }
+
+    //!
+    //! \brief Copy the contents of output device buffers to output host buffers synchronously.
+    //!
+    void copyOutputToHost()
+    {
+        memcpyBuffers(false, true, false);
+    }
+
+    //!
+    //! \brief Copy the contents of input host buffers to input device buffers asynchronously.
+    //!
+    void copyInputToDeviceAsync(const cudaStream_t& stream = 0)
+    {
+        memcpyBuffers(true, false, true, stream);
+    }
+
+    //!
+    //! \brief Copy the contents of output device buffers to output host buffers asynchronously.
+    //!
+    void copyOutputToHostAsync(const cudaStream_t& stream = 0)
+    {
+        memcpyBuffers(false, true, true, stream);
+    }
+
+    ~BufferManager() = default;
+
+private:
+    void* getBuffer(const bool isHost, const std::string& tensorName) const
+    {
+        int index = mEngine->getBindingIndex(tensorName.c_str());
+        if (index == -1)
+            return nullptr;
+        return (isHost ? mManagedBuffers[index]->hostBuffer.data() : mManagedBuffers[index]->deviceBuffer.data());
+    }
+
+    void memcpyBuffers(const bool copyInput, const bool deviceToHost, const bool async, const cudaStream_t& stream = 0)
+    {
+        for (int i = 0; i < mEngine->getNbBindings(); i++)
+        {
+            void* dstPtr
+                = deviceToHost ? mManagedBuffers[i]->hostBuffer.data() : mManagedBuffers[i]->deviceBuffer.data();
+            const void* srcPtr
+                = deviceToHost ? mManagedBuffers[i]->deviceBuffer.data() : mManagedBuffers[i]->hostBuffer.data();
+            const size_t byteSize = mManagedBuffers[i]->hostBuffer.nbBytes();
+            const cudaMemcpyKind memcpyType = deviceToHost ? cudaMemcpyDeviceToHost : cudaMemcpyHostToDevice;
+            if ((copyInput && mEngine->bindingIsInput(i)) || (!copyInput && !mEngine->bindingIsInput(i)))
+            {
+                if (async)
+                    CHECK(cudaMemcpyAsync(dstPtr, srcPtr, byteSize, memcpyType, stream));
+                else
+                    CHECK(cudaMemcpy(dstPtr, srcPtr, byteSize, memcpyType));
+            }
+        }
+    }
+
+    std::shared_ptr<nvinfer1::ICudaEngine> mEngine;              //!< The pointer to the engine
+    int mBatchSize;                                              //!< The batch size for legacy networks, 0 otherwise.
+    std::vector<std::unique_ptr<ManagedBuffer>> mManagedBuffers; //!< The vector of pointers to managed buffers
+    std::vector<void*> mDeviceBindings;                          //!< The vector of device buffers needed for engine execution
+};
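+
+// A minimal inference sketch with BufferManager, assuming `engine` is a built
+// std::shared_ptr<nvinfer1::ICudaEngine>, `context` is its IExecutionContext, and
+// "input"/"output" are placeholder tensor names:
+//
+//     samplesCommon::BufferManager buffers(engine);
+//     float* hostInput = static_cast<float*>(buffers.getHostBuffer("input"));
+//     // ... fill hostInput ...
+//     buffers.copyInputToDevice();
+//     context->executeV2(buffers.getDeviceBindings().data());
+//     buffers.copyOutputToHost();
+//     const float* hostOutput = static_cast<const float*>(buffers.getHostBuffer("output"));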
+
+} // namespace samplesCommon
+
+#endif // TENSORRT_BUFFERS_H
+
+
+
+

+ 971 - 0
src/detection/CenterPoint-master/include/common/common.h

@@ -0,0 +1,971 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TENSORRT_COMMON_H
+#define TENSORRT_COMMON_H
+
+// For loadLibrary
+#ifdef _MSC_VER
+// Needed so that the max/min definitions in windows.h do not conflict with std::max/min.
+#define NOMINMAX
+#include <windows.h>
+#undef NOMINMAX
+#else
+#include <dlfcn.h>
+#endif
+
+#include "NvInfer.h"
+#include "NvInferPlugin.h"
+#include "logger.h"
+#include <algorithm>
+#include <cassert>
+#include <chrono>
+#include <cmath>
+#include <cstring>
+#include <cuda_runtime_api.h>
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <iterator>
+#include <map>
+#include <memory>
+#include <new>
+#include <numeric>
+#include <ratio>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace nvinfer1;
+using namespace plugin;
+
+#ifdef _MSC_VER
+#define FN_NAME __FUNCTION__
+#else
+#define FN_NAME __func__
+#endif
+
+#if defined(__aarch64__) || defined(__QNX__)
+#define ENABLE_DLA_API 1
+#endif
+
+#define CHECK(status)                                                                                                  \
+    do                                                                                                                 \
+    {                                                                                                                  \
+        auto ret = (status);                                                                                           \
+        if (ret != 0)                                                                                                  \
+        {                                                                                                              \
+            sample::gLogError << "Cuda failure: " << ret << std::endl;                                                 \
+            abort();                                                                                                   \
+        }                                                                                                              \
+    } while (0)
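+
+// Typical use (sketch): wrap any CUDA runtime call whose failure should abort, e.g.
+//     CHECK(cudaMemcpy(dst, src, numBytes, cudaMemcpyHostToDevice));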
+
+#define CHECK_RETURN_W_MSG(status, val, errMsg)                                                                        \
+    do                                                                                                                 \
+    {                                                                                                                  \
+        if (!(status))                                                                                                 \
+        {                                                                                                              \
+            sample::gLogError << errMsg << " Error in " << __FILE__ << ", function " << FN_NAME << "(), line " << __LINE__     \
+                      << std::endl;                                                                                    \
+            return val;                                                                                                \
+        }                                                                                                              \
+    } while (0)
+
+#undef ASSERT
+#define ASSERT(condition)                                                   \
+    do                                                                      \
+    {                                                                       \
+        if (!(condition))                                                   \
+        {                                                                   \
+            sample::gLogError << "Assertion failure: " << #condition << std::endl;  \
+            abort();                                                        \
+        }                                                                   \
+    } while (0)
+
+
+#define CHECK_RETURN(status, val) CHECK_RETURN_W_MSG(status, val, "")
+
+#define OBJ_GUARD(A) std::unique_ptr<A, void (*)(A * t)>
+
+template <typename T, typename T_>
+OBJ_GUARD(T)
+makeObjGuard(T_* t)
+{
+    CHECK(!(std::is_base_of<T, T_>::value || std::is_same<T, T_>::value));
+    auto deleter = [](T* t) { t->destroy(); };
+    return std::unique_ptr<T, decltype(deleter)>{static_cast<T*>(t), deleter};
+}
+
+constexpr long double operator"" _GiB(long double val)
+{
+    return val * (1 << 30);
+}
+constexpr long double operator"" _MiB(long double val)
+{
+    return val * (1 << 20);
+}
+constexpr long double operator"" _KiB(long double val)
+{
+    return val * (1 << 10);
+}
+
+// These are necessary if we want to be able to write 1_GiB instead of 1.0_GiB.
+// Since the return type is signed, -1_GiB will work as expected.
+constexpr long long int operator"" _GiB(unsigned long long val)
+{
+    return val * (1 << 30);
+}
+constexpr long long int operator"" _MiB(unsigned long long val)
+{
+    return val * (1 << 20);
+}
+constexpr long long int operator"" _KiB(unsigned long long val)
+{
+    return val * (1 << 10);
+}
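+
+// These literals are typically used for workspace sizing when building engines, e.g.
+// (sketch, with `config` being an nvinfer1::IBuilderConfig*):
+//
+//     config->setMaxWorkspaceSize(1_GiB);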
+
+struct SimpleProfiler : public nvinfer1::IProfiler
+{
+    struct Record
+    {
+        float time{0};
+        int count{0};
+    };
+
+    virtual void reportLayerTime(const char* layerName, float ms) noexcept
+    {
+        mProfile[layerName].count++;
+        mProfile[layerName].time += ms;
+        if (std::find(mLayerNames.begin(), mLayerNames.end(), layerName) == mLayerNames.end())
+        {
+            mLayerNames.push_back(layerName);
+        }
+    }
+
+    SimpleProfiler(const char* name, const std::vector<SimpleProfiler>& srcProfilers = std::vector<SimpleProfiler>())
+        : mName(name)
+    {
+        for (const auto& srcProfiler : srcProfilers)
+        {
+            for (const auto& rec : srcProfiler.mProfile)
+            {
+                auto it = mProfile.find(rec.first);
+                if (it == mProfile.end())
+                {
+                    mProfile.insert(rec);
+                }
+                else
+                {
+                    it->second.time += rec.second.time;
+                    it->second.count += rec.second.count;
+                }
+            }
+        }
+    }
+
+    friend std::ostream& operator<<(std::ostream& out, const SimpleProfiler& value)
+    {
+        out << "========== " << value.mName << " profile ==========" << std::endl;
+        float totalTime = 0;
+        std::string layerNameStr = "TensorRT layer name";
+        int maxLayerNameLength = std::max(static_cast<int>(layerNameStr.size()), 70);
+        for (const auto& elem : value.mProfile)
+        {
+            totalTime += elem.second.time;
+            maxLayerNameLength = std::max(maxLayerNameLength, static_cast<int>(elem.first.size()));
+        }
+
+        auto old_settings = out.flags();
+        auto old_precision = out.precision();
+        // Output header
+        {
+            out << std::setw(maxLayerNameLength) << layerNameStr << " ";
+            out << std::setw(12) << "Runtime, "
+                << "%"
+                << " ";
+            out << std::setw(12) << "Invocations"
+                << " ";
+            out << std::setw(12) << "Runtime, ms" << std::endl;
+        }
+        for (size_t i = 0; i < value.mLayerNames.size(); i++)
+        {
+            const std::string layerName = value.mLayerNames[i];
+            auto elem = value.mProfile.at(layerName);
+            out << std::setw(maxLayerNameLength) << layerName << " ";
+            out << std::setw(12) << std::fixed << std::setprecision(1) << (elem.time * 100.0F / totalTime) << "%"
+                << " ";
+            out << std::setw(12) << elem.count << " ";
+            out << std::setw(12) << std::fixed << std::setprecision(2) << elem.time << std::endl;
+        }
+        out.flags(old_settings);
+        out.precision(old_precision);
+        out << "========== " << value.mName << " total runtime = " << totalTime << " ms ==========" << std::endl;
+
+        return out;
+    }
+
+private:
+    std::string mName;
+    std::vector<std::string> mLayerNames;
+    std::map<std::string, Record> mProfile;
+};
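+
+// A usage sketch: attach the profiler to an execution context, run synchronous
+// inference, then stream the accumulated per-layer report (the name is arbitrary):
+//
+//     SimpleProfiler profiler("centerpoint");
+//     context->setProfiler(&profiler);
+//     context->executeV2(bindings);   // reportLayerTime() is called per layer
+//     std::cout << profiler;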
+
+//! Locate path to file, given its filename or filepath suffix and possible dirs it might lie in.
+//! Function will also walk back MAX_DEPTH dirs from CWD to check for such a file path.
+inline std::string locateFile(
+    const std::string& filepathSuffix, const std::vector<std::string>& directories, bool reportError = true)
+{
+    const int MAX_DEPTH{10};
+    bool found{false};
+    std::string filepath;
+
+    for (auto& dir : directories)
+    {
+        if (!dir.empty() && dir.back() != '/')
+        {
+#ifdef _MSC_VER
+            filepath = dir + "\\" + filepathSuffix;
+#else
+            filepath = dir + "/" + filepathSuffix;
+#endif
+        }
+        else
+        {
+            filepath = dir + filepathSuffix;
+        }
+
+        for (int i = 0; i < MAX_DEPTH && !found; i++)
+        {
+            const std::ifstream checkFile(filepath);
+            found = checkFile.is_open();
+            if (found)
+            {
+                break;
+            }
+
+            filepath = "../" + filepath; // Try again in parent dir
+        }
+
+        if (found)
+        {
+            break;
+        }
+
+        filepath.clear();
+    }
+
+    // Could not find the file
+    if (filepath.empty())
+    {
+        const std::string dirList = std::accumulate(directories.begin() + 1, directories.end(), directories.front(),
+            [](const std::string& a, const std::string& b) { return a + "\n\t" + b; });
+        std::cout << "Could not find " << filepathSuffix << " in data directories:\n\t" << dirList << std::endl;
+
+        if (reportError)
+        {
+            std::cout << "&&&& FAILED" << std::endl;
+            exit(EXIT_FAILURE);
+        }
+    }
+
+    return filepath;
+}
+
+inline void readPGMFile(const std::string& fileName, uint8_t* buffer, int inH, int inW)
+{
+    std::ifstream infile(fileName, std::ifstream::binary);
+    assert(infile.is_open() && "Attempting to read from a file that is not open.");
+    std::string magic, h, w, max;
+    infile >> magic >> h >> w >> max;
+    infile.seekg(1, infile.cur);
+    infile.read(reinterpret_cast<char*>(buffer), inH * inW);
+}
+
+namespace samplesCommon
+{
+
+// Swaps endianness of an integral type.
+template <typename T, typename std::enable_if<std::is_integral<T>::value, int>::type = 0>
+inline T swapEndianness(const T& value)
+{
+    uint8_t bytes[sizeof(T)];
+    for (int i = 0; i < static_cast<int>(sizeof(T)); ++i)
+    {
+        bytes[sizeof(T) - 1 - i] = *(reinterpret_cast<const uint8_t*>(&value) + i);
+    }
+    return *reinterpret_cast<T*>(bytes);
+}
+
+class HostMemory
+{
+public:
+    HostMemory() = delete;
+    virtual void* data() const noexcept
+    {
+        return mData;
+    }
+    virtual std::size_t size() const noexcept
+    {
+        return mSize;
+    }
+    virtual DataType type() const noexcept
+    {
+        return mType;
+    }
+    virtual ~HostMemory() {}
+
+protected:
+    HostMemory(std::size_t size, DataType type)
+        : mSize(size)
+        , mType(type)
+    {
+    }
+    void* mData;
+    std::size_t mSize;
+    DataType mType;
+};
+
+template <typename ElemType, DataType dataType>
+class TypedHostMemory : public HostMemory
+{
+public:
+    TypedHostMemory(std::size_t size)
+        : HostMemory(size, dataType)
+    {
+        mData = new ElemType[size];
+    };
+    ~TypedHostMemory() noexcept
+    {
+        delete[](ElemType*) mData;
+    }
+    ElemType* raw() noexcept
+    {
+        return static_cast<ElemType*>(data());
+    }
+};
+
+using FloatMemory = TypedHostMemory<float, DataType::kFLOAT>;
+using HalfMemory = TypedHostMemory<uint16_t, DataType::kHALF>;
+using ByteMemory = TypedHostMemory<uint8_t, DataType::kINT8>;
+
+inline void* safeCudaMalloc(size_t memSize)
+{
+    void* deviceMem;
+    CHECK(cudaMalloc(&deviceMem, memSize));
+    if (deviceMem == nullptr)
+    {
+        std::cerr << "Out of memory" << std::endl;
+        exit(1);
+    }
+    return deviceMem;
+}
+
+inline bool isDebug()
+{
+    return (std::getenv("TENSORRT_DEBUG") ? true : false);
+}
+
+struct InferDeleter
+{
+    template <typename T>
+    void operator()(T* obj) const
+    {
+        delete obj;
+    }
+};
+
+template <typename T>
+using SampleUniquePtr = std::unique_ptr<T, InferDeleter>;
+
+static auto StreamDeleter = [](cudaStream_t* pStream)
+    {
+        if (pStream)
+        {
+            cudaStreamDestroy(*pStream);
+            delete pStream;
+        }
+    };
+
+inline std::unique_ptr<cudaStream_t, decltype(StreamDeleter)> makeCudaStream()
+{
+    std::unique_ptr<cudaStream_t, decltype(StreamDeleter)> pStream(new cudaStream_t, StreamDeleter);
+    if (cudaStreamCreate(pStream.get()) != cudaSuccess)
+    {
+        pStream.reset(nullptr);
+    }
+
+    return pStream;
+}
+
+template <typename T>
+std::shared_ptr<T> infer_object(T* obj)
+{
+    if (!obj)
+    {
+        throw std::runtime_error(std::string("Failed to create object"));
+    }
+    return std::shared_ptr<T>(obj);
+}
+
+//! Return vector of indices that puts magnitudes of sequence in descending order.
+template <class Iter>
+std::vector<size_t> argMagnitudeSort(Iter begin, Iter end)
+{
+    std::vector<size_t> indices(end - begin);
+    std::iota(indices.begin(), indices.end(), 0);
+    std::sort(indices.begin(), indices.end(), [&begin](size_t i, size_t j) { return std::abs(begin[j]) < std::abs(begin[i]); });
+    return indices;
+}
+
+inline bool readReferenceFile(const std::string& fileName, std::vector<std::string>& refVector)
+{
+    std::ifstream infile(fileName);
+    if (!infile.is_open())
+    {
+        std::cout << "ERROR: readReferenceFile: Attempting to read from a file that is not open." << std::endl;
+        return false;
+    }
+    std::string line;
+    while (std::getline(infile, line))
+    {
+        if (line.empty())
+            continue;
+        refVector.push_back(line);
+    }
+    infile.close();
+    return true;
+}
+
+template <typename T>
+std::vector<std::string> classify(
+    const std::vector<std::string>& refVector, const std::vector<T>& output, const size_t topK)
+{
+    const auto inds = samplesCommon::argMagnitudeSort(output.cbegin(), output.cend());
+    std::vector<std::string> result;
+    result.reserve(topK);
+    for (size_t k = 0; k < topK; ++k)
+    {
+        result.push_back(refVector[inds[k]]);
+    }
+    return result;
+}
+
+// Returns indices of highest K magnitudes in v.
+template <typename T>
+std::vector<size_t> topKMagnitudes(const std::vector<T>& v, const size_t k)
+{
+    std::vector<size_t> indices = samplesCommon::argMagnitudeSort(v.cbegin(), v.cend());
+    indices.resize(k);
+    return indices;
+}
+
+template <typename T>
+bool readASCIIFile(const std::string& fileName, const size_t size, std::vector<T>& out)
+{
+    std::ifstream infile(fileName);
+    if (!infile.is_open())
+    {
+        std::cout << "ERROR readASCIIFile: Attempting to read from a file that is not open." << std::endl;
+        return false;
+    }
+    out.clear();
+    out.reserve(size);
+    out.assign(std::istream_iterator<T>(infile), std::istream_iterator<T>());
+    infile.close();
+    return true;
+}
+
+template <typename T>
+bool writeASCIIFile(const std::string& fileName, const std::vector<T>& in)
+{
+    std::ofstream outfile(fileName);
+    if (!outfile.is_open())
+    {
+        std::cout << "ERROR: writeASCIIFile: Attempting to write to a file that is not open." << std::endl;
+        return false;
+    }
+    for (auto fn : in)
+    {
+        outfile << fn << "\n";
+    }
+    outfile.close();
+    return true;
+}
+
+inline void print_version()
+{
+    std::cout << "  TensorRT version: " << NV_TENSORRT_MAJOR << "." << NV_TENSORRT_MINOR << "." << NV_TENSORRT_PATCH
+              << "." << NV_TENSORRT_BUILD << std::endl;
+}
+
+inline std::string getFileType(const std::string& filepath)
+{
+    return filepath.substr(filepath.find_last_of(".") + 1);
+}
+
+inline std::string toLower(const std::string& inp)
+{
+    std::string out = inp;
+    std::transform(out.begin(), out.end(), out.begin(), ::tolower);
+    return out;
+}
+
+inline float getMaxValue(const float* buffer, int64_t size)
+{
+    assert(buffer != nullptr);
+    assert(size > 0);
+    return *std::max_element(buffer, buffer + size);
+}
+
+// Ensures that every tensor used by a network has a scale.
+//
+// All tensors in a network must have a range specified if a calibrator is not used.
+// This function is just a utility to globally fill in missing scales for the entire network.
+//
+// If a tensor does not have a scale, it is assigned inScales or outScales as follows:
+//
+// * If the tensor is the input to a layer or output of a pooling node, its scale is assigned inScales.
+// * Otherwise its scale is assigned outScales.
+//
+// The default parameter values are intended to demonstrate, for final layers in the network,
+// cases where scaling factors are asymmetric.
+inline void setAllTensorScales(INetworkDefinition* network, float inScales = 2.0f, float outScales = 4.0f)
+{
+    // Ensure that all layer inputs have a scale.
+    for (int i = 0; i < network->getNbLayers(); i++)
+    {
+        auto layer = network->getLayer(i);
+        for (int j = 0; j < layer->getNbInputs(); j++)
+        {
+            ITensor* input{layer->getInput(j)};
+            // Optional inputs are nullptr here and are from RNN layers.
+            if (input != nullptr && !input->dynamicRangeIsSet())
+            {
+                input->setDynamicRange(-inScales, inScales);
+            }
+        }
+    }
+
+    // Ensure that all layer outputs have a scale.
+    // Tensors that are also inputs to layers are ignored here
+    // since the previous loop nest assigned scales to them.
+    for (int i = 0; i < network->getNbLayers(); i++)
+    {
+        auto layer = network->getLayer(i);
+        for (int j = 0; j < layer->getNbOutputs(); j++)
+        {
+            ITensor* output{layer->getOutput(j)};
+            // Optional outputs are nullptr here and are from RNN layers.
+            if (output != nullptr && !output->dynamicRangeIsSet())
+            {
+                // Pooling must have the same input and output scales.
+                if (layer->getType() == LayerType::kPOOLING)
+                {
+                    output->setDynamicRange(-inScales, inScales);
+                }
+                else
+                {
+                    output->setDynamicRange(-outScales, outScales);
+                }
+            }
+        }
+    }
+}
+
+inline void setAllDynamicRanges(INetworkDefinition* network, float inRange = 2.0f, float outRange = 4.0f)
+{
+    return setAllTensorScales(network, inRange, outRange);
+}
+
+inline void setDummyInt8DynamicRanges(const IBuilderConfig* c, INetworkDefinition* n)
+{
+    // Set dummy per-tensor dynamic range if Int8 mode is requested.
+    if (c->getFlag(BuilderFlag::kINT8))
+    {
+        sample::gLogWarning
+            << "Int8 calibrator not provided. Generating dummy per-tensor dynamic range. Int8 accuracy is not guaranteed."
+            << std::endl;
+        setAllDynamicRanges(n);
+    }
+}
+
+inline void enableDLA(IBuilder* builder, IBuilderConfig* config, int useDLACore, bool allowGPUFallback = true)
+{
+    if (useDLACore >= 0)
+    {
+        if (builder->getNbDLACores() == 0)
+        {
+            std::cerr << "Trying to use DLA core " << useDLACore << " on a platform that doesn't have any DLA cores"
+                      << std::endl;
+            assert("Error: use DLA core on a platfrom that doesn't have any DLA cores" && false);
+        }
+        if (allowGPUFallback)
+        {
+            config->setFlag(BuilderFlag::kGPU_FALLBACK);
+        }
+        if (!config->getFlag(BuilderFlag::kINT8))
+        {
+            // User has not requested INT8 Mode.
+            // By default run in FP16 mode. FP32 mode is not permitted.
+            config->setFlag(BuilderFlag::kFP16);
+        }
+        config->setDefaultDeviceType(DeviceType::kDLA);
+        config->setDLACore(useDLACore);
+        config->setFlag(BuilderFlag::kSTRICT_TYPES);
+    }
+}
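+
+// A typical call site (sketch), with `builder`/`config` being the usual
+// SampleUniquePtr-wrapped TensorRT objects and the core index coming from the parsed args:
+//
+//     samplesCommon::enableDLA(builder.get(), config.get(), args.useDLACore);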
+
+inline int parseDLA(int argc, char** argv)
+{
+    for (int i = 1; i < argc; i++)
+    {
+        std::string arg(argv[i]);
+        if (strncmp(argv[i], "--useDLACore=", 13) == 0)
+            return std::stoi(argv[i] + 13);
+    }
+    return -1;
+}
+
+inline uint32_t getElementSize(nvinfer1::DataType t) noexcept
+{
+    switch (t)
+    {
+    case nvinfer1::DataType::kINT32: return 4;
+    case nvinfer1::DataType::kFLOAT: return 4;
+    case nvinfer1::DataType::kHALF: return 2;
+    case nvinfer1::DataType::kBOOL:
+    case nvinfer1::DataType::kINT8: return 1;
+    }
+    return 0;
+}
+
+inline int64_t volume(const nvinfer1::Dims& d)
+{
+    return std::accumulate(d.d, d.d + d.nbDims, 1, std::multiplies<int64_t>());
+}
+
+inline uint32_t elementSize(DataType t) noexcept
+{
+    switch (t)
+    {
+    case DataType::kINT32:
+    case DataType::kFLOAT: return 4;
+    case DataType::kHALF: return 2;
+    case DataType::kBOOL:
+    case DataType::kINT8: return 1;
+    }
+    return 0;
+}
+
+template <typename A, typename B>
+inline A divUp(A x, B n)
+{
+    return (x + n - 1) / n;
+}
+
+template <int C, int H, int W>
+struct PPM
+{
+    std::string magic, fileName;
+    int h, w, max;
+    uint8_t buffer[C * H * W];
+};
+
+// vPPM (variable-sized PPM): like PPM above, but with runtime-sized dimensions.
+struct vPPM
+{
+    std::string magic, fileName;
+    int h, w, max;
+    std::vector<uint8_t> buffer;
+};
+
+struct BBox
+{
+    float x1, y1, x2, y2;
+};
+
+template <int C, int H, int W>
+void readPPMFile(const std::string& filename, samplesCommon::PPM<C, H, W>& ppm)
+{
+    ppm.fileName = filename;
+    std::ifstream infile(filename, std::ifstream::binary);
+    assert(infile.is_open() && "Attempting to read from a file that is not open.");
+    infile >> ppm.magic >> ppm.w >> ppm.h >> ppm.max;
+    infile.seekg(1, infile.cur);
+    infile.read(reinterpret_cast<char*>(ppm.buffer), ppm.w * ppm.h * 3);
+}
+
+inline void readPPMFile(const std::string& filename, vPPM& ppm, std::vector<std::string>& input_dir)
+{
+    ppm.fileName = filename;
+    std::ifstream infile(locateFile(filename, input_dir), std::ifstream::binary);
+    infile >> ppm.magic >> ppm.w >> ppm.h >> ppm.max;
+    infile.seekg(1, infile.cur);
+
+    for (int i = 0; i < ppm.w * ppm.h * 3; ++i)
+    {
+        ppm.buffer.push_back(0);
+    }
+
+    infile.read(reinterpret_cast<char*>(&ppm.buffer[0]), ppm.w * ppm.h * 3);
+}
+
+template <int C, int H, int W>
+void writePPMFileWithBBox(const std::string& filename, PPM<C, H, W>& ppm, const BBox& bbox)
+{
+    std::ofstream outfile("./" + filename, std::ofstream::binary);
+    assert(!outfile.fail());
+    outfile << "P6"
+            << "\n"
+            << ppm.w << " " << ppm.h << "\n"
+            << ppm.max << "\n";
+
+    auto round = [](float x) -> int { return int(std::floor(x + 0.5f)); };
+    const int x1 = std::min(std::max(0, round(bbox.x1)), W - 1);
+    const int x2 = std::min(std::max(0, round(bbox.x2)), W - 1);
+    const int y1 = std::min(std::max(0, round(bbox.y1)), H - 1);
+    const int y2 = std::min(std::max(0, round(bbox.y2)), H - 1);
+
+    for (int x = x1; x <= x2; ++x)
+    {
+        // bbox top border
+        ppm.buffer[(y1 * ppm.w + x) * 3] = 255;
+        ppm.buffer[(y1 * ppm.w + x) * 3 + 1] = 0;
+        ppm.buffer[(y1 * ppm.w + x) * 3 + 2] = 0;
+        // bbox bottom border
+        ppm.buffer[(y2 * ppm.w + x) * 3] = 255;
+        ppm.buffer[(y2 * ppm.w + x) * 3 + 1] = 0;
+        ppm.buffer[(y2 * ppm.w + x) * 3 + 2] = 0;
+    }
+
+    for (int y = y1; y <= y2; ++y)
+    {
+        // bbox left border
+        ppm.buffer[(y * ppm.w + x1) * 3] = 255;
+        ppm.buffer[(y * ppm.w + x1) * 3 + 1] = 0;
+        ppm.buffer[(y * ppm.w + x1) * 3 + 2] = 0;
+        // bbox right border
+        ppm.buffer[(y * ppm.w + x2) * 3] = 255;
+        ppm.buffer[(y * ppm.w + x2) * 3 + 1] = 0;
+        ppm.buffer[(y * ppm.w + x2) * 3 + 2] = 0;
+    }
+
+    outfile.write(reinterpret_cast<char*>(ppm.buffer), ppm.w * ppm.h * 3);
+}
+
+inline void writePPMFileWithBBox(const std::string& filename, vPPM ppm, std::vector<BBox>& dets)
+{
+    std::ofstream outfile("./" + filename, std::ofstream::binary);
+    assert(!outfile.fail());
+    outfile << "P6"
+            << "\n"
+            << ppm.w << " " << ppm.h << "\n"
+            << ppm.max << "\n";
+    auto round = [](float x) -> int { return int(std::floor(x + 0.5f)); };
+
+    for (auto bbox : dets)
+    {
+        for (int x = int(bbox.x1); x < int(bbox.x2); ++x)
+        {
+            // bbox top border
+            ppm.buffer[(round(bbox.y1) * ppm.w + x) * 3] = 255;
+            ppm.buffer[(round(bbox.y1) * ppm.w + x) * 3 + 1] = 0;
+            ppm.buffer[(round(bbox.y1) * ppm.w + x) * 3 + 2] = 0;
+            // bbox bottom border
+            ppm.buffer[(round(bbox.y2) * ppm.w + x) * 3] = 255;
+            ppm.buffer[(round(bbox.y2) * ppm.w + x) * 3 + 1] = 0;
+            ppm.buffer[(round(bbox.y2) * ppm.w + x) * 3 + 2] = 0;
+        }
+
+        for (int y = int(bbox.y1); y < int(bbox.y2); ++y)
+        {
+            // bbox left border
+            ppm.buffer[(y * ppm.w + round(bbox.x1)) * 3] = 255;
+            ppm.buffer[(y * ppm.w + round(bbox.x1)) * 3 + 1] = 0;
+            ppm.buffer[(y * ppm.w + round(bbox.x1)) * 3 + 2] = 0;
+            // bbox right border
+            ppm.buffer[(y * ppm.w + round(bbox.x2)) * 3] = 255;
+            ppm.buffer[(y * ppm.w + round(bbox.x2)) * 3 + 1] = 0;
+            ppm.buffer[(y * ppm.w + round(bbox.x2)) * 3 + 2] = 0;
+        }
+    }
+
+    outfile.write(reinterpret_cast<char*>(&ppm.buffer[0]), ppm.w * ppm.h * 3);
+}
+
+class TimerBase
+{
+public:
+    virtual void start() {}
+    virtual void stop() {}
+    float microseconds() const noexcept
+    {
+        return mMs * 1000.f;
+    }
+    float milliseconds() const noexcept
+    {
+        return mMs;
+    }
+    float seconds() const noexcept
+    {
+        return mMs / 1000.f;
+    }
+    void reset() noexcept
+    {
+        mMs = 0.f;
+    }
+
+protected:
+    float mMs{0.0f};
+};
+
+class GpuTimer : public TimerBase
+{
+public:
+    GpuTimer(cudaStream_t stream)
+        : mStream(stream)
+    {
+        CHECK(cudaEventCreate(&mStart));
+        CHECK(cudaEventCreate(&mStop));
+    }
+    ~GpuTimer()
+    {
+        CHECK(cudaEventDestroy(mStart));
+        CHECK(cudaEventDestroy(mStop));
+    }
+    void start()
+    {
+        CHECK(cudaEventRecord(mStart, mStream));
+    }
+    void stop()
+    {
+        CHECK(cudaEventRecord(mStop, mStream));
+        float ms{0.0f};
+        CHECK(cudaEventSynchronize(mStop));
+        CHECK(cudaEventElapsedTime(&ms, mStart, mStop));
+        mMs += ms;
+    }
+
+private:
+    cudaEvent_t mStart, mStop;
+    cudaStream_t mStream;
+}; // class GpuTimer
+
+template <typename Clock>
+class CpuTimer : public TimerBase
+{
+public:
+    using clock_type = Clock;
+
+    void start()
+    {
+        mStart = Clock::now();
+    }
+    void stop()
+    {
+        mStop = Clock::now();
+        mMs += std::chrono::duration<float, std::milli>{mStop - mStart}.count();
+    }
+
+private:
+    std::chrono::time_point<Clock> mStart, mStop;
+}; // class CpuTimer
+
+using PreciseCpuTimer = CpuTimer<std::chrono::high_resolution_clock>;
+
+inline std::vector<std::string> splitString(std::string str, char delimiter = ',')
+{
+    std::vector<std::string> splitVect;
+    std::stringstream ss(str);
+    std::string substr;
+
+    while (ss.good())
+    {
+        getline(ss, substr, delimiter);
+        splitVect.emplace_back(std::move(substr));
+    }
+    return splitVect;
+}
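+// For example, splitString("car,truck,bus") yields {"car", "truck", "bus"};
+// an empty input yields a single empty string.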
+
+// Return m rounded up to nearest multiple of n
+inline int roundUp(int m, int n)
+{
+    return ((m + n - 1) / n) * n;
+}
+
+inline int getC(const Dims& d)
+{
+    return d.nbDims >= 3 ? d.d[d.nbDims - 3] : 1;
+}
+
+inline int getH(const Dims& d)
+{
+    return d.nbDims >= 2 ? d.d[d.nbDims - 2] : 1;
+}
+
+inline int getW(const Dims& d)
+{
+    return d.nbDims >= 1 ? d.d[d.nbDims - 1] : 1;
+}
+
+inline void loadLibrary(const std::string& path)
+{
+#ifdef _MSC_VER
+    void* handle = LoadLibrary(path.c_str());
+#else
+    void* handle = dlopen(path.c_str(), RTLD_LAZY);
+#endif
+    if (handle == nullptr)
+    {
+#ifdef _MSC_VER
+        sample::gLogError << "Could not load plugin library: " << path << std::endl;
+#else
+        sample::gLogError << "Could not load plugin library: " << path << ", due to: " << dlerror() << std::endl;
+#endif
+    }
+}
+
+inline int32_t getSMVersion()
+{
+    int32_t deviceIndex = 0;
+    CHECK(cudaGetDevice(&deviceIndex));
+
+    int32_t major, minor;
+    CHECK(cudaDeviceGetAttribute(&major, cudaDevAttrComputeCapabilityMajor, deviceIndex));
+    CHECK(cudaDeviceGetAttribute(&minor, cudaDevAttrComputeCapabilityMinor, deviceIndex));
+
+    return ((major << 8) | minor);
+}
+
+inline bool isSMSafe()
+{
+    const int32_t smVersion = getSMVersion();
+    return smVersion == 0x0700 || smVersion == 0x0702 || smVersion == 0x0705;
+}
+} // namespace samplesCommon
+
+inline std::ostream& operator<<(std::ostream& os, const nvinfer1::Dims& dims)
+{
+    os << "(";
+    for (int i = 0; i < dims.nbDims; ++i)
+    {
+        os << (i ? ", " : "") << dims.d[i];
+    }
+    return os << ")";
+}
+
+#endif // TENSORRT_COMMON_H

+ 116 - 0
src/detection/CenterPoint-master/include/common/dumpTFWts.py

@@ -0,0 +1,116 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Script to dump TensorFlow weights in TRT v1 and v2 dump format.
+# The V1 format is for TensorRT 4.0. The V2 format is for TensorRT 4.0 and later.
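+# Example invocations (the checkpoint basename below is illustrative):
+#   python dumpTFWts.py -m model.ckpt-766908 -o model            # wts v2
+#   python dumpTFWts.py -m model.ckpt-766908 -o model --wtsv1 1  # wts v1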
+
+import sys
+import struct
+import argparse
+try:
+    import tensorflow as tf
+    from tensorflow.python import pywrap_tensorflow
+except ImportError as err:
+    sys.stderr.write("""Error: Failed to import module ({})""".format(err))
+    sys.exit()
+
+parser = argparse.ArgumentParser(description='TensorFlow Weight Dumper')
+
+parser.add_argument('-m', '--model', required=True, help='The checkpoint file basename, example basename(model.ckpt-766908.data-00000-of-00001) -> model.ckpt-766908')
+parser.add_argument('-o', '--output', required=True, help='The weight file to dump all the weights to.')
+parser.add_argument('-1', '--wtsv1', required=False, default=False, type=bool, help='Dump the weights in the wts v1 format.')
+
+opt = parser.parse_args()
+
+if opt.wtsv1:
+    print("Outputting the trained weights in TensorRT's wts v1 format. This format is documented as:")
+    print("Line 0: <number of buffers in the file>")
+    print("Line 1-Num: [buffer name] [buffer type] [buffer size] <hex values>")
+else:
+    print("Outputting the trained weights in TensorRT's wts v2 format. This format is documented as:")
+    print("Line 0: <number of buffers in the file>")
+    print("Line 1-Num: [buffer name] [buffer type] [(buffer shape{e.g. (1, 2, 3)}] <buffer shaped size bytes of data>")
+
+inputbase = opt.model
+outputbase = opt.output
+
+def float_to_hex(f):
+    return hex(struct.unpack('<I', struct.pack('<f', f))[0])
+
+def getTRTType(tensor):
+    if tf.as_dtype(tensor.dtype) == tf.float32:
+        return 0
+    if tf.as_dtype(tensor.dtype) == tf.float16:
+        return 1
+    if tf.as_dtype(tensor.dtype) == tf.int8:
+        return 2
+    if tf.as_dtype(tensor.dtype) == tf.int32:
+        return 3
+    print("Tensor data type of %s is not supported in TensorRT"%(tensor.dtype))
+    sys.exit()
+
+try:
+    # Open output file
+    if opt.wtsv1:
+        outputFileName = outputbase + ".wts"
+    else:
+        outputFileName = outputbase + ".wts2"
+    outputFile = open(outputFileName, 'w')
+
+    # read vars from checkpoint
+    reader = pywrap_tensorflow.NewCheckpointReader(inputbase)
+    var_to_shape_map = reader.get_variable_to_shape_map()
+
+    # Record count of weights
+    count = 0
+    for key in sorted(var_to_shape_map):
+        count += 1
+    outputFile.write("%s\n"%(count))
+
+    # Dump the weights in either v1 or v2 format
+    for key in sorted(var_to_shape_map):
+        tensor = reader.get_tensor(key)
+        file_key = key.replace('/','_')
+        typeOfElem = getTRTType(tensor)
+        val = tensor.shape
+        if opt.wtsv1:
+            val = tensor.size
+        print("%s %s %s "%(file_key, typeOfElem, val))
+        flat_tensor = tensor.flatten()
+        outputFile.write("%s 0 %s "%(file_key, val))
+        if opt.wtsv1:
+            for weight in flat_tensor:
+                hexval = float_to_hex(float(weight))
+                outputFile.write("%s "%(hexval[2:]))
+        else:
+            outputFile.write(flat_tensor.tobytes())
+        outputFile.write("\n");
+    outputFile.close()
+
+except Exception as e:  # pylint: disable=broad-except
+    print(str(e))
+    if "corrupted compressed block contents" in str(e):
+        print("It's likely that your checkpoint file has been compressed "
+                "with SNAPPY.")
+        if ("Data loss" in str(e) and
+                (any([e in inputbase for e in [".index", ".meta", ".data"]]))):
+            proposed_file = ".".join(inputbase.split(".")[0:-1])
+            v2_file_error_template = """
+           It's likely that this is a V2 checkpoint and you need to provide the filename
+           *prefix*.  Try removing the '.' and extension.  Try:
+           inspect checkpoint --file_name = {}"""
+            print(v2_file_error_template.format(proposed_file))

+ 247 - 0
src/detection/CenterPoint-master/include/common/getOptions.cpp

@@ -0,0 +1,247 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "getOptions.h"
+#include "logger.h"
+
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <cstring>
+#include <set>
+
+namespace nvinfer1
+{
+namespace utility
+{
+
+//! Matching for TRTOptions is defined as follows:
+//!
+//! If A and B both have longName set, A matches B if and only if A.longName ==
+//! B.longName and (A.shortName == B.shortName if both have short name set).
+//!
+//! If A only has shortName set and B only has longName set, then A does not
+//! match B. It is assumed that when 2 TRTOptions are compared, one of them is
+//! the definition of a TRTOption in the input to getOptions. As such, if the
+//! definition only has shortName set, it will never be equal to a TRTOption
+//! that does not have shortName set (and same for longName).
+//!
+//! If A and B both have shortName set but B does not have longName set, A
+//! matches B if and only if A.shortName == B.shortName.
+//!
+//! If A has neither long nor short name set, A matches B if and only if B also has
+//! neither long nor short name set.
+bool matches(const TRTOption& a, const TRTOption& b)
+{
+    if (!a.longName.empty() && !b.longName.empty())
+    {
+        if (a.shortName && b.shortName)
+        {
+            return (a.longName == b.longName) && (a.shortName == b.shortName);
+        }
+        return a.longName == b.longName;
+    }
+
+    // If only one of them is not set, this will return false anyway.
+    return a.shortName == b.shortName;
+}
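+// For example, under the rules above {'f', "foo"} matches {0, "foo"} through the long
+// name alone, while a definition with only a short name set, e.g. {'f', ""}, never
+// matches an option that has only a long name set. (Illustrative values only.)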
+
+//! getTRTOptionIndex returns the index of a TRTOption in a vector of
+//! TRTOptions, -1 if not found.
+int getTRTOptionIndex(const std::vector<TRTOption>& options, const TRTOption& opt)
+{
+    for (size_t i = 0; i < options.size(); ++i)
+    {
+        if (matches(opt, options[i]))
+        {
+            return i;
+        }
+    }
+    return -1;
+}
+
+//! validateTRTOption will return a string containing an error message if the short
+//! name is not alphanumeric, the long name contains characters other than
+//! alphanumerics, '-' or '_', or either name duplicates one already seen.
+//! Otherwise, returns the empty string.
+std::string validateTRTOption(
+    const std::set<char>& seenShortNames, const std::set<std::string>& seenLongNames, const TRTOption& opt)
+{
+    if (opt.shortName != 0)
+    {
+        if (!std::isalnum(opt.shortName))
+        {
+            return "Short name '" + std::to_string(opt.shortName) + "' is non-alphanumeric";
+        }
+
+        if (seenShortNames.find(opt.shortName) != seenShortNames.end())
+        {
+            return "Short name '" + std::to_string(opt.shortName) + "' is a duplicate";
+        }
+    }
+
+    if (!opt.longName.empty())
+    {
+        for (const char& c : opt.longName)
+        {
+            if (!std::isalnum(c) && c != '-' && c != '_')
+            {
+                return "Long name '" + opt.longName + "' contains characters that are not '-', '_', or alphanumeric";
+            }
+        }
+
+        if (seenLongNames.find(opt.longName) != seenLongNames.end())
+        {
+            return "Long name '" + opt.longName + "' is a duplicate";
+        }
+    }
+    return "";
+}
+
+//! validateTRTOptions will return a string containing an error message if any
+//! option has an invalid name (see validateTRTOption) or if there are duplicate
+//! option names found. Otherwise, returns the empty string.
+std::string validateTRTOptions(const std::vector<TRTOption>& options)
+{
+    std::set<char> seenShortNames;
+    std::set<std::string> seenLongNames;
+    for (size_t i = 0; i < options.size(); ++i)
+    {
+        const std::string errMsg = validateTRTOption(seenShortNames, seenLongNames, options[i]);
+        if (!errMsg.empty())
+        {
+            return "Error '" + errMsg + "' at TRTOption " + std::to_string(i);
+        }
+
+        seenShortNames.insert(options[i].shortName);
+        seenLongNames.insert(options[i].longName);
+    }
+    return "";
+}
+
+//! parseArgs parses an argument list and returns a TRTParsedArgs with the
+//! fields set accordingly. Assumes that options is validated.
+//! ErrMsg will be set if:
+//!     - an argument is null
+//!     - an argument is empty
+//!     - an argument does not specify an option (i.e. it is just "-" or "--")
+//!     - a short argument has more than 1 character
+//!     - the last argument in the list requires a value
+TRTParsedArgs parseArgs(int argc, const char* const* argv, const std::vector<TRTOption>& options)
+{
+    TRTParsedArgs parsedArgs;
+    parsedArgs.values.resize(options.size());
+
+    for (int i = 1; i < argc; ++i) // index of current command-line argument
+    {
+        if (argv[i] == nullptr)
+        {
+            return TRTParsedArgs{"Null argument at index " + std::to_string(i)};
+        }
+
+        const std::string argStr(argv[i]);
+        if (argStr.empty())
+        {
+            return TRTParsedArgs{"Empty argument at index " + std::to_string(i)};
+        }
+
+        // No starting hyphen means it is a positional argument
+        if (argStr[0] != '-')
+        {
+            parsedArgs.positionalArgs.push_back(argStr);
+            continue;
+        }
+
+        if (argStr == "-" || argStr == "--")
+        {
+            return TRTParsedArgs{"Argument does not specify an option at index " + std::to_string(i)};
+        }
+
+        // If only 1 hyphen, char after is the flag.
+        TRTOption opt{' ', "", false, ""};
+        std::string value;
+        if (argStr[1] != '-')
+        {
+            // Must only have 1 char after the hyphen
+            if (argStr.size() > 2)
+            {
+                return TRTParsedArgs{"Short arg contains more than 1 character at index " + std::to_string(i)};
+            }
+            opt.shortName = argStr[1];
+        }
+        else
+        {
+            opt.longName = argStr.substr(2);
+
+            // We need to support --foo=bar syntax, so look for '='
+            const size_t eqIndex = opt.longName.find('=');
+            if (eqIndex < opt.longName.size())
+            {
+                value = opt.longName.substr(eqIndex + 1);
+                opt.longName = opt.longName.substr(0, eqIndex);
+            }
+        }
+
+        const int idx = getTRTOptionIndex(options, opt);
+        if (idx < 0)
+        {
+            continue;
+        }
+
+        if (options[idx].valueRequired)
+        {
+            if (!value.empty())
+            {
+                parsedArgs.values[idx].second.push_back(value);
+                parsedArgs.values[idx].first = parsedArgs.values[idx].second.size();
+                continue;
+            }
+
+            if (i + 1 >= argc)
+            {
+                return TRTParsedArgs{"Last argument requires value, but none given"};
+            }
+
+            const std::string nextArg(argv[i + 1]);
+            if (nextArg.size() >= 1 && nextArg[0] == '-')
+            {
+                sample::gLogWarning << "Warning: Using '" << nextArg << "' as a value for '" << argStr
+                                    << "', Should this be its own flag?" << std::endl;
+            }
+
+            parsedArgs.values[idx].second.push_back(nextArg);
+            i += 1; // Next argument already consumed
+
+            parsedArgs.values[idx].first = parsedArgs.values[idx].second.size();
+        }
+        else
+        {
+            parsedArgs.values[idx].first += 1;
+        }
+    }
+    return parsedArgs;
+}
+
+TRTParsedArgs getOptions(int argc, const char* const* argv, const std::vector<TRTOption>& options)
+{
+    const std::string errMsg = validateTRTOptions(options);
+    if (!errMsg.empty())
+    {
+        return TRTParsedArgs{errMsg};
+    }
+    return parseArgs(argc, argv, options);
+}
+} // namespace utility
+} // namespace nvinfer1

+ 127 - 0
src/detection/CenterPoint-master/include/common/getOptions.h

@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TRT_GET_OPTIONS_H
+#define TRT_GET_OPTIONS_H
+
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace nvinfer1
+{
+namespace utility
+{
+
+//! TRTOption defines a command line option. At least 1 of shortName and longName
+//! must be defined.
+//! valueRequired has no default, so it must be explicitly initialized; reading an
+//! uninitialized bool is undefined behavior.
+//! helpText is optional.
+struct TRTOption
+{
+    char shortName;       //!< Option name in short (single hyphen) form (i.e. -a, -b)
+    std::string longName; //!< Option name in long (double hyphen) form (i.e. --foo, --bar)
+    bool valueRequired;   //!< True if a value is needed for an option (i.e. -N 4, --foo bar)
+    std::string helpText; //!< Text to show when printing out the command usage
+};
+
+//! TRTParsedArgs is returned by getOptions after it has parsed a command line
+//! argument list (argv).
+//!
+//! errMsg is a string containing an error message if any errors occurred. If it
+//! is empty, no errors occurred.
+//!
+//! values stores a vector of pairs for each option (ordered by order in the
+//! input). Each pair contains an int (the number of occurrences) and a vector
+//! of strings (a list of values). The user should know which of these to use,
+//! and which options required values. For non-value options, only occurrences is
+//! populated. For value-required options, occurrences == # of values. Values do
+//! not need to be unique.
+//!
+//! positionalArgs stores additional arguments that are passed in without an
+//! option (these must not start with a hyphen).
+struct TRTParsedArgs
+{
+    std::string errMsg;
+    std::vector<std::pair<int, std::vector<std::string>>> values;
+    std::vector<std::string> positionalArgs;
+};
+
+//! Parse the input arguments passed to main() and extract options as well as
+//! positional arguments.
+//!
+//! Options are supposed to be passed to main() with a preceding hyphen '-'.
+//!
+//! If there is a single preceding hyphen, there should be exactly 1 character
+//! after the hyphen, which is interpreted as the option.
+//!
+//! If there are 2 preceding hyphens, the entire argument (without the hyphens)
+//! is interpreted as the option.
+//!
+//! If the option requires a value, the next argument is used as the value.
+//!
+//! Positional arguments must not start with a hyphen.
+//!
+//! If an argument requires a value, the next argument is interpreted as the
+//! value, even if it is in the form of a valid option (i.e. --foo --bar will store
+//! "--bar" as a value for option "foo" if "foo" requires a value).
+//! We also support --name=value syntax. In this case, 'value' would be used as
+//! the value, NOT the next argument.
+//!
+//! For options:
+//!   { { 'a', "", false },
+//!     { 'b', "", false },
+//!     { 0, "cee", false },
+//!     { 'd', "", true },
+//!     { 'e', "", true },
+//!     { 'f', "foo", true } }
+//!
+//! ./main hello world -a -a --cee -d 12 -f 34
+//! and
+//! ./main hello world -a -a --cee -d 12 --foo 34
+//!
+//! will result in:
+//!
+//! TRTParsedArgs {
+//!      errMsg: "",
+//!      values: { { 2, {} },
+//!                { 0, {} },
+//!                { 1, {} },
+//!                { 1, {"12"} },
+//!                { 0, {} },
+//!                { 1, {"34"} } }
+//!      positionalArgs: {"hello", "world"},
+//! }
+//!
+//! Non-POSIX behavior:
+//!      - Does not support "-abcde" as a shorthand for "-a -b -c -d -e". Each
+//!        option must have its own hyphen prefix.
+//!      - Does not support -e12 as a shorthand for "-e 12". Values MUST be
+//!        whitespace-separated from the option it is for.
+//!
+//! @param[in] argc The number of arguments passed to main (including the
+//!            file name, which is disregarded)
+//! @param[in] argv The arguments passed to main (including the file name,
+//!            which is disregarded)
+//! @param[in] options List of TRTOptions to parse
+//! @return TRTParsedArgs. See TRTParsedArgs documentation for descriptions of
+//!         the fields.
+TRTParsedArgs getOptions(int argc, const char* const* argv, const std::vector<TRTOption>& options);
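+//! A call sketch (illustrative only): build the option list once, then hand argv to
+//! getOptions() and check errMsg before using the parsed values.
+//!
+//!     std::vector<TRTOption> opts = {{'d', "useDLACore", true, "DLA core index"}};
+//!     TRTParsedArgs parsed = getOptions(argc, argv, opts);
+//!     if (!parsed.errMsg.empty()) { /* report the error and exit */ }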
+} // namespace utility
+} // namespace nvinfer1
+
+#endif // TRT_GET_OPTIONS_H

+ 4302 - 0
src/detection/CenterPoint-master/include/common/half.h

@@ -0,0 +1,4302 @@
+// half - IEEE 754-based half-precision floating point library.
+//
+// Copyright (c) 2012-2017 Christian Rau <rauy@users.sourceforge.net>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
+// documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
+// permit persons to whom the Software is furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
+// Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
+// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Version 1.12.0
+
+/// \file
+/// Main header file for half precision functionality.
+
+#ifndef HALF_HALF_HPP
+#define HALF_HALF_HPP
+
+/// Combined gcc version number.
+#define HALF_GNUC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+// check C++11 language features
+#if defined(__clang__) // clang
+#if __has_feature(cxx_static_assert) && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)
+#define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+#endif
+#if __has_feature(cxx_constexpr) && !defined(HALF_ENABLE_CPP11_CONSTEXPR)
+#define HALF_ENABLE_CPP11_CONSTEXPR 1
+#endif
+#if __has_feature(cxx_noexcept) && !defined(HALF_ENABLE_CPP11_NOEXCEPT)
+#define HALF_ENABLE_CPP11_NOEXCEPT 1
+#endif
+#if __has_feature(cxx_user_literals) && !defined(HALF_ENABLE_CPP11_USER_LITERALS)
+#define HALF_ENABLE_CPP11_USER_LITERALS 1
+#endif
+#if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L) && !defined(HALF_ENABLE_CPP11_LONG_LONG)
+#define HALF_ENABLE_CPP11_LONG_LONG 1
+#endif
+/*#elif defined(__INTEL_COMPILER)								//Intel C++
+    #if __INTEL_COMPILER >= 1100 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)		????????
+        #define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+    #endif
+    #if __INTEL_COMPILER >= 1300 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)			????????
+        #define HALF_ENABLE_CPP11_CONSTEXPR 1
+    #endif
+    #if __INTEL_COMPILER >= 1300 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)			????????
+        #define HALF_ENABLE_CPP11_NOEXCEPT 1
+    #endif
+    #if __INTEL_COMPILER >= 1100 && !defined(HALF_ENABLE_CPP11_LONG_LONG)			????????
+        #define HALF_ENABLE_CPP11_LONG_LONG 1
+    #endif*/
+#elif defined(__GNUC__) // gcc
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L
+#if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)
+#define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+#endif
+#if HALF_GNUC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)
+#define HALF_ENABLE_CPP11_CONSTEXPR 1
+#endif
+#if HALF_GNUC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)
+#define HALF_ENABLE_CPP11_NOEXCEPT 1
+#endif
+#if HALF_GNUC_VERSION >= 407 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)
+#define HALF_ENABLE_CPP11_USER_LITERALS 1
+#endif
+#if !defined(HALF_ENABLE_CPP11_LONG_LONG)
+#define HALF_ENABLE_CPP11_LONG_LONG 1
+#endif
+#endif
+#elif defined(_MSC_VER) // Visual C++
+#if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_CONSTEXPR)
+#define HALF_ENABLE_CPP11_CONSTEXPR 1
+#endif
+#if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_NOEXCEPT)
+#define HALF_ENABLE_CPP11_NOEXCEPT 1
+#endif
+#if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_USER_LITERALS)
+#define HALF_ENABLE_CPP11_USER_LITERALS 1
+#endif
+#if _MSC_VER >= 1600 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT)
+#define HALF_ENABLE_CPP11_STATIC_ASSERT 1
+#endif
+#if _MSC_VER >= 1310 && !defined(HALF_ENABLE_CPP11_LONG_LONG)
+#define HALF_ENABLE_CPP11_LONG_LONG 1
+#endif
+#define HALF_POP_WARNINGS 1
+#pragma warning(push)
+#pragma warning(disable : 4099 4127 4146) // struct vs class, constant in if, negative unsigned
+#endif
+
+// check C++11 library features
+#include <utility>
+#if defined(_LIBCPP_VERSION) // libc++
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103
+#ifndef HALF_ENABLE_CPP11_TYPE_TRAITS
+#define HALF_ENABLE_CPP11_TYPE_TRAITS 1
+#endif
+#ifndef HALF_ENABLE_CPP11_CSTDINT
+#define HALF_ENABLE_CPP11_CSTDINT 1
+#endif
+#ifndef HALF_ENABLE_CPP11_CMATH
+#define HALF_ENABLE_CPP11_CMATH 1
+#endif
+#ifndef HALF_ENABLE_CPP11_HASH
+#define HALF_ENABLE_CPP11_HASH 1
+#endif
+#endif
+#elif defined(__GLIBCXX__) // libstdc++
+#if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103
+#ifdef __clang__
+#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS)
+#define HALF_ENABLE_CPP11_TYPE_TRAITS 1
+#endif
+#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CSTDINT)
+#define HALF_ENABLE_CPP11_CSTDINT 1
+#endif
+#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CMATH)
+#define HALF_ENABLE_CPP11_CMATH 1
+#endif
+#if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_HASH)
+#define HALF_ENABLE_CPP11_HASH 1
+#endif
+#else
+#if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CSTDINT)
+#define HALF_ENABLE_CPP11_CSTDINT 1
+#endif
+#if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CMATH)
+#define HALF_ENABLE_CPP11_CMATH 1
+#endif
+#if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_HASH)
+#define HALF_ENABLE_CPP11_HASH 1
+#endif
+#endif
+#endif
+#elif defined(_CPPLIB_VER) // Dinkumware/Visual C++
+#if _CPPLIB_VER >= 520
+#ifndef HALF_ENABLE_CPP11_TYPE_TRAITS
+#define HALF_ENABLE_CPP11_TYPE_TRAITS 1
+#endif
+#ifndef HALF_ENABLE_CPP11_CSTDINT
+#define HALF_ENABLE_CPP11_CSTDINT 1
+#endif
+#ifndef HALF_ENABLE_CPP11_HASH
+#define HALF_ENABLE_CPP11_HASH 1
+#endif
+#endif
+#if _CPPLIB_VER >= 610
+#ifndef HALF_ENABLE_CPP11_CMATH
+#define HALF_ENABLE_CPP11_CMATH 1
+#endif
+#endif
+#endif
+#undef HALF_GNUC_VERSION
+
+// support constexpr
+#if HALF_ENABLE_CPP11_CONSTEXPR
+#define HALF_CONSTEXPR constexpr
+#define HALF_CONSTEXPR_CONST constexpr
+#else
+#define HALF_CONSTEXPR
+#define HALF_CONSTEXPR_CONST const
+#endif
+
+// support noexcept
+#if HALF_ENABLE_CPP11_NOEXCEPT
+#define HALF_NOEXCEPT noexcept
+#define HALF_NOTHROW noexcept
+#else
+#define HALF_NOEXCEPT
+#define HALF_NOTHROW throw()
+#endif
+
+#include <algorithm>
+#include <climits>
+#include <cmath>
+#include <cstring>
+#include <iostream>
+#include <limits>
+#if HALF_ENABLE_CPP11_TYPE_TRAITS
+#include <type_traits>
+#endif
+#if HALF_ENABLE_CPP11_CSTDINT
+#include <cstdint>
+#endif
+#if HALF_ENABLE_CPP11_HASH
+#include <functional>
+#endif
+
+/// Default rounding mode.
+/// This specifies the rounding mode used for all conversions between [half](\ref half_float::half)s and `float`s as
+/// well as for the half_cast() if not specifying a rounding mode explicitly. It can be redefined (before including
+/// half.hpp) to one of the standard rounding modes using their respective constants or the equivalent values of
+/// `std::float_round_style`:
+///
+/// `std::float_round_style`         | value | rounding
+/// ---------------------------------|-------|-------------------------
+/// `std::round_indeterminate`       | -1    | fastest
+/// `std::round_toward_zero`         | 0     | toward zero
+/// `std::round_to_nearest`          | 1     | to nearest (default here)
+/// `std::round_toward_infinity`     | 2     | toward positive infinity
+/// `std::round_toward_neg_infinity` | 3     | toward negative infinity
+///
+/// In this copy the default is `1` (`std::round_to_nearest`). The fastest possible mode is `-1`
+/// (`std::round_indeterminate`), which uses truncation (round toward zero, but with overflows set to
+/// infinity). The macro can even be set to `std::numeric_limits<float>::round_style` to synchronize the
+/// rounding mode with that of the underlying single-precision implementation.
+#ifndef HALF_ROUND_STYLE
+#define HALF_ROUND_STYLE 1 // = std::round_to_nearest
+#endif
+
+/// Tie-breaking behaviour for round to nearest.
+/// This specifies if ties in round to nearest should be resolved by rounding to the nearest even value. By default this
+/// is defined to `0` resulting in the faster but slightly more biased behaviour of rounding away from zero in half-way
+/// cases (and thus equal to the round() function), but can be redefined to `1` (before including half.hpp) if more
+/// IEEE-conformant behaviour is needed.
+#ifndef HALF_ROUND_TIES_TO_EVEN
+#define HALF_ROUND_TIES_TO_EVEN 0 // ties away from zero
+#endif
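+// Configuration sketch (values taken from the tables above; define before including this header):
+//
+//     #define HALF_ROUND_STYLE 1        // round to nearest
+//     #define HALF_ROUND_TIES_TO_EVEN 1 // IEEE tie-breaking
+//     #include "half.h"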
+
+/// Value signaling overflow.
+/// In correspondence with `HUGE_VAL[F|L]` from `<cmath>` this symbol expands to a positive value signaling the overflow
+/// of an operation, in particular it just evaluates to positive infinity.
+#define HUGE_VALH std::numeric_limits<half_float::half>::infinity()
+
+/// Fast half-precision fma function.
+/// This symbol is only defined if the fma() function generally executes as fast as, or faster than, a separate
+/// half-precision multiplication followed by an addition. Due to the internal single-precision implementation of all
+/// arithmetic operations, this is in fact always the case.
+#define FP_FAST_FMAH 1
+
+#ifndef FP_ILOGB0
+#define FP_ILOGB0 INT_MIN
+#endif
+#ifndef FP_ILOGBNAN
+#define FP_ILOGBNAN INT_MAX
+#endif
+#ifndef FP_SUBNORMAL
+#define FP_SUBNORMAL 0
+#endif
+#ifndef FP_ZERO
+#define FP_ZERO 1
+#endif
+#ifndef FP_NAN
+#define FP_NAN 2
+#endif
+#ifndef FP_INFINITE
+#define FP_INFINITE 3
+#endif
+#ifndef FP_NORMAL
+#define FP_NORMAL 4
+#endif
+
+/// Main namespace for half precision functionality.
+/// This namespace contains all the functionality provided by the library.
+namespace half_float
+{
+class half;
+
+#if HALF_ENABLE_CPP11_USER_LITERALS
+/// Library-defined half-precision literals.
+/// Import this namespace to enable half-precision floating point literals:
+/// ~~~~{.cpp}
+/// using namespace half_float::literal;
+/// half_float::half f = 4.2_h;
+/// ~~~~
+namespace literal
+{
+half operator"" _h(long double);
+}
+#endif
+
+/// \internal
+/// \brief Implementation details.
+namespace detail
+{
+#if HALF_ENABLE_CPP11_TYPE_TRAITS
+/// Conditional type.
+template <bool B, typename T, typename F>
+struct conditional : std::conditional<B, T, F>
+{
+};
+
+/// Helper for tag dispatching.
+template <bool B>
+struct bool_type : std::integral_constant<bool, B>
+{
+};
+using std::false_type;
+using std::true_type;
+
+/// Type traits for floating point types.
+template <typename T>
+struct is_float : std::is_floating_point<T>
+{
+};
+#else
+/// Conditional type.
+template <bool, typename T, typename>
+struct conditional
+{
+    typedef T type;
+};
+template <typename T, typename F>
+struct conditional<false, T, F>
+{
+    typedef F type;
+};
+
+/// Helper for tag dispatching.
+template <bool>
+struct bool_type
+{
+};
+typedef bool_type<true> true_type;
+typedef bool_type<false> false_type;
+
+/// Type traits for floating point types.
+template <typename>
+struct is_float : false_type
+{
+};
+template <typename T>
+struct is_float<const T> : is_float<T>
+{
+};
+template <typename T>
+struct is_float<volatile T> : is_float<T>
+{
+};
+template <typename T>
+struct is_float<const volatile T> : is_float<T>
+{
+};
+template <>
+struct is_float<float> : true_type
+{
+};
+template <>
+struct is_float<double> : true_type
+{
+};
+template <>
+struct is_float<long double> : true_type
+{
+};
+#endif
+
+/// Type traits for floating point bits.
+template <typename T>
+struct bits
+{
+    typedef unsigned char type;
+};
+template <typename T>
+struct bits<const T> : bits<T>
+{
+};
+template <typename T>
+struct bits<volatile T> : bits<T>
+{
+};
+template <typename T>
+struct bits<const volatile T> : bits<T>
+{
+};
+
+#if HALF_ENABLE_CPP11_CSTDINT
+/// Unsigned integer of (at least) 16 bits width.
+typedef std::uint_least16_t uint16;
+
+/// Unsigned integer of (at least) 32 bits width.
+template <>
+struct bits<float>
+{
+    typedef std::uint_least32_t type;
+};
+
+/// Unsigned integer of (at least) 64 bits width.
+template <>
+struct bits<double>
+{
+    typedef std::uint_least64_t type;
+};
+#else
+/// Unsigned integer of (at least) 16 bits width.
+typedef unsigned short uint16;
+
+/// Unsigned integer of (at least) 32 bits width.
+template <>
+struct bits<float> : conditional<std::numeric_limits<unsigned int>::digits >= 32, unsigned int, unsigned long>
+{
+};
+
+#if HALF_ENABLE_CPP11_LONG_LONG
+/// Unsigned integer of (at least) 64 bits width.
+template <>
+struct bits<double> : conditional<std::numeric_limits<unsigned long>::digits >= 64, unsigned long, unsigned long long>
+{
+};
+#else
+/// Unsigned integer of (at least) 64 bits width.
+template <>
+struct bits<double>
+{
+    typedef unsigned long type;
+};
+#endif
+#endif
+
+/// Tag type for binary construction.
+struct binary_t
+{
+};
+
+/// Tag for binary construction.
+HALF_CONSTEXPR_CONST binary_t binary = binary_t();
+
+/// Temporary half-precision expression.
+/// This class represents a half-precision expression which just stores a single-precision value internally.
+struct expr
+{
+    /// Conversion constructor.
+    /// \param f single-precision value to convert
+    explicit HALF_CONSTEXPR expr(float f) HALF_NOEXCEPT : value_(f) {}
+
+    /// Conversion to single-precision.
+    /// \return single precision value representing expression value
+    HALF_CONSTEXPR operator float() const HALF_NOEXCEPT
+    {
+        return value_;
+    }
+
+private:
+    /// Internal expression value stored in single-precision.
+    float value_;
+};
+
+/// SFINAE helper for generic half-precision functions.
+/// This class template has to be specialized for each valid combination of argument types to provide a corresponding
+/// `type` member equivalent to \a T.
+/// \tparam T type to return
+template <typename T, typename, typename = void, typename = void>
+struct enable
+{
+};
+template <typename T>
+struct enable<T, half, void, void>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, expr, void, void>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, half, half, void>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, half, expr, void>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, expr, half, void>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, expr, expr, void>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, half, half, half>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, half, half, expr>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, half, expr, half>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, half, expr, expr>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, expr, half, half>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, expr, half, expr>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, expr, expr, half>
+{
+    typedef T type;
+};
+template <typename T>
+struct enable<T, expr, expr, expr>
+{
+    typedef T type;
+};
+
+/// Return type for specialized generic 2-argument half-precision functions.
+/// This class template has to be specialized for each valid combination of argument types to provide a corresponding
+/// `type` member denoting the appropriate return type.
+/// \tparam T first argument type
+/// \tparam U second argument type
+template <typename T, typename U>
+struct result : enable<expr, T, U>
+{
+};
+template <>
+struct result<half, half>
+{
+    typedef half type;
+};
+
+/// \name Classification helpers
+/// \{
+
+/// Check for infinity.
+/// \tparam T argument type (builtin floating point type)
+/// \param arg value to query
+/// \retval true if infinity
+/// \retval false else
+template <typename T>
+bool builtin_isinf(T arg)
+{
+#if HALF_ENABLE_CPP11_CMATH
+    return std::isinf(arg);
+#elif defined(_MSC_VER)
+    return !::_finite(static_cast<double>(arg)) && !::_isnan(static_cast<double>(arg));
+#else
+    return arg == std::numeric_limits<T>::infinity() || arg == -std::numeric_limits<T>::infinity();
+#endif
+}
+
+/// Check for NaN.
+/// \tparam T argument type (builtin floating point type)
+/// \param arg value to query
+/// \retval true if not a number
+/// \retval false else
+template <typename T>
+bool builtin_isnan(T arg)
+{
+#if HALF_ENABLE_CPP11_CMATH
+    return std::isnan(arg);
+#elif defined(_MSC_VER)
+    return ::_isnan(static_cast<double>(arg)) != 0;
+#else
+    return arg != arg;
+#endif
+}
+
+/// Check sign.
+/// \tparam T argument type (builtin floating point type)
+/// \param arg value to query
+/// \retval true if signbit set
+/// \retval false else
+template <typename T>
+bool builtin_signbit(T arg)
+{
+#if HALF_ENABLE_CPP11_CMATH
+    return std::signbit(arg);
+#else
+    return arg < T() || (arg == T() && T(1) / arg < T());
+#endif
+}
+
+/// \}
+/// \name Conversion
+/// \{
+
+/// Convert IEEE single-precision to half-precision.
+/// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf).
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \param value single-precision value
+/// \return binary representation of half-precision value
+template <std::float_round_style R>
+uint16 float2half_impl(float value, true_type)
+{
+    typedef bits<float>::type uint32;
+    uint32 bits; // = *reinterpret_cast<uint32*>(&value);		//violating strict aliasing!
+    std::memcpy(&bits, &value, sizeof(float));
+    /*			uint16 hbits = (bits>>16) & 0x8000;
+                bits &= 0x7FFFFFFF;
+                int exp = bits >> 23;
+                if(exp == 255)
+                    return hbits | 0x7C00 | (0x3FF&-static_cast<unsigned>((bits&0x7FFFFF)!=0));
+                if(exp > 142)
+                {
+                    if(R == std::round_toward_infinity)
+                        return hbits | 0x7C00 - (hbits>>15);
+                    if(R == std::round_toward_neg_infinity)
+                        return hbits | 0x7BFF + (hbits>>15);
+                    return hbits | 0x7BFF + (R!=std::round_toward_zero);
+                }
+                int g, s;
+                if(exp > 112)
+                {
+                    g = (bits>>12) & 1;
+                    s = (bits&0xFFF) != 0;
+                    hbits |= ((exp-112)<<10) | ((bits>>13)&0x3FF);
+                }
+                else if(exp > 101)
+                {
+                    int i = 125 - exp;
+                    bits = (bits&0x7FFFFF) | 0x800000;
+                    g = (bits>>i) & 1;
+                    s = (bits&((1L<<i)-1)) != 0;
+                    hbits |= bits >> (i+1);
+                }
+                else
+                {
+                    g = 0;
+                    s = bits != 0;
+                }
+                if(R == std::round_to_nearest)
+                    #if HALF_ROUND_TIES_TO_EVEN
+                        hbits += g & (s|hbits);
+                    #else
+                        hbits += g;
+                    #endif
+                else if(R == std::round_toward_infinity)
+                    hbits += ~(hbits>>15) & (s|g);
+                else if(R == std::round_toward_neg_infinity)
+                    hbits += (hbits>>15) & (g|s);
+    */
+    static const uint16 base_table[512] = {0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+        0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+        0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+        0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+        0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+        0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+        0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
+        0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0008,
+        0x0010, 0x0020, 0x0040, 0x0080, 0x0100, 0x0200, 0x0400, 0x0800, 0x0C00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000,
+        0x2400, 0x2800, 0x2C00, 0x3000, 0x3400, 0x3800, 0x3C00, 0x4000, 0x4400, 0x4800, 0x4C00, 0x5000, 0x5400, 0x5800,
+        0x5C00, 0x6000, 0x6400, 0x6800, 0x6C00, 0x7000, 0x7400, 0x7800, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00,
+        0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000,
+        0x8001, 0x8002, 0x8004, 0x8008, 0x8010, 0x8020, 0x8040, 0x8080, 0x8100, 0x8200, 0x8400, 0x8800, 0x8C00, 0x9000,
+        0x9400, 0x9800, 0x9C00, 0xA000, 0xA400, 0xA800, 0xAC00, 0xB000, 0xB400, 0xB800, 0xBC00, 0xC000, 0xC400, 0xC800,
+        0xCC00, 0xD000, 0xD400, 0xD800, 0xDC00, 0xE000, 0xE400, 0xE800, 0xEC00, 0xF000, 0xF400, 0xF800, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00,
+        0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00};
+    static const unsigned char shift_table[512] = {24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+        13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+        13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+        24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13};
+    uint16 hbits = base_table[bits >> 23] + static_cast<uint16>((bits & 0x7FFFFF) >> shift_table[bits >> 23]);
+    if (R == std::round_to_nearest)
+        hbits += (((bits & 0x7FFFFF) >> (shift_table[bits >> 23] - 1)) | (((bits >> 23) & 0xFF) == 102))
+            & ((hbits & 0x7C00) != 0x7C00)
+#if HALF_ROUND_TIES_TO_EVEN
+            & (((((static_cast<uint32>(1) << (shift_table[bits >> 23] - 1)) - 1) & bits) != 0) | hbits)
+#endif
+            ;
+    else if (R == std::round_toward_zero)
+        hbits -= ((hbits & 0x7FFF) == 0x7C00) & ~shift_table[bits >> 23];
+    else if (R == std::round_toward_infinity)
+        hbits += ((((bits & 0x7FFFFF & ((static_cast<uint32>(1) << (shift_table[bits >> 23])) - 1)) != 0)
+                      | (((bits >> 23) <= 102) & ((bits >> 23) != 0)))
+                     & (hbits < 0x7C00))
+            - ((hbits == 0xFC00) & ((bits >> 23) != 511));
+    else if (R == std::round_toward_neg_infinity)
+        hbits += ((((bits & 0x7FFFFF & ((static_cast<uint32>(1) << (shift_table[bits >> 23])) - 1)) != 0)
+                      | (((bits >> 23) <= 358) & ((bits >> 23) != 256)))
+                     & (hbits < 0xFC00) & (hbits >> 15))
+            - ((hbits == 0x7C00) & ((bits >> 23) != 255));
+    return hbits;
+}
+
+/// Convert IEEE double-precision to half-precision.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \param value double-precision value
+/// \return binary representation of half-precision value
+template <std::float_round_style R>
+uint16 float2half_impl(double value, true_type)
+{
+    typedef bits<float>::type uint32;
+    typedef bits<double>::type uint64;
+    uint64 bits; // = *reinterpret_cast<uint64*>(&value);		//violating strict aliasing!
+    std::memcpy(&bits, &value, sizeof(double));
+    uint32 hi = bits >> 32, lo = bits & 0xFFFFFFFF;
+    uint16 hbits = (hi >> 16) & 0x8000;
+    hi &= 0x7FFFFFFF;
+    int exp = hi >> 20;
+    if (exp == 2047)
+        return hbits | 0x7C00 | (0x3FF & -static_cast<unsigned>((bits & 0xFFFFFFFFFFFFF) != 0));
+    if (exp > 1038)
+    {
+        if (R == std::round_toward_infinity)
+            return hbits | 0x7C00 - (hbits >> 15);
+        if (R == std::round_toward_neg_infinity)
+            return hbits | 0x7BFF + (hbits >> 15);
+        return hbits | 0x7BFF + (R != std::round_toward_zero);
+    }
+    int g, s = lo != 0;
+    if (exp > 1008)
+    {
+        g = (hi >> 9) & 1;
+        s |= (hi & 0x1FF) != 0;
+        hbits |= ((exp - 1008) << 10) | ((hi >> 10) & 0x3FF);
+    }
+    else if (exp > 997)
+    {
+        int i = 1018 - exp;
+        hi = (hi & 0xFFFFF) | 0x100000;
+        g = (hi >> i) & 1;
+        s |= (hi & ((1L << i) - 1)) != 0;
+        hbits |= hi >> (i + 1);
+    }
+    else
+    {
+        g = 0;
+        s |= hi != 0;
+    }
+    if (R == std::round_to_nearest)
+#if HALF_ROUND_TIES_TO_EVEN
+        hbits += g & (s | hbits);
+#else
+        hbits += g;
+#endif
+    else if (R == std::round_toward_infinity)
+        hbits += ~(hbits >> 15) & (s | g);
+    else if (R == std::round_toward_neg_infinity)
+        hbits += (hbits >> 15) & (g | s);
+    return hbits;
+}
+
+/// Convert non-IEEE floating point to half-precision.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \tparam T source type (builtin floating point type)
+/// \param value floating point value
+/// \return binary representation of half-precision value
+template <std::float_round_style R, typename T>
+uint16 float2half_impl(T value, ...)
+{
+    uint16 hbits = static_cast<unsigned>(builtin_signbit(value)) << 15;
+    if (value == T())
+        return hbits;
+    if (builtin_isnan(value))
+        return hbits | 0x7FFF;
+    if (builtin_isinf(value))
+        return hbits | 0x7C00;
+    int exp;
+    std::frexp(value, &exp);
+    if (exp > 16)
+    {
+        if (R == std::round_toward_infinity)
+            return hbits | (0x7C00 - (hbits >> 15));
+        else if (R == std::round_toward_neg_infinity)
+            return hbits | (0x7BFF + (hbits >> 15));
+        return hbits | (0x7BFF + (R != std::round_toward_zero));
+    }
+    if (exp < -13)
+        value = std::ldexp(value, 24);
+    else
+    {
+        value = std::ldexp(value, 11 - exp);
+        hbits |= ((exp + 13) << 10);
+    }
+    T ival, frac = std::modf(value, &ival);
+    hbits += static_cast<uint16>(std::abs(static_cast<int>(ival)));
+    if (R == std::round_to_nearest)
+    {
+        frac = std::abs(frac);
+#if HALF_ROUND_TIES_TO_EVEN
+        hbits += (frac > T(0.5)) | ((frac == T(0.5)) & hbits);
+#else
+        hbits += frac >= T(0.5);
+#endif
+    }
+    else if (R == std::round_toward_infinity)
+        hbits += frac > T();
+    else if (R == std::round_toward_neg_infinity)
+        hbits += frac < T();
+    return hbits;
+}
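+
+// Worked example (illustrative) for the generic path above: for a value of 0.75, std::frexp reports
+// exp == 0, so the value is scaled by std::ldexp(value, 11) to 1536 and the biased exponent
+// (0 + 13) << 10 == 0x3400 is recorded. Adding the integral part 1536 (0x600) yields 0x3A00, the
+// half-precision bit pattern of 0.75; the fractional remainder is zero, so no rounding correction is needed.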
+
+/// Convert floating point to half-precision.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \tparam T source type (builtin floating point type)
+/// \param value floating point value
+/// \return binary representation of half-precision value
+template <std::float_round_style R, typename T>
+uint16 float2half(T value)
+{
+    return float2half_impl<R>(
+        value, bool_type < std::numeric_limits<T>::is_iec559 && sizeof(typename bits<T>::type) == sizeof(T) > ());
+}
+
+/// Convert integer to half-precision floating point.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \tparam S `true` if value is negative, `false` otherwise
+/// \tparam T type to convert (builtin integer type)
+/// \param value non-negative integral value
+/// \return binary representation of half-precision value
+template <std::float_round_style R, bool S, typename T>
+uint16 int2half_impl(T value)
+{
+#if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS
+    static_assert(std::is_integral<T>::value, "int to half conversion only supports builtin integer types");
+#endif
+    if (S)
+        value = -value;
+    uint16 bits = S << 15;
+    if (value > 0xFFFF)
+    {
+        if (R == std::round_toward_infinity)
+            bits |= 0x7C00 - S;
+        else if (R == std::round_toward_neg_infinity)
+            bits |= 0x7BFF + S;
+        else
+            bits |= 0x7BFF + (R != std::round_toward_zero);
+    }
+    else if (value)
+    {
+        uint32_t m = value, exp = 24;
+        for (; m < 0x400; m <<= 1, --exp)
+            ;
+        for (; m > 0x7FF; m >>= 1, ++exp)
+            ;
+        bits |= (exp << 10) + m;
+        if (exp > 24)
+        {
+            if (R == std::round_to_nearest)
+                bits += (value >> (exp - 25)) & 1
+#if HALF_ROUND_TIES_TO_EVEN
+                    & (((((1 << (exp - 25)) - 1) & value) != 0) | bits)
+#endif
+                    ;
+            else if (R == std::round_toward_infinity)
+                bits += ((value & ((1 << (exp - 24)) - 1)) != 0) & !S;
+            else if (R == std::round_toward_neg_infinity)
+                bits += ((value & ((1 << (exp - 24)) - 1)) != 0) & S;
+        }
+    }
+    return bits;
+}
+
+/// Convert integer to half-precision floating point.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \tparam T type to convert (builtin integer type)
+/// \param value integral value
+/// \return binary representation of half-precision value
+template <std::float_round_style R, typename T>
+uint16 int2half(T value)
+{
+    return (value < 0) ? int2half_impl<R, true>(value) : int2half_impl<R, false>(value);
+}
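+
+// Worked example (illustrative): int2half<std::round_to_nearest>(1) normalizes m from 1 up to 0x400 in ten
+// shifts while decrementing exp from 24 to 14, so the result is (14 << 10) + 0x400 == 0x3C00, the
+// half-precision encoding of 1.0. Since exp <= 24 no rounding correction is applied.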
+
+/// Convert half-precision to IEEE single-precision.
+/// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf).
+/// \param value binary representation of half-precision value
+/// \return single-precision value
+inline float half2float_impl(uint16 value, float, true_type)
+{
+    typedef bits<float>::type uint32;
+    /*			uint32 bits = static_cast<uint32>(value&0x8000) << 16;
+                int abs = value & 0x7FFF;
+                if(abs)
+                {
+                    bits |= 0x38000000 << static_cast<unsigned>(abs>=0x7C00);
+                    for(; abs<0x400; abs<<=1,bits-=0x800000) ;
+                    bits += static_cast<uint32>(abs) << 13;
+                }
+    */
+    static const uint32 mantissa_table[2048] = {0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34A00000,
+        0x34C00000, 0x34E00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000,
+        0x35700000, 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35A00000, 0x35A80000, 0x35B00000, 0x35B80000,
+        0x35C00000, 0x35C80000, 0x35D00000, 0x35D80000, 0x35E00000, 0x35E80000, 0x35F00000, 0x35F80000, 0x36000000,
+        0x36040000, 0x36080000, 0x360C0000, 0x36100000, 0x36140000, 0x36180000, 0x361C0000, 0x36200000, 0x36240000,
+        0x36280000, 0x362C0000, 0x36300000, 0x36340000, 0x36380000, 0x363C0000, 0x36400000, 0x36440000, 0x36480000,
+        0x364C0000, 0x36500000, 0x36540000, 0x36580000, 0x365C0000, 0x36600000, 0x36640000, 0x36680000, 0x366C0000,
+        0x36700000, 0x36740000, 0x36780000, 0x367C0000, 0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000,
+        0x368A0000, 0x368C0000, 0x368E0000, 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369A0000,
+        0x369C0000, 0x369E0000, 0x36A00000, 0x36A20000, 0x36A40000, 0x36A60000, 0x36A80000, 0x36AA0000, 0x36AC0000,
+        0x36AE0000, 0x36B00000, 0x36B20000, 0x36B40000, 0x36B60000, 0x36B80000, 0x36BA0000, 0x36BC0000, 0x36BE0000,
+        0x36C00000, 0x36C20000, 0x36C40000, 0x36C60000, 0x36C80000, 0x36CA0000, 0x36CC0000, 0x36CE0000, 0x36D00000,
+        0x36D20000, 0x36D40000, 0x36D60000, 0x36D80000, 0x36DA0000, 0x36DC0000, 0x36DE0000, 0x36E00000, 0x36E20000,
+        0x36E40000, 0x36E60000, 0x36E80000, 0x36EA0000, 0x36EC0000, 0x36EE0000, 0x36F00000, 0x36F20000, 0x36F40000,
+        0x36F60000, 0x36F80000, 0x36FA0000, 0x36FC0000, 0x36FE0000, 0x37000000, 0x37010000, 0x37020000, 0x37030000,
+        0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000, 0x370A0000, 0x370B0000, 0x370C0000,
+        0x370D0000, 0x370E0000, 0x370F0000, 0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000,
+        0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371A0000, 0x371B0000, 0x371C0000, 0x371D0000, 0x371E0000,
+        0x371F0000, 0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000,
+        0x37280000, 0x37290000, 0x372A0000, 0x372B0000, 0x372C0000, 0x372D0000, 0x372E0000, 0x372F0000, 0x37300000,
+        0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000,
+        0x373A0000, 0x373B0000, 0x373C0000, 0x373D0000, 0x373E0000, 0x373F0000, 0x37400000, 0x37410000, 0x37420000,
+        0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374A0000, 0x374B0000,
+        0x374C0000, 0x374D0000, 0x374E0000, 0x374F0000, 0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000,
+        0x37550000, 0x37560000, 0x37570000, 0x37580000, 0x37590000, 0x375A0000, 0x375B0000, 0x375C0000, 0x375D0000,
+        0x375E0000, 0x375F0000, 0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000,
+        0x37670000, 0x37680000, 0x37690000, 0x376A0000, 0x376B0000, 0x376C0000, 0x376D0000, 0x376E0000, 0x376F0000,
+        0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000, 0x37780000,
+        0x37790000, 0x377A0000, 0x377B0000, 0x377C0000, 0x377D0000, 0x377E0000, 0x377F0000, 0x37800000, 0x37808000,
+        0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000, 0x37840000, 0x37848000, 0x37850000,
+        0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000, 0x37880000, 0x37888000, 0x37890000, 0x37898000,
+        0x378A0000, 0x378A8000, 0x378B0000, 0x378B8000, 0x378C0000, 0x378C8000, 0x378D0000, 0x378D8000, 0x378E0000,
+        0x378E8000, 0x378F0000, 0x378F8000, 0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000,
+        0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000,
+        0x37978000, 0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379A0000, 0x379A8000, 0x379B0000, 0x379B8000,
+        0x379C0000, 0x379C8000, 0x379D0000, 0x379D8000, 0x379E0000, 0x379E8000, 0x379F0000, 0x379F8000, 0x37A00000,
+        0x37A08000, 0x37A10000, 0x37A18000, 0x37A20000, 0x37A28000, 0x37A30000, 0x37A38000, 0x37A40000, 0x37A48000,
+        0x37A50000, 0x37A58000, 0x37A60000, 0x37A68000, 0x37A70000, 0x37A78000, 0x37A80000, 0x37A88000, 0x37A90000,
+        0x37A98000, 0x37AA0000, 0x37AA8000, 0x37AB0000, 0x37AB8000, 0x37AC0000, 0x37AC8000, 0x37AD0000, 0x37AD8000,
+        0x37AE0000, 0x37AE8000, 0x37AF0000, 0x37AF8000, 0x37B00000, 0x37B08000, 0x37B10000, 0x37B18000, 0x37B20000,
+        0x37B28000, 0x37B30000, 0x37B38000, 0x37B40000, 0x37B48000, 0x37B50000, 0x37B58000, 0x37B60000, 0x37B68000,
+        0x37B70000, 0x37B78000, 0x37B80000, 0x37B88000, 0x37B90000, 0x37B98000, 0x37BA0000, 0x37BA8000, 0x37BB0000,
+        0x37BB8000, 0x37BC0000, 0x37BC8000, 0x37BD0000, 0x37BD8000, 0x37BE0000, 0x37BE8000, 0x37BF0000, 0x37BF8000,
+        0x37C00000, 0x37C08000, 0x37C10000, 0x37C18000, 0x37C20000, 0x37C28000, 0x37C30000, 0x37C38000, 0x37C40000,
+        0x37C48000, 0x37C50000, 0x37C58000, 0x37C60000, 0x37C68000, 0x37C70000, 0x37C78000, 0x37C80000, 0x37C88000,
+        0x37C90000, 0x37C98000, 0x37CA0000, 0x37CA8000, 0x37CB0000, 0x37CB8000, 0x37CC0000, 0x37CC8000, 0x37CD0000,
+        0x37CD8000, 0x37CE0000, 0x37CE8000, 0x37CF0000, 0x37CF8000, 0x37D00000, 0x37D08000, 0x37D10000, 0x37D18000,
+        0x37D20000, 0x37D28000, 0x37D30000, 0x37D38000, 0x37D40000, 0x37D48000, 0x37D50000, 0x37D58000, 0x37D60000,
+        0x37D68000, 0x37D70000, 0x37D78000, 0x37D80000, 0x37D88000, 0x37D90000, 0x37D98000, 0x37DA0000, 0x37DA8000,
+        0x37DB0000, 0x37DB8000, 0x37DC0000, 0x37DC8000, 0x37DD0000, 0x37DD8000, 0x37DE0000, 0x37DE8000, 0x37DF0000,
+        0x37DF8000, 0x37E00000, 0x37E08000, 0x37E10000, 0x37E18000, 0x37E20000, 0x37E28000, 0x37E30000, 0x37E38000,
+        0x37E40000, 0x37E48000, 0x37E50000, 0x37E58000, 0x37E60000, 0x37E68000, 0x37E70000, 0x37E78000, 0x37E80000,
+        0x37E88000, 0x37E90000, 0x37E98000, 0x37EA0000, 0x37EA8000, 0x37EB0000, 0x37EB8000, 0x37EC0000, 0x37EC8000,
+        0x37ED0000, 0x37ED8000, 0x37EE0000, 0x37EE8000, 0x37EF0000, 0x37EF8000, 0x37F00000, 0x37F08000, 0x37F10000,
+        0x37F18000, 0x37F20000, 0x37F28000, 0x37F30000, 0x37F38000, 0x37F40000, 0x37F48000, 0x37F50000, 0x37F58000,
+        0x37F60000, 0x37F68000, 0x37F70000, 0x37F78000, 0x37F80000, 0x37F88000, 0x37F90000, 0x37F98000, 0x37FA0000,
+        0x37FA8000, 0x37FB0000, 0x37FB8000, 0x37FC0000, 0x37FC8000, 0x37FD0000, 0x37FD8000, 0x37FE0000, 0x37FE8000,
+        0x37FF0000, 0x37FF8000, 0x38000000, 0x38004000, 0x38008000, 0x3800C000, 0x38010000, 0x38014000, 0x38018000,
+        0x3801C000, 0x38020000, 0x38024000, 0x38028000, 0x3802C000, 0x38030000, 0x38034000, 0x38038000, 0x3803C000,
+        0x38040000, 0x38044000, 0x38048000, 0x3804C000, 0x38050000, 0x38054000, 0x38058000, 0x3805C000, 0x38060000,
+        0x38064000, 0x38068000, 0x3806C000, 0x38070000, 0x38074000, 0x38078000, 0x3807C000, 0x38080000, 0x38084000,
+        0x38088000, 0x3808C000, 0x38090000, 0x38094000, 0x38098000, 0x3809C000, 0x380A0000, 0x380A4000, 0x380A8000,
+        0x380AC000, 0x380B0000, 0x380B4000, 0x380B8000, 0x380BC000, 0x380C0000, 0x380C4000, 0x380C8000, 0x380CC000,
+        0x380D0000, 0x380D4000, 0x380D8000, 0x380DC000, 0x380E0000, 0x380E4000, 0x380E8000, 0x380EC000, 0x380F0000,
+        0x380F4000, 0x380F8000, 0x380FC000, 0x38100000, 0x38104000, 0x38108000, 0x3810C000, 0x38110000, 0x38114000,
+        0x38118000, 0x3811C000, 0x38120000, 0x38124000, 0x38128000, 0x3812C000, 0x38130000, 0x38134000, 0x38138000,
+        0x3813C000, 0x38140000, 0x38144000, 0x38148000, 0x3814C000, 0x38150000, 0x38154000, 0x38158000, 0x3815C000,
+        0x38160000, 0x38164000, 0x38168000, 0x3816C000, 0x38170000, 0x38174000, 0x38178000, 0x3817C000, 0x38180000,
+        0x38184000, 0x38188000, 0x3818C000, 0x38190000, 0x38194000, 0x38198000, 0x3819C000, 0x381A0000, 0x381A4000,
+        0x381A8000, 0x381AC000, 0x381B0000, 0x381B4000, 0x381B8000, 0x381BC000, 0x381C0000, 0x381C4000, 0x381C8000,
+        0x381CC000, 0x381D0000, 0x381D4000, 0x381D8000, 0x381DC000, 0x381E0000, 0x381E4000, 0x381E8000, 0x381EC000,
+        0x381F0000, 0x381F4000, 0x381F8000, 0x381FC000, 0x38200000, 0x38204000, 0x38208000, 0x3820C000, 0x38210000,
+        0x38214000, 0x38218000, 0x3821C000, 0x38220000, 0x38224000, 0x38228000, 0x3822C000, 0x38230000, 0x38234000,
+        0x38238000, 0x3823C000, 0x38240000, 0x38244000, 0x38248000, 0x3824C000, 0x38250000, 0x38254000, 0x38258000,
+        0x3825C000, 0x38260000, 0x38264000, 0x38268000, 0x3826C000, 0x38270000, 0x38274000, 0x38278000, 0x3827C000,
+        0x38280000, 0x38284000, 0x38288000, 0x3828C000, 0x38290000, 0x38294000, 0x38298000, 0x3829C000, 0x382A0000,
+        0x382A4000, 0x382A8000, 0x382AC000, 0x382B0000, 0x382B4000, 0x382B8000, 0x382BC000, 0x382C0000, 0x382C4000,
+        0x382C8000, 0x382CC000, 0x382D0000, 0x382D4000, 0x382D8000, 0x382DC000, 0x382E0000, 0x382E4000, 0x382E8000,
+        0x382EC000, 0x382F0000, 0x382F4000, 0x382F8000, 0x382FC000, 0x38300000, 0x38304000, 0x38308000, 0x3830C000,
+        0x38310000, 0x38314000, 0x38318000, 0x3831C000, 0x38320000, 0x38324000, 0x38328000, 0x3832C000, 0x38330000,
+        0x38334000, 0x38338000, 0x3833C000, 0x38340000, 0x38344000, 0x38348000, 0x3834C000, 0x38350000, 0x38354000,
+        0x38358000, 0x3835C000, 0x38360000, 0x38364000, 0x38368000, 0x3836C000, 0x38370000, 0x38374000, 0x38378000,
+        0x3837C000, 0x38380000, 0x38384000, 0x38388000, 0x3838C000, 0x38390000, 0x38394000, 0x38398000, 0x3839C000,
+        0x383A0000, 0x383A4000, 0x383A8000, 0x383AC000, 0x383B0000, 0x383B4000, 0x383B8000, 0x383BC000, 0x383C0000,
+        0x383C4000, 0x383C8000, 0x383CC000, 0x383D0000, 0x383D4000, 0x383D8000, 0x383DC000, 0x383E0000, 0x383E4000,
+        0x383E8000, 0x383EC000, 0x383F0000, 0x383F4000, 0x383F8000, 0x383FC000, 0x38400000, 0x38404000, 0x38408000,
+        0x3840C000, 0x38410000, 0x38414000, 0x38418000, 0x3841C000, 0x38420000, 0x38424000, 0x38428000, 0x3842C000,
+        0x38430000, 0x38434000, 0x38438000, 0x3843C000, 0x38440000, 0x38444000, 0x38448000, 0x3844C000, 0x38450000,
+        0x38454000, 0x38458000, 0x3845C000, 0x38460000, 0x38464000, 0x38468000, 0x3846C000, 0x38470000, 0x38474000,
+        0x38478000, 0x3847C000, 0x38480000, 0x38484000, 0x38488000, 0x3848C000, 0x38490000, 0x38494000, 0x38498000,
+        0x3849C000, 0x384A0000, 0x384A4000, 0x384A8000, 0x384AC000, 0x384B0000, 0x384B4000, 0x384B8000, 0x384BC000,
+        0x384C0000, 0x384C4000, 0x384C8000, 0x384CC000, 0x384D0000, 0x384D4000, 0x384D8000, 0x384DC000, 0x384E0000,
+        0x384E4000, 0x384E8000, 0x384EC000, 0x384F0000, 0x384F4000, 0x384F8000, 0x384FC000, 0x38500000, 0x38504000,
+        0x38508000, 0x3850C000, 0x38510000, 0x38514000, 0x38518000, 0x3851C000, 0x38520000, 0x38524000, 0x38528000,
+        0x3852C000, 0x38530000, 0x38534000, 0x38538000, 0x3853C000, 0x38540000, 0x38544000, 0x38548000, 0x3854C000,
+        0x38550000, 0x38554000, 0x38558000, 0x3855C000, 0x38560000, 0x38564000, 0x38568000, 0x3856C000, 0x38570000,
+        0x38574000, 0x38578000, 0x3857C000, 0x38580000, 0x38584000, 0x38588000, 0x3858C000, 0x38590000, 0x38594000,
+        0x38598000, 0x3859C000, 0x385A0000, 0x385A4000, 0x385A8000, 0x385AC000, 0x385B0000, 0x385B4000, 0x385B8000,
+        0x385BC000, 0x385C0000, 0x385C4000, 0x385C8000, 0x385CC000, 0x385D0000, 0x385D4000, 0x385D8000, 0x385DC000,
+        0x385E0000, 0x385E4000, 0x385E8000, 0x385EC000, 0x385F0000, 0x385F4000, 0x385F8000, 0x385FC000, 0x38600000,
+        0x38604000, 0x38608000, 0x3860C000, 0x38610000, 0x38614000, 0x38618000, 0x3861C000, 0x38620000, 0x38624000,
+        0x38628000, 0x3862C000, 0x38630000, 0x38634000, 0x38638000, 0x3863C000, 0x38640000, 0x38644000, 0x38648000,
+        0x3864C000, 0x38650000, 0x38654000, 0x38658000, 0x3865C000, 0x38660000, 0x38664000, 0x38668000, 0x3866C000,
+        0x38670000, 0x38674000, 0x38678000, 0x3867C000, 0x38680000, 0x38684000, 0x38688000, 0x3868C000, 0x38690000,
+        0x38694000, 0x38698000, 0x3869C000, 0x386A0000, 0x386A4000, 0x386A8000, 0x386AC000, 0x386B0000, 0x386B4000,
+        0x386B8000, 0x386BC000, 0x386C0000, 0x386C4000, 0x386C8000, 0x386CC000, 0x386D0000, 0x386D4000, 0x386D8000,
+        0x386DC000, 0x386E0000, 0x386E4000, 0x386E8000, 0x386EC000, 0x386F0000, 0x386F4000, 0x386F8000, 0x386FC000,
+        0x38700000, 0x38704000, 0x38708000, 0x3870C000, 0x38710000, 0x38714000, 0x38718000, 0x3871C000, 0x38720000,
+        0x38724000, 0x38728000, 0x3872C000, 0x38730000, 0x38734000, 0x38738000, 0x3873C000, 0x38740000, 0x38744000,
+        0x38748000, 0x3874C000, 0x38750000, 0x38754000, 0x38758000, 0x3875C000, 0x38760000, 0x38764000, 0x38768000,
+        0x3876C000, 0x38770000, 0x38774000, 0x38778000, 0x3877C000, 0x38780000, 0x38784000, 0x38788000, 0x3878C000,
+        0x38790000, 0x38794000, 0x38798000, 0x3879C000, 0x387A0000, 0x387A4000, 0x387A8000, 0x387AC000, 0x387B0000,
+        0x387B4000, 0x387B8000, 0x387BC000, 0x387C0000, 0x387C4000, 0x387C8000, 0x387CC000, 0x387D0000, 0x387D4000,
+        0x387D8000, 0x387DC000, 0x387E0000, 0x387E4000, 0x387E8000, 0x387EC000, 0x387F0000, 0x387F4000, 0x387F8000,
+        0x387FC000, 0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800A000, 0x3800C000, 0x3800E000,
+        0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801A000, 0x3801C000, 0x3801E000, 0x38020000,
+        0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802A000, 0x3802C000, 0x3802E000, 0x38030000, 0x38032000,
+        0x38034000, 0x38036000, 0x38038000, 0x3803A000, 0x3803C000, 0x3803E000, 0x38040000, 0x38042000, 0x38044000,
+        0x38046000, 0x38048000, 0x3804A000, 0x3804C000, 0x3804E000, 0x38050000, 0x38052000, 0x38054000, 0x38056000,
+        0x38058000, 0x3805A000, 0x3805C000, 0x3805E000, 0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000,
+        0x3806A000, 0x3806C000, 0x3806E000, 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807A000,
+        0x3807C000, 0x3807E000, 0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808A000, 0x3808C000,
+        0x3808E000, 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809A000, 0x3809C000, 0x3809E000,
+        0x380A0000, 0x380A2000, 0x380A4000, 0x380A6000, 0x380A8000, 0x380AA000, 0x380AC000, 0x380AE000, 0x380B0000,
+        0x380B2000, 0x380B4000, 0x380B6000, 0x380B8000, 0x380BA000, 0x380BC000, 0x380BE000, 0x380C0000, 0x380C2000,
+        0x380C4000, 0x380C6000, 0x380C8000, 0x380CA000, 0x380CC000, 0x380CE000, 0x380D0000, 0x380D2000, 0x380D4000,
+        0x380D6000, 0x380D8000, 0x380DA000, 0x380DC000, 0x380DE000, 0x380E0000, 0x380E2000, 0x380E4000, 0x380E6000,
+        0x380E8000, 0x380EA000, 0x380EC000, 0x380EE000, 0x380F0000, 0x380F2000, 0x380F4000, 0x380F6000, 0x380F8000,
+        0x380FA000, 0x380FC000, 0x380FE000, 0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810A000,
+        0x3810C000, 0x3810E000, 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811A000, 0x3811C000,
+        0x3811E000, 0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812A000, 0x3812C000, 0x3812E000,
+        0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813A000, 0x3813C000, 0x3813E000, 0x38140000,
+        0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814A000, 0x3814C000, 0x3814E000, 0x38150000, 0x38152000,
+        0x38154000, 0x38156000, 0x38158000, 0x3815A000, 0x3815C000, 0x3815E000, 0x38160000, 0x38162000, 0x38164000,
+        0x38166000, 0x38168000, 0x3816A000, 0x3816C000, 0x3816E000, 0x38170000, 0x38172000, 0x38174000, 0x38176000,
+        0x38178000, 0x3817A000, 0x3817C000, 0x3817E000, 0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000,
+        0x3818A000, 0x3818C000, 0x3818E000, 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819A000,
+        0x3819C000, 0x3819E000, 0x381A0000, 0x381A2000, 0x381A4000, 0x381A6000, 0x381A8000, 0x381AA000, 0x381AC000,
+        0x381AE000, 0x381B0000, 0x381B2000, 0x381B4000, 0x381B6000, 0x381B8000, 0x381BA000, 0x381BC000, 0x381BE000,
+        0x381C0000, 0x381C2000, 0x381C4000, 0x381C6000, 0x381C8000, 0x381CA000, 0x381CC000, 0x381CE000, 0x381D0000,
+        0x381D2000, 0x381D4000, 0x381D6000, 0x381D8000, 0x381DA000, 0x381DC000, 0x381DE000, 0x381E0000, 0x381E2000,
+        0x381E4000, 0x381E6000, 0x381E8000, 0x381EA000, 0x381EC000, 0x381EE000, 0x381F0000, 0x381F2000, 0x381F4000,
+        0x381F6000, 0x381F8000, 0x381FA000, 0x381FC000, 0x381FE000, 0x38200000, 0x38202000, 0x38204000, 0x38206000,
+        0x38208000, 0x3820A000, 0x3820C000, 0x3820E000, 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000,
+        0x3821A000, 0x3821C000, 0x3821E000, 0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822A000,
+        0x3822C000, 0x3822E000, 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823A000, 0x3823C000,
+        0x3823E000, 0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824A000, 0x3824C000, 0x3824E000,
+        0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825A000, 0x3825C000, 0x3825E000, 0x38260000,
+        0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826A000, 0x3826C000, 0x3826E000, 0x38270000, 0x38272000,
+        0x38274000, 0x38276000, 0x38278000, 0x3827A000, 0x3827C000, 0x3827E000, 0x38280000, 0x38282000, 0x38284000,
+        0x38286000, 0x38288000, 0x3828A000, 0x3828C000, 0x3828E000, 0x38290000, 0x38292000, 0x38294000, 0x38296000,
+        0x38298000, 0x3829A000, 0x3829C000, 0x3829E000, 0x382A0000, 0x382A2000, 0x382A4000, 0x382A6000, 0x382A8000,
+        0x382AA000, 0x382AC000, 0x382AE000, 0x382B0000, 0x382B2000, 0x382B4000, 0x382B6000, 0x382B8000, 0x382BA000,
+        0x382BC000, 0x382BE000, 0x382C0000, 0x382C2000, 0x382C4000, 0x382C6000, 0x382C8000, 0x382CA000, 0x382CC000,
+        0x382CE000, 0x382D0000, 0x382D2000, 0x382D4000, 0x382D6000, 0x382D8000, 0x382DA000, 0x382DC000, 0x382DE000,
+        0x382E0000, 0x382E2000, 0x382E4000, 0x382E6000, 0x382E8000, 0x382EA000, 0x382EC000, 0x382EE000, 0x382F0000,
+        0x382F2000, 0x382F4000, 0x382F6000, 0x382F8000, 0x382FA000, 0x382FC000, 0x382FE000, 0x38300000, 0x38302000,
+        0x38304000, 0x38306000, 0x38308000, 0x3830A000, 0x3830C000, 0x3830E000, 0x38310000, 0x38312000, 0x38314000,
+        0x38316000, 0x38318000, 0x3831A000, 0x3831C000, 0x3831E000, 0x38320000, 0x38322000, 0x38324000, 0x38326000,
+        0x38328000, 0x3832A000, 0x3832C000, 0x3832E000, 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000,
+        0x3833A000, 0x3833C000, 0x3833E000, 0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834A000,
+        0x3834C000, 0x3834E000, 0x38350000, 0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835A000, 0x3835C000,
+        0x3835E000, 0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836A000, 0x3836C000, 0x3836E000,
+        0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837A000, 0x3837C000, 0x3837E000, 0x38380000,
+        0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838A000, 0x3838C000, 0x3838E000, 0x38390000, 0x38392000,
+        0x38394000, 0x38396000, 0x38398000, 0x3839A000, 0x3839C000, 0x3839E000, 0x383A0000, 0x383A2000, 0x383A4000,
+        0x383A6000, 0x383A8000, 0x383AA000, 0x383AC000, 0x383AE000, 0x383B0000, 0x383B2000, 0x383B4000, 0x383B6000,
+        0x383B8000, 0x383BA000, 0x383BC000, 0x383BE000, 0x383C0000, 0x383C2000, 0x383C4000, 0x383C6000, 0x383C8000,
+        0x383CA000, 0x383CC000, 0x383CE000, 0x383D0000, 0x383D2000, 0x383D4000, 0x383D6000, 0x383D8000, 0x383DA000,
+        0x383DC000, 0x383DE000, 0x383E0000, 0x383E2000, 0x383E4000, 0x383E6000, 0x383E8000, 0x383EA000, 0x383EC000,
+        0x383EE000, 0x383F0000, 0x383F2000, 0x383F4000, 0x383F6000, 0x383F8000, 0x383FA000, 0x383FC000, 0x383FE000,
+        0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840A000, 0x3840C000, 0x3840E000, 0x38410000,
+        0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841A000, 0x3841C000, 0x3841E000, 0x38420000, 0x38422000,
+        0x38424000, 0x38426000, 0x38428000, 0x3842A000, 0x3842C000, 0x3842E000, 0x38430000, 0x38432000, 0x38434000,
+        0x38436000, 0x38438000, 0x3843A000, 0x3843C000, 0x3843E000, 0x38440000, 0x38442000, 0x38444000, 0x38446000,
+        0x38448000, 0x3844A000, 0x3844C000, 0x3844E000, 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000,
+        0x3845A000, 0x3845C000, 0x3845E000, 0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846A000,
+        0x3846C000, 0x3846E000, 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847A000, 0x3847C000,
+        0x3847E000, 0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848A000, 0x3848C000, 0x3848E000,
+        0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849A000, 0x3849C000, 0x3849E000, 0x384A0000,
+        0x384A2000, 0x384A4000, 0x384A6000, 0x384A8000, 0x384AA000, 0x384AC000, 0x384AE000, 0x384B0000, 0x384B2000,
+        0x384B4000, 0x384B6000, 0x384B8000, 0x384BA000, 0x384BC000, 0x384BE000, 0x384C0000, 0x384C2000, 0x384C4000,
+        0x384C6000, 0x384C8000, 0x384CA000, 0x384CC000, 0x384CE000, 0x384D0000, 0x384D2000, 0x384D4000, 0x384D6000,
+        0x384D8000, 0x384DA000, 0x384DC000, 0x384DE000, 0x384E0000, 0x384E2000, 0x384E4000, 0x384E6000, 0x384E8000,
+        0x384EA000, 0x384EC000, 0x384EE000, 0x384F0000, 0x384F2000, 0x384F4000, 0x384F6000, 0x384F8000, 0x384FA000,
+        0x384FC000, 0x384FE000, 0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850A000, 0x3850C000,
+        0x3850E000, 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851A000, 0x3851C000, 0x3851E000,
+        0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852A000, 0x3852C000, 0x3852E000, 0x38530000,
+        0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853A000, 0x3853C000, 0x3853E000, 0x38540000, 0x38542000,
+        0x38544000, 0x38546000, 0x38548000, 0x3854A000, 0x3854C000, 0x3854E000, 0x38550000, 0x38552000, 0x38554000,
+        0x38556000, 0x38558000, 0x3855A000, 0x3855C000, 0x3855E000, 0x38560000, 0x38562000, 0x38564000, 0x38566000,
+        0x38568000, 0x3856A000, 0x3856C000, 0x3856E000, 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000,
+        0x3857A000, 0x3857C000, 0x3857E000, 0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858A000,
+        0x3858C000, 0x3858E000, 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859A000, 0x3859C000,
+        0x3859E000, 0x385A0000, 0x385A2000, 0x385A4000, 0x385A6000, 0x385A8000, 0x385AA000, 0x385AC000, 0x385AE000,
+        0x385B0000, 0x385B2000, 0x385B4000, 0x385B6000, 0x385B8000, 0x385BA000, 0x385BC000, 0x385BE000, 0x385C0000,
+        0x385C2000, 0x385C4000, 0x385C6000, 0x385C8000, 0x385CA000, 0x385CC000, 0x385CE000, 0x385D0000, 0x385D2000,
+        0x385D4000, 0x385D6000, 0x385D8000, 0x385DA000, 0x385DC000, 0x385DE000, 0x385E0000, 0x385E2000, 0x385E4000,
+        0x385E6000, 0x385E8000, 0x385EA000, 0x385EC000, 0x385EE000, 0x385F0000, 0x385F2000, 0x385F4000, 0x385F6000,
+        0x385F8000, 0x385FA000, 0x385FC000, 0x385FE000, 0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000,
+        0x3860A000, 0x3860C000, 0x3860E000, 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861A000,
+        0x3861C000, 0x3861E000, 0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862A000, 0x3862C000,
+        0x3862E000, 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863A000, 0x3863C000, 0x3863E000,
+        0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864A000, 0x3864C000, 0x3864E000, 0x38650000,
+        0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865A000, 0x3865C000, 0x3865E000, 0x38660000, 0x38662000,
+        0x38664000, 0x38666000, 0x38668000, 0x3866A000, 0x3866C000, 0x3866E000, 0x38670000, 0x38672000, 0x38674000,
+        0x38676000, 0x38678000, 0x3867A000, 0x3867C000, 0x3867E000, 0x38680000, 0x38682000, 0x38684000, 0x38686000,
+        0x38688000, 0x3868A000, 0x3868C000, 0x3868E000, 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000,
+        0x3869A000, 0x3869C000, 0x3869E000, 0x386A0000, 0x386A2000, 0x386A4000, 0x386A6000, 0x386A8000, 0x386AA000,
+        0x386AC000, 0x386AE000, 0x386B0000, 0x386B2000, 0x386B4000, 0x386B6000, 0x386B8000, 0x386BA000, 0x386BC000,
+        0x386BE000, 0x386C0000, 0x386C2000, 0x386C4000, 0x386C6000, 0x386C8000, 0x386CA000, 0x386CC000, 0x386CE000,
+        0x386D0000, 0x386D2000, 0x386D4000, 0x386D6000, 0x386D8000, 0x386DA000, 0x386DC000, 0x386DE000, 0x386E0000,
+        0x386E2000, 0x386E4000, 0x386E6000, 0x386E8000, 0x386EA000, 0x386EC000, 0x386EE000, 0x386F0000, 0x386F2000,
+        0x386F4000, 0x386F6000, 0x386F8000, 0x386FA000, 0x386FC000, 0x386FE000, 0x38700000, 0x38702000, 0x38704000,
+        0x38706000, 0x38708000, 0x3870A000, 0x3870C000, 0x3870E000, 0x38710000, 0x38712000, 0x38714000, 0x38716000,
+        0x38718000, 0x3871A000, 0x3871C000, 0x3871E000, 0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000,
+        0x3872A000, 0x3872C000, 0x3872E000, 0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873A000,
+        0x3873C000, 0x3873E000, 0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874A000, 0x3874C000,
+        0x3874E000, 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875A000, 0x3875C000, 0x3875E000,
+        0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876A000, 0x3876C000, 0x3876E000, 0x38770000,
+        0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877A000, 0x3877C000, 0x3877E000, 0x38780000, 0x38782000,
+        0x38784000, 0x38786000, 0x38788000, 0x3878A000, 0x3878C000, 0x3878E000, 0x38790000, 0x38792000, 0x38794000,
+        0x38796000, 0x38798000, 0x3879A000, 0x3879C000, 0x3879E000, 0x387A0000, 0x387A2000, 0x387A4000, 0x387A6000,
+        0x387A8000, 0x387AA000, 0x387AC000, 0x387AE000, 0x387B0000, 0x387B2000, 0x387B4000, 0x387B6000, 0x387B8000,
+        0x387BA000, 0x387BC000, 0x387BE000, 0x387C0000, 0x387C2000, 0x387C4000, 0x387C6000, 0x387C8000, 0x387CA000,
+        0x387CC000, 0x387CE000, 0x387D0000, 0x387D2000, 0x387D4000, 0x387D6000, 0x387D8000, 0x387DA000, 0x387DC000,
+        0x387DE000, 0x387E0000, 0x387E2000, 0x387E4000, 0x387E6000, 0x387E8000, 0x387EA000, 0x387EC000, 0x387EE000,
+        0x387F0000, 0x387F2000, 0x387F4000, 0x387F6000, 0x387F8000, 0x387FA000, 0x387FC000, 0x387FE000};
+    static const uint32 exponent_table[64] = {0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000,
+        0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000,
+        0x07800000, 0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0A000000, 0x0A800000, 0x0B000000, 0x0B800000,
+        0x0C000000, 0x0C800000, 0x0D000000, 0x0D800000, 0x0E000000, 0x0E800000, 0x0F000000, 0x47800000, 0x80000000,
+        0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000,
+        0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000, 0x88000000, 0x88800000, 0x89000000,
+        0x89800000, 0x8A000000, 0x8A800000, 0x8B000000, 0x8B800000, 0x8C000000, 0x8C800000, 0x8D000000, 0x8D800000,
+        0x8E000000, 0x8E800000, 0x8F000000, 0xC7800000};
+    static const unsigned short offset_table[64] = {0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+        1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+        1024, 1024, 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024,
+        1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024};
+    uint32 bits = mantissa_table[offset_table[value >> 10] + (value & 0x3FF)] + exponent_table[value >> 10];
+    //			return *reinterpret_cast<float*>(&bits);			//violating strict aliasing!
+    float out;
+    std::memcpy(&out, &bits, sizeof(float));
+    return out;
+}
+
+/// Convert half-precision to IEEE double-precision.
+/// \param value binary representation of half-precision value
+/// \return double-precision value
+inline double half2float_impl(uint16 value, double, true_type)
+{
+    typedef bits<float>::type uint32;
+    typedef bits<double>::type uint64;
+    uint32 hi = static_cast<uint32>(value & 0x8000) << 16;
+    int abs = value & 0x7FFF;
+    if (abs)
+    {
+        hi |= 0x3F000000 << static_cast<unsigned>(abs >= 0x7C00);
+        for (; abs < 0x400; abs <<= 1, hi -= 0x100000)
+            ;
+        hi += static_cast<uint32>(abs) << 10;
+    }
+    uint64 bits = static_cast<uint64>(hi) << 32;
+    //			return *reinterpret_cast<double*>(&bits);			//violating strict aliasing!
+    double out;
+    std::memcpy(&out, &bits, sizeof(double));
+    return out;
+}
+
+/// Convert half-precision to non-IEEE floating point.
+/// \tparam T type to convert to (builtin floating point type)
+/// \param value binary representation of half-precision value
+/// \return floating point value
+template <typename T>
+T half2float_impl(uint16 value, T, ...)
+{
+    T out;
+    int abs = value & 0x7FFF;
+    if (abs > 0x7C00)
+        out = std::numeric_limits<T>::has_quiet_NaN ? std::numeric_limits<T>::quiet_NaN() : T();
+    else if (abs == 0x7C00)
+        out = std::numeric_limits<T>::has_infinity ? std::numeric_limits<T>::infinity() : std::numeric_limits<T>::max();
+    else if (abs > 0x3FF)
+        out = std::ldexp(static_cast<T>((abs & 0x3FF) | 0x400), (abs >> 10) - 25);
+    else
+        out = std::ldexp(static_cast<T>(abs), -24);
+    return (value & 0x8000) ? -out : out;
+}
+
+/// Convert half-precision to floating point.
+/// \tparam T type to convert to (builtin floating point type)
+/// \param value binary representation of half-precision value
+/// \return floating point value
+template <typename T>
+T half2float(uint16 value)
+{
+    return half2float_impl(
+        value, T(), bool_type < std::numeric_limits<T>::is_iec559 && sizeof(typename bits<T>::type) == sizeof(T) > ());
+}
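+
+// Worked example (illustrative) for the table-based single-precision path: for the half bits 0x3C00 (1.0)
+// the upper six bits select offset_table[15] == 1024 and exponent_table[15] == 0x07800000, so the result is
+// mantissa_table[1024] + exponent_table[15] == 0x38000000 + 0x07800000 == 0x3F800000, the single-precision
+// bit pattern of 1.0f.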
+
+/// Convert half-precision floating point to integer.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \tparam E `true` for round to even, `false` for round away from zero
+/// \tparam T type to convert to (builtin integer type with at least 16 bits of precision, excluding any implicit sign bits)
+/// \param value binary representation of half-precision value
+/// \return integral value
+template <std::float_round_style R, bool E, typename T>
+T half2int_impl(uint16 value)
+{
+#if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS
+    static_assert(std::is_integral<T>::value, "half to int conversion only supports builtin integer types");
+#endif
+    uint32_t e = value & 0x7FFF;
+    if (e >= 0x7C00)
+        return (value & 0x8000) ? std::numeric_limits<T>::min() : std::numeric_limits<T>::max();
+    if (e < 0x3800)
+    {
+        if (R == std::round_toward_infinity)
+            return T(~(value >> 15) & (e != 0));
+        else if (R == std::round_toward_neg_infinity)
+            return -T(value > 0x8000);
+        return T();
+    }
+    uint32_t m = (value & 0x3FF) | 0x400;
+    e >>= 10;
+    if (e < 25)
+    {
+        if (R == std::round_to_nearest)
+            m += (1 << (24 - e)) - (~(m >> (25 - e)) & E);
+        else if (R == std::round_toward_infinity)
+            m += ((value >> 15) - 1) & ((1 << (25 - e)) - 1U);
+        else if (R == std::round_toward_neg_infinity)
+            m += -(value >> 15) & ((1 << (25 - e)) - 1U);
+        m >>= 25 - e;
+    }
+    else
+        m <<= e - 25;
+    return (value & 0x8000) ? -static_cast<T>(m) : static_cast<T>(m);
+}
+
+/// Convert half-precision floating point to integer.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \tparam T type to convert to (builtin integer type with at least 16 bits of precision, excluding any implicit sign bits)
+/// \param value binary representation of half-precision value
+/// \return integral value
+template <std::float_round_style R, typename T>
+T half2int(uint16 value)
+{
+    return half2int_impl<R, HALF_ROUND_TIES_TO_EVEN, T>(value);
+}
+
+/// Convert half-precision floating point to integer using round-to-nearest-away-from-zero.
+/// \tparam T type to convert to (builtin integer type with at least 16 bits of precision, excluding any implicit sign bits)
+/// \param value binary representation of half-precision value
+/// \return integral value
+template <typename T>
+T half2int_up(uint16 value)
+{
+    return half2int_impl<std::round_to_nearest, 0, T>(value);
+}
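+
+// Worked example (illustrative): the half bits 0x4100 encode 2.5, a tie between 2 and 3.
+// half2int_impl<std::round_to_nearest, true, int>(0x4100) rounds the tie to the even neighbour and returns 2,
+// while half2int_up<int>(0x4100) rounds the tie away from zero and returns 3.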
+
+/// Round half-precision number to nearest integer value.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \tparam E `true` for round to even, `false` for round away from zero
+/// \param value binary representation of half-precision value
+/// \return half-precision bits for nearest integral value
+template <std::float_round_style R, bool E>
+uint16 round_half_impl(uint16 value)
+{
+    uint32_t e = value & 0x7FFF;
+    uint16 result = value;
+    if (e < 0x3C00)
+    {
+        result &= 0x8000;
+        if (R == std::round_to_nearest)
+            result |= 0x3C00U & -(e >= (0x3800 + E));
+        else if (R == std::round_toward_infinity)
+            result |= 0x3C00U & -(~(value >> 15) & (e != 0));
+        else if (R == std::round_toward_neg_infinity)
+            result |= 0x3C00U & -(value > 0x8000);
+    }
+    else if (e < 0x6400)
+    {
+        e = 25 - (e >> 10);
+        uint32_t mask = (1 << e) - 1;
+        if (R == std::round_to_nearest)
+            result += (1 << (e - 1)) - (~(result >> e) & E);
+        else if (R == std::round_toward_infinity)
+            result += mask & ((value >> 15) - 1);
+        else if (R == std::round_toward_neg_infinity)
+            result += mask & -(value >> 15);
+        result &= ~mask;
+    }
+    return result;
+}
+
+/// Round half-precision number to nearest integer value.
+/// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding
+/// \param value binary representation of half-precision value
+/// \return half-precision bits for nearest integral value
+template <std::float_round_style R>
+uint16 round_half(uint16 value)
+{
+    return round_half_impl<R, HALF_ROUND_TIES_TO_EVEN>(value);
+}
+
+/// Round half-precision number to nearest integer value using round-to-nearest-away-from-zero.
+/// \param value binary representation of half-precision value
+/// \return half-precision bits for nearest integral value
+inline uint16 round_half_up(uint16 value)
+{
+    return round_half_impl<std::round_to_nearest, 0>(value);
+}
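+
+// Worked example (illustrative): for the half bits 0x4100 (2.5), round_half_impl<std::round_to_nearest, true>
+// returns 0x4000 (2.0, ties to even), whereas round_half_up returns 0x4200 (3.0, ties away from zero).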
+/// \}
+
+struct functions;
+template <typename>
+struct unary_specialized;
+template <typename, typename>
+struct binary_specialized;
+template <typename, typename, std::float_round_style>
+struct half_caster;
+} // namespace detail
+
+/// Half-precision floating point type.
+/// This class implements an IEEE-conformant half-precision floating point type with the usual arithmetic operators and
+/// conversions. It is implicitly convertible to single-precision floating point, which causes arithmetic expressions
+/// and functions with mixed-type operands to be evaluated at the most precise operand type. Additionally, all
+/// arithmetic operations (and many mathematical functions) are carried out in single-precision internally. All
+/// conversions from single- to half-precision are done using the library's default rounding mode, but temporary
+/// results inside chained arithmetic expressions are kept in single-precision as long as possible (while of course
+/// still maintaining a strong half-precision type).
+///
+/// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and
+/// extended definitions it is both a standard-layout type and a trivially copyable type (even if not a POD type),
+/// which means it can be copied in a standard-conformant way using raw binary copies. This also warrants a few words
+/// about the actual size of the type. Although half represents an IEEE 16-bit type, it does not necessarily have to be
+/// exactly 16 bits in size. On any reasonable implementation, however, the binary representation of this type will
+/// most probably not involve any additional "magic" or padding beyond the simple binary representation of the
+/// underlying 16-bit IEEE number, even if this is not strictly guaranteed by the standard. Even then the type only has
+/// an actual size of 16 bits if the C++ implementation supports an unsigned integer type of exactly 16 bits width,
+/// which should be the case on nearly any reasonable platform.
+///
+/// So unless your C++ implementation is totally exotic or imposes special alignment requirements, it is a reasonable
+/// assumption that the data of a half is just the 2 bytes of the underlying IEEE representation.
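+///
+/// A minimal usage sketch (assuming the enclosing library namespace is in scope; exact qualification may differ):
+///
+///     half h(3.5f);   // explicit construction from single-precision
+///     h += 1.0f;      // arithmetic is carried out internally in single-precision
+///     float f = h;    // implicit conversion back to single-precision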
+class half
+{
+    friend struct detail::functions;
+    friend struct detail::unary_specialized<half>;
+    friend struct detail::binary_specialized<half, half>;
+    template <typename, typename, std::float_round_style>
+    friend struct detail::half_caster;
+    friend class std::numeric_limits<half>;
+#if HALF_ENABLE_CPP11_HASH
+    friend struct std::hash<half>;
+#endif
+#if HALF_ENABLE_CPP11_USER_LITERALS
+    friend half literal::operator"" _h(long double);
+#endif
+
+public:
+    /// Default constructor.
+    /// This initializes the half to 0. Although this does not match the builtin types' default-initialization semantics
+    /// and may be less efficient than no initialization, it is needed to provide proper value-initialization semantics.
+    HALF_CONSTEXPR half() HALF_NOEXCEPT : data_() {}
+
+    /// Copy constructor.
+    /// \tparam T type of concrete half expression
+    /// \param rhs half expression to copy from
+    half(detail::expr rhs)
+        : data_(detail::float2half<round_style>(static_cast<float>(rhs)))
+    {
+    }
+
+    /// Conversion constructor.
+    /// \param rhs float to convert
+    explicit half(float rhs)
+        : data_(detail::float2half<round_style>(rhs))
+    {
+    }
+
+    /// Conversion to single-precision.
+    /// \return single precision value representing expression value
+    operator float() const
+    {
+        return detail::half2float<float>(data_);
+    }
+
+    /// Assignment operator.
+    /// \tparam T type of concrete half expression
+    /// \param rhs half expression to copy from
+    /// \return reference to this half
+    half& operator=(detail::expr rhs)
+    {
+        return *this = static_cast<float>(rhs);
+    }
+
+    /// Arithmetic assignment.
+    /// \tparam T type of concrete half expression
+    /// \param rhs half expression to add
+    /// \return reference to this half
+    template <typename T>
+    typename detail::enable<half&, T>::type operator+=(T rhs)
+    {
+        return *this += static_cast<float>(rhs);
+    }
+
+    /// Arithmetic assignment.
+    /// \tparam T type of concrete half expression
+    /// \param rhs half expression to subtract
+    /// \return reference to this half
+    template <typename T>
+    typename detail::enable<half&, T>::type operator-=(T rhs)
+    {
+        return *this -= static_cast<float>(rhs);
+    }
+
+    /// Arithmetic assignment.
+    /// \tparam T type of concrete half expression
+    /// \param rhs half expression to multiply with
+    /// \return reference to this half
+    template <typename T>
+    typename detail::enable<half&, T>::type operator*=(T rhs)
+    {
+        return *this *= static_cast<float>(rhs);
+    }
+
+    /// Arithmetic assignment.
+    /// \tparam T type of concrete half expression
+    /// \param rhs half expression to divide by
+    /// \return reference to this half
+    template <typename T>
+    typename detail::enable<half&, T>::type operator/=(T rhs)
+    {
+        return *this /= static_cast<float>(rhs);
+    }
+
+    /// Assignment operator.
+    /// \param rhs single-precision value to copy from
+    /// \return reference to this half
+    half& operator=(float rhs)
+    {
+        data_ = detail::float2half<round_style>(rhs);
+        return *this;
+    }
+
+    /// Arithmetic assignment.
+    /// \param rhs single-precision value to add
+    /// \return reference to this half
+    half& operator+=(float rhs)
+    {
+        data_ = detail::float2half<round_style>(detail::half2float<float>(data_) + rhs);
+        return *this;
+    }
+
+    /// Arithmetic assignment.
+    /// \param rhs single-precision value to subtract
+    /// \return reference to this half
+    half& operator-=(float rhs)
+    {
+        data_ = detail::float2half<round_style>(detail::half2float<float>(data_) - rhs);
+        return *this;
+    }
+
+    /// Arithmetic assignment.
+    /// \param rhs single-precision value to multiply with
+    /// \return reference to this half
+    half& operator*=(float rhs)
+    {
+        data_ = detail::float2half<round_style>(detail::half2float<float>(data_) * rhs);
+        return *this;
+    }
+
+    /// Arithmetic assignment.
+    /// \param rhs single-precision value to divide by
+    /// \return reference to this half
+    half& operator/=(float rhs)
+    {
+        data_ = detail::float2half<round_style>(detail::half2float<float>(data_) / rhs);
+        return *this;
+    }
+
+    /// Prefix increment.
+    /// \return incremented half value
+    half& operator++()
+    {
+        return *this += 1.0f;
+    }
+
+    /// Prefix decrement.
+    /// \return decremented half value
+    half& operator--()
+    {
+        return *this -= 1.0f;
+    }
+
+    /// Postfix increment.
+    /// \return non-incremented half value
+    half operator++(int)
+    {
+        half out(*this);
+        ++*this;
+        return out;
+    }
+
+    /// Postfix decrement.
+    /// \return non-decremented half value
+    half operator--(int)
+    {
+        half out(*this);
+        --*this;
+        return out;
+    }
+
+private:
+    /// Rounding mode to use
+    static const std::float_round_style round_style = (std::float_round_style)(HALF_ROUND_STYLE);
+
+    /// Constructor.
+    /// \param bits binary representation to set half to
+    HALF_CONSTEXPR half(detail::binary_t, detail::uint16 bits) HALF_NOEXCEPT : data_(bits) {}
+
+    /// Internal binary representation
+    detail::uint16 data_;
+};
+
+#if HALF_ENABLE_CPP11_USER_LITERALS
+namespace literal
+{
+/// Half literal.
+/// While this returns an actual half-precision value, half literals can unfortunately not be constant expressions due
+/// to rather involved conversions.
+/// \param value literal value
+/// \return half with given value (if representable)
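+///
+/// A usage sketch, assuming a using-directive for this literal namespace is in effect:
+///
+///     half x = 3.5_h;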
+inline half operator"" _h(long double value)
+{
+    return half(detail::binary, detail::float2half<half::round_style>(value));
+}
+} // namespace literal
+#endif
+
+namespace detail
+{
+/// Wrapper implementing unspecialized half-precision functions.
+struct functions
+{
+    /// Addition implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return Half-precision sum stored in single-precision
+    static expr plus(float x, float y)
+    {
+        return expr(x + y);
+    }
+
+    /// Subtraction implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return Half-precision difference stored in single-precision
+    static expr minus(float x, float y)
+    {
+        return expr(x - y);
+    }
+
+    /// Multiplication implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return Half-precision product stored in single-precision
+    static expr multiplies(float x, float y)
+    {
+        return expr(x * y);
+    }
+
+    /// Division implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return Half-precision quotient stored in single-precision
+    static expr divides(float x, float y)
+    {
+        return expr(x / y);
+    }
+
+    /// Output implementation.
+    /// \param out stream to write to
+    /// \param arg value to write
+    /// \return reference to stream
+    template <typename charT, typename traits>
+    static std::basic_ostream<charT, traits>& write(std::basic_ostream<charT, traits>& out, float arg)
+    {
+        return out << arg;
+    }
+
+    /// Input implementation.
+    /// \param in stream to read from
+    /// \param arg half to read into
+    /// \return reference to stream
+    template <typename charT, typename traits>
+    static std::basic_istream<charT, traits>& read(std::basic_istream<charT, traits>& in, half& arg)
+    {
+        float f;
+        if (in >> f)
+            arg = f;
+        return in;
+    }
+
+    /// Modulo implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return Half-precision division remainder stored in single-precision
+    static expr fmod(float x, float y)
+    {
+        return expr(std::fmod(x, y));
+    }
+
+    /// Remainder implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return Half-precision division remainder stored in single-precision
+    static expr remainder(float x, float y)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::remainder(x, y));
+#else
+        if (builtin_isnan(x) || builtin_isnan(y))
+            return expr(std::numeric_limits<float>::quiet_NaN());
+        float ax = std::fabs(x), ay = std::fabs(y);
+        if (ax >= 65536.0f || ay < std::ldexp(1.0f, -24))
+            return expr(std::numeric_limits<float>::quiet_NaN());
+        if (ay >= 65536.0f)
+            return expr(x);
+        if (ax == ay)
+            return expr(builtin_signbit(x) ? -0.0f : 0.0f);
+        ax = std::fmod(ax, ay + ay);
+        float y2 = 0.5f * ay;
+        if (ax > y2)
+        {
+            ax -= ay;
+            if (ax >= y2)
+                ax -= ay;
+        }
+        return expr(builtin_signbit(x) ? -ax : ax);
+#endif
+    }
+
+    /// Remainder implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \param quo address to store quotient bits at
+    /// \return Half-precision division remainder stored in single-precision
+    static expr remquo(float x, float y, int* quo)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::remquo(x, y, quo));
+#else
+        if (builtin_isnan(x) || builtin_isnan(y))
+            return expr(std::numeric_limits<float>::quiet_NaN());
+        bool sign = builtin_signbit(x), qsign = static_cast<bool>(sign ^ builtin_signbit(y));
+        float ax = std::fabs(x), ay = std::fabs(y);
+        if (ax >= 65536.0f || ay < std::ldexp(1.0f, -24))
+            return expr(std::numeric_limits<float>::quiet_NaN());
+        if (ay >= 65536.0f)
+            return expr(x);
+        if (ax == ay)
+            return *quo = qsign ? -1 : 1, expr(sign ? -0.0f : 0.0f);
+        ax = std::fmod(ax, 8.0f * ay);
+        int cquo = 0;
+        if (ax >= 4.0f * ay)
+        {
+            ax -= 4.0f * ay;
+            cquo += 4;
+        }
+        if (ax >= 2.0f * ay)
+        {
+            ax -= 2.0f * ay;
+            cquo += 2;
+        }
+        float y2 = 0.5f * ay;
+        if (ax > y2)
+        {
+            ax -= ay;
+            ++cquo;
+            if (ax >= y2)
+            {
+                ax -= ay;
+                ++cquo;
+            }
+        }
+        return *quo = qsign ? -cquo : cquo, expr(sign ? -ax : ax);
+#endif
+    }
+
+    /// Positive difference implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return Positive difference stored in single-precision
+    static expr fdim(float x, float y)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::fdim(x, y));
+#else
+        return expr((x <= y) ? 0.0f : (x - y));
+#endif
+    }
+
+    /// Fused multiply-add implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \param z third operand
+    /// \return \a x * \a y + \a z stored in single-precision
+    static expr fma(float x, float y, float z)
+    {
+#if HALF_ENABLE_CPP11_CMATH && defined(FP_FAST_FMAF)
+        return expr(std::fma(x, y, z));
+#else
+        return expr(x * y + z);
+#endif
+    }
+
+    /// Get NaN.
+    /// \return Half-precision quiet NaN
+    static half nanh()
+    {
+        return half(binary, 0x7FFF);
+    }
+
+    /// Exponential implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr exp(float arg)
+    {
+        return expr(std::exp(arg));
+    }
+
+    /// Exponential implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr expm1(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::expm1(arg));
+#else
+        return expr(static_cast<float>(std::exp(static_cast<double>(arg)) - 1.0));
+#endif
+    }
+
+    /// Binary exponential implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr exp2(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::exp2(arg));
+#else
+        return expr(static_cast<float>(std::exp(arg * 0.69314718055994530941723212145818)));
+#endif
+    }
+
+    /// Logarithm implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr log(float arg)
+    {
+        return expr(std::log(arg));
+    }
+
+    /// Common logarithm implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr log10(float arg)
+    {
+        return expr(std::log10(arg));
+    }
+
+    /// Logarithm implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr log1p(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::log1p(arg));
+#else
+        return expr(static_cast<float>(std::log(1.0 + arg)));
+#endif
+    }
+
+    /// Binary logarithm implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr log2(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::log2(arg));
+#else
+        return expr(static_cast<float>(std::log(static_cast<double>(arg)) * 1.4426950408889634073599246810019));
+#endif
+    }
+
+    /// Square root implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr sqrt(float arg)
+    {
+        return expr(std::sqrt(arg));
+    }
+
+    /// Cubic root implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr cbrt(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::cbrt(arg));
+#else
+        if (builtin_isnan(arg) || builtin_isinf(arg))
+            return expr(arg);
+        return expr(builtin_signbit(arg) ? -static_cast<float>(std::pow(-static_cast<double>(arg), 1.0 / 3.0))
+                                         : static_cast<float>(std::pow(static_cast<double>(arg), 1.0 / 3.0)));
+#endif
+    }
+
+    /// Hypotenuse implementation.
+    /// \param x first argument
+    /// \param y second argument
+    /// \return function value stored in single-precision
+    static expr hypot(float x, float y)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::hypot(x, y));
+#else
+        return expr((builtin_isinf(x) || builtin_isinf(y))
+                ? std::numeric_limits<float>::infinity()
+                : static_cast<float>(std::sqrt(static_cast<double>(x) * x + static_cast<double>(y) * y)));
+#endif
+    }
+
+    /// Power implementation.
+    /// \param base value to exponentiate
+    /// \param exp power to exponentiate to
+    /// \return function value stored in single-precision
+    static expr pow(float base, float exp)
+    {
+        return expr(std::pow(base, exp));
+    }
+
+    /// Sine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr sin(float arg)
+    {
+        return expr(std::sin(arg));
+    }
+
+    /// Cosine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr cos(float arg)
+    {
+        return expr(std::cos(arg));
+    }
+
+    /// Tan implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr tan(float arg)
+    {
+        return expr(std::tan(arg));
+    }
+
+    /// Arc sine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr asin(float arg)
+    {
+        return expr(std::asin(arg));
+    }
+
+    /// Arc cosine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr acos(float arg)
+    {
+        return expr(std::acos(arg));
+    }
+
+    /// Arc tangent implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr atan(float arg)
+    {
+        return expr(std::atan(arg));
+    }
+
+    /// Arc tangent implementation.
+    /// \param x first argument
+    /// \param y second argument
+    /// \return function value stored in single-precision
+    static expr atan2(float x, float y)
+    {
+        return expr(std::atan2(x, y));
+    }
+
+    /// Hyperbolic sine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr sinh(float arg)
+    {
+        return expr(std::sinh(arg));
+    }
+
+    /// Hyperbolic cosine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr cosh(float arg)
+    {
+        return expr(std::cosh(arg));
+    }
+
+    /// Hyperbolic tangent implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr tanh(float arg)
+    {
+        return expr(std::tanh(arg));
+    }
+
+    /// Hyperbolic area sine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr asinh(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::asinh(arg));
+#else
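+        // Fallback: asinh(x) = ln(x + sqrt(x^2 + 1)); -infinity is returned as is,
+        // since the formula would otherwise evaluate ln(-inf + inf) = NaN.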
+        return expr((arg == -std::numeric_limits<float>::infinity())
+                ? arg
+                : static_cast<float>(std::log(arg + std::sqrt(arg * arg + 1.0))));
+#endif
+    }
+
+    /// Hyperbolic area cosine implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr acosh(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::acosh(arg));
+#else
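+        // Fallback: acosh(x) = ln(x + sqrt(x^2 - 1)); arguments below -1 are mapped
+        // to NaN explicitly instead of relying on the log of a tiny negative value.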
+        return expr((arg < -1.0f) ? std::numeric_limits<float>::quiet_NaN()
+                                  : static_cast<float>(std::log(arg + std::sqrt(arg * arg - 1.0))));
+#endif
+    }
+
+    /// Hyperbolic area tangent implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr atanh(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::atanh(arg));
+#else
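+        // Fallback: atanh(x) = 0.5 * ln((1 + x) / (1 - x)).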
+        return expr(static_cast<float>(0.5 * std::log((1.0 + arg) / (1.0 - arg))));
+#endif
+    }
+
+    /// Error function implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr erf(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::erf(arg));
+#else
+        return expr(static_cast<float>(erf(static_cast<double>(arg))));
+#endif
+    }
+
+    /// Complementary error function implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr erfc(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::erfc(arg));
+#else
+        return expr(static_cast<float>(1.0 - erf(static_cast<double>(arg))));
+#endif
+    }
+
+    /// Gamma logarithm implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr lgamma(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::lgamma(arg));
+#else
+        if (builtin_isinf(arg))
+            return expr(std::numeric_limits<float>::infinity());
+        if (arg < 0.0f)
+        {
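+            // Reflection formula: ln|Gamma(x)| = ln(pi) - ln|sin(pi*x)| - ln(Gamma(1-x));
+            // 1.14472988... is ln(pi), and non-positive integers give the +infinity pole.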
+            float i, f = std::modf(-arg, &i);
+            if (f == 0.0f)
+                return expr(std::numeric_limits<float>::infinity());
+            return expr(static_cast<float>(1.1447298858494001741434273513531
+                - std::log(std::abs(std::sin(3.1415926535897932384626433832795 * f))) - lgamma(1.0 - arg)));
+        }
+        return expr(static_cast<float>(lgamma(static_cast<double>(arg))));
+#endif
+    }
+
+    /// Gamma implementation.
+    /// \param arg function argument
+    /// \return function value stored in single-precision
+    static expr tgamma(float arg)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::tgamma(arg));
+#else
+        if (arg == 0.0f)
+            return builtin_signbit(arg) ? expr(-std::numeric_limits<float>::infinity())
+                                        : expr(std::numeric_limits<float>::infinity());
+        if (arg < 0.0f)
+        {
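+            // Reflection formula: Gamma(x) = pi / (sin(pi*x) * Gamma(1-x));
+            // the parity of the integer part of -x determines the sign of the result.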
+            float i, f = std::modf(-arg, &i);
+            if (f == 0.0f)
+                return expr(std::numeric_limits<float>::quiet_NaN());
+            double value = 3.1415926535897932384626433832795
+                / (std::sin(3.1415926535897932384626433832795 * f) * std::exp(lgamma(1.0 - arg)));
+            return expr(static_cast<float>((std::fmod(i, 2.0f) == 0.0f) ? -value : value));
+        }
+        if (builtin_isinf(arg))
+            return expr(arg);
+        return expr(static_cast<float>(std::exp(lgamma(static_cast<double>(arg)))));
+#endif
+    }
+
+    /// Floor implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static half floor(half arg)
+    {
+        return half(binary, round_half<std::round_toward_neg_infinity>(arg.data_));
+    }
+
+    /// Ceiling implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static half ceil(half arg)
+    {
+        return half(binary, round_half<std::round_toward_infinity>(arg.data_));
+    }
+
+    /// Truncation implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static half trunc(half arg)
+    {
+        return half(binary, round_half<std::round_toward_zero>(arg.data_));
+    }
+
+    /// Nearest integer implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static half round(half arg)
+    {
+        return half(binary, round_half_up(arg.data_));
+    }
+
+    /// Nearest integer implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static long lround(half arg)
+    {
+        return detail::half2int_up<long>(arg.data_);
+    }
+
+    /// Nearest integer implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static half rint(half arg)
+    {
+        return half(binary, round_half<half::round_style>(arg.data_));
+    }
+
+    /// Nearest integer implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static long lrint(half arg)
+    {
+        return detail::half2int<half::round_style, long>(arg.data_);
+    }
+
+#if HALF_ENABLE_CPP11_LONG_LONG
+    /// Nearest integer implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static long long llround(half arg)
+    {
+        return detail::half2int_up<long long>(arg.data_);
+    }
+
+    /// Nearest integer implementation.
+    /// \param arg value to round
+    /// \return rounded value
+    static long long llrint(half arg)
+    {
+        return detail::half2int<half::round_style, long long>(arg.data_);
+    }
+#endif
+
+    /// Decomposition implementation.
+    /// \param arg number to decompose
+    /// \param exp address to store exponent at
+    /// \return normalized significand
+    static half frexp(half arg, int* exp)
+    {
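+        // binary16 layout: 1 sign bit (0x8000), 5 exponent bits (0x7C00), 10 mantissa
+        // bits (0x3FF). Zero, infinity and NaN are returned unchanged with *exp = 0,
+        // subnormals are normalized first; 0x3800 is the biased exponent of 2^-1, so
+        // the returned significand lies in [0.5, 1).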
+        int m = arg.data_ & 0x7FFF, e = -14;
+        if (m >= 0x7C00 || !m)
+            return *exp = 0, arg;
+        for (; m < 0x400; m <<= 1, --e)
+            ;
+        return *exp = e + (m >> 10), half(binary, (arg.data_ & 0x8000) | 0x3800 | (m & 0x3FF));
+    }
+
+    /// Decomposition implementation.
+    /// \param arg number to decompose
+    /// \param iptr address to store integer part at
+    /// \return fractional part
+    static half modf(half arg, half* iptr)
+    {
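+        // 0x6400 is the bit pattern of 2^10: at or above it every half is integral, so
+        // the fraction is a signed zero (a NaN argument is passed through). Below
+        // 0x3C00 (= 1.0) the integer part is a signed zero and the fraction is arg
+        // itself; otherwise the fractional mantissa bits are masked off and the
+        // remaining fraction is renormalized.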
+        uint32_t e = arg.data_ & 0x7FFF;
+        if (e >= 0x6400)
+            return *iptr = arg, half(binary, arg.data_ & (0x8000U | -(e > 0x7C00)));
+        if (e < 0x3C00)
+            return iptr->data_ = arg.data_ & 0x8000, arg;
+        e >>= 10;
+        uint32_t mask = (1 << (25 - e)) - 1, m = arg.data_ & mask;
+        iptr->data_ = arg.data_ & ~mask;
+        if (!m)
+            return half(binary, arg.data_ & 0x8000);
+        for (; m < 0x400; m <<= 1, --e)
+            ;
+        return half(binary, static_cast<uint16>((arg.data_ & 0x8000) | (e << 10) | (m & 0x3FF)));
+    }
+
+    /// Scaling implementation.
+    /// \param arg number to scale
+    /// \param exp power of two to scale by
+    /// \return scaled number
+    static half scalbln(half arg, long exp)
+    {
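+        // Adjusts the unbiased exponent directly: zero, infinity and NaN pass through.
+        // exp > 30 overflows to infinity or the largest finite value depending on the
+        // rounding mode, 0 < exp <= 30 gives a normal result, -10 < exp <= 0 gives a
+        // rounded subnormal, and anything smaller underflows to zero or the smallest
+        // subnormal, again depending on the rounding mode.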
+        uint32_t m = arg.data_ & 0x7FFF;
+        if (m >= 0x7C00 || !m)
+            return arg;
+        for (; m < 0x400; m <<= 1, --exp)
+            ;
+        exp += m >> 10;
+        uint16 value = arg.data_ & 0x8000;
+        if (exp > 30)
+        {
+            if (half::round_style == std::round_toward_zero)
+                value |= 0x7BFF;
+            else if (half::round_style == std::round_toward_infinity)
+                value |= 0x7C00 - (value >> 15);
+            else if (half::round_style == std::round_toward_neg_infinity)
+                value |= 0x7BFF + (value >> 15);
+            else
+                value |= 0x7C00;
+        }
+        else if (exp > 0)
+            value |= (exp << 10) | (m & 0x3FF);
+        else if (exp > -11)
+        {
+            m = (m & 0x3FF) | 0x400;
+            if (half::round_style == std::round_to_nearest)
+            {
+                m += 1 << -exp;
+#if HALF_ROUND_TIES_TO_EVEN
+                m -= (m >> (1 - exp)) & 1;
+#endif
+            }
+            else if (half::round_style == std::round_toward_infinity)
+                m += ((value >> 15) - 1) & ((1 << (1 - exp)) - 1U);
+            else if (half::round_style == std::round_toward_neg_infinity)
+                m += -(value >> 15) & ((1 << (1 - exp)) - 1U);
+            value |= m >> (1 - exp);
+        }
+        else if (half::round_style == std::round_toward_infinity)
+            value -= (value >> 15) - 1;
+        else if (half::round_style == std::round_toward_neg_infinity)
+            value += value >> 15;
+        return half(binary, value);
+    }
+
+    /// Exponent implementation.
+    /// \param arg number to query
+    /// \return floating point exponent
+    static int ilogb(half arg)
+    {
+        int abs = arg.data_ & 0x7FFF;
+        if (!abs)
+            return FP_ILOGB0;
+        if (abs < 0x7C00)
+        {
+            int exp = (abs >> 10) - 15;
+            if (abs < 0x400)
+                for (; abs < 0x200; abs <<= 1, --exp)
+                    ;
+            return exp;
+        }
+        if (abs > 0x7C00)
+            return FP_ILOGBNAN;
+        return INT_MAX;
+    }
+
+    /// Exponent implementation.
+    /// \param arg number to query
+    /// \return floating point exponent
+    static half logb(half arg)
+    {
+        int abs = arg.data_ & 0x7FFF;
+        if (!abs)
+            return half(binary, 0xFC00);
+        if (abs < 0x7C00)
+        {
+            int exp = (abs >> 10) - 15;
+            if (abs < 0x400)
+                for (; abs < 0x200; abs <<= 1, --exp)
+                    ;
+            uint16 bits = (exp < 0) << 15;
+            if (exp)
+            {
+                uint32_t m = std::abs(exp) << 6, e = 18;
+                for (; m < 0x400; m <<= 1, --e)
+                    ;
+                bits |= (e << 10) + m;
+            }
+            return half(binary, bits);
+        }
+        if (abs > 0x7C00)
+            return arg;
+        return half(binary, 0x7C00);
+    }
+
+    /// Enumeration implementation.
+    /// \param from number to increase/decrease
+    /// \param to direction to enumerate into
+    /// \return next representable number
+    static half nextafter(half from, half to)
+    {
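+        // NaN operands and equal values are returned directly. Since half uses a
+        // sign-magnitude encoding, stepping the bit pattern by one yields the adjacent
+        // representable value; the sign bit combined with the comparison direction
+        // decides whether to add or subtract 1.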
+        uint16 fabs = from.data_ & 0x7FFF, tabs = to.data_ & 0x7FFF;
+        if (fabs > 0x7C00)
+            return from;
+        if (tabs > 0x7C00 || from.data_ == to.data_ || !(fabs | tabs))
+            return to;
+        if (!fabs)
+            return half(binary, (to.data_ & 0x8000) + 1);
+        bool lt = ((fabs == from.data_) ? static_cast<int>(fabs) : -static_cast<int>(fabs))
+            < ((tabs == to.data_) ? static_cast<int>(tabs) : -static_cast<int>(tabs));
+        return half(binary, from.data_ + (((from.data_ >> 15) ^ static_cast<unsigned>(lt)) << 1) - 1);
+    }
+
+    /// Enumeration implementation.
+    /// \param from number to increase/decrease
+    /// \param to direction to enumerate into
+    /// \return next representable number
+    static half nexttoward(half from, long double to)
+    {
+        if (isnan(from))
+            return from;
+        long double lfrom = static_cast<long double>(from);
+        if (builtin_isnan(to) || lfrom == to)
+            return half(static_cast<float>(to));
+        if (!(from.data_ & 0x7FFF))
+            return half(binary, (static_cast<detail::uint16>(builtin_signbit(to)) << 15) + 1);
+        return half(binary, from.data_ + (((from.data_ >> 15) ^ static_cast<unsigned>(lfrom < to)) << 1) - 1);
+    }
+
+    /// Sign implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return composed value
+    static half copysign(half x, half y)
+    {
+        return half(binary, x.data_ ^ ((x.data_ ^ y.data_) & 0x8000));
+    }
+
+    /// Classification implementation.
+    /// \param arg value to classify
+    /// \return classification of \a arg (FP_ZERO, FP_SUBNORMAL, FP_NORMAL, FP_INFINITE or FP_NAN)
+    static int fpclassify(half arg)
+    {
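+        // Thresholds on the absolute bit pattern: 0 = zero, <= 0x3FF = subnormal,
+        // 0x7C00 = infinity, > 0x7C00 = NaN, everything in between normal.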
+        uint32_t abs = arg.data_ & 0x7FFF;
+        return abs
+            ? ((abs > 0x3FF) ? ((abs >= 0x7C00) ? ((abs > 0x7C00) ? FP_NAN : FP_INFINITE) : FP_NORMAL) : FP_SUBNORMAL)
+            : FP_ZERO;
+    }
+
+    /// Classification implementation.
+    /// \param arg value to classify
+    /// \retval true if finite number
+    /// \retval false else
+    static bool isfinite(half arg)
+    {
+        return (arg.data_ & 0x7C00) != 0x7C00;
+    }
+
+    /// Classification implementation.
+    /// \param arg value to classify
+    /// \retval true if infinite number
+    /// \retval false else
+    static bool isinf(half arg)
+    {
+        return (arg.data_ & 0x7FFF) == 0x7C00;
+    }
+
+    /// Classification implementation.
+    /// \param arg value to classify
+    /// \retval true if not a number
+    /// \retval false else
+    static bool isnan(half arg)
+    {
+        return (arg.data_ & 0x7FFF) > 0x7C00;
+    }
+
+    /// Classification implementation.
+    /// \param arg value to classify
+    /// \retval true if normal number
+    /// \retval false else
+    static bool isnormal(half arg)
+    {
+        return ((arg.data_ & 0x7C00) != 0) & ((arg.data_ & 0x7C00) != 0x7C00);
+    }
+
+    /// Sign bit implementation.
+    /// \param arg value to check
+    /// \retval true if signed
+    /// \retval false if unsigned
+    static bool signbit(half arg)
+    {
+        return (arg.data_ & 0x8000) != 0;
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if operands equal
+    /// \retval false else
+    static bool isequal(half x, half y)
+    {
+        return (x.data_ == y.data_ || !((x.data_ | y.data_) & 0x7FFF)) && !isnan(x);
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if operands not equal
+    /// \retval false else
+    static bool isnotequal(half x, half y)
+    {
+        return (x.data_ != y.data_ && ((x.data_ | y.data_) & 0x7FFF)) || isnan(x);
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if \a x > \a y
+    /// \retval false else
+    static bool isgreater(half x, half y)
+    {
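+        // ((xabs == x.data_) ? xabs : -xabs) maps the sign-magnitude bit pattern to a
+        // signed integer whose ordering matches the numeric ordering of the half
+        // values; the xabs/yabs <= 0x7C00 checks make NaN operands compare false.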
+        int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF;
+        return xabs <= 0x7C00 && yabs <= 0x7C00
+            && (((xabs == x.data_) ? xabs : -xabs) > ((yabs == y.data_) ? yabs : -yabs));
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if \a x >= \a y
+    /// \retval false else
+    static bool isgreaterequal(half x, half y)
+    {
+        int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF;
+        return xabs <= 0x7C00 && yabs <= 0x7C00
+            && (((xabs == x.data_) ? xabs : -xabs) >= ((yabs == y.data_) ? yabs : -yabs));
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if \a x < \a y
+    /// \retval false else
+    static bool isless(half x, half y)
+    {
+        int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF;
+        return xabs <= 0x7C00 && yabs <= 0x7C00
+            && (((xabs == x.data_) ? xabs : -xabs) < ((yabs == y.data_) ? yabs : -yabs));
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if \a x <= \a y
+    /// \retval false else
+    static bool islessequal(half x, half y)
+    {
+        int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF;
+        return xabs <= 0x7C00 && yabs <= 0x7C00
+            && (((xabs == x.data_) ? xabs : -xabs) <= ((yabs == y.data_) ? yabs : -yabs));
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if either \a x > \a y or \a x < \a y
+    /// \retval false else
+    static bool islessgreater(half x, half y)
+    {
+        int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF;
+        if (xabs > 0x7C00 || yabs > 0x7C00)
+            return false;
+        int a = (xabs == x.data_) ? xabs : -xabs, b = (yabs == y.data_) ? yabs : -yabs;
+        return a < b || a > b;
+    }
+
+    /// Comparison implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \retval true if operands unordered
+    /// \retval false else
+    static bool isunordered(half x, half y)
+    {
+        return isnan(x) || isnan(y);
+    }
+
+private:
+    static double erf(double arg)
+    {
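+        // Winitzki-style approximation:
+        // erf(x) ~= sgn(x) * sqrt(1 - exp(-x^2 * (4/pi + a*x^2) / (1 + a*x^2)))
+        // with a = 0.147; 1.27323954... is 4/pi.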
+        if (builtin_isinf(arg))
+            return (arg < 0.0) ? -1.0 : 1.0;
+        double x2 = arg * arg, ax2 = 0.147 * x2,
+               value = std::sqrt(1.0 - std::exp(-x2 * (1.2732395447351626861510701069801 + ax2) / (1.0 + ax2)));
+        return builtin_signbit(arg) ? -value : value;
+    }
+
+    static double lgamma(double arg)
+    {
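+        // Shifts the argument up to >= 8 via Gamma(x+1) = x*Gamma(x), collecting the
+        // factors in v, then applies the Stirling series: the polynomial in w = 1/x^2
+        // holds the Bernoulli-number coefficients and 0.91893853... is ln(2*pi)/2.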
+        double v = 1.0;
+        for (; arg < 8.0; ++arg)
+            v *= arg;
+        double w = 1.0 / (arg * arg);
+        return (((((((-0.02955065359477124183006535947712 * w + 0.00641025641025641025641025641026) * w
+                        + -0.00191752691752691752691752691753)
+                           * w
+                       + 8.4175084175084175084175084175084e-4)
+                          * w
+                      + -5.952380952380952380952380952381e-4)
+                         * w
+                     + 7.9365079365079365079365079365079e-4)
+                        * w
+                    + -0.00277777777777777777777777777778)
+                       * w
+                   + 0.08333333333333333333333333333333)
+            / arg
+            + 0.91893853320467274178032973640562 - std::log(v) - arg + (arg - 0.5) * std::log(arg);
+    }
+};
+
+/// Wrapper for unary half-precision functions needing specialization for individual argument types.
+/// \tparam T argument type
+template <typename T>
+struct unary_specialized
+{
+    /// Negation implementation.
+    /// \param arg value to negate
+    /// \return negated value
+    static HALF_CONSTEXPR half negate(half arg)
+    {
+        return half(binary, arg.data_ ^ 0x8000);
+    }
+
+    /// Absolute value implementation.
+    /// \param arg function argument
+    /// \return absolute value
+    static half fabs(half arg)
+    {
+        return half(binary, arg.data_ & 0x7FFF);
+    }
+};
+template <>
+struct unary_specialized<expr>
+{
+    static HALF_CONSTEXPR expr negate(float arg)
+    {
+        return expr(-arg);
+    }
+    static expr fabs(float arg)
+    {
+        return expr(std::fabs(arg));
+    }
+};
+
+/// Wrapper for binary half-precision functions needing specialization for individual argument types.
+/// \tparam T first argument type
+/// \tparam U second argument type
+template <typename T, typename U>
+struct binary_specialized
+{
+    /// Minimum implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return minimum value
+    static expr fmin(float x, float y)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::fmin(x, y));
+#else
+        if (builtin_isnan(x))
+            return expr(y);
+        if (builtin_isnan(y))
+            return expr(x);
+        return expr(std::min(x, y));
+#endif
+    }
+
+    /// Maximum implementation.
+    /// \param x first operand
+    /// \param y second operand
+    /// \return maximum value
+    static expr fmax(float x, float y)
+    {
+#if HALF_ENABLE_CPP11_CMATH
+        return expr(std::fmax(x, y));
+#else
+        if (builtin_isnan(x))
+            return expr(y);
+        if (builtin_isnan(y))
+            return expr(x);
+        return expr(std::max(x, y));
+#endif
+    }
+};
+template <>
+struct binary_specialized<half, half>
+{
+    static half fmin(half x, half y)
+    {
+        int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF;
+        if (xabs > 0x7C00)
+            return y;
+        if (yabs > 0x7C00)
+            return x;
+        return (((xabs == x.data_) ? xabs : -xabs) > ((yabs == y.data_) ? yabs : -yabs)) ? y : x;
+    }
+    static half fmax(half x, half y)
+    {
+        int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF;
+        if (xabs > 0x7C00)
+            return y;
+        if (yabs > 0x7C00)
+            return x;
+        return (((xabs == x.data_) ? xabs : -xabs) < ((yabs == y.data_) ? yabs : -yabs)) ? y : x;
+    }
+};
+
+/// Helper class for half casts.
+/// This class template has to be specialized for all valid cast argument combinations to define an appropriate static `cast` member
+/// function and a corresponding `type` member denoting its return type.
+/// \tparam T destination type
+/// \tparam U source type
+/// \tparam R rounding mode to use
+template <typename T, typename U, std::float_round_style R = (std::float_round_style)(HALF_ROUND_STYLE)>
+struct half_caster
+{
+};
+template <typename U, std::float_round_style R>
+struct half_caster<half, U, R>
+{
+#if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS
+    static_assert(std::is_arithmetic<U>::value, "half_cast from non-arithmetic type unsupported");
+#endif
+
+    static half cast(U arg)
+    {
+        return cast_impl(arg, is_float<U>());
+    }
+
+private:
+    static half cast_impl(U arg, true_type)
+    {
+        return half(binary, float2half<R>(arg));
+    }
+    static half cast_impl(U arg, false_type)
+    {
+        return half(binary, int2half<R>(arg));
+    }
+};
+template <typename T, std::float_round_style R>
+struct half_caster<T, half, R>
+{
+#if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS
+    static_assert(std::is_arithmetic<T>::value, "half_cast to non-arithmetic type unsupported");
+#endif
+
+    static T cast(half arg)
+    {
+        return cast_impl(arg, is_float<T>());
+    }
+
+private:
+    static T cast_impl(half arg, true_type)
+    {
+        return half2float<T>(arg.data_);
+    }
+    static T cast_impl(half arg, false_type)
+    {
+        return half2int<R, T>(arg.data_);
+    }
+};
+template <typename T, std::float_round_style R>
+struct half_caster<T, expr, R>
+{
+#if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS
+    static_assert(std::is_arithmetic<T>::value, "half_cast to non-arithmetic type unsupported");
+#endif
+
+    static T cast(expr arg)
+    {
+        return cast_impl(arg, is_float<T>());
+    }
+
+private:
+    static T cast_impl(float arg, true_type)
+    {
+        return static_cast<T>(arg);
+    }
+    static T cast_impl(half arg, false_type)
+    {
+        return half2int<R, T>(arg.data_);
+    }
+};
+template <std::float_round_style R>
+struct half_caster<half, half, R>
+{
+    static half cast(half arg)
+    {
+        return arg;
+    }
+};
+template <std::float_round_style R>
+struct half_caster<half, expr, R> : half_caster<half, half, R>
+{
+};
+
+/// \name Comparison operators
+/// \{
+
+/// Comparison for equality.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if operands equal
+/// \retval false else
+template <typename T, typename U>
+typename enable<bool, T, U>::type operator==(T x, U y)
+{
+    return functions::isequal(x, y);
+}
+
+/// Comparison for inequality.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if operands not equal
+/// \retval false else
+template <typename T, typename U>
+typename enable<bool, T, U>::type operator!=(T x, U y)
+{
+    return functions::isnotequal(x, y);
+}
+
+/// Comparison for less than.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x less than \a y
+/// \retval false else
+template <typename T, typename U>
+typename enable<bool, T, U>::type operator<(T x, U y)
+{
+    return functions::isless(x, y);
+}
+
+/// Comparison for greater than.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x greater than \a y
+/// \retval false else
+template <typename T, typename U>
+typename enable<bool, T, U>::type operator>(T x, U y)
+{
+    return functions::isgreater(x, y);
+}
+
+/// Comparison for less equal.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x less equal \a y
+/// \retval false else
+template <typename T, typename U>
+typename enable<bool, T, U>::type operator<=(T x, U y)
+{
+    return functions::islessequal(x, y);
+}
+
+/// Comparison for greater equal.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x greater equal \a y
+/// \retval false else
+template <typename T, typename U>
+typename enable<bool, T, U>::type operator>=(T x, U y)
+{
+    return functions::isgreaterequal(x, y);
+}
+
+/// \}
+/// \name Arithmetic operators
+/// \{
+
+/// Add halves.
+/// \param x left operand
+/// \param y right operand
+/// \return sum of half expressions
+template <typename T, typename U>
+typename enable<expr, T, U>::type operator+(T x, U y)
+{
+    return functions::plus(x, y);
+}
+
+/// Subtract halves.
+/// \param x left operand
+/// \param y right operand
+/// \return difference of half expressions
+template <typename T, typename U>
+typename enable<expr, T, U>::type operator-(T x, U y)
+{
+    return functions::minus(x, y);
+}
+
+/// Multiply halves.
+/// \param x left operand
+/// \param y right operand
+/// \return product of half expressions
+template <typename T, typename U>
+typename enable<expr, T, U>::type operator*(T x, U y)
+{
+    return functions::multiplies(x, y);
+}
+
+/// Divide halves.
+/// \param x left operand
+/// \param y right operand
+/// \return quotient of half expressions
+template <typename T, typename U>
+typename enable<expr, T, U>::type operator/(T x, U y)
+{
+    return functions::divides(x, y);
+}
+
+/// Identity.
+/// \param arg operand
+/// \return unchanged operand
+template <typename T>
+HALF_CONSTEXPR typename enable<T, T>::type operator+(T arg)
+{
+    return arg;
+}
+
+/// Negation.
+/// \param arg operand
+/// \return negated operand
+template <typename T>
+HALF_CONSTEXPR typename enable<T, T>::type operator-(T arg)
+{
+    return unary_specialized<T>::negate(arg);
+}
+
+/// \}
+/// \name Input and output
+/// \{
+
+/// Output operator.
+/// \param out output stream to write into
+/// \param arg half expression to write
+/// \return reference to output stream
+template <typename T, typename charT, typename traits>
+typename enable<std::basic_ostream<charT, traits>&, T>::type operator<<(std::basic_ostream<charT, traits>& out, T arg)
+{
+    return functions::write(out, arg);
+}
+
+/// Input operator.
+/// \param in input stream to read from
+/// \param arg half to read into
+/// \return reference to input stream
+template <typename charT, typename traits>
+std::basic_istream<charT, traits>& operator>>(std::basic_istream<charT, traits>& in, half& arg)
+{
+    return functions::read(in, arg);
+}
+
+/// \}
+/// \name Basic mathematical operations
+/// \{
+
+/// Absolute value.
+/// \param arg operand
+/// \return absolute value of \a arg
+//		template<typename T> typename enable<T,T>::type abs(T arg) { return unary_specialized<T>::fabs(arg); }
+inline half abs(half arg)
+{
+    return unary_specialized<half>::fabs(arg);
+}
+inline expr abs(expr arg)
+{
+    return unary_specialized<expr>::fabs(arg);
+}
+
+/// Absolute value.
+/// \param arg operand
+/// \return absolute value of \a arg
+//		template<typename T> typename enable<T,T>::type fabs(T arg) { return unary_specialized<T>::fabs(arg); }
+inline half fabs(half arg)
+{
+    return unary_specialized<half>::fabs(arg);
+}
+inline expr fabs(expr arg)
+{
+    return unary_specialized<expr>::fabs(arg);
+}
+
+/// Remainder of division.
+/// \param x first operand
+/// \param y second operand
+/// \return remainder of floating point division.
+//		template<typename T,typename U> typename enable<expr,T,U>::type fmod(T x, U y) { return functions::fmod(x, y); }
+inline expr fmod(half x, half y)
+{
+    return functions::fmod(x, y);
+}
+inline expr fmod(half x, expr y)
+{
+    return functions::fmod(x, y);
+}
+inline expr fmod(expr x, half y)
+{
+    return functions::fmod(x, y);
+}
+inline expr fmod(expr x, expr y)
+{
+    return functions::fmod(x, y);
+}
+
+/// Remainder of division.
+/// \param x first operand
+/// \param y second operand
+/// \return remainder of floating point division.
+//		template<typename T,typename U> typename enable<expr,T,U>::type remainder(T x, U y) { return
+// functions::remainder(x, y); }
+inline expr remainder(half x, half y)
+{
+    return functions::remainder(x, y);
+}
+inline expr remainder(half x, expr y)
+{
+    return functions::remainder(x, y);
+}
+inline expr remainder(expr x, half y)
+{
+    return functions::remainder(x, y);
+}
+inline expr remainder(expr x, expr y)
+{
+    return functions::remainder(x, y);
+}
+
+/// Remainder of division.
+/// \param x first operand
+/// \param y second operand
+/// \param quo address to store some bits of quotient at
+/// \return remainder of floating point division.
+//		template<typename T,typename U> typename enable<expr,T,U>::type remquo(T x, U y, int *quo) { return
+// functions::remquo(x, y, quo); }
+inline expr remquo(half x, half y, int* quo)
+{
+    return functions::remquo(x, y, quo);
+}
+inline expr remquo(half x, expr y, int* quo)
+{
+    return functions::remquo(x, y, quo);
+}
+inline expr remquo(expr x, half y, int* quo)
+{
+    return functions::remquo(x, y, quo);
+}
+inline expr remquo(expr x, expr y, int* quo)
+{
+    return functions::remquo(x, y, quo);
+}
+
+/// Fused multiply add.
+/// \param x first operand
+/// \param y second operand
+/// \param z third operand
+/// \return ( \a x * \a y ) + \a z rounded as one operation.
+//		template<typename T,typename U,typename V> typename enable<expr,T,U,V>::type fma(T x, U y, V z) { return
+// functions::fma(x, y, z); }
+inline expr fma(half x, half y, half z)
+{
+    return functions::fma(x, y, z);
+}
+inline expr fma(half x, half y, expr z)
+{
+    return functions::fma(x, y, z);
+}
+inline expr fma(half x, expr y, half z)
+{
+    return functions::fma(x, y, z);
+}
+inline expr fma(half x, expr y, expr z)
+{
+    return functions::fma(x, y, z);
+}
+inline expr fma(expr x, half y, half z)
+{
+    return functions::fma(x, y, z);
+}
+inline expr fma(expr x, half y, expr z)
+{
+    return functions::fma(x, y, z);
+}
+inline expr fma(expr x, expr y, half z)
+{
+    return functions::fma(x, y, z);
+}
+inline expr fma(expr x, expr y, expr z)
+{
+    return functions::fma(x, y, z);
+}
+
+/// Maximum of half expressions.
+/// \param x first operand
+/// \param y second operand
+/// \return maximum of operands
+//		template<typename T,typename U> typename result<T,U>::type fmax(T x, U y) { return
+// binary_specialized<T,U>::fmax(x, y); }
+inline half fmax(half x, half y)
+{
+    return binary_specialized<half, half>::fmax(x, y);
+}
+inline expr fmax(half x, expr y)
+{
+    return binary_specialized<half, expr>::fmax(x, y);
+}
+inline expr fmax(expr x, half y)
+{
+    return binary_specialized<expr, half>::fmax(x, y);
+}
+inline expr fmax(expr x, expr y)
+{
+    return binary_specialized<expr, expr>::fmax(x, y);
+}
+
+/// Minimum of half expressions.
+/// \param x first operand
+/// \param y second operand
+/// \return minimum of operands
+//		template<typename T,typename U> typename result<T,U>::type fmin(T x, U y) { return
+// binary_specialized<T,U>::fmin(x, y); }
+inline half fmin(half x, half y)
+{
+    return binary_specialized<half, half>::fmin(x, y);
+}
+inline expr fmin(half x, expr y)
+{
+    return binary_specialized<half, expr>::fmin(x, y);
+}
+inline expr fmin(expr x, half y)
+{
+    return binary_specialized<expr, half>::fmin(x, y);
+}
+inline expr fmin(expr x, expr y)
+{
+    return binary_specialized<expr, expr>::fmin(x, y);
+}
+
+/// Positive difference.
+/// \param x first operand
+/// \param y second operand
+/// \return \a x - \a y or 0 if difference negative
+//		template<typename T,typename U> typename enable<expr,T,U>::type fdim(T x, U y) { return functions::fdim(x, y); }
+inline expr fdim(half x, half y)
+{
+    return functions::fdim(x, y);
+}
+inline expr fdim(half x, expr y)
+{
+    return functions::fdim(x, y);
+}
+inline expr fdim(expr x, half y)
+{
+    return functions::fdim(x, y);
+}
+inline expr fdim(expr x, expr y)
+{
+    return functions::fdim(x, y);
+}
+
+/// Get NaN value.
+/// \return quiet NaN
+inline half nanh(const char*)
+{
+    return functions::nanh();
+}
+
+/// \}
+/// \name Exponential functions
+/// \{
+
+/// Exponential function.
+/// \param arg function argument
+/// \return e raised to \a arg
+//		template<typename T> typename enable<expr,T>::type exp(T arg) { return functions::exp(arg); }
+inline expr exp(half arg)
+{
+    return functions::exp(arg);
+}
+inline expr exp(expr arg)
+{
+    return functions::exp(arg);
+}
+
+/// Exponential minus one.
+/// \param arg function argument
+/// \return e raised to \a arg subtracted by 1
+//		template<typename T> typename enable<expr,T>::type expm1(T arg) { return functions::expm1(arg); }
+inline expr expm1(half arg)
+{
+    return functions::expm1(arg);
+}
+inline expr expm1(expr arg)
+{
+    return functions::expm1(arg);
+}
+
+/// Binary exponential.
+/// \param arg function argument
+/// \return 2 raised to \a arg
+//		template<typename T> typename enable<expr,T>::type exp2(T arg) { return functions::exp2(arg); }
+inline expr exp2(half arg)
+{
+    return functions::exp2(arg);
+}
+inline expr exp2(expr arg)
+{
+    return functions::exp2(arg);
+}
+
+/// Natural logarithm.
+/// \param arg function argument
+/// \return logarithm of \a arg to base e
+//		template<typename T> typename enable<expr,T>::type log(T arg) { return functions::log(arg); }
+inline expr log(half arg)
+{
+    return functions::log(arg);
+}
+inline expr log(expr arg)
+{
+    return functions::log(arg);
+}
+
+/// Common logarithm.
+/// \param arg function argument
+/// \return logarithm of \a arg to base 10
+//		template<typename T> typename enable<expr,T>::type log10(T arg) { return functions::log10(arg); }
+inline expr log10(half arg)
+{
+    return functions::log10(arg);
+}
+inline expr log10(expr arg)
+{
+    return functions::log10(arg);
+}
+
+/// Natural logarithm.
+/// \param arg function argument
+/// \return logarithm of \a arg plus 1 to base e
+//		template<typename T> typename enable<expr,T>::type log1p(T arg) { return functions::log1p(arg); }
+inline expr log1p(half arg)
+{
+    return functions::log1p(arg);
+}
+inline expr log1p(expr arg)
+{
+    return functions::log1p(arg);
+}
+
+/// Binary logarithm.
+/// \param arg function argument
+/// \return logarithm of \a arg to base 2
+//		template<typename T> typename enable<expr,T>::type log2(T arg) { return functions::log2(arg); }
+inline expr log2(half arg)
+{
+    return functions::log2(arg);
+}
+inline expr log2(expr arg)
+{
+    return functions::log2(arg);
+}
+
+/// \}
+/// \name Power functions
+/// \{
+
+/// Square root.
+/// \param arg function argument
+/// \return square root of \a arg
+//		template<typename T> typename enable<expr,T>::type sqrt(T arg) { return functions::sqrt(arg); }
+inline expr sqrt(half arg)
+{
+    return functions::sqrt(arg);
+}
+inline expr sqrt(expr arg)
+{
+    return functions::sqrt(arg);
+}
+
+/// Cubic root.
+/// \param arg function argument
+/// \return cubic root of \a arg
+//		template<typename T> typename enable<expr,T>::type cbrt(T arg) { return functions::cbrt(arg); }
+inline expr cbrt(half arg)
+{
+    return functions::cbrt(arg);
+}
+inline expr cbrt(expr arg)
+{
+    return functions::cbrt(arg);
+}
+
+/// Hypotenuse function.
+/// \param x first argument
+/// \param y second argument
+/// \return square root of sum of squares without internal over- or underflows
+//		template<typename T,typename U> typename enable<expr,T,U>::type hypot(T x, U y) { return functions::hypot(x, y);
+//}
+inline expr hypot(half x, half y)
+{
+    return functions::hypot(x, y);
+}
+inline expr hypot(half x, expr y)
+{
+    return functions::hypot(x, y);
+}
+inline expr hypot(expr x, half y)
+{
+    return functions::hypot(x, y);
+}
+inline expr hypot(expr x, expr y)
+{
+    return functions::hypot(x, y);
+}
+
+/// Power function.
+/// \param base first argument
+/// \param exp second argument
+/// \return \a base raised to \a exp
+//		template<typename T,typename U> typename enable<expr,T,U>::type pow(T base, U exp) { return functions::pow(base,
+// exp); }
+inline expr pow(half base, half exp)
+{
+    return functions::pow(base, exp);
+}
+inline expr pow(half base, expr exp)
+{
+    return functions::pow(base, exp);
+}
+inline expr pow(expr base, half exp)
+{
+    return functions::pow(base, exp);
+}
+inline expr pow(expr base, expr exp)
+{
+    return functions::pow(base, exp);
+}
+
+/// \}
+/// \name Trigonometric functions
+/// \{
+
+/// Sine function.
+/// \param arg function argument
+/// \return sine value of \a arg
+//		template<typename T> typename enable<expr,T>::type sin(T arg) { return functions::sin(arg); }
+inline expr sin(half arg)
+{
+    return functions::sin(arg);
+}
+inline expr sin(expr arg)
+{
+    return functions::sin(arg);
+}
+
+/// Cosine function.
+/// \param arg function argument
+/// \return cosine value of \a arg
+//		template<typename T> typename enable<expr,T>::type cos(T arg) { return functions::cos(arg); }
+inline expr cos(half arg)
+{
+    return functions::cos(arg);
+}
+inline expr cos(expr arg)
+{
+    return functions::cos(arg);
+}
+
+/// Tangent function.
+/// \param arg function argument
+/// \return tangent value of \a arg
+//		template<typename T> typename enable<expr,T>::type tan(T arg) { return functions::tan(arg); }
+inline expr tan(half arg)
+{
+    return functions::tan(arg);
+}
+inline expr tan(expr arg)
+{
+    return functions::tan(arg);
+}
+
+/// Arc sine.
+/// \param arg function argument
+/// \return arc sine value of \a arg
+//		template<typename T> typename enable<expr,T>::type asin(T arg) { return functions::asin(arg); }
+inline expr asin(half arg)
+{
+    return functions::asin(arg);
+}
+inline expr asin(expr arg)
+{
+    return functions::asin(arg);
+}
+
+/// Arc cosine function.
+/// \param arg function argument
+/// \return arc cosine value of \a arg
+//		template<typename T> typename enable<expr,T>::type acos(T arg) { return functions::acos(arg); }
+inline expr acos(half arg)
+{
+    return functions::acos(arg);
+}
+inline expr acos(expr arg)
+{
+    return functions::acos(arg);
+}
+
+/// Arc tangent function.
+/// \param arg function argument
+/// \return arc tangent value of \a arg
+//		template<typename T> typename enable<expr,T>::type atan(T arg) { return functions::atan(arg); }
+inline expr atan(half arg)
+{
+    return functions::atan(arg);
+}
+inline expr atan(expr arg)
+{
+    return functions::atan(arg);
+}
+
+/// Arc tangent function.
+/// \param x first argument
+/// \param y second argument
+/// \return arc tangent value
+//		template<typename T,typename U> typename enable<expr,T,U>::type atan2(T x, U y) { return functions::atan2(x, y);
+//}
+inline expr atan2(half x, half y)
+{
+    return functions::atan2(x, y);
+}
+inline expr atan2(half x, expr y)
+{
+    return functions::atan2(x, y);
+}
+inline expr atan2(expr x, half y)
+{
+    return functions::atan2(x, y);
+}
+inline expr atan2(expr x, expr y)
+{
+    return functions::atan2(x, y);
+}
+
+/// \}
+/// \name Hyperbolic functions
+/// \{
+
+/// Hyperbolic sine.
+/// \param arg function argument
+/// \return hyperbolic sine value of \a arg
+//		template<typename T> typename enable<expr,T>::type sinh(T arg) { return functions::sinh(arg); }
+inline expr sinh(half arg)
+{
+    return functions::sinh(arg);
+}
+inline expr sinh(expr arg)
+{
+    return functions::sinh(arg);
+}
+
+/// Hyperbolic cosine.
+/// \param arg function argument
+/// \return hyperbolic cosine value of \a arg
+//		template<typename T> typename enable<expr,T>::type cosh(T arg) { return functions::cosh(arg); }
+inline expr cosh(half arg)
+{
+    return functions::cosh(arg);
+}
+inline expr cosh(expr arg)
+{
+    return functions::cosh(arg);
+}
+
+/// Hyperbolic tangent.
+/// \param arg function argument
+/// \return hyperbolic tangent value of \a arg
+//		template<typename T> typename enable<expr,T>::type tanh(T arg) { return functions::tanh(arg); }
+inline expr tanh(half arg)
+{
+    return functions::tanh(arg);
+}
+inline expr tanh(expr arg)
+{
+    return functions::tanh(arg);
+}
+
+/// Hyperbolic area sine.
+/// \param arg function argument
+/// \return area sine value of \a arg
+//		template<typename T> typename enable<expr,T>::type asinh(T arg) { return functions::asinh(arg); }
+inline expr asinh(half arg)
+{
+    return functions::asinh(arg);
+}
+inline expr asinh(expr arg)
+{
+    return functions::asinh(arg);
+}
+
+/// Hyperbolic area cosine.
+/// \param arg function argument
+/// \return area cosine value of \a arg
+//		template<typename T> typename enable<expr,T>::type acosh(T arg) { return functions::acosh(arg); }
+inline expr acosh(half arg)
+{
+    return functions::acosh(arg);
+}
+inline expr acosh(expr arg)
+{
+    return functions::acosh(arg);
+}
+
+/// Hyperbolic area tangent.
+/// \param arg function argument
+/// \return area tangent value of \a arg
+//		template<typename T> typename enable<expr,T>::type atanh(T arg) { return functions::atanh(arg); }
+inline expr atanh(half arg)
+{
+    return functions::atanh(arg);
+}
+inline expr atanh(expr arg)
+{
+    return functions::atanh(arg);
+}
+
+/// \}
+/// \name Error and gamma functions
+/// \{
+
+/// Error function.
+/// \param arg function argument
+/// \return error function value of \a arg
+//		template<typename T> typename enable<expr,T>::type erf(T arg) { return functions::erf(arg); }
+inline expr erf(half arg)
+{
+    return functions::erf(arg);
+}
+inline expr erf(expr arg)
+{
+    return functions::erf(arg);
+}
+
+/// Complementary error function.
+/// \param arg function argument
+/// \return 1 minus error function value of \a arg
+//		template<typename T> typename enable<expr,T>::type erfc(T arg) { return functions::erfc(arg); }
+inline expr erfc(half arg)
+{
+    return functions::erfc(arg);
+}
+inline expr erfc(expr arg)
+{
+    return functions::erfc(arg);
+}
+
+/// Natural logarithm of gamma function.
+/// \param arg function argument
+/// \return natural logarithm of the gamma function for \a arg
+//		template<typename T> typename enable<expr,T>::type lgamma(T arg) { return functions::lgamma(arg); }
+inline expr lgamma(half arg)
+{
+    return functions::lgamma(arg);
+}
+inline expr lgamma(expr arg)
+{
+    return functions::lgamma(arg);
+}
+
+/// Gamma function.
+/// \param arg function argument
+/// \return gamma function value of \a arg
+//		template<typename T> typename enable<expr,T>::type tgamma(T arg) { return functions::tgamma(arg); }
+inline expr tgamma(half arg)
+{
+    return functions::tgamma(arg);
+}
+inline expr tgamma(expr arg)
+{
+    return functions::tgamma(arg);
+}
+
+/// \}
+/// \name Rounding
+/// \{
+
+/// Nearest integer not less than half value.
+/// \param arg half to round
+/// \return nearest integer not less than \a arg
+//		template<typename T> typename enable<half,T>::type ceil(T arg) { return functions::ceil(arg); }
+inline half ceil(half arg)
+{
+    return functions::ceil(arg);
+}
+inline half ceil(expr arg)
+{
+    return functions::ceil(arg);
+}
+
+/// Nearest integer not greater than half value.
+/// \param arg half to round
+/// \return nearest integer not greater than \a arg
+//		template<typename T> typename enable<half,T>::type floor(T arg) { return functions::floor(arg); }
+inline half floor(half arg)
+{
+    return functions::floor(arg);
+}
+inline half floor(expr arg)
+{
+    return functions::floor(arg);
+}
+
+/// Nearest integer not greater in magnitude than half value.
+/// \param arg half to round
+/// \return nearest integer not greater in magnitude than \a arg
+//		template<typename T> typename enable<half,T>::type trunc(T arg) { return functions::trunc(arg); }
+inline half trunc(half arg)
+{
+    return functions::trunc(arg);
+}
+inline half trunc(expr arg)
+{
+    return functions::trunc(arg);
+}
+
+/// Nearest integer.
+/// \param arg half to round
+/// \return nearest integer, rounded away from zero in half-way cases
+//		template<typename T> typename enable<half,T>::type round(T arg) { return functions::round(arg); }
+inline half round(half arg)
+{
+    return functions::round(arg);
+}
+inline half round(expr arg)
+{
+    return functions::round(arg);
+}
+
+/// Nearest integer.
+/// \param arg half to round
+/// \return nearest integer, rounded away from zero in half-way cases
+//		template<typename T> typename enable<long,T>::type lround(T arg) { return functions::lround(arg); }
+inline long lround(half arg)
+{
+    return functions::lround(arg);
+}
+inline long lround(expr arg)
+{
+    return functions::lround(arg);
+}
+
+/// Nearest integer using half's internal rounding mode.
+/// \param arg half expression to round
+/// \return nearest integer using default rounding mode
+//		template<typename T> typename enable<half,T>::type nearbyint(T arg) { return functions::nearbyint(arg); }
+inline half nearbyint(half arg)
+{
+    return functions::rint(arg);
+}
+inline half nearbyint(expr arg)
+{
+    return functions::rint(arg);
+}
+
+/// Nearest integer using half's internal rounding mode.
+/// \param arg half expression to round
+/// \return nearest integer using default rounding mode
+//		template<typename T> typename enable<half,T>::type rint(T arg) { return functions::rint(arg); }
+inline half rint(half arg)
+{
+    return functions::rint(arg);
+}
+inline half rint(expr arg)
+{
+    return functions::rint(arg);
+}
+
+/// Nearest integer using half's internal rounding mode.
+/// \param arg half expression to round
+/// \return nearest integer using default rounding mode
+//		template<typename T> typename enable<long,T>::type lrint(T arg) { return functions::lrint(arg); }
+inline long lrint(half arg)
+{
+    return functions::lrint(arg);
+}
+inline long lrint(expr arg)
+{
+    return functions::lrint(arg);
+}
+#if HALF_ENABLE_CPP11_LONG_LONG
+/// Nearest integer.
+/// \param arg half to round
+/// \return nearest integer, rounded away from zero in half-way cases
+//		template<typename T> typename enable<long long,T>::type llround(T arg) { return functions::llround(arg); }
+inline long long llround(half arg)
+{
+    return functions::llround(arg);
+}
+inline long long llround(expr arg)
+{
+    return functions::llround(arg);
+}
+
+/// Nearest integer using half's internal rounding mode.
+/// \param arg half expression to round
+/// \return nearest integer using default rounding mode
+//		template<typename T> typename enable<long long,T>::type llrint(T arg) { return functions::llrint(arg); }
+inline long long llrint(half arg)
+{
+    return functions::llrint(arg);
+}
+inline long long llrint(expr arg)
+{
+    return functions::llrint(arg);
+}
+#endif
+
+/// \}
+/// \name Floating point manipulation
+/// \{
+
+/// Decompose floating point number.
+/// \param arg number to decompose
+/// \param exp address to store exponent at
+/// \return significand in range [0.5, 1)
+//		template<typename T> typename enable<half,T>::type frexp(T arg, int *exp) { return functions::frexp(arg, exp); }
+inline half frexp(half arg, int* exp)
+{
+    return functions::frexp(arg, exp);
+}
+inline half frexp(expr arg, int* exp)
+{
+    return functions::frexp(arg, exp);
+}
+
+/// Multiply by power of two.
+/// \param arg number to modify
+/// \param exp power of two to multiply with
+/// \return \a arg multiplied by 2 raised to \a exp
+//		template<typename T> typename enable<half,T>::type ldexp(T arg, int exp) { return functions::scalbln(arg, exp);
+//}
+inline half ldexp(half arg, int exp)
+{
+    return functions::scalbln(arg, exp);
+}
+inline half ldexp(expr arg, int exp)
+{
+    return functions::scalbln(arg, exp);
+}
+
+/// Extract integer and fractional parts.
+/// \param arg number to decompose
+/// \param iptr address to store integer part at
+/// \return fractional part
+//		template<typename T> typename enable<half,T>::type modf(T arg, half *iptr) { return functions::modf(arg, iptr);
+//}
+inline half modf(half arg, half* iptr)
+{
+    return functions::modf(arg, iptr);
+}
+inline half modf(expr arg, half* iptr)
+{
+    return functions::modf(arg, iptr);
+}
+
+/// Multiply by power of two.
+/// \param arg number to modify
+/// \param exp power of two to multiply with
+/// \return \a arg multiplied by 2 raised to \a exp
+//		template<typename T> typename enable<half,T>::type scalbn(T arg, int exp) { return functions::scalbln(arg, exp);
+//}
+inline half scalbn(half arg, int exp)
+{
+    return functions::scalbln(arg, exp);
+}
+inline half scalbn(expr arg, int exp)
+{
+    return functions::scalbln(arg, exp);
+}
+
+/// Multiply by power of two.
+/// \param arg number to modify
+/// \param exp power of two to multiply with
+/// \return \a arg multiplied by 2 raised to \a exp
+//		template<typename T> typename enable<half,T>::type scalbln(T arg, long exp) { return functions::scalbln(arg,
+// exp);
+//}
+inline half scalbln(half arg, long exp)
+{
+    return functions::scalbln(arg, exp);
+}
+inline half scalbln(expr arg, long exp)
+{
+    return functions::scalbln(arg, exp);
+}
+
+/// Extract exponent.
+/// \param arg number to query
+/// \return floating point exponent
+/// \retval FP_ILOGB0 for zero
+/// \retval FP_ILOGBNAN for NaN
+/// \retval INT_MAX for infinity
+//		template<typename T> typename enable<int,T>::type ilogb(T arg) { return functions::ilogb(arg); }
+inline int ilogb(half arg)
+{
+    return functions::ilogb(arg);
+}
+inline int ilogb(expr arg)
+{
+    return functions::ilogb(arg);
+}
+
+/// Extract exponent.
+/// \param arg number to query
+/// \return floating point exponent
+//		template<typename T> typename enable<half,T>::type logb(T arg) { return functions::logb(arg); }
+inline half logb(half arg)
+{
+    return functions::logb(arg);
+}
+inline half logb(expr arg)
+{
+    return functions::logb(arg);
+}
+
+/// Next representable value.
+/// \param from value to compute next representable value for
+/// \param to direction towards which to compute next value
+/// \return next representable value after \a from in direction towards \a to
+//		template<typename T,typename U> typename enable<half,T,U>::type nextafter(T from, U to) { return
+// functions::nextafter(from, to); }
+inline half nextafter(half from, half to)
+{
+    return functions::nextafter(from, to);
+}
+inline half nextafter(half from, expr to)
+{
+    return functions::nextafter(from, to);
+}
+inline half nextafter(expr from, half to)
+{
+    return functions::nextafter(from, to);
+}
+inline half nextafter(expr from, expr to)
+{
+    return functions::nextafter(from, to);
+}
+
+/// Next representable value.
+/// \param from value to compute next representable value for
+/// \param to direction towards which to compute next value
+/// \return next representable value after \a from in direction towards \a to
+//		template<typename T> typename enable<half,T>::type nexttoward(T from, long double to) { return
+// functions::nexttoward(from, to); }
+inline half nexttoward(half from, long double to)
+{
+    return functions::nexttoward(from, to);
+}
+inline half nexttoward(expr from, long double to)
+{
+    return functions::nexttoward(from, to);
+}
+
+/// Take sign.
+/// \param x value to change sign for
+/// \param y value to take sign from
+/// \return value equal to \a x in magnitude and to \a y in sign
+//		template<typename T,typename U> typename enable<half,T,U>::type copysign(T x, U y) { return
+// functions::copysign(x, y); }
+inline half copysign(half x, half y)
+{
+    return functions::copysign(x, y);
+}
+inline half copysign(half x, expr y)
+{
+    return functions::copysign(x, y);
+}
+inline half copysign(expr x, half y)
+{
+    return functions::copysign(x, y);
+}
+inline half copysign(expr x, expr y)
+{
+    return functions::copysign(x, y);
+}
+
+/// \}
+/// \name Floating point classification
+/// \{
+
+/// Classify floating point value.
+/// \param arg number to classify
+/// \retval FP_ZERO for positive and negative zero
+/// \retval FP_SUBNORMAL for subnormal numbers
+/// \retval FP_INFINITE for positive and negative infinity
+/// \retval FP_NAN for NaNs
+/// \retval FP_NORMAL for all other (normal) values
+//		template<typename T> typename enable<int,T>::type fpclassify(T arg) { return functions::fpclassify(arg); }
+inline int fpclassify(half arg)
+{
+    return functions::fpclassify(arg);
+}
+inline int fpclassify(expr arg)
+{
+    return functions::fpclassify(arg);
+}
+
+/// Check if finite number.
+/// \param arg number to check
+/// \retval true if neither infinity nor NaN
+/// \retval false else
+//		template<typename T> typename enable<bool,T>::type isfinite(T arg) { return functions::isfinite(arg); }
+inline bool isfinite(half arg)
+{
+    return functions::isfinite(arg);
+}
+inline bool isfinite(expr arg)
+{
+    return functions::isfinite(arg);
+}
+
+/// Check for infinity.
+/// \param arg number to check
+/// \retval true for positive or negative infinity
+/// \retval false else
+//		template<typename T> typename enable<bool,T>::type isinf(T arg) { return functions::isinf(arg); }
+inline bool isinf(half arg)
+{
+    return functions::isinf(arg);
+}
+inline bool isinf(expr arg)
+{
+    return functions::isinf(arg);
+}
+
+/// Check for NaN.
+/// \param arg number to check
+/// \retval true for NaNs
+/// \retval false else
+//		template<typename T> typename enable<bool,T>::type isnan(T arg) { return functions::isnan(arg); }
+inline bool isnan(half arg)
+{
+    return functions::isnan(arg);
+}
+inline bool isnan(expr arg)
+{
+    return functions::isnan(arg);
+}
+
+/// Check if normal number.
+/// \param arg number to check
+/// \retval true if normal number
+/// \retval false if either subnormal, zero, infinity or NaN
+//		template<typename T> typename enable<bool,T>::type isnormal(T arg) { return functions::isnormal(arg); }
+inline bool isnormal(half arg)
+{
+    return functions::isnormal(arg);
+}
+inline bool isnormal(expr arg)
+{
+    return functions::isnormal(arg);
+}
+
+/// Check sign.
+/// \param arg number to check
+/// \retval true for negative number
+/// \retval false for positive number
+//		template<typename T> typename enable<bool,T>::type signbit(T arg) { return functions::signbit(arg); }
+inline bool signbit(half arg)
+{
+    return functions::signbit(arg);
+}
+inline bool signbit(expr arg)
+{
+    return functions::signbit(arg);
+}
+
+/// \}
+/// \name Comparison
+/// \{
+
+/// Comparison for greater than.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x greater than \a y
+/// \retval false else
+//		template<typename T,typename U> typename enable<bool,T,U>::type isgreater(T x, U y) { return
+// functions::isgreater(x, y); }
+inline bool isgreater(half x, half y)
+{
+    return functions::isgreater(x, y);
+}
+inline bool isgreater(half x, expr y)
+{
+    return functions::isgreater(x, y);
+}
+inline bool isgreater(expr x, half y)
+{
+    return functions::isgreater(x, y);
+}
+inline bool isgreater(expr x, expr y)
+{
+    return functions::isgreater(x, y);
+}
+
+/// Comparison for greater equal.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x greater equal \a y
+/// \retval false else
+//		template<typename T,typename U> typename enable<bool,T,U>::type isgreaterequal(T x, U y) { return
+// functions::isgreaterequal(x, y); }
+inline bool isgreaterequal(half x, half y)
+{
+    return functions::isgreaterequal(x, y);
+}
+inline bool isgreaterequal(half x, expr y)
+{
+    return functions::isgreaterequal(x, y);
+}
+inline bool isgreaterequal(expr x, half y)
+{
+    return functions::isgreaterequal(x, y);
+}
+inline bool isgreaterequal(expr x, expr y)
+{
+    return functions::isgreaterequal(x, y);
+}
+
+/// Comparison for less than.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x less than \a y
+/// \retval false else
+//		template<typename T,typename U> typename enable<bool,T,U>::type isless(T x, U y) { return functions::isless(x,
+// y);
+//}
+inline bool isless(half x, half y)
+{
+    return functions::isless(x, y);
+}
+inline bool isless(half x, expr y)
+{
+    return functions::isless(x, y);
+}
+inline bool isless(expr x, half y)
+{
+    return functions::isless(x, y);
+}
+inline bool isless(expr x, expr y)
+{
+    return functions::isless(x, y);
+}
+
+/// Comparison for less equal.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if \a x less equal \a y
+/// \retval false else
+//		template<typename T,typename U> typename enable<bool,T,U>::type islessequal(T x, U y) { return
+// functions::islessequal(x, y); }
+inline bool islessequal(half x, half y)
+{
+    return functions::islessequal(x, y);
+}
+inline bool islessequal(half x, expr y)
+{
+    return functions::islessequal(x, y);
+}
+inline bool islessequal(expr x, half y)
+{
+    return functions::islessequal(x, y);
+}
+inline bool islessequal(expr x, expr y)
+{
+    return functions::islessequal(x, y);
+}
+
+/// Comparison for less or greater.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if either less or greater
+/// \retval false else
+//		template<typename T,typename U> typename enable<bool,T,U>::type islessgreater(T x, U y) { return
+// functions::islessgreater(x, y); }
+inline bool islessgreater(half x, half y)
+{
+    return functions::islessgreater(x, y);
+}
+inline bool islessgreater(half x, expr y)
+{
+    return functions::islessgreater(x, y);
+}
+inline bool islessgreater(expr x, half y)
+{
+    return functions::islessgreater(x, y);
+}
+inline bool islessgreater(expr x, expr y)
+{
+    return functions::islessgreater(x, y);
+}
+
+/// Check if unordered.
+/// \param x first operand
+/// \param y second operand
+/// \retval true if unordered (one or two NaN operands)
+/// \retval false else
+//		template<typename T,typename U> typename enable<bool,T,U>::type isunordered(T x, U y) { return
+// functions::isunordered(x, y); }
+inline bool isunordered(half x, half y)
+{
+    return functions::isunordered(x, y);
+}
+inline bool isunordered(half x, expr y)
+{
+    return functions::isunordered(x, y);
+}
+inline bool isunordered(expr x, half y)
+{
+    return functions::isunordered(x, y);
+}
+inline bool isunordered(expr x, expr y)
+{
+    return functions::isunordered(x, y);
+}
+
+/// \}
+/// \name Casting
+/// \{
+
+/// Cast to or from half-precision floating point number.
+/// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted
+/// directly using the default half-precision rounding mode, without any roundtrip over `float` that a `static_cast`
+/// would otherwise perform.
+///
+/// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types
+/// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler
+/// error and casting between [half](\ref half_float::half)s is just a no-op.
+/// \tparam T destination type (half or built-in arithmetic type)
+/// \tparam U source type (half or built-in arithmetic type)
+/// \param arg value to cast
+/// \return \a arg converted to destination type
+template <typename T, typename U>
+T half_cast(U arg)
+{
+    return half_caster<T, U>::cast(arg);
+}
+
+/// Cast to or from half-precision floating point number.
+/// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted
+/// directly using the given rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do.
+///
+/// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types
+/// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler
+/// error and casting between [half](\ref half_float::half)s is just a no-op.
+/// \tparam T destination type (half or built-in arithmetic type)
+/// \tparam R rounding mode to use.
+/// \tparam U source type (half or built-in arithmetic type)
+/// \param arg value to cast
+/// \return \a arg converted to destination type
+template <typename T, std::float_round_style R, typename U>
+T half_cast(U arg)
+{
+    return half_caster<T, U, R>::cast(arg);
+}
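+// Usage sketch (illustrative only, not part of the original header): casting with the
+// default and with an explicit rounding mode; the variable names are hypothetical.
+//
+//     half_float::half h = half_float::half_cast<half_float::half>(4.2);   // double -> half, default rounding
+//     int i              = half_float::half_cast<int>(h);                  // half -> int
+//     half_float::half t = half_float::half_cast<half_float::half, std::round_toward_zero>(4.2);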
+/// \}
+} // namespace detail
+
+using detail::operator==;
+using detail::operator!=;
+using detail::operator<;
+using detail::operator>;
+using detail::operator<=;
+using detail::operator>=;
+using detail::operator+;
+using detail::operator-;
+using detail::operator*;
+using detail::operator/;
+using detail::operator<<;
+using detail::operator>>;
+
+using detail::abs;
+using detail::acos;
+using detail::acosh;
+using detail::asin;
+using detail::asinh;
+using detail::atan;
+using detail::atan2;
+using detail::atanh;
+using detail::cbrt;
+using detail::ceil;
+using detail::cos;
+using detail::cosh;
+using detail::erf;
+using detail::erfc;
+using detail::exp;
+using detail::exp2;
+using detail::expm1;
+using detail::fabs;
+using detail::fdim;
+using detail::floor;
+using detail::fma;
+using detail::fmax;
+using detail::fmin;
+using detail::fmod;
+using detail::hypot;
+using detail::lgamma;
+using detail::log;
+using detail::log10;
+using detail::log1p;
+using detail::log2;
+using detail::lrint;
+using detail::lround;
+using detail::nanh;
+using detail::nearbyint;
+using detail::pow;
+using detail::remainder;
+using detail::remquo;
+using detail::rint;
+using detail::round;
+using detail::sin;
+using detail::sinh;
+using detail::sqrt;
+using detail::tan;
+using detail::tanh;
+using detail::tgamma;
+using detail::trunc;
+#if HALF_ENABLE_CPP11_LONG_LONG
+using detail::llrint;
+using detail::llround;
+#endif
+using detail::copysign;
+using detail::fpclassify;
+using detail::frexp;
+using detail::ilogb;
+using detail::isfinite;
+using detail::isgreater;
+using detail::isgreaterequal;
+using detail::isinf;
+using detail::isless;
+using detail::islessequal;
+using detail::islessgreater;
+using detail::isnan;
+using detail::isnormal;
+using detail::isunordered;
+using detail::ldexp;
+using detail::logb;
+using detail::modf;
+using detail::nextafter;
+using detail::nexttoward;
+using detail::scalbln;
+using detail::scalbn;
+using detail::signbit;
+
+using detail::half_cast;
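+// Illustrative note (not part of the original header): with the using-declarations above,
+// the half-precision overloads are reachable directly from namespace half_float, e.g.
+//
+//     half_float::half h(2.0f);
+//     half_float::half r = half_float::sqrt(h);   // forwards to detail::sqrt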
+} // namespace half_float
+
+/// Extensions to the C++ standard library.
+namespace std
+{
+/// Numeric limits for half-precision floats.
+/// Because of the underlying single-precision implementation of many operations, it inherits some properties from
+/// `std::numeric_limits<float>`.
+template <>
+class numeric_limits<half_float::half> : public numeric_limits<float>
+{
+public:
+    /// Supports signed values.
+    static HALF_CONSTEXPR_CONST bool is_signed = true;
+
+    /// Is not exact.
+    static HALF_CONSTEXPR_CONST bool is_exact = false;
+
+    /// Doesn't provide modulo arithmetic.
+    static HALF_CONSTEXPR_CONST bool is_modulo = false;
+
+    /// IEEE conformant.
+    static HALF_CONSTEXPR_CONST bool is_iec559 = true;
+
+    /// Supports infinity.
+    static HALF_CONSTEXPR_CONST bool has_infinity = true;
+
+    /// Supports quiet NaNs.
+    static HALF_CONSTEXPR_CONST bool has_quiet_NaN = true;
+
+    /// Supports subnormal values.
+    static HALF_CONSTEXPR_CONST float_denorm_style has_denorm = denorm_present;
+
+    /// Rounding mode.
+    /// Due to the mix of internal single-precision computations (using the rounding mode of the underlying
+    /// single-precision implementation) with the rounding mode of the single-to-half conversions, the actual rounding
+    /// mode might be `std::round_indeterminate` if the default half-precision rounding mode doesn't match the
+    /// single-precision rounding mode.
+    static HALF_CONSTEXPR_CONST float_round_style round_style
+        = (std::numeric_limits<float>::round_style == half_float::half::round_style) ? half_float::half::round_style
+                                                                                     : round_indeterminate;
+
+    /// Significant digits.
+    static HALF_CONSTEXPR_CONST int digits = 11;
+
+    /// Significant decimal digits.
+    static HALF_CONSTEXPR_CONST int digits10 = 3;
+
+    /// Required decimal digits to represent all possible values.
+    static HALF_CONSTEXPR_CONST int max_digits10 = 5;
+
+    /// Number base.
+    static HALF_CONSTEXPR_CONST int radix = 2;
+
+    /// One more than smallest exponent.
+    static HALF_CONSTEXPR_CONST int min_exponent = -13;
+
+    /// Smallest normalized representable power of 10.
+    static HALF_CONSTEXPR_CONST int min_exponent10 = -4;
+
+    /// One more than the largest exponent.
+    static HALF_CONSTEXPR_CONST int max_exponent = 16;
+
+    /// Largest finitely representable power of 10.
+    static HALF_CONSTEXPR_CONST int max_exponent10 = 4;
+
+    /// Smallest positive normal value.
+    static HALF_CONSTEXPR half_float::half min() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0x0400);
+    }
+
+    /// Smallest finite value.
+    static HALF_CONSTEXPR half_float::half lowest() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0xFBFF);
+    }
+
+    /// Largest finite value.
+    static HALF_CONSTEXPR half_float::half max() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0x7BFF);
+    }
+
+    /// Difference between one and next representable value.
+    static HALF_CONSTEXPR half_float::half epsilon() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0x1400);
+    }
+
+    /// Maximum rounding error.
+    static HALF_CONSTEXPR half_float::half round_error() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, (round_style == std::round_to_nearest) ? 0x3800 : 0x3C00);
+    }
+
+    /// Positive infinity.
+    static HALF_CONSTEXPR half_float::half infinity() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0x7C00);
+    }
+
+    /// Quiet NaN.
+    static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0x7FFF);
+    }
+
+    /// Signalling NaN.
+    static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0x7DFF);
+    }
+
+    /// Smallest positive subnormal value.
+    static HALF_CONSTEXPR half_float::half denorm_min() HALF_NOTHROW
+    {
+        return half_float::half(half_float::detail::binary, 0x0001);
+    }
+};
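+// Example (illustrative): querying the specialization above.
+//
+//     half_float::half largest = std::numeric_limits<half_float::half>::max();    // 65504
+//     int mantissaBits         = std::numeric_limits<half_float::half>::digits;   // 11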
+
+#if HALF_ENABLE_CPP11_HASH
+/// Hash function for half-precision floats.
+/// This is only defined if C++11 `std::hash` is supported and enabled.
+template <>
+struct hash<half_float::half> //: unary_function<half_float::half,size_t>
+{
+    /// Type of function argument.
+    typedef half_float::half argument_type;
+
+    /// Function return type.
+    typedef size_t result_type;
+
+    /// Compute hash function.
+    /// \param arg half to hash
+    /// \return hash value
+    result_type operator()(argument_type arg) const
+    {
+        return hash<half_float::detail::uint16>()(static_cast<unsigned>(arg.data_) & -(arg.data_ != 0x8000));
+    }
+};
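+// Example (illustrative): with this specialization, half can be used as a key in the
+// unordered associative containers whenever C++11 hashing is enabled.
+//
+//     std::unordered_set<half_float::half> seen;
+//     seen.insert(half_float::half(1.5f));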
+#endif
+} // namespace std
+
+#undef HALF_CONSTEXPR
+#undef HALF_CONSTEXPR_CONST
+#undef HALF_NOEXCEPT
+#undef HALF_NOTHROW
+#ifdef HALF_POP_WARNINGS
+#pragma warning(pop)
+#undef HALF_POP_WARNINGS
+#endif
+
+#endif

+ 45 - 0
src/detection/CenterPoint-master/include/common/logger.cpp

@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "logger.h"
+#include "ErrorRecorder.h"
+#include "logging.h"
+
+SampleErrorRecorder gRecorder;
+namespace sample
+{
+Logger gLogger{Logger::Severity::kINFO};
+LogStreamConsumer gLogVerbose{LOG_VERBOSE(gLogger)};
+LogStreamConsumer gLogInfo{LOG_INFO(gLogger)};
+LogStreamConsumer gLogWarning{LOG_WARN(gLogger)};
+LogStreamConsumer gLogError{LOG_ERROR(gLogger)};
+LogStreamConsumer gLogFatal{LOG_FATAL(gLogger)};
+
+void setReportableSeverity(Logger::Severity severity)
+{
+    gLogger.setReportableSeverity(severity);
+    gLogVerbose.setReportableSeverity(severity);
+    gLogInfo.setReportableSeverity(severity);
+    gLogWarning.setReportableSeverity(severity);
+    gLogError.setReportableSeverity(severity);
+    gLogFatal.setReportableSeverity(severity);
+}
+} // namespace sample
+
+
+
+
+

+ 36 - 0
src/detection/CenterPoint-master/include/common/logger.h

@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LOGGER_H
+#define LOGGER_H
+
+#include "logging.h"
+
+class SampleErrorRecorder;
+extern SampleErrorRecorder gRecorder;
+namespace sample
+{
+extern Logger gLogger;
+extern LogStreamConsumer gLogVerbose;
+extern LogStreamConsumer gLogInfo;
+extern LogStreamConsumer gLogWarning;
+extern LogStreamConsumer gLogError;
+extern LogStreamConsumer gLogFatal;
+
+void setReportableSeverity(Logger::Severity severity);
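+// Typical usage (sketch; the message text is illustrative):
+//
+//     sample::setReportableSeverity(sample::Severity::kVERBOSE);
+//     sample::gLogInfo << "building TensorRT engine" << std::endl;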
+} // namespace sample
+
+#endif // LOGGER_H

+ 514 - 0
src/detection/CenterPoint-master/include/common/logging.h

@@ -0,0 +1,514 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TENSORRT_LOGGING_H
+#define TENSORRT_LOGGING_H
+
+#include "NvInferRuntime.h"
+#include <cassert>
+#include <ctime>
+#include <iomanip>
+#include <iostream>
+#include <ostream>
+#include <sstream>
+#include <string>
+
+namespace sample
+{
+
+using Severity = nvinfer1::ILogger::Severity;
+
+class LogStreamConsumerBuffer : public std::stringbuf
+{
+public:
+    LogStreamConsumerBuffer(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mOutput(stream)
+        , mPrefix(prefix)
+        , mShouldLog(shouldLog)
+    {
+    }
+
+    LogStreamConsumerBuffer(LogStreamConsumerBuffer&& other)
+        : mOutput(other.mOutput)
+        , mPrefix(other.mPrefix)
+        , mShouldLog(other.mShouldLog)
+    {
+    }
+
+    ~LogStreamConsumerBuffer()
+    {
+        // std::streambuf::pbase() gives a pointer to the beginning of the buffered part of the output sequence
+        // std::streambuf::pptr() gives a pointer to the current position of the output sequence
+        // if the pointer to the beginning is not equal to the pointer to the current position,
+        // call putOutput() to log the output to the stream
+        if (pbase() != pptr())
+        {
+            putOutput();
+        }
+    }
+
+    // synchronizes the stream buffer and returns 0 on success
+    // synchronizing the stream buffer consists of inserting the buffer contents into the stream,
+    // resetting the buffer and flushing the stream
+    virtual int sync()
+    {
+        putOutput();
+        return 0;
+    }
+
+    void putOutput()
+    {
+        if (mShouldLog)
+        {
+            // prepend timestamp
+            std::time_t timestamp = std::time(nullptr);
+            tm* tm_local = std::localtime(&timestamp);
+            std::cout << "[";
+            std::cout << std::setw(2) << std::setfill('0') << 1 + tm_local->tm_mon << "/";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_mday << "/";
+            std::cout << std::setw(4) << std::setfill('0') << 1900 + tm_local->tm_year << "-";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_hour << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_min << ":";
+            std::cout << std::setw(2) << std::setfill('0') << tm_local->tm_sec << "] ";
+            // std::stringbuf::str() gets the string contents of the buffer
+            // insert the buffer contents, prepended with the appropriate prefix, into the stream
+            mOutput << mPrefix << str();
+        }
+        // set the buffer to empty
+        str("");
+        // flush the stream
+        mOutput.flush();
+    }
+
+    void setShouldLog(bool shouldLog)
+    {
+        mShouldLog = shouldLog;
+    }
+
+private:
+    std::ostream& mOutput;
+    std::string mPrefix;
+    bool mShouldLog;
+};
+
+//!
+//! \class LogStreamConsumerBase
+//! \brief Convenience object used to initialize LogStreamConsumerBuffer before std::ostream in LogStreamConsumer
+//!
+class LogStreamConsumerBase
+{
+public:
+    LogStreamConsumerBase(std::ostream& stream, const std::string& prefix, bool shouldLog)
+        : mBuffer(stream, prefix, shouldLog)
+    {
+    }
+
+protected:
+    LogStreamConsumerBuffer mBuffer;
+};
+
+//!
+//! \class LogStreamConsumer
+//! \brief Convenience object used to facilitate use of C++ stream syntax when logging messages.
+//!  Order of base classes is LogStreamConsumerBase and then std::ostream.
+//!  This is because the LogStreamConsumerBase class is used to initialize the LogStreamConsumerBuffer member field
+//!  in LogStreamConsumer and then the address of the buffer is passed to std::ostream.
+//!  This is necessary to prevent the address of an uninitialized buffer from being passed to std::ostream.
+//!  Please do not change the order of the parent classes.
+//!
+class LogStreamConsumer : protected LogStreamConsumerBase, public std::ostream
+{
+public:
+    //! \brief Creates a LogStreamConsumer which logs messages with level severity.
+    //!  Reportable severity determines if the messages are severe enough to be logged.
+    LogStreamConsumer(Severity reportableSeverity, Severity severity)
+        : LogStreamConsumerBase(severityOstream(severity), severityPrefix(severity), severity <= reportableSeverity)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(severity <= reportableSeverity)
+        , mSeverity(severity)
+    {
+    }
+
+    LogStreamConsumer(LogStreamConsumer&& other)
+        : LogStreamConsumerBase(severityOstream(other.mSeverity), severityPrefix(other.mSeverity), other.mShouldLog)
+        , std::ostream(&mBuffer) // links the stream buffer with the stream
+        , mShouldLog(other.mShouldLog)
+        , mSeverity(other.mSeverity)
+    {
+    }
+
+    void setReportableSeverity(Severity reportableSeverity)
+    {
+        mShouldLog = mSeverity <= reportableSeverity;
+        mBuffer.setShouldLog(mShouldLog);
+    }
+
+private:
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    static std::string severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    bool mShouldLog;
+    Severity mSeverity;
+};
+
+//! \class Logger
+//!
+//! \brief Class which manages logging of TensorRT tools and samples
+//!
+//! \details This class provides a common interface for TensorRT tools and samples to log information to the console,
+//! and supports logging two types of messages:
+//!
+//! - Debugging messages with an associated severity (info, warning, error, or internal error/fatal)
+//! - Test pass/fail messages
+//!
+//! The advantage of having all samples use this class for logging as opposed to emitting directly to stdout/stderr is
+//! that the logic for controlling the verbosity and formatting of sample output is centralized in one location.
+//!
+//! In the future, this class could be extended to support dumping test results to a file in some standard format
+//! (for example, JUnit XML), and providing additional metadata (e.g. timing the duration of a test run).
+//!
+//! TODO: For backwards compatibility with existing samples, this class inherits directly from the nvinfer1::ILogger
+//! interface, which is problematic since there isn't a clean separation between messages coming from the TensorRT
+//! library and messages coming from the sample.
+//!
+//! In the future (once all samples are updated to use Logger::getTRTLogger() to access the ILogger) we can refactor the
+//! class to eliminate the inheritance and instead make the nvinfer1::ILogger implementation a member of the Logger
+//! object.
+
+class Logger : public nvinfer1::ILogger
+{
+public:
+    Logger(Severity severity = Severity::kWARNING)
+        : mReportableSeverity(severity)
+    {
+    }
+
+    //!
+    //! \enum TestResult
+    //! \brief Represents the state of a given test
+    //!
+    enum class TestResult
+    {
+        kRUNNING, //!< The test is running
+        kPASSED,  //!< The test passed
+        kFAILED,  //!< The test failed
+        kWAIVED   //!< The test was waived
+    };
+
+    //!
+    //! \brief Forward-compatible method for retrieving the nvinfer::ILogger associated with this Logger
+    //! \return The nvinfer1::ILogger associated with this Logger
+    //!
+    //! TODO Once all samples are updated to use this method to register the logger with TensorRT,
+    //! we can eliminate the inheritance of Logger from ILogger
+    //!
+    nvinfer1::ILogger& getTRTLogger() noexcept
+    {
+        return *this;
+    }
+
+    //!
+    //! \brief Implementation of the nvinfer1::ILogger::log() virtual method
+    //!
+    //! Note samples should not be calling this function directly; it will eventually go away once we eliminate the
+    //! inheritance from nvinfer1::ILogger
+    //!
+    void log(Severity severity, const char* msg) noexcept override
+    {
+        LogStreamConsumer(mReportableSeverity, severity) << "[TRT] " << std::string(msg) << std::endl;
+    }
+
+    //!
+    //! \brief Method for controlling the verbosity of logging output
+    //!
+    //! \param severity The logger will only emit messages that have severity of this level or higher.
+    //!
+    void setReportableSeverity(Severity severity)
+    {
+        mReportableSeverity = severity;
+    }
+
+    //!
+    //! \brief Opaque handle that holds logging information for a particular test
+    //!
+    //! This object is an opaque handle to information used by the Logger to print test results.
+    //! The sample must call Logger::defineTest() in order to obtain a TestAtom that can be used
+    //! with Logger::reportTest{Start,End}().
+    //!
+    class TestAtom
+    {
+    public:
+        TestAtom(TestAtom&&) = default;
+
+    private:
+        friend class Logger;
+
+        TestAtom(bool started, const std::string& name, const std::string& cmdline)
+            : mStarted(started)
+            , mName(name)
+            , mCmdline(cmdline)
+        {
+        }
+
+        bool mStarted;
+        std::string mName;
+        std::string mCmdline;
+    };
+
+    //!
+    //! \brief Define a test for logging
+    //!
+    //! \param[in] name The name of the test. This should be a string starting with
+    //!                  "TensorRT", followed by dot-separated tokens made up of the
+    //!                  characters [A-Za-z0-9_].
+    //!                  For example, "TensorRT.sample_googlenet"
+    //! \param[in] cmdline The command line used to reproduce the test
+    //!
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    //!
+    static TestAtom defineTest(const std::string& name, const std::string& cmdline)
+    {
+        return TestAtom(false, name, cmdline);
+    }
+
+    //!
+    //! \brief A convenience overloaded version of defineTest() that accepts an array of command-line arguments
+    //!        as input
+    //!
+    //! \param[in] name The name of the test
+    //! \param[in] argc The number of command-line arguments
+    //! \param[in] argv The array of command-line arguments (given as C strings)
+    //!
+    //! \return a TestAtom that can be used in Logger::reportTest{Start,End}().
+    static TestAtom defineTest(const std::string& name, int argc, char const* const* argv)
+    {
+        // Append TensorRT version as info
+        const std::string vname = name + " [TensorRT v" + std::to_string(NV_TENSORRT_VERSION) + "]";
+        auto cmdline = genCmdlineString(argc, argv);
+        return defineTest(vname, cmdline);
+    }
+
+    //!
+    //! \brief Report that a test has started.
+    //!
+    //! \pre reportTestStart() has not been called yet for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has started
+    //!
+    static void reportTestStart(TestAtom& testAtom)
+    {
+        reportTestResult(testAtom, TestResult::kRUNNING);
+        assert(!testAtom.mStarted);
+        testAtom.mStarted = true;
+    }
+
+    //!
+    //! \brief Report that a test has ended.
+    //!
+    //! \pre reportTestStart() has been called for the given testAtom
+    //!
+    //! \param[in] testAtom The handle to the test that has ended
+    //! \param[in] result The result of the test. Should be one of TestResult::kPASSED,
+    //!                   TestResult::kFAILED, TestResult::kWAIVED
+    //!
+    static void reportTestEnd(const TestAtom& testAtom, TestResult result)
+    {
+        assert(result != TestResult::kRUNNING);
+        assert(testAtom.mStarted);
+        reportTestResult(testAtom, result);
+    }
+
+    static int reportPass(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kPASSED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportFail(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kFAILED);
+        return EXIT_FAILURE;
+    }
+
+    static int reportWaive(const TestAtom& testAtom)
+    {
+        reportTestEnd(testAtom, TestResult::kWAIVED);
+        return EXIT_SUCCESS;
+    }
+
+    static int reportTest(const TestAtom& testAtom, bool pass)
+    {
+        return pass ? reportPass(testAtom) : reportFail(testAtom);
+    }
+
+    Severity getReportableSeverity() const
+    {
+        return mReportableSeverity;
+    }
+
+private:
+    //!
+    //! \brief returns an appropriate string for prefixing a log message with the given severity
+    //!
+    static const char* severityPrefix(Severity severity)
+    {
+        switch (severity)
+        {
+        case Severity::kINTERNAL_ERROR: return "[F] ";
+        case Severity::kERROR: return "[E] ";
+        case Severity::kWARNING: return "[W] ";
+        case Severity::kINFO: return "[I] ";
+        case Severity::kVERBOSE: return "[V] ";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate string for prefixing a test result message with the given result
+    //!
+    static const char* testResultString(TestResult result)
+    {
+        switch (result)
+        {
+        case TestResult::kRUNNING: return "RUNNING";
+        case TestResult::kPASSED: return "PASSED";
+        case TestResult::kFAILED: return "FAILED";
+        case TestResult::kWAIVED: return "WAIVED";
+        default: assert(0); return "";
+        }
+    }
+
+    //!
+    //! \brief returns an appropriate output stream (cout or cerr) to use with the given severity
+    //!
+    static std::ostream& severityOstream(Severity severity)
+    {
+        return severity >= Severity::kINFO ? std::cout : std::cerr;
+    }
+
+    //!
+    //! \brief method that implements logging test results
+    //!
+    static void reportTestResult(const TestAtom& testAtom, TestResult result)
+    {
+        severityOstream(Severity::kINFO) << "&&&& " << testResultString(result) << " " << testAtom.mName << " # "
+                                         << testAtom.mCmdline << std::endl;
+    }
+
+    //!
+    //! \brief generate a command line string from the given (argc, argv) values
+    //!
+    static std::string genCmdlineString(int argc, char const* const* argv)
+    {
+        std::stringstream ss;
+        for (int i = 0; i < argc; i++)
+        {
+            if (i > 0)
+            {
+                ss << " ";
+            }
+            ss << argv[i];
+        }
+        return ss.str();
+    }
+
+    Severity mReportableSeverity;
+};
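+// Test-reporting sketch (illustrative): the intended call sequence for the TestAtom API.
+// The test name and the `passed` flag below are hypothetical.
+//
+//     auto test = Logger::defineTest("TensorRT.sample_centerpoint", argc, argv);
+//     Logger::reportTestStart(test);
+//     // ... run the sample ...
+//     return passed ? Logger::reportPass(test) : Logger::reportFail(test);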
+
+namespace
+{
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kVERBOSE
+//!
+//! Example usage:
+//!
+//!     LOG_VERBOSE(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_VERBOSE(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kVERBOSE);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINFO
+//!
+//! Example usage:
+//!
+//!     LOG_INFO(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_INFO(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINFO);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kWARNING
+//!
+//! Example usage:
+//!
+//!     LOG_WARN(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_WARN(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kWARNING);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kERROR
+//!
+//! Example usage:
+//!
+//!     LOG_ERROR(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_ERROR(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kERROR);
+}
+
+//!
+//! \brief produces a LogStreamConsumer object that can be used to log messages of severity kINTERNAL_ERROR
+//!         ("fatal" severity)
+//!
+//! Example usage:
+//!
+//!     LOG_FATAL(logger) << "hello world" << std::endl;
+//!
+inline LogStreamConsumer LOG_FATAL(const Logger& logger)
+{
+    return LogStreamConsumer(logger.getReportableSeverity(), Severity::kINTERNAL_ERROR);
+}
+
+} // anonymous namespace
+
+} // namespace sample
+
+#endif // TENSORRT_LOGGING_H

+ 153 - 0
src/detection/CenterPoint-master/include/common/parserOnnxConfig.h

@@ -0,0 +1,153 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PARSER_ONNX_CONFIG_H
+#define PARSER_ONNX_CONFIG_H
+
+#include <cstring>
+#include <iostream>
+#include <string>
+
+#include "NvInfer.h"
+#include "NvOnnxConfig.h"
+#include "NvOnnxParser.h"
+
+#define ONNX_DEBUG 1
+
+/**
+ * \class ParserOnnxConfig
+ * \brief Configuration Manager Class Concrete Implementation
+ *
+ * \note:
+ *
+ */
+
+using namespace std;
+
+class ParserOnnxConfig : public nvonnxparser::IOnnxConfig
+{
+
+protected:
+    string mModelFilename{};
+    string mTextFilename{};
+    string mFullTextFilename{};
+    nvinfer1::DataType mModelDtype;
+    nvonnxparser::IOnnxConfig::Verbosity mVerbosity;
+    bool mPrintLayercInfo;
+
+public:
+    ParserOnnxConfig()
+        : mModelDtype(nvinfer1::DataType::kFLOAT)
+        , mVerbosity(static_cast<int>(nvinfer1::ILogger::Severity::kWARNING))
+        , mPrintLayercInfo(false)
+    {
+#ifdef ONNX_DEBUG
+        if (isDebug())
+        {
+            std::cout << " ParserOnnxConfig::ctor(): " << this << "\t" << std::endl;
+        }
+#endif
+    }
+
+protected:
+    ~ParserOnnxConfig()
+    {
+#ifdef ONNX_DEBUG
+        if (isDebug())
+        {
+            std::cout << "ParserOnnxConfig::dtor(): " << this << std::endl;
+        }
+#endif
+    }
+
+public:
+    virtual void setModelDtype(const nvinfer1::DataType modelDtype) noexcept
+    {
+        mModelDtype = modelDtype;
+    }
+
+    virtual nvinfer1::DataType getModelDtype() const noexcept
+    {
+        return mModelDtype;
+    }
+
+    virtual const char* getModelFileName() const noexcept
+    {
+        return mModelFilename.c_str();
+    }
+    virtual void setModelFileName(const char* onnxFilename) noexcept
+    {
+        mModelFilename = string(onnxFilename);
+    }
+    virtual nvonnxparser::IOnnxConfig::Verbosity getVerbosityLevel() const noexcept
+    {
+        return mVerbosity;
+    }
+    virtual void addVerbosity() noexcept
+    {
+        ++mVerbosity;
+    }
+    virtual void reduceVerbosity() noexcept
+    {
+        --mVerbosity;
+    }
+    virtual void setVerbosityLevel(nvonnxparser::IOnnxConfig::Verbosity verbosity) noexcept
+    {
+        mVerbosity = verbosity;
+    }
+
+    virtual const char* getTextFileName() const noexcept
+    {
+        return mTextFilename.c_str();
+    }
+    virtual void setTextFileName(const char* textFilename) noexcept
+    {
+        mTextFilename = string(textFilename);
+    }
+    virtual const char* getFullTextFileName() const noexcept
+    {
+        return mFullTextFilename.c_str();
+    }
+    virtual void setFullTextFileName(const char* fullTextFilename) noexcept
+    {
+        mFullTextFilename = string(fullTextFilename);
+    }
+    virtual bool getPrintLayerInfo() const noexcept
+    {
+        return mPrintLayercInfo;
+    }
+    virtual void setPrintLayerInfo(bool src) noexcept
+    {
+        mPrintLayercInfo = src;
+    } //!< set the boolean variable corresponding to the Layer Info, see getPrintLayerInfo()
+
+    virtual bool isDebug() const noexcept
+    {
+#if ONNX_DEBUG
+        return (std::getenv("ONNX_DEBUG") ? true : false);
+#else
+        return false;
+#endif
+    }
+
+    virtual void destroy() noexcept
+    {
+        delete this;
+    }
+
+}; // class ParserOnnxConfig
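+// Usage sketch (illustrative; the model file name is hypothetical). Note that the
+// destructor is protected, so instances are released through destroy():
+//
+//     auto* parserConfig = new ParserOnnxConfig();
+//     parserConfig->setModelFileName("centerpoint.onnx");
+//     parserConfig->setModelDtype(nvinfer1::DataType::kHALF);
+//     parserConfig->destroy();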
+
+#endif

+ 70 - 0
src/detection/CenterPoint-master/include/common/safeCommon.h

@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TENSORRT_SAFE_COMMON_H
+#define TENSORRT_SAFE_COMMON_H
+
+#include "NvInferRuntimeCommon.h" // provides nvinfer1::DataType used by elementSize()
+
+#include <cstdint>
+#include <cstdlib>
+#include <iostream>
+#include <memory>
+#include <stdexcept>
+#include <string>
+
+#define CHECK(status)                                                                                                  \
+    do                                                                                                                 \
+    {                                                                                                                  \
+        auto ret = (status);                                                                                           \
+        if (ret != 0)                                                                                                  \
+        {                                                                                                              \
+            std::cerr << "Cuda failure: " << ret << std::endl;                                                         \
+            abort();                                                                                                   \
+        }                                                                                                              \
+    } while (0)
+
+namespace samplesCommon
+{
+template <typename T>
+inline std::shared_ptr<T> infer_object(T* obj)
+{
+    if (!obj)
+    {
+        throw std::runtime_error("Failed to create object");
+    }
+    return std::shared_ptr<T>(obj);
+}
+
+inline uint32_t elementSize(nvinfer1::DataType t)
+{
+    switch (t)
+    {
+    case nvinfer1::DataType::kINT32:
+    case nvinfer1::DataType::kFLOAT: return 4;
+    case nvinfer1::DataType::kHALF: return 2;
+    case nvinfer1::DataType::kINT8: return 1;
+    case nvinfer1::DataType::kBOOL: return 1;
+    }
+    return 0;
+}
+
+template <typename A, typename B>
+inline A divUp(A x, B n)
+{
+    return (x + n - 1) / n;
+}
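+// Usage sketch (illustrative; assumes <cuda_runtime.h> is available in the including file):
+//
+//     void* devPtr{nullptr};
+//     CHECK(cudaMalloc(&devPtr, 1024));                      // aborts with a message on CUDA failure
+//     const int blocks = samplesCommon::divUp(40000, 256);   // 157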
+
+} // namespace samplesCommon
+
+#endif // TENSORRT_SAFE_COMMON_H

+ 347 - 0
src/detection/CenterPoint-master/include/common/sampleConfig.h

@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SampleConfig_H
+#define SampleConfig_H
+
+#include <cstring>
+#include <iostream>
+#include <string>
+
+#include "NvInfer.h"
+#include "NvOnnxConfig.h"
+class SampleConfig : public nvonnxparser::IOnnxConfig
+{
+public:
+    enum class InputDataFormat : int
+    {
+        kASCII = 0,
+        kPPM = 1
+    };
+
+private:
+    std::string mModelFilename;
+    std::string mEngineFilename;
+    std::string mTextFilename;
+    std::string mFullTextFilename;
+    std::string mImageFilename;
+    std::string mReferenceFilename;
+    std::string mOutputFilename;
+    std::string mCalibrationFilename;
+    std::string mTimingCacheFilename;
+    int64_t mLabel{-1};
+    int64_t mMaxBatchSize{32};
+    int64_t mMaxWorkspaceSize{1 * 1024 * 1024 * 1024};
+    int64_t mCalibBatchSize{0};
+    int64_t mMaxNCalibBatch{0};
+    int64_t mFirstCalibBatch{0};
+    int64_t mUseDLACore{-1};
+    nvinfer1::DataType mModelDtype{nvinfer1::DataType::kFLOAT};
+    bool mTF32{true};
+    Verbosity mVerbosity{static_cast<int>(nvinfer1::ILogger::Severity::kWARNING)};
+    bool mPrintLayercInfo{false};
+    bool mDebugBuilder{false};
+    InputDataFormat mInputDataFormat{InputDataFormat::kASCII};
+    uint64_t mTopK{0};
+    float mFailurePercentage{-1.0f};
+    float mTolerance{0.0f};
+    float mAbsTolerance{1e-5f};
+
+public:
+    SampleConfig()
+    {
+#ifdef ONNX_DEBUG
+        if (isDebug())
+        {
+            std::cout << " SampleConfig::ctor(): " << this << "\t" << std::endl;
+        }
+#endif
+    }
+
+protected:
+    ~SampleConfig()
+    {
+#ifdef ONNX_DEBUG
+        if (isDebug())
+        {
+            std::cout << "SampleConfig::dtor(): " << this << std::endl;
+        }
+#endif
+    }
+
+public:
+    void setModelDtype(const nvinfer1::DataType mdt) noexcept
+    {
+        mModelDtype = mdt;
+    }
+
+    nvinfer1::DataType getModelDtype() const noexcept
+    {
+        return mModelDtype;
+    }
+
+    bool getTF32() const noexcept
+    {
+        return mTF32;
+    }
+
+    void setTF32(bool enabled) noexcept
+    {
+        mTF32 = enabled;
+    }
+
+    const char* getModelFileName() const noexcept
+    {
+        return mModelFilename.c_str();
+    }
+
+    void setModelFileName(const char* onnxFilename) noexcept
+    {
+        mModelFilename = std::string(onnxFilename);
+    }
+    Verbosity getVerbosityLevel() const noexcept
+    {
+        return mVerbosity;
+    }
+    void addVerbosity() noexcept
+    {
+        ++mVerbosity;
+    }
+    void reduceVerbosity() noexcept
+    {
+        --mVerbosity;
+    }
+    virtual void setVerbosityLevel(Verbosity v) noexcept
+    {
+        mVerbosity = v;
+    }
+    const char* getEngineFileName() const noexcept
+    {
+        return mEngineFilename.c_str();
+    }
+    void setEngineFileName(const char* engineFilename) noexcept
+    {
+        mEngineFilename = std::string(engineFilename);
+    }
+    const char* getTextFileName() const noexcept
+    {
+        return mTextFilename.c_str();
+    }
+    void setTextFileName(const char* textFilename) noexcept
+    {
+        mTextFilename = std::string(textFilename);
+    }
+    const char* getFullTextFileName() const noexcept
+    {
+        return mFullTextFilename.c_str();
+    }
+    void setFullTextFileName(const char* fullTextFilename) noexcept
+    {
+        mFullTextFilename = std::string(fullTextFilename);
+    }
+    void setLabel(int64_t label) noexcept
+    {
+        mLabel = label;
+    } //!<  set the Label
+
+    int64_t getLabel() const noexcept
+    {
+        return mLabel;
+    } //!<  get the Label
+
+    bool getPrintLayerInfo() const noexcept
+    {
+        return mPrintLayercInfo;
+    }
+
+    void setPrintLayerInfo(bool b) noexcept
+    {
+        mPrintLayercInfo = b;
+    } //!< set the boolean variable corresponding to the Layer Info, see getPrintLayerInfo()
+
+    void setMaxBatchSize(int64_t maxBatchSize) noexcept
+    {
+        mMaxBatchSize = maxBatchSize;
+    } //!<  set the Max Batch Size
+    int64_t getMaxBatchSize() const noexcept
+    {
+        return mMaxBatchSize;
+    } //!<  get the Max Batch Size
+
+    void setMaxWorkSpaceSize(int64_t maxWorkSpaceSize) noexcept
+    {
+        mMaxWorkspaceSize = maxWorkSpaceSize;
+    } //!<  set the Max Work Space size
+    int64_t getMaxWorkSpaceSize() const noexcept
+    {
+        return mMaxWorkspaceSize;
+    } //!<  get the Max Work Space size
+
+    void setCalibBatchSize(int64_t CalibBatchSize) noexcept
+    {
+        mCalibBatchSize = CalibBatchSize;
+    } //!<  set the calibration batch size
+    int64_t getCalibBatchSize() const noexcept
+    {
+        return mCalibBatchSize;
+    } //!<  get calibration batch size
+
+    void setMaxNCalibBatch(int64_t MaxNCalibBatch) noexcept
+    {
+        mMaxNCalibBatch = MaxNCalibBatch;
+    } //!<  set Max Number of Calibration Batches
+    int64_t getMaxNCalibBatch() const noexcept
+    {
+        return mMaxNCalibBatch;
+    } //!<  get the Max Number of Calibration Batches
+
+    void setFirstCalibBatch(int64_t FirstCalibBatch) noexcept
+    {
+        mFirstCalibBatch = FirstCalibBatch;
+    } //!<  set the first calibration batch
+    int64_t getFirstCalibBatch() const noexcept
+    {
+        return mFirstCalibBatch;
+    } //!<  get the first calibration batch
+
+    void setUseDLACore(int64_t UseDLACore) noexcept
+    {
+        mUseDLACore = UseDLACore;
+    } //!<  set the DLA core to use
+    int64_t getUseDLACore() const noexcept
+    {
+        return mUseDLACore;
+    } //!<  get the DLA core to use
+
+    void setDebugBuilder() noexcept
+    {
+        mDebugBuilder = true;
+    } //!<  enable the Debug info, while building the engine.
+    bool getDebugBuilder() const noexcept
+    {
+        return mDebugBuilder;
+    } //!<  get the boolean variable, corresponding to the debug builder
+
+    const char* getImageFileName() const noexcept //!<  get the Image file name (PPM or ASCII)
+    {
+        return mImageFilename.c_str();
+    }
+    void setImageFileName(const char* imageFilename) noexcept //!<  set the Image file name
+    {
+        mImageFilename = std::string(imageFilename);
+    }
+    const char* getReferenceFileName() const noexcept
+    {
+        return mReferenceFilename.c_str();
+    }
+    void setReferenceFileName(const char* referenceFilename) noexcept //!<  set reference file name
+    {
+        mReferenceFilename = std::string(referenceFilename);
+    }
+
+    void setInputDataFormat(InputDataFormat idt) noexcept
+    {
+        mInputDataFormat = idt;
+    } //!<  specifies expected data format of the image file (PPM or ASCII)
+    InputDataFormat getInputDataFormat() const noexcept
+    {
+        return mInputDataFormat;
+    } //!<  returns the expected data format of the image file.
+
+    const char* getOutputFileName() const noexcept //!<  get the name of the file used to save the results
+    {
+        return mOutputFilename.c_str();
+    }
+    void setOutputFileName(const char* outputFilename) noexcept //!<  set the output file name
+    {
+        mOutputFilename = std::string(outputFilename);
+    }
+
+    const char* getCalibrationFileName() const noexcept
+    {
+        return mCalibrationFilename.c_str();
+    } //!<  get the file containing the list of image files for int8 calibration
+    void setCalibrationFileName(const char* calibrationFilename) noexcept //!<  set the int8 calibration list file name
+    {
+        mCalibrationFilename = std::string(calibrationFilename);
+    }
+
+    uint64_t getTopK() const noexcept
+    {
+        return mTopK;
+    }
+    void setTopK(uint64_t topK) noexcept
+    {
+        mTopK = topK;
+    } //!<  If this option is specified, return the K top probabilities.
+
+    float getFailurePercentage() const noexcept
+    {
+        return mFailurePercentage;
+    }
+
+    void setFailurePercentage(float f) noexcept
+    {
+        mFailurePercentage = f;
+    }
+
+    float getAbsoluteTolerance() const noexcept
+    {
+        return mAbsTolerance;
+    }
+
+    void setAbsoluteTolerance(float a) noexcept
+    {
+        mAbsTolerance = a;
+    }
+
+    float getTolerance() const noexcept
+    {
+        return mTolerance;
+    }
+
+    void setTolerance(float t) noexcept
+    {
+        mTolerance = t;
+    }
+
+    const char* getTimingCacheFilename() const noexcept
+    {
+        return mTimingCacheFilename.c_str();
+    }
+    
+    void setTimingCacheFileName(const char* timingCacheFilename) noexcept
+    {
+        mTimingCacheFilename = std::string(timingCacheFilename);
+    }
+
+    bool isDebug() const noexcept
+    {
+#if ONNX_DEBUG
+        return (std::getenv("ONNX_DEBUG") ? true : false);
+#else
+        return false;
+#endif
+    }
+
+    void destroy() noexcept
+    {
+        delete this;
+    }
+
+}; // class SampleConfig
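+// Configuration sketch (illustrative; the file name and sizes are hypothetical). The
+// destructor is protected, so instances are released through destroy():
+//
+//     auto* cfg = new SampleConfig();
+//     cfg->setModelFileName("centerpoint.onnx");
+//     cfg->setMaxBatchSize(1);
+//     cfg->setMaxWorkSpaceSize(1LL << 30);   // 1 GiB
+//     cfg->destroy();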
+
+#endif

+ 389 - 0
src/detection/CenterPoint-master/include/common/sampleDevice.h

@@ -0,0 +1,389 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TRT_SAMPLE_DEVICE_H
+#define TRT_SAMPLE_DEVICE_H
+
+#include <cassert>
+#include <cuda.h>
+#include <cuda_runtime.h>
+#include <iostream>
+#include <thread>
+
+namespace sample
+{
+
+inline void cudaCheck(cudaError_t ret, std::ostream& err = std::cerr)
+{
+    if (ret != cudaSuccess)
+    {
+        err << "Cuda failure: " << cudaGetErrorString(ret) << std::endl;
+        abort();
+    }
+}
+
+class TrtCudaEvent;
+
+namespace
+{
+
+#if CUDA_VERSION < 10000
+void cudaSleep(cudaStream_t stream, cudaError_t status, void* sleep)
+#else
+void cudaSleep(void* sleep)
+#endif
+{
+    std::this_thread::sleep_for(std::chrono::duration<int, std::milli>(*static_cast<int*>(sleep)));
+}
+
+} // namespace
+
+//!
+//! \class TrtCudaStream
+//! \brief Managed CUDA stream
+//!
+class TrtCudaStream
+{
+public:
+    TrtCudaStream()
+    {
+        cudaCheck(cudaStreamCreate(&mStream));
+    }
+
+    TrtCudaStream(const TrtCudaStream&) = delete;
+
+    TrtCudaStream& operator=(const TrtCudaStream&) = delete;
+
+    TrtCudaStream(TrtCudaStream&&) = delete;
+
+    TrtCudaStream& operator=(TrtCudaStream&&) = delete;
+
+    ~TrtCudaStream()
+    {
+        cudaCheck(cudaStreamDestroy(mStream));
+    }
+
+    cudaStream_t get() const
+    {
+        return mStream;
+    }
+
+    void synchronize()
+    {
+        cudaCheck(cudaStreamSynchronize(mStream));
+    }
+
+    void wait(TrtCudaEvent& event);
+
+    void sleep(int* ms)
+    {
+#if CUDA_VERSION < 10000
+        cudaCheck(cudaStreamAddCallback(mStream, cudaSleep, ms, 0));
+#else
+        cudaCheck(cudaLaunchHostFunc(mStream, cudaSleep, ms));
+#endif
+    }
+
+private:
+    cudaStream_t mStream{};
+};
+
+//!
+//! \class TrtCudaEvent
+//! \brief Managed CUDA event
+//!
+class TrtCudaEvent
+{
+public:
+    explicit TrtCudaEvent(bool blocking = true)
+    {
+        const uint32_t flags = blocking ? cudaEventBlockingSync : cudaEventDefault;
+        cudaCheck(cudaEventCreateWithFlags(&mEvent, flags));
+    }
+
+    TrtCudaEvent(const TrtCudaEvent&) = delete;
+
+    TrtCudaEvent& operator=(const TrtCudaEvent&) = delete;
+
+    TrtCudaEvent(TrtCudaEvent&&) = delete;
+
+    TrtCudaEvent& operator=(TrtCudaEvent&&) = delete;
+
+    ~TrtCudaEvent()
+    {
+        cudaCheck(cudaEventDestroy(mEvent));
+    }
+
+    cudaEvent_t get() const
+    {
+        return mEvent;
+    }
+
+    void record(const TrtCudaStream& stream)
+    {
+        cudaCheck(cudaEventRecord(mEvent, stream.get()));
+    }
+
+    void synchronize()
+    {
+        cudaCheck(cudaEventSynchronize(mEvent));
+    }
+
+    // Returns the elapsed time in milliseconds
+    float operator-(const TrtCudaEvent& e) const
+    {
+        float time{0};
+        cudaCheck(cudaEventElapsedTime(&time, e.get(), get()));
+        return time;
+    }
+
+private:
+    cudaEvent_t mEvent{};
+};
+
+inline void TrtCudaStream::wait(TrtCudaEvent& event)
+{
+    cudaCheck(cudaStreamWaitEvent(mStream, event.get(), 0));
+}
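+// Timing sketch (illustrative): measuring elapsed GPU time with the wrappers above.
+//
+//     sample::TrtCudaStream stream;
+//     sample::TrtCudaEvent start, stop;
+//     start.record(stream);
+//     // ... enqueue work on stream.get() ...
+//     stop.record(stream);
+//     stop.synchronize();
+//     float ms = stop - start;   // elapsed milliseconds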
+
+//!
+//! \class TrtCudaGraph
+//! \brief Managed CUDA graph
+//!
+class TrtCudaGraph
+{
+public:
+    explicit TrtCudaGraph() = default;
+
+    TrtCudaGraph(const TrtCudaGraph&) = delete;
+
+    TrtCudaGraph& operator=(const TrtCudaGraph&) = delete;
+
+    TrtCudaGraph(TrtCudaGraph&&) = delete;
+
+    TrtCudaGraph& operator=(TrtCudaGraph&&) = delete;
+
+    ~TrtCudaGraph()
+    {
+        if (mGraphExec)
+        {
+            cudaGraphExecDestroy(mGraphExec);
+        }
+    }
+
+    void beginCapture(TrtCudaStream& stream)
+    {
+        cudaCheck(cudaGraphCreate(&mGraph, 0));
+        cudaCheck(cudaStreamBeginCapture(stream.get(), cudaStreamCaptureModeThreadLocal));
+    }
+
+    bool launch(TrtCudaStream& stream)
+    {
+        return cudaGraphLaunch(mGraphExec, stream.get()) == cudaSuccess;
+    }
+
+    void endCapture(TrtCudaStream& stream)
+    {
+        cudaCheck(cudaStreamEndCapture(stream.get(), &mGraph));
+        cudaCheck(cudaGraphInstantiate(&mGraphExec, mGraph, nullptr, nullptr, 0));
+        cudaCheck(cudaGraphDestroy(mGraph));
+    }
+
+    void endCaptureOnError(TrtCudaStream& stream)
+    {
+        const auto ret = cudaStreamEndCapture(stream.get(), &mGraph);
+        assert(ret == cudaErrorStreamCaptureInvalidated);
+        assert(mGraph == nullptr);
+        // Clean up the above CUDA error.
+        cudaGetLastError();
+        sample::gLogWarning << "The CUDA graph capture on the stream has failed." << std::endl;
+    }
+
+private:
+    cudaGraph_t mGraph{};
+    cudaGraphExec_t mGraphExec{};
+};
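+// Graph-capture sketch (illustrative): capture the work enqueued on a stream, then replay it.
+//
+//     sample::TrtCudaGraph graph;
+//     graph.beginCapture(stream);
+//     // ... enqueue kernels / async copies on stream.get() ...
+//     graph.endCapture(stream);      // instantiates the executable graph
+//     graph.launch(stream);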
+
+//!
+//! \class TrtCudaBuffer
+//! \brief Managed buffer for host and device
+//!
+template <typename A, typename D>
+class TrtCudaBuffer
+{
+public:
+    TrtCudaBuffer() = default;
+
+    TrtCudaBuffer(const TrtCudaBuffer&) = delete;
+
+    TrtCudaBuffer& operator=(const TrtCudaBuffer&) = delete;
+
+    TrtCudaBuffer(TrtCudaBuffer&& rhs)
+    {
+        reset(rhs.mPtr);
+        rhs.mPtr = nullptr;
+    }
+
+    TrtCudaBuffer& operator=(TrtCudaBuffer&& rhs)
+    {
+        if (this != &rhs)
+        {
+            reset(rhs.mPtr);
+            rhs.mPtr = nullptr;
+        }
+        return *this;
+    }
+
+    ~TrtCudaBuffer()
+    {
+        reset();
+    }
+
+    TrtCudaBuffer(size_t size)
+    {
+        A()(&mPtr, size);
+    }
+
+    void allocate(size_t size)
+    {
+        reset();
+        A()(&mPtr, size);
+    }
+
+    void reset(void* ptr = nullptr)
+    {
+        if (mPtr)
+        {
+            D()(mPtr);
+        }
+        mPtr = ptr;
+    }
+
+    void* get() const
+    {
+        return mPtr;
+    }
+
+private:
+    void* mPtr{nullptr};
+};
+
+struct DeviceAllocator
+{
+    void operator()(void** ptr, size_t size)
+    {
+        cudaCheck(cudaMalloc(ptr, size));
+    }
+};
+
+struct DeviceDeallocator
+{
+    void operator()(void* ptr)
+    {
+        cudaCheck(cudaFree(ptr));
+    }
+};
+
+struct HostAllocator
+{
+    void operator()(void** ptr, size_t size)
+    {
+        cudaCheck(cudaMallocHost(ptr, size));
+    }
+};
+
+struct HostDeallocator
+{
+    void operator()(void* ptr)
+    {
+        cudaCheck(cudaFreeHost(ptr));
+    }
+};
+
+using TrtDeviceBuffer = TrtCudaBuffer<DeviceAllocator, DeviceDeallocator>;
+
+using TrtHostBuffer = TrtCudaBuffer<HostAllocator, HostDeallocator>;
+
+//!
+//! \class MirroredBuffer
+//! \brief Coupled host and device buffers
+//!
+class MirroredBuffer
+{
+public:
+    void allocate(size_t size)
+    {
+        mSize = size;
+        mHostBuffer.allocate(size);
+        mDeviceBuffer.allocate(size);
+    }
+
+    void* getDeviceBuffer() const
+    {
+        return mDeviceBuffer.get();
+    }
+
+    void* getHostBuffer() const
+    {
+        return mHostBuffer.get();
+    }
+
+    void hostToDevice(TrtCudaStream& stream)
+    {
+        cudaCheck(cudaMemcpyAsync(mDeviceBuffer.get(), mHostBuffer.get(), mSize, cudaMemcpyHostToDevice, stream.get()));
+    }
+
+    void deviceToHost(TrtCudaStream& stream)
+    {
+        cudaCheck(cudaMemcpyAsync(mHostBuffer.get(), mDeviceBuffer.get(), mSize, cudaMemcpyDeviceToHost, stream.get()));
+    }
+
+    size_t getSize() const
+    {
+        return mSize;
+    }
+
+private:
+    size_t mSize{0};
+    TrtHostBuffer mHostBuffer;
+    TrtDeviceBuffer mDeviceBuffer;
+};
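+
+//!
+//! Usage sketch (illustrative only, not part of the original sample): round-trip a
+//! mirrored buffer through the device on a dedicated stream.
+//!
+inline void exampleMirroredBufferRoundTrip(size_t byteSize)
+{
+    TrtCudaStream stream;
+    MirroredBuffer buffer;
+    buffer.allocate(byteSize);   // allocates pinned host and device storage of equal size
+    buffer.hostToDevice(stream); // asynchronous host-to-device copy
+    buffer.deviceToHost(stream); // asynchronous device-to-host copy
+    cudaCheck(cudaStreamSynchronize(stream.get())); // wait until both copies have completed
+}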
+
+
+inline void setCudaDevice(int device, std::ostream& os)
+{
+    cudaCheck(cudaSetDevice(device));
+
+    cudaDeviceProp properties;
+    cudaCheck(cudaGetDeviceProperties(&properties, device));
+
+// clang-format off
+    os << "=== Device Information ===" << std::endl;
+    os << "Selected Device: "      << properties.name                                               << std::endl;
+    os << "Compute Capability: "   << properties.major << "." << properties.minor                   << std::endl;
+    os << "SMs: "                  << properties.multiProcessorCount                                << std::endl;
+    os << "Compute Clock Rate: "   << properties.clockRate / 1000000.0F << " GHz"                   << std::endl;
+    os << "Device Global Memory: " << (properties.totalGlobalMem >> 20) << " MiB"                   << std::endl;
+    os << "Shared Memory per SM: " << (properties.sharedMemPerMultiprocessor >> 10) << " KiB"       << std::endl;
+    os << "Memory Bus Width: "     << properties.memoryBusWidth << " bits"
+                        << " (ECC " << (properties.ECCEnabled != 0 ? "enabled" : "disabled") << ")" << std::endl;
+    os << "Memory Clock Rate: "    << properties.memoryClockRate / 1000000.0F << " GHz"             << std::endl;
+    // clang-format on
+}
+
+} // namespace sample
+
+#endif // TRT_SAMPLE_DEVICE_H

+ 1205 - 0
src/detection/CenterPoint-master/include/common/sampleEngines.cpp

@@ -0,0 +1,1205 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <fstream>
+#include <iostream>
+#include <iterator>
+#include <map>
+#include <random>
+#include <set>
+#include <string>
+
+#include "NvCaffeParser.h"
+#include "NvInfer.h"
+#include "NvOnnxParser.h"
+#include "NvUffParser.h"
+
+#include "common.h"
+#include "ErrorRecorder.h"
+#include "half.h"
+#include "logger.h"
+#include "sampleEngines.h"
+#include "sampleOptions.h"
+#include "sampleUtils.h"
+
+using namespace nvinfer1;
+
+namespace sample
+{
+
+namespace
+{
+
+struct CaffeBufferShutter
+{
+    ~CaffeBufferShutter()
+    {
+        nvcaffeparser1::shutdownProtobufLibrary();
+    }
+};
+
+struct UffBufferShutter
+{
+    ~UffBufferShutter()
+    {
+        nvuffparser::shutdownProtobufLibrary();
+    }
+};
+
+std::map<std::string, float> readScalesFromCalibrationCache(const std::string& calibrationFile)
+{
+    std::map<std::string, float> tensorScales;
+    std::ifstream cache{calibrationFile};
+    if (!cache.is_open())
+    {
+        sample::gLogError << "[TRT] Can not open provided calibration cache file" << std::endl;
+        return tensorScales;
+    }
+    std::string line;
+    while (std::getline(cache, line))
+    {
+        auto colonPos = line.find_last_of(':');
+        if (colonPos != std::string::npos)
+        {
+            // Scales are stored in the calibration cache as 32-bit floating-point values encoded as 32-bit hex integers
+            int32_t scalesAsInt = std::stoi(line.substr(colonPos + 2, 8), nullptr, 16);
+            const auto tensorName = line.substr(0, colonPos);
+            tensorScales[tensorName] = *reinterpret_cast<float*>(&scalesAsInt);
+        }
+    }
+    cache.close();
+    return tensorScales;
+}
+} // namespace
+
+void setTensorScalesFromCalibration(nvinfer1::INetworkDefinition& network, const std::vector<IOFormat>& inputFormats,
+    const std::vector<IOFormat>& outputFormats, const std::string& calibrationFile)
+{
+    const auto tensorScales = readScalesFromCalibrationCache(calibrationFile);
+    const bool broadcastInputFormats = broadcastIOFormats(inputFormats, network.getNbInputs());
+    for (int32_t i = 0, n = network.getNbInputs(); i < n; ++i)
+    {
+        int32_t formatIdx = broadcastInputFormats ? 0 : i;
+        if (!inputFormats.empty() && inputFormats[formatIdx].first == DataType::kINT8)
+        {
+            auto* input = network.getInput(i);
+            const auto calibScale = tensorScales.at(input->getName());
+            input->setDynamicRange(-127 * calibScale, 127 * calibScale);
+        }
+    }
+    const bool broadcastOutputFormats = broadcastIOFormats(outputFormats, network.getNbOutputs());
+    for (int32_t i = 0, n = network.getNbOutputs(); i < n; ++i)
+    {
+        int32_t formatIdx = broadcastOutputFormats ? 0 : i;
+        if (!outputFormats.empty() && outputFormats[formatIdx].first == DataType::kINT8)
+        {
+            auto* output = network.getOutput(i);
+            const auto calibScale = tensorScales.at(output->getName());
+            output->setDynamicRange(-127 * calibScale, 127 * calibScale);
+        }
+    }
+}
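+
+// Example of the calibration cache entries consumed above (illustrative, the hex
+// value is a placeholder): each line is "<tensorName>: <scale as IEEE-754 bits in hex>", e.g.
+//
+//   input.1: 3c010204
+//
+// The parsed scale s is then applied as a symmetric dynamic range of [-127 * s, 127 * s].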
+
+#define SMP_RETVAL_IF_FALSE(condition, msg, retval, err)                                                               \
+    {                                                                                                                  \
+        if ((condition) == false)                                                                                      \
+        {                                                                                                              \
+            (err) << (msg) << std::endl;                                                                               \
+            return retval;                                                                                             \
+        }                                                                                                              \
+    }
+
+Parser modelToNetwork(const ModelOptions& model, nvinfer1::INetworkDefinition& network, std::ostream& err)
+{
+    sample::gLogInfo << "Start parsing network model" << std::endl;
+    Parser parser;
+    const std::string& modelName = model.baseModel.model;
+    switch (model.baseModel.format)
+    {
+    case ModelFormat::kCAFFE:
+    {
+        using namespace nvcaffeparser1;
+        parser.caffeParser.reset(createCaffeParser());
+        CaffeBufferShutter bufferShutter;
+        const auto* const blobNameToTensor = parser.caffeParser->parse(
+            model.prototxt.c_str(), modelName.empty() ? nullptr : modelName.c_str(), network, DataType::kFLOAT);
+        if (!blobNameToTensor)
+        {
+            err << "Failed to parse caffe model or prototxt, tensors blob not found" << std::endl;
+            parser.caffeParser.reset();
+            break;
+        }
+
+        for (const auto& s : model.outputs)
+        {
+            if (blobNameToTensor->find(s.c_str()) == nullptr)
+            {
+                err << "Could not find output blob " << s << std::endl;
+                parser.caffeParser.reset();
+                break;
+            }
+            network.markOutput(*blobNameToTensor->find(s.c_str()));
+        }
+        break;
+    }
+    case ModelFormat::kUFF:
+    {
+        using namespace nvuffparser;
+        parser.uffParser.reset(createUffParser());
+        UffBufferShutter bufferShutter;
+        for (const auto& s : model.uffInputs.inputs)
+        {
+            if (!parser.uffParser->registerInput(
+                    s.first.c_str(), s.second, model.uffInputs.NHWC ? UffInputOrder::kNHWC : UffInputOrder::kNCHW))
+            {
+                err << "Failed to register input " << s.first << std::endl;
+                parser.uffParser.reset();
+                break;
+            }
+        }
+
+        for (const auto& s : model.outputs)
+        {
+            if (!parser.uffParser->registerOutput(s.c_str()))
+            {
+                err << "Failed to register output " << s << std::endl;
+                parser.uffParser.reset();
+                break;
+            }
+        }
+
+        if (!parser.uffParser->parse(model.baseModel.model.c_str(), network))
+        {
+            err << "Failed to parse uff file" << std::endl;
+            parser.uffParser.reset();
+            break;
+        }
+        break;
+    }
+    case ModelFormat::kONNX:
+    {
+        using namespace nvonnxparser;
+        parser.onnxParser.reset(createParser(network, sample::gLogger.getTRTLogger()));
+        if (!parser.onnxParser->parseFromFile(
+                model.baseModel.model.c_str(), static_cast<int>(sample::gLogger.getReportableSeverity())))
+        {
+            err << "Failed to parse onnx file" << std::endl;
+            parser.onnxParser.reset();
+        }
+        break;
+    }
+    case ModelFormat::kANY: break;
+    }
+
+    sample::gLogInfo << "Finish parsing network model" << std::endl;
+    return parser;
+}
+
+namespace
+{
+
+class RndInt8Calibrator : public nvinfer1::IInt8EntropyCalibrator2
+{
+public:
+    RndInt8Calibrator(int batches, std::vector<int64_t>& elemCount, const std::string& cacheFile,
+        const nvinfer1::INetworkDefinition& network, std::ostream& err);
+
+    ~RndInt8Calibrator()
+    {
+        for (auto& elem : mInputDeviceBuffers)
+        {
+            cudaCheck(cudaFree(elem.second), mErr);
+        }
+    }
+
+    bool getBatch(void* bindings[], const char* names[], int nbBindings) noexcept override;
+
+    int getBatchSize() const noexcept override
+    {
+        return 1;
+    }
+
+    const void* readCalibrationCache(size_t& length) noexcept override;
+
+    virtual void writeCalibrationCache(const void*, size_t) noexcept override {}
+
+private:
+    int mBatches{};
+    int mCurrentBatch{};
+    std::string mCacheFile;
+    std::map<std::string, void*> mInputDeviceBuffers;
+    std::vector<char> mCalibrationCache;
+    std::ostream& mErr;
+};
+
+RndInt8Calibrator::RndInt8Calibrator(int batches, std::vector<int64_t>& elemCount, const std::string& cacheFile,
+    const INetworkDefinition& network, std::ostream& err)
+    : mBatches(batches)
+    , mCurrentBatch(0)
+    , mCacheFile(cacheFile)
+    , mErr(err)
+{
+    std::ifstream tryCache(cacheFile, std::ios::binary);
+    if (tryCache.good())
+    {
+        return;
+    }
+
+    std::default_random_engine generator;
+    std::uniform_real_distribution<float> distribution(-1.0F, 1.0F);
+    auto gen = [&generator, &distribution]() { return distribution(generator); };
+
+    for (int i = 0; i < network.getNbInputs(); i++)
+    {
+        auto* input = network.getInput(i);
+        std::vector<float> rnd_data(elemCount[i]);
+        std::generate_n(rnd_data.begin(), elemCount[i], gen);
+
+        void* data;
+        cudaCheck(cudaMalloc(&data, elemCount[i] * sizeof(float)), mErr);
+        cudaCheck(cudaMemcpy(data, rnd_data.data(), elemCount[i] * sizeof(float), cudaMemcpyHostToDevice), mErr);
+
+        mInputDeviceBuffers.insert(std::make_pair(input->getName(), data));
+    }
+}
+
+bool RndInt8Calibrator::getBatch(void* bindings[], const char* names[], int nbBindings) noexcept
+{
+    if (mCurrentBatch >= mBatches)
+    {
+        return false;
+    }
+
+    for (int i = 0; i < nbBindings; ++i)
+    {
+        bindings[i] = mInputDeviceBuffers[names[i]];
+    }
+
+    ++mCurrentBatch;
+
+    return true;
+}
+
+const void* RndInt8Calibrator::readCalibrationCache(size_t& length) noexcept
+{
+    mCalibrationCache.clear();
+    std::ifstream input(mCacheFile, std::ios::binary);
+    input >> std::noskipws;
+    if (input.good())
+    {
+        std::copy(
+            std::istream_iterator<char>(input), std::istream_iterator<char>(), std::back_inserter(mCalibrationCache));
+    }
+
+    length = mCalibrationCache.size();
+    return !mCalibrationCache.empty() ? mCalibrationCache.data() : nullptr;
+}
+
+bool setTensorDynamicRange(const INetworkDefinition& network, float inRange = 2.0F, float outRange = 4.0F)
+{
+    // Ensure that all layer inputs have a dynamic range.
+    for (int l = 0; l < network.getNbLayers(); l++)
+    {
+        auto* layer = network.getLayer(l);
+        for (int i = 0; i < layer->getNbInputs(); i++)
+        {
+            ITensor* input{layer->getInput(i)};
+            // Optional inputs are nullptr here and are from RNN layers.
+            if (input && !input->dynamicRangeIsSet())
+            {
+                if (!input->setDynamicRange(-inRange, inRange))
+                {
+                    return false;
+                }
+            }
+        }
+        for (int o = 0; o < layer->getNbOutputs(); o++)
+        {
+            ITensor* output{layer->getOutput(o)};
+            // Optional outputs are nullptr here and are from RNN layers.
+            if (output && !output->dynamicRangeIsSet())
+            {
+                // Pooling must have the same input and output dynamic range.
+                if (layer->getType() == LayerType::kPOOLING)
+                {
+                    if (!output->setDynamicRange(-inRange, inRange))
+                    {
+                        return false;
+                    }
+                }
+                else
+                {
+                    if (!output->setDynamicRange(-outRange, outRange))
+                    {
+                        return false;
+                    }
+                }
+            }
+        }
+    }
+    return true;
+}
+
+template <typename T>
+void sparsify(const T* values, int64_t count, int32_t k, int32_t rs, std::vector<char>& sparseWeights)
+{
+    const auto c = count / (k * rs);
+    sparseWeights.resize(count * sizeof(T));
+    auto* sparseValues = reinterpret_cast<T*>(sparseWeights.data());
+
+    constexpr int32_t window = 4;
+    constexpr int32_t nonzeros = 2;
+
+    const int32_t crs = c * rs;
+    const auto getIndex = [=](int32_t ki, int32_t ci, int32_t rsi) { return ki * crs + ci * rs + rsi; };
+
+    for (int64_t ki = 0; ki < k; ++ki)
+    {
+        for (int64_t rsi = 0; rsi < rs; ++rsi)
+        {
+            int32_t w = 0;
+            int32_t nz = 0;
+            for (int64_t ci = 0; ci < c; ++ci)
+            {
+                const auto index = getIndex(ki, ci, rsi);
+                if (nz < nonzeros)
+                {
+                    sparseValues[index] = values[index];
+                    ++nz;
+                }
+                else
+                {
+                    sparseValues[index] = 0;
+                }
+                if (++w == window)
+                {
+                    w = 0;
+                    nz = 0;
+                }
+            }
+        }
+    }
+}
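+
+// Worked example (illustrative only): for one (ki, rsi) pair with c = 8 channel
+// values [a b c d e f g h], the loop above keeps the first two values of every
+// window of four and zeroes the rest, yielding [a b 0 0 e f 0 0], the 2:4
+// structured sparsity pattern that BuilderFlag::kSPARSE_WEIGHTS can exploit.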
+
+void sparsify(const Weights& weights, int32_t k, int32_t rs, std::vector<char>& sparseWeights)
+{
+    switch (weights.type)
+    {
+    case DataType::kFLOAT:
+        sparsify(static_cast<const float*>(weights.values), weights.count, k, rs, sparseWeights);
+        break;
+    case DataType::kHALF:
+        sparsify(static_cast<const half_float::half*>(weights.values), weights.count, k, rs, sparseWeights);
+        break;
+    case DataType::kINT8:
+    case DataType::kINT32:
+    case DataType::kBOOL: break;
+    }
+}
+
+template <typename L>
+void setSparseWeights(L& l, int32_t k, int32_t rs, std::vector<char>& sparseWeights)
+{
+    auto weights = l.getKernelWeights();
+    sparsify(weights, k, rs, sparseWeights);
+    weights.values = sparseWeights.data();
+    l.setKernelWeights(weights);
+}
+
+void sparsify(INetworkDefinition& network, std::vector<std::vector<char>>& sparseWeights)
+{
+    for (int32_t l = 0; l < network.getNbLayers(); ++l)
+    {
+        auto* layer = network.getLayer(l);
+        const auto t = layer->getType();
+        if (t == LayerType::kCONVOLUTION)
+        {
+            auto& conv = *static_cast<IConvolutionLayer*>(layer);
+            const auto& dims = conv.getKernelSizeNd();
+            if (dims.nbDims > 2)
+            {
+                continue;
+            }
+            const auto k = conv.getNbOutputMaps();
+            const auto rs = dims.d[0] * dims.d[1];
+            sparseWeights.emplace_back();
+            setSparseWeights(conv, k, rs, sparseWeights.back());
+        }
+        else if (t == LayerType::kFULLY_CONNECTED)
+        {
+            auto& fc = *static_cast<IFullyConnectedLayer*>(layer);
+            const auto k = fc.getNbOutputChannels();
+            sparseWeights.emplace_back();
+            setSparseWeights(fc, k, 1, sparseWeights.back());
+        }
+    }
+}
+
+} // namespace
+
+bool setupNetworkAndConfig(const BuildOptions& build, const SystemOptions& sys, IBuilder& builder,
+    INetworkDefinition& network, IBuilderConfig& config, std::ostream& err,
+    std::vector<std::vector<char>>& sparseWeights)
+{
+    IOptimizationProfile* profile{nullptr};
+    if (build.maxBatch)
+    {
+        builder.setMaxBatchSize(build.maxBatch);
+    }
+    else
+    {
+        profile = builder.createOptimizationProfile();
+    }
+
+    bool hasDynamicShapes{false};
+
+    bool broadcastInputFormats = broadcastIOFormats(build.inputFormats, network.getNbInputs());
+
+    for (uint32_t i = 0, n = network.getNbInputs(); i < n; i++)
+    {
+        // Set formats and data types of inputs
+        auto* input = network.getInput(i);
+        if (!build.inputFormats.empty())
+        {
+            int inputFormatIndex = broadcastInputFormats ? 0 : i;
+            input->setType(build.inputFormats[inputFormatIndex].first);
+            input->setAllowedFormats(build.inputFormats[inputFormatIndex].second);
+        }
+        else
+        {
+            switch (input->getType())
+            {
+            case DataType::kINT32:
+            case DataType::kBOOL:
+            case DataType::kHALF:
+                // Leave these as is.
+                break;
+            case DataType::kFLOAT:
+            case DataType::kINT8:
+                // User did not specify a floating-point format.  Default to kFLOAT.
+                input->setType(DataType::kFLOAT);
+                break;
+            }
+            input->setAllowedFormats(1U << static_cast<int>(TensorFormat::kLINEAR));
+        }
+
+        if (profile)
+        {
+            Dims dims = input->getDimensions();
+            const bool isScalar = dims.nbDims == 0;
+            const bool isDynamicInput = std::any_of(dims.d, dims.d + dims.nbDims, [](int dim) { return dim == -1; })
+                || input->isShapeTensor();
+            if (isDynamicInput)
+            {
+                hasDynamicShapes = true;
+                auto shape = build.shapes.find(input->getName());
+                ShapeRange shapes{};
+
+                // If no shape is provided, set dynamic dimensions to 1.
+                if (shape == build.shapes.end())
+                {
+                    constexpr int DEFAULT_DIMENSION = 1;
+                    std::vector<int> staticDims;
+                    if (input->isShapeTensor())
+                    {
+                        if (isScalar)
+                        {
+                            staticDims.push_back(1);
+                        }
+                        else
+                        {
+                            staticDims.resize(dims.d[0]);
+                            std::fill(staticDims.begin(), staticDims.end(), DEFAULT_DIMENSION);
+                        }
+                    }
+                    else
+                    {
+                        staticDims.resize(dims.nbDims);
+                        std::transform(dims.d, dims.d + dims.nbDims, staticDims.begin(),
+                            [&](int dimension) { return dimension > 0 ? dimension : DEFAULT_DIMENSION; });
+                    }
+                    sample::gLogWarning << "Dynamic dimensions required for input: " << input->getName()
+                                        << ", but no shapes were provided. Automatically overriding shape to: "
+                                        << staticDims << std::endl;
+                    std::fill(shapes.begin(), shapes.end(), staticDims);
+                }
+                else
+                {
+                    shapes = shape->second;
+                }
+
+                std::vector<int> profileDims{};
+                if (input->isShapeTensor())
+                {
+                    profileDims = shapes[static_cast<size_t>(OptProfileSelector::kMIN)];
+                    SMP_RETVAL_IF_FALSE(profile->setShapeValues(input->getName(), OptProfileSelector::kMIN,
+                                            profileDims.data(), static_cast<int>(profileDims.size())),
+                        "Error in set shape values MIN", false, err);
+                    profileDims = shapes[static_cast<size_t>(OptProfileSelector::kOPT)];
+                    SMP_RETVAL_IF_FALSE(profile->setShapeValues(input->getName(), OptProfileSelector::kOPT,
+                                            profileDims.data(), static_cast<int>(profileDims.size())),
+                        "Error in set shape values OPT", false, err);
+                    profileDims = shapes[static_cast<size_t>(OptProfileSelector::kMAX)];
+                    SMP_RETVAL_IF_FALSE(profile->setShapeValues(input->getName(), OptProfileSelector::kMAX,
+                                            profileDims.data(), static_cast<int>(profileDims.size())),
+                        "Error in set shape values MAX", false, err);
+                }
+                else
+                {
+                    profileDims = shapes[static_cast<size_t>(OptProfileSelector::kMIN)];
+                    SMP_RETVAL_IF_FALSE(
+                        profile->setDimensions(input->getName(), OptProfileSelector::kMIN, toDims(profileDims)),
+                        "Error in set dimensions to profile MIN", false, err);
+                    profileDims = shapes[static_cast<size_t>(OptProfileSelector::kOPT)];
+                    SMP_RETVAL_IF_FALSE(
+                        profile->setDimensions(input->getName(), OptProfileSelector::kOPT, toDims(profileDims)),
+                        "Error in set dimensions to profile OPT", false, err);
+                    profileDims = shapes[static_cast<size_t>(OptProfileSelector::kMAX)];
+                    SMP_RETVAL_IF_FALSE(
+                        profile->setDimensions(input->getName(), OptProfileSelector::kMAX, toDims(profileDims)),
+                        "Error in set dimensions to profile MAX", false, err);
+                }
+            }
+        }
+    }
+
+    if (!hasDynamicShapes && !build.shapes.empty())
+    {
+        sample::gLogError << "Static model does not take explicit shapes since the shape of inference tensors will be "
+                             "determined by the model itself"
+                          << std::endl;
+        return false;
+    }
+
+    if (profile && hasDynamicShapes)
+    {
+        SMP_RETVAL_IF_FALSE(profile->isValid(), "Required optimization profile is invalid", false, err);
+        SMP_RETVAL_IF_FALSE(
+            config.addOptimizationProfile(profile) != -1, "Error in add optimization profile", false, err);
+    }
+
+    bool broadcastOutputFormats = broadcastIOFormats(build.outputFormats, network.getNbOutputs(), false);
+
+    for (uint32_t i = 0, n = network.getNbOutputs(); i < n; i++)
+    {
+        // Set formats and data types of outputs
+        auto* output = network.getOutput(i);
+        if (!build.outputFormats.empty())
+        {
+            int outputFormatIndex = broadcastOutputFormats ? 0 : i;
+            output->setType(build.outputFormats[outputFormatIndex].first);
+            output->setAllowedFormats(build.outputFormats[outputFormatIndex].second);
+        }
+        else
+        {
+            output->setAllowedFormats(1U << static_cast<int>(TensorFormat::kLINEAR));
+        }
+    }
+
+    config.setMaxWorkspaceSize(static_cast<size_t>(build.workspace) << 20);
+
+    if (build.timingCacheMode == TimingCacheMode::kDISABLE)
+    {
+        config.setFlag(BuilderFlag::kDISABLE_TIMING_CACHE);
+    }
+
+    if (!build.tf32)
+    {
+        config.clearFlag(BuilderFlag::kTF32);
+    }
+
+    if (build.refittable)
+    {
+        config.setFlag(BuilderFlag::kREFIT);
+    }
+
+    if (build.sparsity != SparsityFlag::kDISABLE)
+    {
+        config.setFlag(BuilderFlag::kSPARSE_WEIGHTS);
+        if (build.sparsity == SparsityFlag::kFORCE)
+        {
+            sparsify(network, sparseWeights);
+        }
+    }
+
+    config.setProfilingVerbosity(build.nvtxMode);
+    config.setMinTimingIterations(build.minTiming);
+    config.setAvgTimingIterations(build.avgTiming);
+
+    if (build.fp16)
+    {
+        config.setFlag(BuilderFlag::kFP16);
+    }
+
+    if (build.int8)
+    {
+        config.setFlag(BuilderFlag::kINT8);
+    }
+
+    if (build.int8 && !build.fp16)
+    {
+        sample::gLogInfo
+            << "FP32 and INT8 precisions have been specified - more performance might be enabled by additionally "
+               "specifying --fp16 or --best"
+            << std::endl;
+    }
+
+    auto isInt8 = [](const IOFormat& format) { return format.first == DataType::kINT8; };
+    auto int8IO = std::count_if(build.inputFormats.begin(), build.inputFormats.end(), isInt8)
+        + std::count_if(build.outputFormats.begin(), build.outputFormats.end(), isInt8);
+
+    auto hasQDQLayers = [](INetworkDefinition& network) {
+        // Determine if our network has QDQ layers.
+        const auto nbLayers = network.getNbLayers();
+        for (int32_t i = 0; i < nbLayers; i++)
+        {
+            const auto& layer = network.getLayer(i);
+            if (layer->getType() == LayerType::kQUANTIZE || layer->getType() == LayerType::kDEQUANTIZE)
+            {
+                return true;
+            }
+        }
+        return false;
+    };
+
+    if (!hasQDQLayers(network) && (build.int8 || int8IO) && build.calibration.empty())
+    {
+        // Explicitly set int8 scales if no calibrator is provided and if I/O tensors use int8,
+        // because auto calibration does not support this case.
+        SMP_RETVAL_IF_FALSE(setTensorDynamicRange(network), "Error in set tensor dynamic range.", false, err);
+    }
+    else if (build.int8)
+    {
+        if (!hasQDQLayers(network) && int8IO)
+        {
+            try
+            {
+                // Set dynamic ranges of int8 inputs / outputs to match scales loaded from calibration cache
+                setTensorScalesFromCalibration(network, build.inputFormats, build.outputFormats, build.calibration);
+            }
+            catch (std::exception&)
+            {
+                sample::gLogError
+                    << "Int8IO was specified but impossible to read tensor scales from provided calibration cache file"
+                    << std::endl;
+                return false;
+            }
+        }
+        IOptimizationProfile* profileCalib{nullptr};
+        if (!build.shapesCalib.empty())
+        {
+            profileCalib = builder.createOptimizationProfile();
+            for (uint32_t i = 0, n = network.getNbInputs(); i < n; i++)
+            {
+                auto* input = network.getInput(i);
+                Dims profileDims{};
+                auto shape = build.shapesCalib.find(input->getName());
+                ShapeRange shapesCalib{};
+                shapesCalib = shape->second;
+
+                profileDims = toDims(shapesCalib[static_cast<size_t>(OptProfileSelector::kOPT)]);
+                // Here we check only kMIN as all profileDims are the same.
+                SMP_RETVAL_IF_FALSE(
+                    profileCalib->setDimensions(input->getName(), OptProfileSelector::kMIN, profileDims),
+                    "Error in set dimensions to calibration profile OPT", false, err);
+                profileCalib->setDimensions(input->getName(), OptProfileSelector::kOPT, profileDims);
+                profileCalib->setDimensions(input->getName(), OptProfileSelector::kMAX, profileDims);
+            }
+            SMP_RETVAL_IF_FALSE(profileCalib->isValid(), "Calibration profile is invalid", false, err);
+            SMP_RETVAL_IF_FALSE(
+                config.setCalibrationProfile(profileCalib), "Error in set calibration profile", false, err);
+        }
+
+        std::vector<int64_t> elemCount{};
+        for (int i = 0; i < network.getNbInputs(); i++)
+        {
+            auto* input = network.getInput(i);
+            if (profileCalib)
+            {
+                elemCount.push_back(volume(profileCalib->getDimensions(input->getName(), OptProfileSelector::kOPT)));
+            }
+            else if (profile && hasDynamicShapes)
+            {
+                elemCount.push_back(volume(profile->getDimensions(input->getName(), OptProfileSelector::kOPT)));
+            }
+            else
+            {
+                elemCount.push_back(volume(input->getDimensions()));
+            }
+        }
+
+        config.setInt8Calibrator(new RndInt8Calibrator(1, elemCount, build.calibration, network, err));
+    }
+
+    if (build.safe)
+    {
+        config.setEngineCapability(sys.DLACore != -1 ? EngineCapability::kSAFE_DLA : EngineCapability::kSAFE_GPU);
+    }
+
+    if (sys.DLACore != -1)
+    {
+        if (sys.DLACore < builder.getNbDLACores())
+        {
+            config.setDefaultDeviceType(DeviceType::kDLA);
+            config.setDLACore(sys.DLACore);
+            config.setFlag(BuilderFlag::kSTRICT_TYPES);
+
+            if (sys.fallback)
+            {
+                config.setFlag(BuilderFlag::kGPU_FALLBACK);
+            }
+            if (!build.int8)
+            {
+                config.setFlag(BuilderFlag::kFP16);
+            }
+        }
+        else
+        {
+            err << "Cannot create DLA engine, " << sys.DLACore << " not available" << std::endl;
+            return false;
+        }
+    }
+
+    if (build.enabledTactics || build.disabledTactics)
+    {
+        TacticSources tacticSources = config.getTacticSources();
+        tacticSources |= build.enabledTactics;
+        tacticSources &= ~build.disabledTactics;
+        config.setTacticSources(tacticSources);
+    }
+
+    return true;
+}
+
+//!
+//! \brief Create an engine for a network definition
+//!
+//! \return Pointer to the engine created or nullptr if the creation failed
+//!
+TrtUniquePtr<ICudaEngine> networkToEngine(const BuildOptions& build, const SystemOptions& sys, IBuilder& builder,
+    INetworkDefinition& network, std::ostream& err)
+{
+    TrtUniquePtr<IBuilderConfig> config{builder.createBuilderConfig()};
+    TrtUniquePtr<IRuntime> runtime{createInferRuntime(sample::gLogger.getTRTLogger())};
+    std::vector<std::vector<char>> sparseWeights;
+    SMP_RETVAL_IF_FALSE(config != nullptr, "Config creation failed", nullptr, err);
+    SMP_RETVAL_IF_FALSE(runtime != nullptr, "Runtime creation failed", nullptr, err);
+    SMP_RETVAL_IF_FALSE(setupNetworkAndConfig(build, sys, builder, network, *config, err, sparseWeights),
+        "Network And Config setup failed", nullptr, err);
+    runtime->setErrorRecorder(&gRecorder);
+
+    std::unique_ptr<ITimingCache> timingCache{nullptr};
+    // Try to load cache from file. Create a fresh cache if the file doesn't exist
+    if (build.timingCacheMode == TimingCacheMode::kGLOBAL)
+    {
+        std::vector<char> loadedCache = loadTimingCacheFile(build.timingCacheFile);
+        timingCache.reset(config->createTimingCache(static_cast<const void*>(loadedCache.data()), loadedCache.size()));
+        SMP_RETVAL_IF_FALSE(timingCache != nullptr, "TimingCache creation failed", nullptr, err);
+        config->setTimingCache(*timingCache, false);
+    }
+
+    // CUDA stream used for profiling by the builder.
+    auto profileStream = samplesCommon::makeCudaStream();
+    SMP_RETVAL_IF_FALSE(profileStream != nullptr, "Cuda stream creation failed", nullptr, err);
+    config->setProfileStream(*profileStream);
+
+    TrtUniquePtr<IHostMemory> plan{builder.buildSerializedNetwork(network, *config)};
+    SMP_RETVAL_IF_FALSE(plan != nullptr, "Engine building failed: no serialized plan was produced", nullptr, err);
+    ICudaEngine* engine{runtime->deserializeCudaEngine(plan->data(), plan->size())};
+    SMP_RETVAL_IF_FALSE(engine != nullptr, "Engine creation failed", nullptr, err);
+    if (build.timingCacheMode == TimingCacheMode::kGLOBAL)
+    {
+        auto timingCache = config->getTimingCache();
+        std::unique_ptr<IHostMemory> timingCacheHostData{timingCache->serialize()};
+        SMP_RETVAL_IF_FALSE(timingCacheHostData != nullptr, "Timing Cache serialization failed", nullptr, err);
+        saveTimingCacheFile(build.timingCacheFile, timingCacheHostData.get());
+    }
+    if (config->getInt8Calibrator())
+    {
+        delete config->getInt8Calibrator();
+    }
+    return TrtUniquePtr<ICudaEngine>(engine);
+}
+
+//!
+//! \brief Parse a given model, create a network and an engine.
+//!
+std::tuple<TrtUniquePtr<nvinfer1::ICudaEngine>, TrtUniquePtr<INetworkDefinition>, Parser> modelToEngineNetworkParserTuple(
+    const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err)
+{
+    TrtUniquePtr<IBuilder> builder{createInferBuilder(sample::gLogger.getTRTLogger())};
+    if (builder == nullptr)
+    {
+        err << "Builder creation failed" << std::endl;
+        return {};
+    }
+    builder->setErrorRecorder(&gRecorder);
+    auto networkFlags
+        = (build.maxBatch) ? 0U : 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
+    if (build.explicitPrecision)
+    {
+        networkFlags |= 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_PRECISION);
+    }
+
+    TrtUniquePtr<INetworkDefinition> network{builder->createNetworkV2(networkFlags)};
+    if (!network)
+    {
+        err << "Network creation failed" << std::endl;
+        return {};
+    }
+    Parser parser = modelToNetwork(model, *network, err);
+    if (!parser)
+    {
+        err << "Parsing model failed" << std::endl;
+        return {};
+    }
+
+    auto engine = networkToEngine(build, sys, *builder, *network, err);
+    return std::make_tuple(std::move(engine), std::move(network), std::move(parser));
+}
+
+namespace
+{
+std::pair<std::vector<std::string>, std::vector<WeightsRole>> getLayerWeightsRolePair(IRefitter& refitter)
+{
+    // Get number of refittable items.
+    auto const nbAll = refitter.getAll(0, nullptr, nullptr);
+    std::vector<char const*> layerNames(nbAll);
+    // Allocate buffers for the items and get them.
+    std::vector<nvinfer1::WeightsRole> weightsRoles(nbAll);
+    refitter.getAll(nbAll, layerNames.data(), weightsRoles.data());
+    std::vector<std::string> layerNameStrs(nbAll);
+    std::transform(layerNames.begin(), layerNames.end(), layerNameStrs.begin(), [](char const* name) {
+        if (name == nullptr)
+        {
+            return std::string{};
+        }
+        return std::string{name};
+    });
+    return {layerNameStrs, weightsRoles};
+}
+
+std::pair<std::vector<std::string>, std::vector<WeightsRole>> getMissingLayerWeightsRolePair(IRefitter& refitter)
+{
+    // Get number of refittable items.
+    auto const nbMissing = refitter.getMissing(0, nullptr, nullptr);
+    std::vector<const char*> layerNames(nbMissing);
+    // Allocate buffers for the items and get them.
+    std::vector<nvinfer1::WeightsRole> weightsRoles(nbMissing);
+    refitter.getMissing(nbMissing, layerNames.data(), weightsRoles.data());
+    std::vector<std::string> layerNameStrs(nbMissing);
+    std::transform(layerNames.begin(), layerNames.end(), layerNameStrs.begin(), [](char const* name) {
+        if (name == nullptr)
+        {
+            return std::string{};
+        }
+        return std::string{name};
+    });
+    return {layerNameStrs, weightsRoles};
+}
+} // namespace
+
+void dumpRefittable(nvinfer1::ICudaEngine& engine)
+{
+    TrtUniquePtr<IRefitter> refitter{createInferRefitter(engine, sample::gLogger.getTRTLogger())};
+    if (refitter == nullptr)
+    {
+        sample::gLogError << "Failed to create a refitter." << std::endl;
+        return;
+    }
+    auto const& layerWeightsRolePair = getLayerWeightsRolePair(*refitter);
+
+    auto const& layerNames = layerWeightsRolePair.first;
+    auto const& weightsRoles = layerWeightsRolePair.second;
+    auto const nbAll = layerWeightsRolePair.first.size();
+    for (size_t i = 0; i < nbAll; ++i)
+    {
+        sample::gLogInfo << layerNames[i] << " " << weightsRoles[i] << std::endl;
+    }
+}
+
+ICudaEngine* loadEngine(const std::string& engine, int DLACore, std::ostream& err)
+{
+    std::ifstream engineFile(engine, std::ios::binary);
+    if (!engineFile)
+    {
+        err << "Error opening engine file: " << engine << std::endl;
+        return nullptr;
+    }
+
+    engineFile.seekg(0, std::ifstream::end);
+    long int fsize = engineFile.tellg();
+    engineFile.seekg(0, std::ifstream::beg);
+
+    std::vector<char> engineData(fsize);
+    engineFile.read(engineData.data(), fsize);
+    if (!engineFile)
+    {
+        err << "Error loading engine file: " << engine << std::endl;
+        return nullptr;
+    }
+
+    TrtUniquePtr<IRuntime> runtime{createInferRuntime(sample::gLogger.getTRTLogger())};
+    if (DLACore != -1)
+    {
+        runtime->setDLACore(DLACore);
+    }
+    runtime->setErrorRecorder(&gRecorder);
+
+    return runtime->deserializeCudaEngine(engineData.data(), fsize, nullptr);
+}
+
+bool saveEngine(const ICudaEngine& engine, const std::string& fileName, std::ostream& err)
+{
+    std::ofstream engineFile(fileName, std::ios::binary);
+    if (!engineFile)
+    {
+        err << "Cannot open engine file: " << fileName << std::endl;
+        return false;
+    }
+
+    TrtUniquePtr<IHostMemory> serializedEngine{engine.serialize()};
+    if (serializedEngine == nullptr)
+    {
+        err << "Engine serialization failed" << std::endl;
+        return false;
+    }
+
+    engineFile.write(static_cast<char*>(serializedEngine->data()), serializedEngine->size());
+    return !engineFile.fail();
+}
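+
+// Usage sketch (illustrative only, paths are placeholders): deserialize an engine
+// from disk and immediately write it back out under a different name.
+inline bool exampleReserializeEngine(const std::string& inputPath, const std::string& outputPath, std::ostream& err)
+{
+    TrtUniquePtr<ICudaEngine> engine{loadEngine(inputPath, /* DLACore */ -1, err)};
+    return engine != nullptr && saveEngine(*engine, outputPath, err);
+}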
+
+std::tuple<TrtUniquePtr<nvinfer1::ICudaEngine>, TrtUniquePtr<INetworkDefinition>, Parser> getEngineNetworkParserTuple(
+    const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err)
+{
+    TrtUniquePtr<nvinfer1::ICudaEngine> engine;
+    TrtUniquePtr<INetworkDefinition> network;
+    Parser parser;
+    if (build.load)
+    {
+        engine.reset(loadEngine(build.engine, sys.DLACore, err));
+    }
+    else
+    {
+        std::tie(engine, network, parser) = modelToEngineNetworkParserTuple(model, build, sys, err);
+    }
+    if (!engine)
+    {
+        err << "Engine creation failed" << std::endl;
+        return {};
+    }
+    if (build.save && !saveEngine(*engine, build.engine, err))
+    {
+        err << "Saving engine to file failed" << std::endl;
+        return {};
+    }
+    return std::make_tuple(std::move(engine), std::move(network), std::move(parser));
+}
+
+IHostMemory* networkToSerialized(const BuildOptions& build, const SystemOptions& sys, IBuilder& builder,
+    INetworkDefinition& network, std::ostream& err)
+{
+    TrtUniquePtr<IBuilderConfig> config{builder.createBuilderConfig()};
+    std::vector<std::vector<char>> sparseWeights;
+    SMP_RETVAL_IF_FALSE(config != nullptr, "Config creation failed", nullptr, err);
+    SMP_RETVAL_IF_FALSE(setupNetworkAndConfig(build, sys, builder, network, *config, err, sparseWeights),
+        "Network And Config setup failed", nullptr, err);
+    return builder.buildSerializedNetwork(network, *config);
+}
+
+IHostMemory* modelToSerialized(
+    const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err)
+{
+    TrtUniquePtr<IBuilder> builder{createInferBuilder(sample::gLogger.getTRTLogger())};
+    SMP_RETVAL_IF_FALSE(builder != nullptr, "Builder creation failed", nullptr, err);
+    builder->setErrorRecorder(&gRecorder);
+
+    auto networkFlags
+        = (build.maxBatch) ? 0U : 1U << static_cast<uint32_t>(nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
+    if (build.explicitPrecision)
+    {
+        networkFlags |= 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_PRECISION);
+    }
+
+    TrtUniquePtr<INetworkDefinition> network{builder->createNetworkV2(networkFlags)};
+    SMP_RETVAL_IF_FALSE(network != nullptr, "Network creation failed", nullptr, err);
+
+    Parser parser = modelToNetwork(model, *network, err);
+    SMP_RETVAL_IF_FALSE(parser, "Parsing model failed", nullptr, err);
+
+    return networkToSerialized(build, sys, *builder, *network, err);
+}
+
+bool serializeAndSave(const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err)
+{
+    TrtUniquePtr<IHostMemory> serialized{modelToSerialized(model, build, sys, err)};
+    SMP_RETVAL_IF_FALSE(serialized != nullptr, "Network serialization failed", false, err);
+
+    std::ofstream engineFile(build.engine, std::ios::binary);
+    SMP_RETVAL_IF_FALSE(!!engineFile, "Cannot open a file to save the serialized network", false, err);
+    engineFile.write(static_cast<char*>(serialized->data()), serialized->size());
+    return !engineFile.fail();
+}
+
+// There is no getWeightsName() API, so we need to use WeightsRole.
+std::vector<std::pair<WeightsRole, Weights>> getAllRefitWeightsForLayer(const ILayer& l)
+{
+    switch (l.getType())
+    {
+    case LayerType::kCONSTANT:
+    {
+        const auto& layer = static_cast<const nvinfer1::IConstantLayer&>(l);
+        return {std::make_pair(WeightsRole::kCONSTANT, layer.getWeights())};
+    }
+    case LayerType::kCONVOLUTION:
+    {
+        const auto& layer = static_cast<const nvinfer1::IConvolutionLayer&>(l);
+        return {std::make_pair(WeightsRole::kKERNEL, layer.getKernelWeights()),
+            std::make_pair(WeightsRole::kBIAS, layer.getBiasWeights())};
+    }
+    case LayerType::kDECONVOLUTION:
+    {
+        const auto& layer = static_cast<const nvinfer1::IDeconvolutionLayer&>(l);
+        return {std::make_pair(WeightsRole::kKERNEL, layer.getKernelWeights()),
+            std::make_pair(WeightsRole::kBIAS, layer.getBiasWeights())};
+    }
+    case LayerType::kFULLY_CONNECTED:
+    {
+        const auto& layer = static_cast<const nvinfer1::IFullyConnectedLayer&>(l);
+        return {std::make_pair(WeightsRole::kKERNEL, layer.getKernelWeights()),
+            std::make_pair(WeightsRole::kBIAS, layer.getBiasWeights())};
+    }
+    case LayerType::kSCALE:
+    {
+        const auto& layer = static_cast<const nvinfer1::IScaleLayer&>(l);
+        return {std::make_pair(WeightsRole::kSCALE, layer.getScale()),
+            std::make_pair(WeightsRole::kSHIFT, layer.getShift())};
+    }
+    case LayerType::kRNN_V2:
+    case LayerType::kACTIVATION:
+    case LayerType::kPOOLING:
+    case LayerType::kLRN:
+    case LayerType::kSOFTMAX:
+    case LayerType::kSHUFFLE:
+    case LayerType::kCONCATENATION:
+    case LayerType::kELEMENTWISE:
+    case LayerType::kPLUGIN:
+    case LayerType::kUNARY:
+    case LayerType::kPADDING:
+    case LayerType::kREDUCE:
+    case LayerType::kTOPK:
+    case LayerType::kGATHER:
+    case LayerType::kMATRIX_MULTIPLY:
+    case LayerType::kRAGGED_SOFTMAX:
+    case LayerType::kIDENTITY:
+    case LayerType::kPLUGIN_V2:
+    case LayerType::kSLICE:
+    case LayerType::kFILL:
+    case LayerType::kSHAPE:
+    case LayerType::kPARAMETRIC_RELU:
+    case LayerType::kRESIZE:
+    case LayerType::kTRIP_LIMIT:
+    case LayerType::kRECURRENCE:
+    case LayerType::kITERATOR:
+    case LayerType::kLOOP_OUTPUT:
+    case LayerType::kSELECT:
+    case LayerType::kQUANTIZE:
+    case LayerType::kDEQUANTIZE: return {};
+    }
+    return {};
+}
+
+bool timeRefit(INetworkDefinition const& network, nvinfer1::ICudaEngine& engine)
+{
+    using time_point = std::chrono::time_point<std::chrono::steady_clock>;
+    using durationMs = std::chrono::duration<float, std::milli>;
+
+    auto const nbLayers = network.getNbLayers();
+    TrtUniquePtr<IRefitter> refitter{createInferRefitter(engine, sample::gLogger.getTRTLogger())};
+    auto const& layerWeightsRolePair = getLayerWeightsRolePair(*refitter);
+    // We use std::string instead of const char* since we can have copies of layer names.
+    std::set<std::pair<std::string, WeightsRole>> layerRoleSet;
+
+    auto const& layerNames = layerWeightsRolePair.first;
+    auto const& weightsRoles = layerWeightsRolePair.second;
+
+    std::transform(layerNames.begin(), layerNames.end(), weightsRoles.begin(),
+        std::inserter(layerRoleSet, layerRoleSet.begin()),
+        [](std::string const& layerName, WeightsRole const role) { return std::make_pair(layerName, role); });
+
+    auto const isRefittable = [&layerRoleSet](char const* layerName, WeightsRole const role) {
+        return layerRoleSet.find(std::make_pair(layerName, role)) != layerRoleSet.end();
+    };
+
+    auto const setWeights = [&] {
+        for (int32_t i = 0; i < nbLayers; i++)
+        {
+            auto const layer = network.getLayer(i);
+            auto const roleWeightsVec = getAllRefitWeightsForLayer(*layer);
+            for (auto const& roleWeights : roleWeightsVec)
+            {
+                if (isRefittable(layer->getName(), roleWeights.first))
+                {
+                    bool const success = refitter->setWeights(layer->getName(), roleWeights.first, roleWeights.second);
+                    if (!success)
+                    {
+                        return false;
+                    }
+                }
+            }
+        }
+        return true;
+    };
+
+    auto const reportMissingWeights = [&] {
+        auto const& missingPair = getMissingLayerWeightsRolePair(*refitter);
+        auto const& layerNames = missingPair.first;
+        auto const& weightsRoles = missingPair.second;
+        for (size_t i = 0; i < layerNames.size(); ++i)
+        {
+            sample::gLogError << "Missing (" << layerNames[i] << ", " << weightsRoles[i] << ") for refitting."
+                              << std::endl;
+        }
+        return layerNames.empty();
+    };
+
+    // Warm up and report missing weights
+    bool const success = setWeights() && reportMissingWeights() && refitter->refitCudaEngine();
+    if (!success)
+    {
+        return false;
+    }
+
+    constexpr int32_t loop = 10;
+    time_point const refitStartTime{std::chrono::steady_clock::now()};
+    {
+        for (int32_t l = 0; l < loop; l++)
+        {
+            bool const success = setWeights() && refitter->refitCudaEngine();
+            if (!success)
+            {
+                return false;
+            }
+        }
+    }
+    time_point const refitEndTime{std::chrono::steady_clock::now()};
+
+    sample::gLogInfo << "Engine refitted"
+        << " in " << durationMs(refitEndTime - refitStartTime).count() / loop << " ms." << std::endl;
+    return true;
+}
+
+} // namespace sample

+ 136 - 0
src/detection/CenterPoint-master/include/common/sampleEngines.h

@@ -0,0 +1,136 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TRT_SAMPLE_ENGINES_H
+#define TRT_SAMPLE_ENGINES_H
+
+#include <iostream>
+#include <vector>
+
+#include "NvCaffeParser.h"
+#include "NvInfer.h"
+#include "NvOnnxParser.h"
+#include "NvUffParser.h"
+
+#include "sampleOptions.h"
+#include "sampleUtils.h"
+
+namespace sample
+{
+
+struct Parser
+{
+    TrtUniquePtr<nvcaffeparser1::ICaffeParser> caffeParser;
+    TrtUniquePtr<nvuffparser::IUffParser> uffParser;
+    TrtUniquePtr<nvonnxparser::IParser> onnxParser;
+
+    operator bool() const
+    {
+        return caffeParser || uffParser || onnxParser;
+    }
+};
+
+//!
+//! \brief Generate a network definition for a given model
+//!
+//! \return Parser The parser used to initialize the network and that holds the weights for the network, or an invalid
+//! parser (the returned parser converts to false if tested)
+//!
+//! \see Parser::operator bool()
+//!
+Parser modelToNetwork(const ModelOptions& model, nvinfer1::INetworkDefinition& network, std::ostream& err);
+
+//!
+//! \brief Set up network and config
+//!
+//! \return boolean Return true if network and config were successfully set
+//!
+bool setupNetworkAndConfig(const BuildOptions& build, const SystemOptions& sys, IBuilder& builder,
+    INetworkDefinition& network, IBuilderConfig& config, std::ostream& err,
+    std::vector<std::vector<char>>& sparseWeights);
+
+//!
+//! \brief Log refittable layers and weights of a refittable engine
+//!
+void dumpRefittable(nvinfer1::ICudaEngine& engine);
+
+//!
+//! \brief Load a serialized engine
+//!
+//! \return Pointer to the engine loaded or nullptr if the operation failed
+//!
+nvinfer1::ICudaEngine* loadEngine(const std::string& engine, int DLACore, std::ostream& err);
+
+//!
+//! \brief Save an engine into a file
+//!
+//! \return boolean Return true if the engine was successfully saved
+//!
+bool saveEngine(const nvinfer1::ICudaEngine& engine, const std::string& fileName, std::ostream& err);
+
+//!
+//! \brief Create an engine from model or serialized file, and optionally save engine
+//!
+//! \return Pointer to the engine created or nullptr if the creation failed
+//!
+std::tuple<TrtUniquePtr<nvinfer1::ICudaEngine>, TrtUniquePtr<INetworkDefinition>, Parser> getEngineNetworkParserTuple(
+    const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err);
+
+//!
+//! \brief Create an engine from model or serialized file, and optionally save engine
+//!
+//! \return Pointer to the engine created or nullptr if the creation failed
+//!
+inline TrtUniquePtr<nvinfer1::ICudaEngine> getEngine(
+    const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err)
+{
+    return std::get<0>(getEngineNetworkParserTuple(model, build, sys, err));
+}
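+
+// Usage sketch (illustrative only, assuming the option structs were already
+// populated from the command line):
+//
+//   auto engine = sample::getEngine(modelOptions, buildOptions, systemOptions, std::cerr);
+//   if (!engine)
+//   {
+//       // build or load failed; the error stream already holds the reason
+//   }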
+
+//!
+//! \brief Create a serialized network
+//!
+//! \return Pointer to a host memory for a serialized network
+//!
+IHostMemory* networkToSerialized(const BuildOptions& build, const SystemOptions& sys, IBuilder& builder,
+    INetworkDefinition& network, std::ostream& err);
+
+//!
+//! \brief Transform a model into a serialized network
+//!
+//! \return Pointer to a host memory for a serialized network
+//!
+IHostMemory* modelToSerialized(
+    const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err);
+
+//!
+//! \brief Serialize network and save it into a file
+//!
+//! \return boolean Return true if the network was successfully serialized and saved
+//!
+bool serializeAndSave(const ModelOptions& model, const BuildOptions& build, const SystemOptions& sys, std::ostream& err);
+
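+//!
+//! \brief Set refittable weights from the network, refit the engine, and log the average refit time
+//!
+//! \return boolean Return true if refitting succeeded
+//!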
+bool timeRefit(const INetworkDefinition& network, nvinfer1::ICudaEngine& engine);
+
+//!
+//! \brief Set tensor scales from a calibration table
+//!
+void setTensorScalesFromCalibration(nvinfer1::INetworkDefinition& network, const std::vector<IOFormat>& inputFormats,
+        const std::vector<IOFormat>& outputFormats, const std::string& calibrationFile);
+
+} // namespace sample
+
+#endif // TRT_SAMPLE_ENGINES_H

+ 702 - 0
src/detection/CenterPoint-master/include/common/sampleInference.cpp

@@ -0,0 +1,702 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <array>
+#include <chrono>
+#include <functional>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <numeric>
+#include <thread>
+#include <utility>
+#include <vector>
+
+#include "NvInfer.h"
+
+#include "logger.h"
+#include "sampleDevice.h"
+#include "sampleInference.h"
+#include "sampleOptions.h"
+#include "sampleReporting.h"
+#include "sampleUtils.h"
+
+namespace sample
+{
+
+bool setUpInference(InferenceEnvironment& iEnv, const InferenceOptions& inference)
+{
+    for (int s = 0; s < inference.streams; ++s)
+    {
+        iEnv.context.emplace_back(iEnv.engine->createExecutionContext());
+        iEnv.bindings.emplace_back(new Bindings);
+    }
+    if (iEnv.profiler)
+    {
+        iEnv.context.front()->setProfiler(iEnv.profiler.get());
+    }
+
+    const int nOptProfiles = iEnv.engine->getNbOptimizationProfiles();
+    const int nBindings = iEnv.engine->getNbBindings();
+    const int bindingsInProfile = nOptProfiles > 0 ? nBindings / nOptProfiles : 0;
+    const int endBindingIndex = bindingsInProfile ? bindingsInProfile : iEnv.engine->getNbBindings();
+
+    if (nOptProfiles > 1)
+    {
+        sample::gLogWarning << "Multiple profiles are currently not supported. Running with one profile." << std::endl;
+    }
+
+    // Set all input dimensions before all bindings can be allocated
+    for (int b = 0; b < endBindingIndex; ++b)
+    {
+        if (iEnv.engine->bindingIsInput(b))
+        {
+            auto dims = iEnv.context.front()->getBindingDimensions(b);
+            const bool isScalar = dims.nbDims == 0;
+            const bool isDynamicInput = std::any_of(dims.d, dims.d + dims.nbDims, [](int dim) { return dim == -1; })
+                || iEnv.engine->isShapeBinding(b);
+            if (isDynamicInput)
+            {
+                auto shape = inference.shapes.find(iEnv.engine->getBindingName(b));
+
+                // If no shape is provided, set dynamic dimensions to 1.
+                std::vector<int> staticDims;
+                if (shape == inference.shapes.end())
+                {
+                    constexpr int DEFAULT_DIMENSION = 1;
+                    if (iEnv.engine->isShapeBinding(b))
+                    {
+                        if (isScalar)
+                        {
+                            staticDims.push_back(1);
+                        }
+                        else
+                        {
+                            staticDims.resize(dims.d[0]);
+                            std::fill(staticDims.begin(), staticDims.end(), DEFAULT_DIMENSION);
+                        }
+                    }
+                    else
+                    {
+                        staticDims.resize(dims.nbDims);
+                        std::transform(dims.d, dims.d + dims.nbDims, staticDims.begin(),
+                            [&](int dimension) { return dimension >= 0 ? dimension : DEFAULT_DIMENSION; });
+                    }
+                    sample::gLogWarning << "Dynamic dimensions required for input: " << iEnv.engine->getBindingName(b)
+                                        << ", but no shapes were provided. Automatically overriding shape to: "
+                                        << staticDims << std::endl;
+                }
+                else
+                {
+                    staticDims = shape->second;
+                }
+
+                for (auto& c : iEnv.context)
+                {
+                    if (iEnv.engine->isShapeBinding(b))
+                    {
+                        if (!c->setInputShapeBinding(b, staticDims.data()))
+                        {
+                            return false;
+                        }
+                    }
+                    else
+                    {
+                        if (!c->setBindingDimensions(b, toDims(staticDims)))
+                        {
+                            return false;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
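+    // With all input shapes resolved, compute each binding's volume and set up per-stream
+    // buffers, filling input bindings from the files given via --loadInputs when available.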
+    for (int b = 0; b < endBindingIndex; ++b)
+    {
+        const auto dims = iEnv.context.front()->getBindingDimensions(b);
+        const auto vecDim = iEnv.engine->getBindingVectorizedDim(b);
+        const auto comps = iEnv.engine->getBindingComponentsPerElement(b);
+        const auto dataType = iEnv.engine->getBindingDataType(b);
+        const auto strides = iEnv.context.front()->getStrides(b);
+        const int batch = iEnv.engine->hasImplicitBatchDimension() ? inference.batch : 1;
+        const auto vol = volume(dims, strides, vecDim, comps, batch);
+        const auto name = iEnv.engine->getBindingName(b);
+        const auto isInput = iEnv.engine->bindingIsInput(b);
+        for (auto& bindings : iEnv.bindings)
+        {
+            const auto input = inference.inputs.find(name);
+            if (isInput && input != inference.inputs.end())
+            {
+                bindings->addBinding(b, name, isInput, vol, dataType, input->second);
+            }
+            else
+            {
+                bindings->addBinding(b, name, isInput, vol, dataType);
+            }
+
+            if (isInput)
+            {
+                sample::gLogInfo << "Created input binding for " << name << " with dimensions " << dims << std::endl;
+            }
+            else
+            {
+                sample::gLogInfo << "Created output binding for " << name << " with dimensions " << dims << std::endl;
+            }
+        }
+    }
+
+    return true;
+}
+
+namespace
+{
+
+using TimePoint = std::chrono::time_point<std::chrono::high_resolution_clock>;
+
+//!
+//! \struct SyncStruct
+//! \brief Thread synchronization structure
+//!
+struct SyncStruct
+{
+    std::mutex mutex;
+    TrtCudaStream mainStream;
+    TrtCudaEvent gpuStart{cudaEventBlockingSync};
+    TimePoint cpuStart{};
+    int sleep{0};
+};
+
+struct Enqueue
+{
+    explicit Enqueue(nvinfer1::IExecutionContext& context, void** buffers)
+        : mContext(context)
+        , mBuffers(buffers)
+    {
+    }
+
+    nvinfer1::IExecutionContext& mContext;
+    void** mBuffers{};
+};
+
+//!
+//! \class EnqueueImplicit
+//! \brief Functor to enqueue inference with implicit batch
+//!
+class EnqueueImplicit : private Enqueue
+{
+
+public:
+    explicit EnqueueImplicit(nvinfer1::IExecutionContext& context, void** buffers, int batch)
+        : Enqueue(context, buffers)
+        , mBatch(batch)
+    {
+    }
+
+    bool operator()(TrtCudaStream& stream) const
+    {
+        return mContext.enqueue(mBatch, mBuffers, stream.get(), nullptr);
+    }
+
+private:
+    int mBatch;
+};
+
+//!
+//! \class EnqueueExplicit
+//! \brief Functor to enqueue inference with explicit batch
+//!
+class EnqueueExplicit : private Enqueue
+{
+
+public:
+    explicit EnqueueExplicit(nvinfer1::IExecutionContext& context, void** buffers)
+        : Enqueue(context, buffers)
+    {
+    }
+
+    bool operator()(TrtCudaStream& stream) const
+    {
+        return mContext.enqueueV2(mBuffers, stream.get(), nullptr);
+    }
+};
+
+//!
+//! \class EnqueueGraph
+//! \brief Functor to enqueue inference from CUDA Graph
+//!
+class EnqueueGraph
+{
+
+public:
+    explicit EnqueueGraph(TrtCudaGraph& graph)
+        : mGraph(graph)
+    {
+    }
+
+    bool operator()(TrtCudaStream& stream) const
+    {
+        return mGraph.launch(stream);
+    }
+
+    TrtCudaGraph& mGraph;
+};
+
+using EnqueueFunction = std::function<bool(TrtCudaStream&)>;
+
+enum class StreamType : int
+{
+    kINPUT = 0,
+    kCOMPUTE = 1,
+    kOUTPUT = 2,
+    kNUM = 3
+};
+
+enum class EventType : int
+{
+    kINPUT_S = 0,
+    kINPUT_E = 1,
+    kCOMPUTE_S = 2,
+    kCOMPUTE_E = 3,
+    kOUTPUT_S = 4,
+    kOUTPUT_E = 5,
+    kNUM = 6
+};
+
+using MultiStream = std::array<TrtCudaStream, static_cast<int>(StreamType::kNUM)>;
+
+using MultiEvent = std::array<std::unique_ptr<TrtCudaEvent>, static_cast<int>(EventType::kNUM)>;
+
+using EnqueueTimes = std::array<TimePoint, 2>;
+
+//!
+//! \class Iteration
+//! \brief Inference iteration and stream management
+//!
+class Iteration
+{
+
+public:
+    Iteration(int id, const InferenceOptions& inference, nvinfer1::IExecutionContext& context, Bindings& bindings)
+        : mBindings(bindings)
+        , mStreamId(id)
+        , mDepth(1 + inference.overlap)
+        , mActive(mDepth)
+        , mEvents(mDepth)
+        , mEnqueueTimes(mDepth)
+    {
+        for (int d = 0; d < mDepth; ++d)
+        {
+            for (int e = 0; e < static_cast<int>(EventType::kNUM); ++e)
+            {
+                mEvents[d][e].reset(new TrtCudaEvent(!inference.spin));
+            }
+        }
+        createEnqueueFunction(inference, context, bindings);
+    }
+
+    bool query(bool skipTransfers)
+    {
+        if (mActive[mNext])
+        {
+            return true;
+        }
+
+        if (!skipTransfers)
+        {
+            record(EventType::kINPUT_S, StreamType::kINPUT);
+            mBindings.transferInputToDevice(getStream(StreamType::kINPUT));
+            record(EventType::kINPUT_E, StreamType::kINPUT);
+            wait(EventType::kINPUT_E, StreamType::kCOMPUTE); // Wait for input DMA before compute
+        }
+
+        record(EventType::kCOMPUTE_S, StreamType::kCOMPUTE);
+        recordEnqueueTime();
+        if (!mEnqueue(getStream(StreamType::kCOMPUTE)))
+        {
+            return false;
+        }
+        recordEnqueueTime();
+        record(EventType::kCOMPUTE_E, StreamType::kCOMPUTE);
+
+        if (!skipTransfers)
+        {
+            wait(EventType::kCOMPUTE_E, StreamType::kOUTPUT); // Wait for compute before output DMA
+            record(EventType::kOUTPUT_S, StreamType::kOUTPUT);
+            mBindings.transferOutputToHost(getStream(StreamType::kOUTPUT));
+            record(EventType::kOUTPUT_E, StreamType::kOUTPUT);
+        }
+
+        mActive[mNext] = true;
+        moveNext();
+        return true;
+    }
+
+    float sync(
+        const TimePoint& cpuStart, const TrtCudaEvent& gpuStart, std::vector<InferenceTrace>& trace, bool skipTransfers)
+    {
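+        // Returns the GPU time in ms from gpuStart to this iteration's compute start,
+        // or 0 if the current slot has no in-flight work to wait for.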
+        if (mActive[mNext])
+        {
+            if (skipTransfers)
+            {
+                getEvent(EventType::kCOMPUTE_E).synchronize();
+            }
+            else
+            {
+                getEvent(EventType::kOUTPUT_E).synchronize();
+            }
+            trace.emplace_back(getTrace(cpuStart, gpuStart, skipTransfers));
+            mActive[mNext] = false;
+            return getEvent(EventType::kCOMPUTE_S) - gpuStart;
+        }
+        return 0;
+    }
+
+    void syncAll(
+        const TimePoint& cpuStart, const TrtCudaEvent& gpuStart, std::vector<InferenceTrace>& trace, bool skipTransfers)
+    {
+        for (int d = 0; d < mDepth; ++d)
+        {
+            sync(cpuStart, gpuStart, trace, skipTransfers);
+            moveNext();
+        }
+    }
+
+    void wait(TrtCudaEvent& gpuStart)
+    {
+        getStream(StreamType::kINPUT).wait(gpuStart);
+    }
+
+    void setInputData()
+    {
+        mBindings.transferInputToDevice(getStream(StreamType::kINPUT));
+    }
+
+    void fetchOutputData()
+    {
+        mBindings.transferOutputToHost(getStream(StreamType::kOUTPUT));
+    }
+
+private:
+    void moveNext()
+    {
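+        // mDepth is 1 + overlap: with overlap this ping-pongs between slots 0 and 1,
+        // without it mNext always stays at 0.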
+        mNext = mDepth - 1 - mNext;
+    }
+
+    TrtCudaStream& getStream(StreamType t)
+    {
+        return mStream[static_cast<int>(t)];
+    }
+
+    TrtCudaEvent& getEvent(EventType t)
+    {
+        return *mEvents[mNext][static_cast<int>(t)];
+    }
+
+    void record(EventType e, StreamType s)
+    {
+        getEvent(e).record(getStream(s));
+    }
+
+    void recordEnqueueTime()
+    {
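+        // Called twice per query(): the first call records the enqueue start time, the
+        // second the end time (enqueueStart alternates between 0 and 1).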
+        mEnqueueTimes[mNext][enqueueStart] = std::chrono::high_resolution_clock::now();
+        enqueueStart = 1 - enqueueStart;
+    }
+
+    TimePoint getEnqueueTime(bool start)
+    {
+        return mEnqueueTimes[mNext][start ? 0 : 1];
+    }
+
+    void wait(EventType e, StreamType s)
+    {
+        getStream(s).wait(getEvent(e));
+    }
+
+    InferenceTrace getTrace(const TimePoint& cpuStart, const TrtCudaEvent& gpuStart, bool skipTransfers)
+    {
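+        // When transfers are skipped, the H2D and D2H intervals are collapsed onto the
+        // compute interval so the trace still carries well-defined start/end times.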
+        float is
+            = skipTransfers ? getEvent(EventType::kCOMPUTE_S) - gpuStart : getEvent(EventType::kINPUT_S) - gpuStart;
+        float ie
+            = skipTransfers ? getEvent(EventType::kCOMPUTE_S) - gpuStart : getEvent(EventType::kINPUT_E) - gpuStart;
+        float os
+            = skipTransfers ? getEvent(EventType::kCOMPUTE_E) - gpuStart : getEvent(EventType::kOUTPUT_S) - gpuStart;
+        float oe
+            = skipTransfers ? getEvent(EventType::kCOMPUTE_E) - gpuStart : getEvent(EventType::kOUTPUT_E) - gpuStart;
+
+        return InferenceTrace(mStreamId,
+            std::chrono::duration<float, std::milli>(getEnqueueTime(true) - cpuStart).count(),
+            std::chrono::duration<float, std::milli>(getEnqueueTime(false) - cpuStart).count(), is, ie,
+            getEvent(EventType::kCOMPUTE_S) - gpuStart, getEvent(EventType::kCOMPUTE_E) - gpuStart, os, oe);
+    }
+
+    void createEnqueueFunction(
+        const InferenceOptions& inference, nvinfer1::IExecutionContext& context, Bindings& bindings)
+    {
+        if (inference.batch)
+        {
+            mEnqueue = EnqueueFunction(EnqueueImplicit(context, mBindings.getDeviceBuffers(), inference.batch));
+        }
+        else
+        {
+            mEnqueue = EnqueueFunction(EnqueueExplicit(context, mBindings.getDeviceBuffers()));
+        }
+        if (inference.graph)
+        {
+            TrtCudaStream& stream = getStream(StreamType::kCOMPUTE);
+            // Avoid capturing initialization calls by executing the enqueue function at least
+            // once before starting CUDA graph capture.
+            const auto ret = mEnqueue(stream);
+            assert(ret);
+            stream.synchronize();
+
+            mGraph.beginCapture(stream);
+            // The built TRT engine may contain operations that are not permitted under CUDA graph capture mode.
+            // When the stream is capturing, the enqueue call may return false if the current CUDA graph capture fails.
+            if (mEnqueue(stream))
+            {
+                mGraph.endCapture(stream);
+                mEnqueue = EnqueueFunction(EnqueueGraph(mGraph));
+            }
+            else
+            {
+                mGraph.endCaptureOnError(stream);
+                // Ensure any CUDA error has been cleaned up.
+                cudaCheck(cudaGetLastError());
+                sample::gLogWarning << "The built TensorRT engine contains operations that are not permitted under "
+                                       "CUDA graph capture mode."
+                                    << std::endl;
+                sample::gLogWarning << "The specified --useCudaGraph flag has been ignored. The inference will be "
+                                       "launched without using CUDA graph launch."
+                                    << std::endl;
+            }
+        }
+    }
+
+    Bindings& mBindings;
+
+    TrtCudaGraph mGraph;
+    EnqueueFunction mEnqueue;
+
+    int mStreamId{0};
+    int mNext{0};
+    int mDepth{2}; // default to double buffer to hide DMA transfers
+
+    std::vector<bool> mActive;
+    MultiStream mStream;
+    std::vector<MultiEvent> mEvents;
+
+    int enqueueStart{0};
+    std::vector<EnqueueTimes> mEnqueueTimes;
+};
+
+using IterationStreams = std::vector<std::unique_ptr<Iteration>>;
+
+bool inferenceLoop(IterationStreams& iStreams, const TimePoint& cpuStart, const TrtCudaEvent& gpuStart, int iterations,
+    float maxDurationMs, float warmupMs, std::vector<InferenceTrace>& trace, bool skipTransfers)
+{
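+    // Issue work until both the requested iteration count (warm-up iterations are excluded
+    // via 'skip') and the requested duration have been reached, then drain all streams.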
+    float durationMs = 0;
+    int skip = 0;
+
+    for (int i = 0; i < iterations + skip || durationMs < maxDurationMs; ++i)
+    {
+        for (auto& s : iStreams)
+        {
+            if (!s->query(skipTransfers))
+            {
+                return false;
+            }
+        }
+        for (auto& s : iStreams)
+        {
+            durationMs = std::max(durationMs, s->sync(cpuStart, gpuStart, trace, skipTransfers));
+        }
+        if (durationMs < warmupMs) // Warming up
+        {
+            if (durationMs) // Skip complete iterations
+            {
+                ++skip;
+            }
+            continue;
+        }
+    }
+    for (auto& s : iStreams)
+    {
+        s->syncAll(cpuStart, gpuStart, trace, skipTransfers);
+    }
+    return true;
+}
+
+void inferenceExecution(const InferenceOptions& inference, InferenceEnvironment& iEnv, SyncStruct& sync, int offset,
+    int streams, int device, std::vector<InferenceTrace>& trace)
+{
+    float warmupMs = static_cast<float>(inference.warmup);
+    float durationMs = static_cast<float>(inference.duration) * 1000 + warmupMs;
+
+    cudaCheck(cudaSetDevice(device));
+
+    IterationStreams iStreams;
+    for (int s = 0; s < streams; ++s)
+    {
+        Iteration* iteration = new Iteration(offset + s, inference, *iEnv.context[offset], *iEnv.bindings[offset]);
+        if (inference.skipTransfers)
+        {
+            iteration->setInputData();
+        }
+        iStreams.emplace_back(iteration);
+    }
+
+    for (auto& s : iStreams)
+    {
+        s->wait(sync.gpuStart);
+    }
+
+    std::vector<InferenceTrace> localTrace;
+    if (!inferenceLoop(iStreams, sync.cpuStart, sync.gpuStart, inference.iterations, durationMs, warmupMs, localTrace,
+            inference.skipTransfers))
+    {
+        iEnv.error = true;
+    }
+
+    if (inference.skipTransfers)
+    {
+        for (auto& s : iStreams)
+        {
+            s->fetchOutputData();
+        }
+    }
+
+    sync.mutex.lock();
+    trace.insert(trace.end(), localTrace.begin(), localTrace.end());
+    sync.mutex.unlock();
+}
+
+inline std::thread makeThread(const InferenceOptions& inference, InferenceEnvironment& iEnv, SyncStruct& sync,
+    int thread, int streamsPerThread, int device, std::vector<InferenceTrace>& trace)
+{
+    return std::thread(inferenceExecution, std::cref(inference), std::ref(iEnv), std::ref(sync), thread,
+        streamsPerThread, device, std::ref(trace));
+}
+
+} // namespace
+
+bool runInference(
+    const InferenceOptions& inference, InferenceEnvironment& iEnv, int device, std::vector<InferenceTrace>& trace)
+{
+    trace.resize(0);
+
+    SyncStruct sync;
+    sync.sleep = inference.sleep;
+    sync.mainStream.sleep(&sync.sleep);
+    sync.cpuStart = std::chrono::high_resolution_clock::now();
+    sync.gpuStart.record(sync.mainStream);
+
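+    // With --threads, each stream is driven by its own thread; otherwise a single thread
+    // drives all streams.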
+    int threadsNum = inference.threads ? inference.streams : 1;
+    int streamsPerThread = inference.streams / threadsNum;
+
+    std::vector<std::thread> threads;
+    for (int t = 0; t < threadsNum; ++t)
+    {
+        threads.emplace_back(makeThread(inference, iEnv, sync, t, streamsPerThread, device, trace));
+    }
+    for (auto& th : threads)
+    {
+        th.join();
+    }
+
+    auto cmpTrace = [](const InferenceTrace& a, const InferenceTrace& b) { return a.h2dStart < b.h2dStart; };
+    std::sort(trace.begin(), trace.end(), cmpTrace);
+
+    return !iEnv.error;
+}
+namespace
+{
+size_t reportGpuMemory()
+{
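+    // Tracks the drop in free GPU memory between successive calls so callers can estimate
+    // how much device memory a deserialized engine occupies.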
+    static size_t prevFree{0};
+    size_t free{0};
+    size_t total{0};
+    size_t newlyAllocated{0};
+    cudaCheck(cudaMemGetInfo(&free, &total));
+    sample::gLogInfo << "Free GPU memory = " << free / 1024.0_MiB << " GiB";
+    if (prevFree != 0)
+    {
+        newlyAllocated = (prevFree - free);
+        sample::gLogInfo << ", newly allocated GPU memory = " << newlyAllocated / 1024.0_MiB << " GiB";
+    }
+    sample::gLogInfo << ", total GPU memory = " << total / 1024.0_MiB << " GiB" << std::endl;
+    prevFree = free;
+    return newlyAllocated;
+}
+} // namespace
+
+//! Returns true if deserialization is slower than expected or fails.
+bool timeDeserialize(InferenceEnvironment& iEnv)
+{
+    TrtUniquePtr<IRuntime> rt{createInferRuntime(sample::gLogger.getTRTLogger())};
+    constexpr int32_t kNB_ITERS{20};
+    TrtUniquePtr<ICudaEngine> engine;
+    TrtUniquePtr<IHostMemory> serializedEngine{iEnv.engine->serialize()};
+
+    sample::gLogInfo << "Begin deserializing engine..." << std::endl;
+    auto startClock = std::chrono::high_resolution_clock::now();
+    engine.reset(rt->deserializeCudaEngine(serializedEngine->data(), serializedEngine->size(), nullptr));
+    auto endClock = std::chrono::high_resolution_clock::now();
+    auto const first = std::chrono::duration<float, std::milli>(endClock - startClock).count();
+    sample::gLogInfo << "First deserialization time = " << first << " milliseconds" << std::endl;
+
+    // Check if first deserialization succeeded.
+    if (engine == nullptr)
+    {
+        sample::gLogError << "Engine deserialization failed." << std::endl;
+        return true;
+    }
+
+    // Record initial gpu memory state.
+    reportGpuMemory();
+
+    float totalTime{0.F};
+    for (int32_t i = 0; i < kNB_ITERS; ++i)
+    {
+        engine.reset(nullptr);
+
+        startClock = std::chrono::high_resolution_clock::now();
+        engine.reset(rt->deserializeCudaEngine(serializedEngine->data(), serializedEngine->size(), nullptr));
+        endClock = std::chrono::high_resolution_clock::now();
+        totalTime += std::chrono::duration<float, std::milli>(endClock - startClock).count();
+    }
+    const auto averageTime = totalTime / kNB_ITERS;
+    // reportGpuMemory sometimes reports zero after a single deserialization of a small engine,
+    // so use the size of memory for all the iterations.
+    const auto totalEngineSizeGpu = reportGpuMemory();
+    sample::gLogInfo << "Total deserialization time = " << totalTime << " milliseconds, average time = " << averageTime
+                     << ", first time = " << first << "." << std::endl;
+    sample::gLogInfo << "Deserialization Bandwidth = " << 1E-6 * totalEngineSizeGpu / totalTime << " GB/s" << std::endl;
+
+    // If the first deserialization is more than tolerance slower than
+    // the average deserialization, return true, which means an error occurred.
+    const auto tolerance = 1.50F;
+    const bool isSlowerThanExpected = first > averageTime * tolerance;
+    if (isSlowerThanExpected)
+    {
+        sample::gLogInfo << "First deserialization time divided by average time is " << (first / averageTime)
+                         << ". Exceeds tolerance of " << tolerance << "x." << std::endl;
+    }
+    return isSlowerThanExpected;
+}
+} // namespace sample

+ 59 - 0
src/detection/CenterPoint-master/include/common/sampleInference.h

@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TRT_SAMPLE_INFERENCE_H
+#define TRT_SAMPLE_INFERENCE_H
+
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "NvInfer.h"
+
+#include "sampleReporting.h"
+#include "sampleUtils.h"
+
+namespace sample
+{
+
+struct InferenceEnvironment
+{
+    TrtUniquePtr<nvinfer1::ICudaEngine> engine;
+    std::unique_ptr<Profiler> profiler;
+    std::vector<TrtUniquePtr<nvinfer1::IExecutionContext>> context;
+    std::vector<std::unique_ptr<Bindings>> bindings;
+    bool error{false};
+};
+
+//!
+//! \brief Set up contexts and bindings for inference
+//!
+bool setUpInference(InferenceEnvironment& iEnv, const InferenceOptions& inference);
+
+//!
+//! \brief Deserialize the engine and time how long it takes.
+//!
+bool timeDeserialize(InferenceEnvironment& iEnv);
+
+//!
+//! \brief Run inference and collect timing, return false if any error hit during inference
+//!
+bool runInference(const InferenceOptions& inference, InferenceEnvironment& iEnv, int device, std::vector<InferenceTrace>& trace);
+
+} // namespace sample
+
+#endif // TRT_SAMPLE_INFERENCE_H

+ 1549 - 0
src/detection/CenterPoint-master/include/common/sampleOptions.cpp

@@ -0,0 +1,1549 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <cctype>
+#include <cstring>
+#include <functional>
+#include <iostream>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "NvInfer.h"
+
+#include "sampleOptions.h"
+#include "sampleUtils.h"
+
+namespace sample
+{
+
+namespace
+{
+
+std::vector<std::string> splitToStringVec(const std::string& option, char separator)
+{
+    std::vector<std::string> options;
+
+    for (size_t start = 0; start < option.length();)
+    {
+        size_t separatorIndex = option.find(separator, start);
+        if (separatorIndex == std::string::npos)
+        {
+            separatorIndex = option.length();
+        }
+        options.emplace_back(option.substr(start, separatorIndex - start));
+        start = separatorIndex + 1;
+    }
+
+    return options;
+}
+
+template <typename T>
+T stringToValue(const std::string& option)
+{
+    return T{option};
+}
+
+template <>
+int stringToValue<int>(const std::string& option)
+{
+    return std::stoi(option);
+}
+
+template <>
+float stringToValue<float>(const std::string& option)
+{
+    return std::stof(option);
+}
+
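+// Boolean options are value-less flags: their presence on the command line means "true".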
+template <>
+bool stringToValue<bool>(const std::string& option)
+{
+    return true;
+}
+
+template <>
+std::vector<int> stringToValue<std::vector<int>>(const std::string& option)
+{
+    std::vector<int> shape;
+    std::vector<std::string> dimsStrings = splitToStringVec(option, 'x');
+    for (const auto& d : dimsStrings)
+    {
+        shape.push_back(stringToValue<int>(d));
+    }
+    return shape;
+}
+
+template <>
+nvinfer1::DataType stringToValue<nvinfer1::DataType>(const std::string& option)
+{
+    const std::unordered_map<std::string, nvinfer1::DataType> strToDT{{"fp32", nvinfer1::DataType::kFLOAT},
+        {"fp16", nvinfer1::DataType::kHALF}, {"int8", nvinfer1::DataType::kINT8},
+        {"int32", nvinfer1::DataType::kINT32}};
+    const auto& dt = strToDT.find(option);
+    if (dt == strToDT.end())
+    {
+        throw std::invalid_argument("Invalid DataType " + option);
+    }
+    return dt->second;
+}
+
+template <>
+nvinfer1::TensorFormats stringToValue<nvinfer1::TensorFormats>(const std::string& option)
+{
+    std::vector<std::string> optionStrings = splitToStringVec(option, '+');
+    const std::unordered_map<std::string, nvinfer1::TensorFormat> strToFmt{{"chw", nvinfer1::TensorFormat::kLINEAR},
+        {"chw2", nvinfer1::TensorFormat::kCHW2}, {"chw4", nvinfer1::TensorFormat::kCHW4},
+        {"hwc8", nvinfer1::TensorFormat::kHWC8}, {"chw16", nvinfer1::TensorFormat::kCHW16},
+        {"chw32", nvinfer1::TensorFormat::kCHW32}, {"dhwc8", nvinfer1::TensorFormat::kDHWC8},
+        {"hwc", nvinfer1::TensorFormat::kHWC}, {"dla_linear", nvinfer1::TensorFormat::kDLA_LINEAR},
+        {"dla_hwc4", nvinfer1::TensorFormat::kDLA_HWC4}};
+    nvinfer1::TensorFormats formats{};
+    for (auto f : optionStrings)
+    {
+        const auto& tf = strToFmt.find(f);
+        if (tf == strToFmt.end())
+        {
+            throw std::invalid_argument(std::string("Invalid TensorFormat ") + f);
+        }
+        formats |= 1U << int(tf->second);
+    }
+
+    return formats;
+}
+
+template <>
+IOFormat stringToValue<IOFormat>(const std::string& option)
+{
+    IOFormat ioFormat{};
+    const size_t colon = option.find(':');
+
+    if (colon == std::string::npos)
+    {
+        throw std::invalid_argument(std::string("Invalid IOFormat ") + option);
+    }
+
+    ioFormat.first = stringToValue<nvinfer1::DataType>(option.substr(0, colon));
+    ioFormat.second = stringToValue<nvinfer1::TensorFormats>(option.substr(colon + 1));
+
+    return ioFormat;
+}
+
+template <typename T>
+std::pair<std::string, T> splitNameAndValue(const std::string& s)
+{
+    std::string tensorName;
+    std::string valueString;
+    // Split on the last :
+    std::vector<std::string> nameRange{splitToStringVec(s, ':')};
+    // Everything before the last : is the name
+    tensorName = nameRange[0];
+    for (size_t i = 1; i < nameRange.size() - 1; i++)
+    {
+        tensorName += ":" + nameRange[i];
+    }
+    // Value is the string element after the last :
+    valueString = nameRange[nameRange.size() - 1];
+    return std::pair<std::string, T>(tensorName, stringToValue<T>(valueString));
+}
+
+template <typename T>
+void splitInsertKeyValue(const std::vector<std::string>& kvList, T& map)
+{
+    for (const auto& kv : kvList)
+    {
+        map.insert(splitNameAndValue<typename T::mapped_type>(kv));
+    }
+}
+
+const char* boolToEnabled(bool enable)
+{
+    return enable ? "Enabled" : "Disabled";
+}
+
+//! Check if input option exists in input arguments.
+//! If it does: store its value in 'value', erase the argument and return true.
+//! If it does not: return false.
+template <typename T>
+bool getAndDelOption(Arguments& arguments, const std::string& option, T& value)
+{
+    const auto match = arguments.find(option);
+    if (match != arguments.end())
+    {
+        value = stringToValue<T>(match->second);
+        arguments.erase(match);
+        return true;
+    }
+
+    return false;
+}
+
+//! Check if input option exists in input arguments.
+//! If it does: set 'value' to false, erase the argument and return true.
+//! If it does not: return false.
+bool getAndDelNegOption(Arguments& arguments, const std::string& option, bool& value)
+{
+    bool dummy;
+    if (getAndDelOption(arguments, option, dummy))
+    {
+        value = false;
+        return true;
+    }
+    return false;
+}
+
+//! Check if input option exists in input arguments.
+//! If it does: add all the matched arg values to values vector, erase the argument and return true.
+//! If it does not: return false.
+template <typename T>
+bool getAndDelRepeatedOption(Arguments& arguments, const std::string& option, std::vector<T>& values)
+{
+    const auto match = arguments.equal_range(option);
+    if (match.first == match.second)
+    {
+        return false;
+    }
+
+    auto addToValues = [&values](Arguments::value_type& argValue) {values.emplace_back(stringToValue<T>(argValue.second));};
+    std::for_each(match.first, match.second, addToValues);
+    arguments.erase(match.first, match.second);
+
+    return true;
+}
+
+void insertShapesBuild(std::unordered_map<std::string, ShapeRange>& shapes, nvinfer1::OptProfileSelector selector, const std::string& name, const std::vector<int>& dims)
+{
+    shapes[name][static_cast<size_t>(selector)] = dims;
+}
+
+void insertShapesInference(std::unordered_map<std::string, std::vector<int>>& shapes, const std::string& name, const std::vector<int>& dims)
+{
+    shapes[name] = dims;
+}
+
+std::string removeSingleQuotationMarks(std::string& str)
+{
+     std::vector<std::string> strList{splitToStringVec(str, '\'')};
+     // Remove all the escaped single quotation marks
+     std::string retVal = "";
+     // Do not really care about unterminated sequences
+     for (size_t i = 0; i < strList.size(); i++)
+     {
+         retVal += strList[i];
+     }
+     return retVal;
+}
+
+bool getShapesBuild(Arguments& arguments, std::unordered_map<std::string, ShapeRange>& shapes, const char* argument, nvinfer1::OptProfileSelector selector)
+{
+    std::string list;
+    bool retVal = getAndDelOption(arguments, argument, list);
+    std::vector<std::string> shapeList{splitToStringVec(list, ',')};
+    for (const auto& s : shapeList)
+    {
+        auto nameDimsPair = splitNameAndValue<std::vector<int>>(s);
+        auto tensorName = removeSingleQuotationMarks(nameDimsPair.first);
+        auto dims = nameDimsPair.second;
+        insertShapesBuild(shapes, selector, tensorName, dims);
+    }
+    return retVal;
+}
+
+bool getShapesInference(Arguments& arguments, std::unordered_map<std::string, std::vector<int>>& shapes, const char* argument)
+{
+    std::string list;
+    bool retVal = getAndDelOption(arguments, argument, list);
+    std::vector<std::string> shapeList{splitToStringVec(list, ',')};
+    for (const auto& s : shapeList)
+    {
+        auto nameDimsPair = splitNameAndValue<std::vector<int>>(s);
+        auto tensorName = removeSingleQuotationMarks(nameDimsPair.first);
+        auto dims = nameDimsPair.second;
+        insertShapesInference(shapes, tensorName, dims);
+    }
+    return retVal;
+}
+
+void processShapes(std::unordered_map<std::string, ShapeRange>& shapes, bool minShapes, bool optShapes, bool maxShapes, bool calib)
+{
+    // Only accept optShapes only or all three of minShapes, optShapes, maxShapes
+    if ( ((minShapes || maxShapes) && !optShapes)  // minShapes only, maxShapes only, both minShapes and maxShapes
+        || (minShapes && !maxShapes && optShapes)  // both minShapes and optShapes
+        || (!minShapes && maxShapes && optShapes)) // both maxShapes and optShapes
+    {
+        if (calib)
+        {
+            throw std::invalid_argument("Must specify only --optShapesCalib or all of --minShapesCalib, --optShapesCalib, --maxShapesCalib");
+        }
+        else
+        {
+            throw std::invalid_argument("Must specify only --optShapes or all of --minShapes, --optShapes, --maxShapes");
+        }
+    }
+
+    // If optShapes only, expand optShapes to minShapes and maxShapes
+    if (optShapes && !minShapes && !maxShapes)
+    {
+        std::unordered_map<std::string, ShapeRange> newShapes;
+        for (auto& s : shapes)
+        {
+            insertShapesBuild(newShapes, nvinfer1::OptProfileSelector::kMIN, s.first, s.second[static_cast<size_t>(nvinfer1::OptProfileSelector::kOPT)]);
+            insertShapesBuild(newShapes, nvinfer1::OptProfileSelector::kOPT, s.first, s.second[static_cast<size_t>(nvinfer1::OptProfileSelector::kOPT)]);
+            insertShapesBuild(newShapes, nvinfer1::OptProfileSelector::kMAX, s.first, s.second[static_cast<size_t>(nvinfer1::OptProfileSelector::kOPT)]);
+        }
+        shapes = newShapes;
+    }
+}
+
+template <typename T>
+void printShapes(std::ostream& os, const char* phase, const T& shapes)
+{
+    if (shapes.empty())
+    {
+        os << "Input " << phase << " shapes: model" << std::endl;
+    }
+    else
+    {
+        for (const auto& s : shapes)
+        {
+            os << "Input " << phase << " shape: " << s.first << "=" << s.second << std::endl;
+        }
+    }
+}
+
+std::ostream& printBatch(std::ostream& os, int maxBatch)
+{
+    if (maxBatch)
+    {
+        os << maxBatch;
+    }
+    else
+    {
+        os << "explicit";
+    }
+    return os;
+}
+
+std::ostream& printTacticSources(std::ostream& os, nvinfer1::TacticSources enabledSources, nvinfer1::TacticSources disabledSources)
+{
+    if (!enabledSources && !disabledSources)
+    {
+        os << "Using default tactic sources";
+    }
+    else
+    {
+        const auto addSource = [&](uint32_t source, const std::string& name) {
+            if (enabledSources & source)
+            {
+                os << name << " [ON], ";
+            }
+            else if (disabledSources & source)
+            {
+                os << name << " [OFF], ";
+            }
+        };
+
+        addSource(1U << static_cast<uint32_t>(nvinfer1::TacticSource::kCUBLAS), "cublas");
+        addSource(1U << static_cast<uint32_t>(nvinfer1::TacticSource::kCUBLAS_LT), "cublasLt");
+        addSource(1U << static_cast<uint32_t>(nvinfer1::TacticSource::kCUDNN), "cudnn");
+    }
+    return os;
+}
+
+std::ostream& printPrecision(std::ostream& os, const BuildOptions& options)
+{
+    os << "FP32";
+    if (options.fp16)
+    {
+        os << "+FP16";
+    }
+    if (options.int8)
+    {
+        os << "+INT8";
+    }
+    return os;
+}
+
+std::ostream& printTimingCache(std::ostream& os, const BuildOptions& options)
+{
+    switch (options.timingCacheMode)
+    {
+        case TimingCacheMode::kGLOBAL: os << "global"; break;
+        case TimingCacheMode::kLOCAL: os << "local"; break;
+        case TimingCacheMode::kDISABLE: os << "disable"; break;
+    }
+    return os;
+}
+
+std::ostream& printSparsity(std::ostream& os, const BuildOptions& options)
+{
+    switch (options.sparsity)
+    {
+    case SparsityFlag::kDISABLE: os << "Disabled"; break;
+    case SparsityFlag::kENABLE: os << "Enabled"; break;
+    case SparsityFlag::kFORCE: os << "Forced"; break;
+    }
+
+    return os;
+}
+} // namespace
+
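+//! Split each argument at the first '=': the part before it is the option name and the
+//! part after it the value; arguments without '=' are stored with an empty value.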
+Arguments argsToArgumentsMap(int argc, char* argv[])
+{
+    Arguments arguments;
+    for (int i = 1; i < argc; ++i)
+    {
+        auto valuePtr = strchr(argv[i], '=');
+        if (valuePtr)
+        {
+            std::string value{valuePtr + 1};
+            arguments.emplace(std::string(argv[i], valuePtr - argv[i]), value);
+        }
+        else
+        {
+            arguments.emplace(argv[i], "");
+        }
+    }
+    return arguments;
+}
+
+void BaseModelOptions::parse(Arguments& arguments)
+{
+    if (getAndDelOption(arguments, "--onnx", model))
+    {
+        format = ModelFormat::kONNX;
+    }
+    else if (getAndDelOption(arguments, "--uff", model))
+    {
+        format = ModelFormat::kUFF;
+    }
+    else if (getAndDelOption(arguments, "--model", model))
+    {
+        format = ModelFormat::kCAFFE;
+    }
+}
+
+void UffInput::parse(Arguments& arguments)
+{
+    getAndDelOption(arguments, "--uffNHWC", NHWC);
+    std::vector<std::string> args;
+    if (getAndDelRepeatedOption(arguments, "--uffInput", args))
+    {
+        for (const auto& i : args)
+        {
+            std::vector<std::string> values{splitToStringVec(i, ',')};
+            if (values.size() == 4)
+            {
+                nvinfer1::Dims3 dims{std::stoi(values[1]), std::stoi(values[2]), std::stoi(values[3])};
+                inputs.emplace_back(values[0], dims);
+            }
+            else
+            {
+                throw std::invalid_argument(std::string("Invalid uffInput ") + i);
+            }
+        }
+    }
+}
+
+void ModelOptions::parse(Arguments& arguments)
+{
+    baseModel.parse(arguments);
+
+    switch (baseModel.format)
+    {
+    case ModelFormat::kCAFFE:
+    {
+        getAndDelOption(arguments, "--deploy", prototxt);
+        break;
+    }
+    case ModelFormat::kUFF:
+    {
+        uffInputs.parse(arguments);
+        if (uffInputs.inputs.empty())
+        {
+            throw std::invalid_argument("Uff models require at least one input");
+        }
+        break;
+    }
+    case ModelFormat::kONNX:
+        break;
+    case ModelFormat::kANY:
+    {
+        if (getAndDelOption(arguments, "--deploy", prototxt))
+        {
+            baseModel.format = ModelFormat::kCAFFE;
+        }
+        break;
+    }
+    }
+    if (baseModel.format == ModelFormat::kCAFFE || baseModel.format == ModelFormat::kUFF)
+    {
+        std::vector<std::string> outArgs;
+        if (getAndDelRepeatedOption(arguments, "--output", outArgs))
+        {
+            for (const auto& o : outArgs)
+            {
+                for (auto& v : splitToStringVec(o, ','))
+                {
+                    outputs.emplace_back(std::move(v));
+                }
+            }
+        }
+        if (outputs.empty())
+        {
+            throw std::invalid_argument("Caffe and Uff models require at least one output");
+        }
+    }
+}
+
+void BuildOptions::parse(Arguments& arguments)
+{
+    auto getFormats = [&arguments](std::vector<IOFormat>& formatsVector, const char* argument) {
+        std::string list;
+        getAndDelOption(arguments, argument, list);
+        std::vector<std::string> formats{splitToStringVec(list, ',')};
+        for (const auto& f : formats)
+        {
+            formatsVector.push_back(stringToValue<IOFormat>(f));
+        }
+    };
+
+    getFormats(inputFormats, "--inputIOFormats");
+    getFormats(outputFormats, "--outputIOFormats");
+
+    bool explicitBatch{false};
+    getAndDelOption(arguments, "--explicitBatch", explicitBatch);
+    bool minShapes = getShapesBuild(arguments, shapes, "--minShapes", nvinfer1::OptProfileSelector::kMIN);
+    bool optShapes = getShapesBuild(arguments, shapes, "--optShapes", nvinfer1::OptProfileSelector::kOPT);
+    bool maxShapes = getShapesBuild(arguments, shapes, "--maxShapes", nvinfer1::OptProfileSelector::kMAX);
+    processShapes(shapes, minShapes, optShapes, maxShapes, false);
+    bool minShapesCalib
+        = getShapesBuild(arguments, shapesCalib, "--minShapesCalib", nvinfer1::OptProfileSelector::kMIN);
+    bool optShapesCalib
+        = getShapesBuild(arguments, shapesCalib, "--optShapesCalib", nvinfer1::OptProfileSelector::kOPT);
+    bool maxShapesCalib
+        = getShapesBuild(arguments, shapesCalib, "--maxShapesCalib", nvinfer1::OptProfileSelector::kMAX);
+    processShapes(shapesCalib, minShapesCalib, optShapesCalib, maxShapesCalib, true);
+    explicitBatch = explicitBatch || !shapes.empty();
+
+    getAndDelOption(arguments, "--explicitPrecision", explicitPrecision);
+
+    int batch{0};
+    getAndDelOption(arguments, "--maxBatch", batch);
+    if (explicitBatch && batch)
+    {
+        throw std::invalid_argument(
+            "Explicit batch or dynamic shapes enabled with implicit maxBatch " + std::to_string(batch));
+    }
+
+    if (explicitBatch)
+    {
+        maxBatch = 0;
+    }
+    else
+    {
+        if (batch)
+        {
+            maxBatch = batch;
+        }
+    }
+
+    getAndDelOption(arguments, "--workspace", workspace);
+    getAndDelOption(arguments, "--minTiming", minTiming);
+    getAndDelOption(arguments, "--avgTiming", avgTiming);
+
+    bool best{false};
+    getAndDelOption(arguments, "--best", best);
+    if (best)
+    {
+        int8 = true;
+        fp16 = true;
+    }
+
+    getAndDelOption(arguments, "--refit", refittable);
+    getAndDelNegOption(arguments, "--noTF32", tf32);
+    getAndDelOption(arguments, "--fp16", fp16);
+    getAndDelOption(arguments, "--int8", int8);
+    getAndDelOption(arguments, "--safe", safe);
+
+    std::string sparsityString;
+    getAndDelOption(arguments, "--sparsity", sparsityString);
+    if (sparsityString == "disable")
+    {
+        sparsity = SparsityFlag::kDISABLE;
+    }
+    else if (sparsityString == "enable")
+    {
+        sparsity = SparsityFlag::kENABLE;
+    }
+    else if (sparsityString == "force")
+    {
+        sparsity = SparsityFlag::kFORCE;
+    }
+    else if (!sparsityString.empty())
+    {
+        throw std::invalid_argument(std::string("Unknown sparsity mode: ") + sparsityString);
+    }
+
+    bool calibCheck = getAndDelOption(arguments, "--calib", calibration);
+    if (int8 && calibCheck && !shapes.empty() && shapesCalib.empty())
+    {
+        shapesCalib = shapes;
+    }
+
+    std::string nvtxModeString;
+    getAndDelOption(arguments, "--nvtxMode", nvtxModeString);
+    if (nvtxModeString == "default")
+    {
+        nvtxMode = nvinfer1::ProfilingVerbosity::kDEFAULT;
+    }
+    else if (nvtxModeString == "none")
+    {
+        nvtxMode = nvinfer1::ProfilingVerbosity::kNONE;
+    }
+    else if (nvtxModeString == "verbose")
+    {
+        nvtxMode = nvinfer1::ProfilingVerbosity::kVERBOSE;
+    }
+    else if (!nvtxModeString.empty())
+    {
+        throw std::invalid_argument(std::string("Unknown nvtxMode: ") + nvtxModeString);
+    }
+
+    if (getAndDelOption(arguments, "--loadEngine", engine))
+    {
+        load = true;
+    }
+    if (getAndDelOption(arguments, "--saveEngine", engine))
+    {
+        save = true;
+    }
+    if (load && save)
+    {
+        throw std::invalid_argument("Incompatible load and save engine options selected");
+    }
+
+    std::string tacticSourceArgs;
+    if (getAndDelOption(arguments, "--tacticSources", tacticSourceArgs))
+    {
+        std::vector<std::string> tacticList = splitToStringVec(tacticSourceArgs, ',');
+        for (auto& t : tacticList)
+        {
+            bool enable{false};
+            if (t.front() == '+')
+            {
+                enable = true;
+            }
+            else if (t.front() != '-')
+            {
+                throw std::invalid_argument(
+                    "Tactic source must be prefixed with + or -, indicating whether it should be enabled or disabled "
+                    "respectively.");
+            }
+            t.erase(0, 1);
+
+            const auto toUpper = [](std::string& sourceName) {
+                std::transform(
+                    sourceName.begin(), sourceName.end(), sourceName.begin(), [](char c) { return std::toupper(c); });
+                return sourceName;
+            };
+
+            nvinfer1::TacticSource source{};
+            t = toUpper(t);
+            if (t == "CUBLAS")
+            {
+                source = nvinfer1::TacticSource::kCUBLAS;
+            }
+            else if (t == "CUBLASLT" || t == "CUBLAS_LT")
+            {
+                source = nvinfer1::TacticSource::kCUBLAS_LT;
+            }
+            else if (t == "CUDNN")
+            {
+                source = nvinfer1::TacticSource::kCUDNN;
+            }
+            else
+            {
+                throw std::invalid_argument(std::string("Unknown tactic source: ") + t);
+            }
+
+            uint32_t sourceBit = 1U << static_cast<uint32_t>(source);
+
+            if (enable)
+            {
+                enabledTactics |= sourceBit;
+            }
+            else
+            {
+                disabledTactics |= sourceBit;
+            }
+
+            if (enabledTactics & disabledTactics)
+            {
+                throw std::invalid_argument(std::string("Cannot enable and disable ") + t);
+            }
+        }
+    }
+
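+    // Timing cache: --noBuilderCache disables it, a --timingCacheFile makes it persist
+    // across builds (global); otherwise a per-build (local) cache is used.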
+    bool noBuilderCache{false};
+    getAndDelOption(arguments, "--noBuilderCache", noBuilderCache);
+    getAndDelOption(arguments, "--timingCacheFile", timingCacheFile);
+    if (noBuilderCache)
+    {
+        timingCacheMode = TimingCacheMode::kDISABLE;
+    }
+    else if (!timingCacheFile.empty())
+    {
+        timingCacheMode = TimingCacheMode::kGLOBAL;
+    }
+    else
+    {
+        timingCacheMode = TimingCacheMode::kLOCAL;
+    }
+}
+
+void SystemOptions::parse(Arguments& arguments)
+{
+    getAndDelOption(arguments, "--device", device);
+    getAndDelOption(arguments, "--useDLACore", DLACore);
+    getAndDelOption(arguments, "--allowGPUFallback", fallback);
+    std::string pluginName;
+    while (getAndDelOption(arguments, "--plugins", pluginName))
+    {
+        plugins.emplace_back(pluginName);
+    }
+}
+
+void InferenceOptions::parse(Arguments& arguments)
+{
+    getAndDelOption(arguments, "--streams", streams);
+    getAndDelOption(arguments, "--iterations", iterations);
+    getAndDelOption(arguments, "--duration", duration);
+    getAndDelOption(arguments, "--warmUp", warmup);
+    getAndDelOption(arguments, "--sleepTime", sleep);
+    bool exposeDMA{false};
+    if (getAndDelOption(arguments, "--exposeDMA", exposeDMA))
+    {
+        overlap = !exposeDMA;
+    }
+    getAndDelOption(arguments, "--noDataTransfers", skipTransfers);
+    getAndDelOption(arguments, "--useSpinWait", spin);
+    getAndDelOption(arguments, "--threads", threads);
+    getAndDelOption(arguments, "--useCudaGraph", graph);
+    getAndDelOption(arguments, "--separateProfileRun", rerun);
+    getAndDelOption(arguments, "--buildOnly", skip);
+    getAndDelOption(arguments, "--timeDeserialize", timeDeserialize);
+    getAndDelOption(arguments, "--timeRefit", timeRefit);
+
+    std::string list;
+    getAndDelOption(arguments, "--loadInputs", list);
+    std::vector<std::string> inputsList{splitToStringVec(list, ',')};
+    splitInsertKeyValue(inputsList, inputs);
+
+    getShapesInference(arguments, shapes, "--shapes");
+
+    int batchOpt{0};
+    getAndDelOption(arguments, "--batch", batchOpt);
+    if (!shapes.empty() && batchOpt)
+    {
+        throw std::invalid_argument(
+            "Explicit batch or dynamic shapes enabled with implicit batch " + std::to_string(batchOpt));
+    }
+    if (batchOpt)
+    {
+        batch = batchOpt;
+    }
+    else
+    {
+        if (!shapes.empty())
+        {
+            batch = 0;
+        }
+    }
+}
+
+void ReportingOptions::parse(Arguments& arguments)
+{
+    getAndDelOption(arguments, "--percentile", percentile);
+    getAndDelOption(arguments, "--avgRuns", avgs);
+    getAndDelOption(arguments, "--verbose", verbose);
+    getAndDelOption(arguments, "--dumpRefit", refit);
+    getAndDelOption(arguments, "--dumpOutput", output);
+    getAndDelOption(arguments, "--dumpProfile", profile);
+    getAndDelOption(arguments, "--exportTimes", exportTimes);
+    getAndDelOption(arguments, "--exportOutput", exportOutput);
+    getAndDelOption(arguments, "--exportProfile", exportProfile);
+    if (percentile < 0 || percentile > 100)
+    {
+        throw std::invalid_argument(std::string("Percentile ") + std::to_string(percentile) + " is not in [0,100]");
+    }
+}
+
+bool parseHelp(Arguments& arguments)
+{
+    bool helpLong{false};
+    bool helpShort{false};
+    getAndDelOption(arguments, "--help", helpLong);
+    getAndDelOption(arguments, "-h", helpShort);
+    return helpLong || helpShort;
+}
+
+void AllOptions::parse(Arguments& arguments)
+{
+    model.parse(arguments);
+    build.parse(arguments);
+    system.parse(arguments);
+    inference.parse(arguments);
+
+    if (model.baseModel.format == ModelFormat::kONNX)
+    {
+        build.maxBatch = 0; // ONNX only supports explicit batch mode.
+    }
+
+    auto batchWasSet = [](int batch, int defaultValue) { return batch && batch != defaultValue; };
+
+    if (!build.maxBatch && batchWasSet(inference.batch, defaultBatch) && !build.shapes.empty())
+    {
+        throw std::invalid_argument(
+            "Explicit batch + dynamic shapes setting used at build time but inference uses --batch to set batch. "
+            "Conflicting build and inference batch settings.");
+    }
+    if (batchWasSet(build.maxBatch, defaultMaxBatch) && !inference.batch)
+    {
+        throw std::invalid_argument(
+            "Implicit batch option used at build time but inference input shapes specified. Conflicting build and "
+            "inference batch settings.");
+    }
+
+    if (build.shapes.empty() && !inference.shapes.empty())
+    {
+        for (auto& s : inference.shapes)
+        {
+            insertShapesBuild(build.shapes, nvinfer1::OptProfileSelector::kMIN, s.first, s.second);
+            insertShapesBuild(build.shapes, nvinfer1::OptProfileSelector::kOPT, s.first, s.second);
+            insertShapesBuild(build.shapes, nvinfer1::OptProfileSelector::kMAX, s.first, s.second);
+        }
+        build.maxBatch = 0;
+    }
+    else
+    {
+        if (!build.shapes.empty() && inference.shapes.empty())
+        {
+            for (auto& s : build.shapes)
+            {
+                insertShapesInference(
+                    inference.shapes, s.first, s.second[static_cast<size_t>(nvinfer1::OptProfileSelector::kOPT)]);
+            }
+        }
+        if (!build.maxBatch)
+        {
+            inference.batch = 0;
+        }
+    }
+
+    if (build.maxBatch && inference.batch)
+    {
+        // For implicit batch, check for compatibility and if --maxBatch is not given and inference batch is greater
+        // than maxBatch, use inference batch also for maxBatch
+        if (build.maxBatch != defaultMaxBatch && build.maxBatch < inference.batch)
+        {
+            throw std::invalid_argument("Build max batch " + std::to_string(build.maxBatch)
+                + " is less than inference batch " + std::to_string(inference.batch));
+        }
+        else
+        {
+            if (build.maxBatch < inference.batch)
+            {
+                build.maxBatch = inference.batch;
+            }
+        }
+    }
+
+    reporting.parse(arguments);
+    helps = parseHelp(arguments);
+
+    if (!helps)
+    {
+        if (!build.load && model.baseModel.format == ModelFormat::kANY)
+        {
+            throw std::invalid_argument("Model missing or format not recognized");
+        }
+        if (!build.load && !build.maxBatch && model.baseModel.format != ModelFormat::kONNX)
+        {
+            throw std::invalid_argument("Explicit batch size not supported for Caffe and Uff models");
+        }
+        if (build.safe && system.DLACore >= 0)
+        {
+            auto checkSafeDLAFormats = [](const std::vector<IOFormat>& fmt) {
+                return fmt.empty() ? false : std::all_of(fmt.begin(), fmt.end(), [](const IOFormat& pair) {
+                    bool supported{false};
+                    const bool isCHW4{pair.second == 1U << static_cast<int>(nvinfer1::TensorFormat::kCHW4)};
+                    const bool isCHW32{pair.second == 1U << static_cast<int>(nvinfer1::TensorFormat::kCHW32)};
+                    const bool isCHW16{pair.second == 1U << static_cast<int>(nvinfer1::TensorFormat::kCHW16)};
+                    supported |= pair.first == nvinfer1::DataType::kINT8 && (isCHW4 || isCHW32);
+                    supported |= pair.first == nvinfer1::DataType::kHALF && (isCHW4 || isCHW16);
+                    return supported;
+                });
+            };
+            if (!checkSafeDLAFormats(build.inputFormats) || !checkSafeDLAFormats(build.outputFormats))
+            {
+                throw std::invalid_argument(
+                    "I/O formats for safe DLA capability are restricted to fp16:chw16 or int8:chw32");
+            }
+            if (system.fallback)
+            {
+                throw std::invalid_argument("GPU fallback (--allowGPUFallback) not allowed for safe DLA capability");
+            }
+        }
+    }
+}
+
+void SafeBuilderOptions::parse(Arguments& arguments)
+{
+    auto getFormats = [&arguments](std::vector<IOFormat>& formatsVector, const char* argument) {
+        std::string list;
+        getAndDelOption(arguments, argument, list);
+        std::vector<std::string> formats{splitToStringVec(list, ',')};
+        for (const auto& f : formats)
+        {
+            formatsVector.push_back(stringToValue<IOFormat>(f));
+        }
+    };
+
+    getAndDelOption(arguments, "--serialized", serialized);
+    getAndDelOption(arguments, "--onnx", onnxModelFile);
+    getAndDelOption(arguments, "--help", help);
+    getAndDelOption(arguments, "--verbose", verbose);
+    getFormats(inputFormats, "--inputIOFormats");
+    getFormats(outputFormats, "--outputIOFormats");
+    getAndDelOption(arguments, "--int8", int8);
+    getAndDelOption(arguments, "--calib", calibFile);
+    std::string pluginName;
+    while (getAndDelOption(arguments, "--plugins", pluginName))
+    {
+        plugins.emplace_back(pluginName);
+    }
+}
+
+std::ostream& operator<<(std::ostream& os, const BaseModelOptions& options)
+{
+    os << "=== Model Options ===" << std::endl;
+
+    os << "Format: ";
+    switch (options.format)
+    {
+    case ModelFormat::kCAFFE:
+    {
+        os << "Caffe";
+        break;
+    }
+    case ModelFormat::kONNX:
+    {
+        os << "ONNX";
+        break;
+    }
+    case ModelFormat::kUFF:
+    {
+        os << "UFF";
+        break;
+    }
+    case ModelFormat::kANY:
+        os << "*";
+        break;
+    }
+    os << std::endl << "Model: " << options.model << std::endl;
+
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const UffInput& input)
+{
+    os << "Uff Inputs Layout: " << (input.NHWC ? "NHWC" : "NCHW") << std::endl;
+    for (const auto& i : input.inputs)
+    {
+        os << "Input: " << i.first << "," << i.second.d[0] << "," << i.second.d[1] << "," << i.second.d[2] << std::endl;
+    }
+
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const ModelOptions& options)
+{
+    os << options.baseModel;
+    switch (options.baseModel.format)
+    {
+    case ModelFormat::kCAFFE:
+    {
+        os << "Prototxt: " << options.prototxt << std::endl;
+        break;
+    }
+    case ModelFormat::kUFF:
+    {
+        os << options.uffInputs;
+        break;
+    }
+    case ModelFormat::kONNX: // Fallthrough: No options to report for ONNX or the generic case
+    case ModelFormat::kANY:
+        break;
+    }
+
+    os << "Output:";
+    for (const auto& o : options.outputs)
+    {
+        os << " " << o;
+    }
+    os << std::endl;
+
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const IOFormat& format)
+{
+    switch (format.first)
+    {
+    case nvinfer1::DataType::kFLOAT:
+    {
+        os << "fp32:";
+        break;
+    }
+    case nvinfer1::DataType::kHALF:
+    {
+        os << "fp16:";
+        break;
+    }
+    case nvinfer1::DataType::kINT8:
+    {
+        os << "int8:";
+        break;
+    }
+    case nvinfer1::DataType::kINT32:
+    {
+        os << "int32:";
+        break;
+    }
+    case nvinfer1::DataType::kBOOL:
+    {
+        os << "Bool:";
+        break;
+    }
+    }
+
+    for (int f = 0; f < nvinfer1::EnumMax<nvinfer1::TensorFormat>(); ++f)
+    {
+        if ((1U << f) & format.second)
+        {
+            if (f)
+            {
+                os << "+";
+            }
+            switch (nvinfer1::TensorFormat(f))
+            {
+            case nvinfer1::TensorFormat::kLINEAR:
+            {
+                os << "chw";
+                break;
+            }
+            case nvinfer1::TensorFormat::kCHW2:
+            {
+                os << "chw2";
+                break;
+            }
+            case nvinfer1::TensorFormat::kHWC8:
+            {
+                os << "hwc8";
+                break;
+            }
+            case nvinfer1::TensorFormat::kHWC16:
+            {
+                os << "hwc16";
+                break;
+            }
+            case nvinfer1::TensorFormat::kCHW4:
+            {
+                os << "chw4";
+                break;
+            }
+            case nvinfer1::TensorFormat::kCHW16:
+            {
+                os << "chw16";
+                break;
+            }
+            case nvinfer1::TensorFormat::kCHW32:
+            {
+                os << "chw32";
+                break;
+            }
+            case nvinfer1::TensorFormat::kDHWC8:
+            {
+                os << "dhwc8";
+                break;
+            }
+            case nvinfer1::TensorFormat::kCDHW32:
+            {
+                os << "cdhw32";
+                break;
+            }
+            case nvinfer1::TensorFormat::kHWC:
+            {
+                os << "hwc";
+                break;
+            }
+            case nvinfer1::TensorFormat::kDLA_LINEAR:
+            {
+                os << "dla_linear";
+                break;
+            }
+            case nvinfer1::TensorFormat::kDLA_HWC4:
+            {
+                os << "dla_hwc4";
+                break;
+            }
+            }
+        }
+    }
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const ShapeRange& dims)
+{
+    int i = 0;
+    for (const auto& d : dims)
+    {
+        if (!d.size())
+        {
+            break;
+        }
+        os << (i ? "+" : "") << d;
+        ++i;
+    }
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const BuildOptions& options)
+{
+// clang-format off
+    os << "=== Build Options ==="                                                                                       << std::endl <<
+
+          "Max batch: ";        printBatch(os, options.maxBatch)                                                        << std::endl <<
+          "Workspace: "      << options.workspace << " MiB"                                                             << std::endl <<
+          "minTiming: "      << options.minTiming                                                                       << std::endl <<
+          "avgTiming: "      << options.avgTiming                                                                       << std::endl <<
+          "Precision: ";        printPrecision(os, options)                                                             << std::endl <<
+          "Calibration: "    << (options.int8 && options.calibration.empty() ? "Dynamic" : options.calibration.c_str()) << std::endl <<
+          "Refit: "          << boolToEnabled(options.refittable)                                                       << std::endl <<
+          "Sparsity: ";         printSparsity(os, options)                                                              << std::endl <<
+          "Safe mode: "      << boolToEnabled(options.safe)                                                             << std::endl <<
+          "Save engine: "    << (options.save ? options.engine : "")                                                    << std::endl <<
+          "Load engine: "    << (options.load ? options.engine : "")                                                    << std::endl <<
+          "NVTX verbosity: " << static_cast<int>(options.nvtxMode)                                                      << std::endl <<
+          "Tactic sources: ";   printTacticSources(os, options.enabledTactics, options.disabledTactics)                 << std::endl <<
+          "timingCacheMode: ";  printTimingCache(os, options)                                                           << std::endl <<
+          "timingCacheFile: "<< options.timingCacheFile                                                                 << std::endl;
+    // clang-format on
+
+    auto printIOFormats = [](std::ostream& os, const char* direction, const std::vector<IOFormat>& formats) {
+        if (formats.empty())
+        {
+            os << direction << "s format: fp32:CHW" << std::endl;
+        }
+        else
+        {
+            for(const auto& f : formats)
+            {
+                os << direction << ": " << f << std::endl;
+            }
+        }
+    };
+
+    printIOFormats(os, "Input(s)", options.inputFormats);
+    printIOFormats(os, "Output(s)", options.outputFormats);
+    printShapes(os, "build", options.shapes);
+    printShapes(os, "calibration", options.shapesCalib);
+
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const SystemOptions& options)
+{
+    // clang-format off
+    os << "=== System Options ==="                                                                << std::endl <<
+
+          "Device: "  << options.device                                                           << std::endl <<
+          "DLACore: " << (options.DLACore != -1 ? std::to_string(options.DLACore) : "")           <<
+                         (options.DLACore != -1 && options.fallback ? " (With GPU fallback)" : "") << std::endl;
+    os << "Plugins:";
+
+    for (const auto& p : options.plugins)
+    {
+        os << " " << p;
+    }
+    os << std::endl;
+
+    return os;
+    // clang-format on
+}
+
+std::ostream& operator<<(std::ostream& os, const InferenceOptions& options)
+{
+// clang-format off
+    os << "=== Inference Options ==="                                     << std::endl <<
+
+          "Batch: ";
+    if (options.batch && options.shapes.empty())
+    {
+                          os << options.batch                             << std::endl;
+    }
+    else
+    {
+                          os << "Explicit"                                << std::endl;
+    }
+    printShapes(os, "inference", options.shapes);
+    os << "Iterations: "         << options.iterations                    << std::endl <<
+          "Duration: "           << options.duration   << "s (+ "
+                                 << options.warmup     << "ms warm up)"   << std::endl <<
+          "Sleep time: "         << options.sleep      << "ms"            << std::endl <<
+          "Streams: "            << options.streams                       << std::endl <<
+          "ExposeDMA: "          << boolToEnabled(!options.overlap)       << std::endl <<
+          "Data transfers: "     << boolToEnabled(!options.skipTransfers) << std::endl <<
+          "Spin-wait: "          << boolToEnabled(options.spin)           << std::endl <<
+          "Multithreading: "     << boolToEnabled(options.threads)        << std::endl <<
+          "CUDA Graph: "         << boolToEnabled(options.graph)          << std::endl <<
+          "Separate profiling: " << boolToEnabled(options.rerun)          << std::endl <<
+          "Time Deserialize: "   << boolToEnabled(options.timeDeserialize) << std::endl <<
+          "Time Refit: "         << boolToEnabled(options.timeRefit) << std::endl <<
+          "Skip inference: "     << boolToEnabled(options.skip)           << std::endl;
+
+// clang-format on
+    os << "Inputs:" << std::endl;
+    for (const auto& input : options.inputs)
+    {
+        os << input.first << "<-" << input.second << std::endl;
+    }
+
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const ReportingOptions& options)
+{
+// clang-format off
+    os << "=== Reporting Options ==="                                       << std::endl <<
+
+          "Verbose: "                     << boolToEnabled(options.verbose) << std::endl <<
+          "Averages: "                    << options.avgs << " inferences"  << std::endl <<
+          "Percentile: "                  << options.percentile             << std::endl <<
+          "Dump refittable layers: "      << boolToEnabled(options.refit)   << std::endl <<
+          "Dump output: "                 << boolToEnabled(options.output)  << std::endl <<
+          "Profile: "                     << boolToEnabled(options.profile) << std::endl <<
+          "Export timing to JSON file: "  << options.exportTimes            << std::endl <<
+          "Export output to JSON file: "  << options.exportOutput           << std::endl <<
+          "Export profile to JSON file: " << options.exportProfile          << std::endl;
+// clang-format on
+
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const AllOptions& options)
+{
+    os << options.model << options.build << options.system << options.inference << options.reporting << std::endl;
+    return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const SafeBuilderOptions& options)
+{
+    auto printIOFormats = [](std::ostream& os, const char* direction, const std::vector<IOFormat>& formats) {
+        if (formats.empty())
+        {
+            os << direction << "s format: fp32:CHW" << std::endl;
+        }
+        else
+        {
+            for(const auto& f : formats)
+            {
+                os << direction << ": " << f << std::endl;
+            }
+        }
+    };
+
+    os << "=== Build Options ===" << std::endl;
+    os << "Model ONNX: " << options.onnxModelFile << std::endl;
+
+    os << "Precision: FP16";
+    if (options.int8)
+    {
+        os << " + INT8";
+    }
+    os << std::endl;
+    os << "Calibration file: " << options.calibFile << std::endl;
+    os << "Serialized Network: " << options.serialized << std::endl;
+
+    printIOFormats(os, "Input(s)", options.inputFormats);
+    printIOFormats(os, "Output(s)", options.outputFormats);
+
+    os << "Plugins:";
+    for (const auto& p : options.plugins)
+    {
+        os << " " << p;
+    }
+    os << std::endl;
+    return os;
+}
+
+void BaseModelOptions::help(std::ostream& os)
+{
+// clang-format off
+    os << "  --uff=<file>                UFF model"                                             << std::endl <<
+          "  --onnx=<file>               ONNX model"                                            << std::endl <<
+          "  --model=<file>              Caffe model (default = no model, random weights used)" << std::endl;
+// clang-format on
+}
+
+void UffInput::help(std::ostream& os)
+{
+// clang-format off
+    os << "  --uffInput=<name>,X,Y,Z     Input blob name and its dimensions (X,Y,Z=C,H,W), it can be specified "
+                                                       "multiple times; at least one is required for UFF models" << std::endl <<
+          "  --uffNHWC                   Set if inputs are in the NHWC layout instead of NCHW (use "             <<
+                                                                    "X,Y,Z=H,W,C order in --uffInput)"           << std::endl;
+// clang-format on
+}
+
+void ModelOptions::help(std::ostream& os)
+{
+// clang-format off
+    os << "=== Model Options ==="                                                                                 << std::endl;
+    BaseModelOptions::help(os);
+    os << "  --deploy=<file>             Caffe prototxt file"                                                     << std::endl <<
+          "  --output=<name>[,<name>]*   Output names (it can be specified multiple times); at least one output "
+                                                                                  "is required for UFF and Caffe" << std::endl;
+    UffInput::help(os);
+// clang-format on
+}
+
+void BuildOptions::help(std::ostream& os)
+{
+// clang-format off
+    os << "=== Build Options ==="                                                                                                            << std::endl <<
+
+          "  --maxBatch                  Set max batch size and build an implicit batch engine (default = " << defaultMaxBatch << ")"        << std::endl <<
+          "  --explicitBatch             Use explicit batch sizes when building the engine (default = implicit)"                             << std::endl <<
+          "  --minShapes=spec            Build with dynamic shapes using a profile with the min shapes provided"                             << std::endl <<
+          "  --optShapes=spec            Build with dynamic shapes using a profile with the opt shapes provided"                             << std::endl <<
+          "  --maxShapes=spec            Build with dynamic shapes using a profile with the max shapes provided"                             << std::endl <<
+          "  --minShapesCalib=spec       Calibrate with dynamic shapes using a profile with the min shapes provided"                         << std::endl <<
+          "  --optShapesCalib=spec       Calibrate with dynamic shapes using a profile with the opt shapes provided"                         << std::endl <<
+          "  --maxShapesCalib=spec       Calibrate with dynamic shapes using a profile with the max shapes provided"                         << std::endl <<
+          "                              Note: All three of min, opt and max shapes must be supplied."                                       << std::endl <<
+          "                                    However, if only opt shapes is supplied then it will be expanded so"                          << std::endl <<
+          "                                    that min shapes and max shapes are set to the same values as opt shapes."                     << std::endl <<
+          "                                    In addition, use of dynamic shapes implies explicit batch."                                   << std::endl <<
+          "                                    Input names can be wrapped with escaped single quotes (ex: \\\'Input:0\\\')."                 << std::endl <<
+          "                              Example input shapes spec: input0:1x3x256x256,input1:1x3x128x128"                                   << std::endl <<
+          "                              Each input shape is supplied as a key-value pair where key is the input name and"                   << std::endl <<
+          "                              value is the dimensions (including the batch dimension) to be used for that input."                 << std::endl <<
+          "                              Each key-value pair has the key and value separated using a colon (:)."                             << std::endl <<
+          "                              Multiple input shapes can be provided via comma-separated key-value pairs."                         << std::endl <<
+          "  --inputIOFormats=spec       Type and format of each of the input tensors (default = all inputs in fp32:chw)"                    << std::endl <<
+          "                              See --outputIOFormats help for the grammar of type and format list."                                << std::endl <<
+          "                              Note: If this option is specified, please set comma-separated types and formats for all"            << std::endl <<
+          "                                    inputs following the same order as network inputs ID (even if only one input"                 << std::endl <<
+          "                                    needs specifying IO format) or set the type and format once for broadcasting."                << std::endl <<
+          "  --outputIOFormats=spec      Type and format of each of the output tensors (default = all outputs in fp32:chw)"                  << std::endl <<
+          "                              Note: If this option is specified, please set comma-separated types and formats for all"            << std::endl <<
+          "                                    outputs following the same order as network outputs ID (even if only one output"              << std::endl <<
+          "                                    needs specifying IO format) or set the type and format once for broadcasting."                << std::endl <<
+          "                              IO Formats: spec  ::= IOfmt[\",\"spec]"                                                             << std::endl <<
+          "                                          IOfmt ::= type:fmt"                                                                     << std::endl <<
+          "                                          type  ::= \"fp32\"|\"fp16\"|\"int32\"|\"int8\""                                         << std::endl <<
+          "                                          fmt   ::= (\"chw\"|\"chw2\"|\"chw4\"|\"hwc8\"|\"chw16\"|\"chw32\"|\"dhwc8\")[\"+\"fmt]" << std::endl <<
+          "  --workspace=N               Set workspace size in megabytes (default = "                      << defaultWorkspace << ")"        << std::endl <<
+          "  --nvtxMode=mode             Specify NVTX annotation verbosity. mode ::= default|verbose|none"                                   << std::endl <<
+          "  --minTiming=M               Set the minimum number of iterations used in kernel selection (default = "
+                                                                                                           << defaultMinTiming << ")"        << std::endl <<
+          "  --avgTiming=M               Set the number of times averaged in each iteration for kernel selection (default = "
+                                                                                                           << defaultAvgTiming << ")"        << std::endl <<
+          "  --refit                     Mark the engine as refittable. This will allow the inspection of refittable layers "                << std::endl <<
+          "                              and weights within the engine."                                                                     << std::endl <<
+          "  --sparsity=spec             Control sparsity (default = disabled). "                                                            << std::endl <<
+          "                              Sparsity: spec ::= \"disable\", \"enable\", \"force\""                                              << std::endl <<
+          "                              Note: Description about each of these options is as below"                                          << std::endl <<
+          "                                    disable = do not enable sparse tactics in the builder (this is the default)"                  << std::endl <<
+          "                                    enable  = enable sparse tactics in the builder (but these tactics will only be"               << std::endl <<
+          "                                              considered if the weights have the right sparsity pattern)"                         << std::endl <<
+          "                                    force   = enable sparse tactics in the builder and force-overwrite the weights to have"       << std::endl <<
+          "                                              a sparsity pattern (even if you loaded a model yourself)"                           << std::endl <<
+          "  --noTF32                    Disable tf32 precision (default is to enable tf32, in addition to fp32)"                            << std::endl <<
+          "  --fp16                      Enable fp16 precision, in addition to fp32 (default = disabled)"                                    << std::endl <<
+          "  --int8                      Enable int8 precision, in addition to fp32 (default = disabled)"                                    << std::endl <<
+          "  --best                      Enable all precisions to achieve the best performance (default = disabled)"                         << std::endl <<
+          "  --calib=<file>              Read INT8 calibration cache file"                                                                   << std::endl <<
+          "  --safe                      Only test the functionality available in safety restricted flows"                                   << std::endl <<
+          "  --saveEngine=<file>         Save the serialized engine"                                                                         << std::endl <<
+          "  --loadEngine=<file>         Load a serialized engine"                                                                           << std::endl <<
+          "  --tacticSources=tactics     Specify the tactics to be used by adding (+) or removing (-) tactics from the default "             << std::endl <<
+          "                              tactic sources (default = all available tactics)."                                                  << std::endl <<
+          "                              Note: Currently only cuDNN, cuBLAS and cuBLAS-LT are listed as optional tactics."                   << std::endl <<
+          "                              Tactic Sources: tactics ::= [\",\"tactic]"                                                          << std::endl <<
+          "                                              tactic  ::= (+|-)lib"                                                               << std::endl <<
+          "                                              lib     ::= \"CUBLAS\"|\"CUBLAS_LT\"|\"CUDNN\""                                      << std::endl <<
+          "                              For example, to disable cudnn and enable cublas: --tacticSources=-CUDNN,+CUBLAS"                    << std::endl <<
+          "  --noBuilderCache            Disable timing cache in builder (default is to enable timing cache)"                                << std::endl <<
+          "  --timingCacheFile=<file>    Save/load the serialized global timing cache"                                                       << std::endl
+          ;
+// clang-format on
+}
+
+void SystemOptions::help(std::ostream& os)
+{
+// clang-format off
+    os << "=== System Options ==="                                                                         << std::endl <<
+          "  --device=N                  Select cuda device N (default = "         << defaultDevice << ")" << std::endl <<
+          "  --useDLACore=N              Select DLA core N for layers that support DLA (default = none)"   << std::endl <<
+          "  --allowGPUFallback          When DLA is enabled, allow GPU fallback for unsupported layers "
+                                                                                    "(default = disabled)" << std::endl;
+    os << "  --plugins                   Plugin library (.so) to load (can be specified multiple times)"   << std::endl;
+// clang-format on
+}
+
+void InferenceOptions::help(std::ostream& os)
+{
+    // clang-format off
+    os << "=== Inference Options ==="                                                                                                << std::endl <<
+          "  --batch=N                   Set batch size for implicit batch engines (default = "              << defaultBatch << ")"  << std::endl <<
+          "  --shapes=spec               Set input shapes for dynamic shapes inference inputs."                                      << std::endl <<
+          "                              Note: Use of dynamic shapes implies explicit batch."                                        << std::endl <<
+          "                                    Input names can be wrapped with escaped single quotes (ex: \\\'Input:0\\\')."         << std::endl <<
+          "                              Example input shapes spec: input0:1x3x256x256, input1:1x3x128x128"                          << std::endl <<
+          "                              Each input shape is supplied as a key-value pair where key is the input name and"           << std::endl <<
+          "                              value is the dimensions (including the batch dimension) to be used for that input."         << std::endl <<
+          "                              Each key-value pair has the key and value separated using a colon (:)."                     << std::endl <<
+          "                              Multiple input shapes can be provided via comma-separated key-value pairs."                 << std::endl <<
+          "  --loadInputs=spec           Load input values from files (default = generate random inputs). Input names can be "
+                                                                                       "wrapped with single quotes (ex: 'Input:0')"  << std::endl <<
+          "                              Input values spec ::= Ival[\",\"spec]"                                                      << std::endl <<
+          "                                           Ival ::= name\":\"file"                                                        << std::endl <<
+          "  --iterations=N              Run at least N inference iterations (default = "               << defaultIterations << ")"  << std::endl <<
+          "  --warmUp=N                  Run for N milliseconds to warmup before measuring performance (default = "
+                                                                                                            << defaultWarmUp << ")"  << std::endl <<
+          "  --duration=N                Run performance measurements for at least N seconds wallclock time (default = "
+                                                                                                          << defaultDuration << ")"  << std::endl <<
+          "  --sleepTime=N               Delay inference start with a gap of N milliseconds between launch and compute "
+                                                                                               "(default = " << defaultSleep << ")"  << std::endl <<
+          "  --streams=N                 Instantiate N engines to use concurrently (default = "            << defaultStreams << ")"  << std::endl <<
+          "  --exposeDMA                 Serialize DMA transfers to and from device (default = disabled)."                           << std::endl <<
+          "  --noDataTransfers           Disable DMA transfers to and from device (default = enabled)."                              << std::endl <<
+          "  --useSpinWait               Actively synchronize on GPU events. This option may decrease synchronization time but "
+                                                                             "increase CPU usage and power (default = disabled)"     << std::endl <<
+          "  --threads                   Enable multithreading to drive engines with independent threads (default = disabled)"       << std::endl <<
+          "  --useCudaGraph              Use CUDA graph to capture engine execution and then launch inference (default = disabled)." << std::endl <<
+          "                              This flag may be ignored if the graph capture fails."                                       << std::endl <<
+          "  --timeDeserialize           Time the amount of time it takes to deserialize the network and exit."                      << std::endl <<
+          "  --timeRefit                 Time the amount of time it takes to refit the engine before inference."                     << std::endl <<
+          "  --separateProfileRun        Do not attach the profiler in the benchmark run; if profiling is enabled, a second "
+                                                                                "profile run will be executed (default = disabled)"  << std::endl <<
+          "  --buildOnly                 Skip inference perf measurement (default = disabled)"                                       << std::endl;
+    // clang-format on
+}
+
+void ReportingOptions::help(std::ostream& os)
+{
+// clang-format off
+    os << "=== Reporting Options ==="                                                                    << std::endl <<
+          "  --verbose                   Use verbose logging (default = false)"                          << std::endl <<
+          "  --avgRuns=N                 Report performance measurements averaged over N consecutive "
+                                                       "iterations (default = " << defaultAvgRuns << ")" << std::endl <<
+          "  --percentile=P              Report performance for the P percentage (0<=P<=100, 0 "
+                                        "representing max perf, and 100 representing min perf) (default"
+                                                                      " = " << defaultPercentile << "%)" << std::endl <<
+          "  --dumpRefit                 Print the refittable layers and weights from a refittable "
+                                        "engine"                                                         << std::endl <<
+          "  --dumpOutput                Print the output tensor(s) of the last inference iteration "
+                                                                                  "(default = disabled)" << std::endl <<
+          "  --dumpProfile               Print profile information per layer (default = disabled)"       << std::endl <<
+          "  --exportTimes=<file>        Write the timing results in a json file (default = disabled)"   << std::endl <<
+          "  --exportOutput=<file>       Write the output tensors to a json file (default = disabled)"   << std::endl <<
+          "  --exportProfile=<file>      Write the profile information per layer in a json file "
+                                                                              "(default = disabled)"     << std::endl;
+// clang-format on
+}
+
+void helpHelp(std::ostream& os)
+{
+// clang-format off
+    os << "=== Help ==="                                     << std::endl <<
+          "  --help, -h                  Print this message" << std::endl;
+// clang-format on
+}
+
+void AllOptions::help(std::ostream& os)
+{
+    ModelOptions::help(os);
+    os << std::endl;
+    BuildOptions::help(os);
+    os << std::endl;
+    InferenceOptions::help(os);
+    os << std::endl;
+// clang-format off
+    os << "=== Build and Inference Batch Options ==="                                                                   << std::endl <<
+          "                              When using implicit batch, the max batch size of the engine, if not given, "   << std::endl <<
+          "                              is set to the inference batch size;"                                           << std::endl <<
+          "                              when using explicit batch, if shapes are specified only for inference, they "  << std::endl <<
+          "                              will be used also as min/opt/max in the build profile; if shapes are "         << std::endl <<
+          "                              specified only for the build, the opt shapes will be used also for inference;" << std::endl <<
+          "                              if both are specified, they must be compatible; and if explicit batch is "     << std::endl <<
+          "                              enabled but neither is specified, the model must provide complete static"      << std::endl <<
+          "                              dimensions, including batch size, for all inputs"                              << std::endl <<
+          "                              Using ONNX models automatically forces explicit batch."                        << std::endl <<
+    std::endl;
+    // clang-format on
+    ReportingOptions::help(os);
+    os << std::endl;
+    SystemOptions::help(os);
+    os << std::endl;
+    helpHelp(os);
+}
+
+void SafeBuilderOptions::printHelp(std::ostream& os)
+{
+// clang-format off
+    os << "=== Mandatory ==="                                                                                                                << std::endl <<
+          "  --onnx=<file>               ONNX model"                                                                                         << std::endl <<
+          " "                                                                                                                                << std::endl <<
+          "=== Optional ==="                                                                                                                 << std::endl <<
+          "  --inputIOFormats=spec       Type and format of each of the input tensors (default = all inputs in fp32:chw)"                    << std::endl <<
+          "                              See --outputIOFormats help for the grammar of type and format list."                                << std::endl <<
+          "                              Note: If this option is specified, please set comma-separated types and formats for all"            << std::endl <<
+          "                                    inputs following the same order as network inputs ID (even if only one input"                 << std::endl <<
+          "                                    needs specifying IO format) or set the type and format once for broadcasting."                << std::endl <<
+          "  --outputIOFormats=spec      Type and format of each of the output tensors (default = all outputs in fp32:chw)"                  << std::endl <<
+          "                              Note: If this option is specified, please set comma-separated types and formats for all"            << std::endl <<
+          "                                    outputs following the same order as network outputs ID (even if only one output"              << std::endl <<
+          "                                    needs specifying IO format) or set the type and format once for broadcasting."                << std::endl <<
+          "                              IO Formats: spec  ::= IOfmt[\",\"spec]"                                                             << std::endl <<
+          "                                          IOfmt ::= type:fmt"                                                                     << std::endl <<
+          "                                          type  ::= \"fp32\"|\"fp16\"|\"int32\"|\"int8\""                                         << std::endl <<
+          "                                          fmt   ::= (\"chw\"|\"chw2\"|\"chw4\"|\"hwc8\"|\"chw16\"|\"chw32\"|\"dhwc8\")[\"+\"fmt]" << std::endl <<
+          "  --int8                      Enable int8 precision, in addition to fp16 (default = disabled)"                                    << std::endl <<
+          "  --calib=<file>              Read INT8 calibration cache file"                                                                   << std::endl <<
+          "  --serialized=<file>         Save the serialized network"                                                                        << std::endl <<
+          "  --plugins                   Plugin library (.so) to load (can be specified multiple times)"                                     << std::endl <<
+          "  --verbose                   Use verbose logging (default = false)"                                                              << std::endl <<
+          "  --help                      Print this message"                                                                                 << std::endl <<
+          " "                                                                                                                                << std::endl;
+// clang-format on
+}
+
+} // namespace sample
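Taken together, the parse() overrides declared in sampleOptions.h and the stream operators above form the whole option-handling path. A hypothetical driver wiring them up (a sketch only, not part of the committed sources; error handling is reduced to the bare minimum):

#include <iostream>
#include "sampleOptions.h"

int main(int argc, char* argv[])
{
    sample::Arguments args = sample::argsToArgumentsMap(argc, argv);
    if (sample::parseHelp(args))
    {
        sample::AllOptions::help(std::cout); // prints every "=== ... Options ===" section above
        return 0;
    }
    sample::AllOptions options;
    options.parse(args);       // malformed specs surface as exceptions in this sample code
    std::cout << options;      // echoes the parsed configuration via the operator<< overloads
    return 0;
}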

+ 268 - 0
src/detection/CenterPoint-master/include/common/sampleOptions.h

@@ -0,0 +1,268 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TRT_SAMPLE_OPTIONS_H
+#define TRT_SAMPLE_OPTIONS_H
+
+#include <algorithm>
+#include <array>
+#include <iostream>
+#include <stdexcept>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "NvInfer.h"
+
+namespace sample
+{
+
+// Build default params
+constexpr int defaultMaxBatch{1};
+constexpr int defaultWorkspace{16};
+constexpr int defaultMinTiming{1};
+constexpr int defaultAvgTiming{8};
+
+// System default params
+constexpr int defaultDevice{0};
+
+// Inference default params
+constexpr int defaultBatch{1};
+constexpr int defaultStreams{1};
+constexpr int defaultIterations{10};
+constexpr int defaultWarmUp{200};
+constexpr int defaultDuration{3};
+constexpr int defaultSleep{0};
+
+// Reporting default params
+constexpr int defaultAvgRuns{10};
+constexpr float defaultPercentile{99};
+
+enum class ModelFormat
+{
+    kANY,
+    kCAFFE,
+    kONNX,
+    kUFF
+};
+
+enum class SparsityFlag
+{
+    kDISABLE,
+    kENABLE,
+    kFORCE
+};
+
+enum class TimingCacheMode
+{
+    kDISABLE,
+    kLOCAL,
+    kGLOBAL
+};
+
+using Arguments = std::unordered_multimap<std::string, std::string>;
+
+using IOFormat = std::pair<nvinfer1::DataType, nvinfer1::TensorFormats>;
+
+using ShapeRange = std::array<std::vector<int>, nvinfer1::EnumMax<nvinfer1::OptProfileSelector>()>;
+
+struct Options
+{
+    virtual void parse(Arguments& arguments) = 0;
+};
+
+struct BaseModelOptions : public Options
+{
+    ModelFormat format{ModelFormat::kANY};
+    std::string model;
+
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+struct UffInput : public Options
+{
+    std::vector<std::pair<std::string, nvinfer1::Dims>> inputs;
+    bool NHWC{false};
+
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+struct ModelOptions : public Options
+{
+    BaseModelOptions baseModel;
+    std::string prototxt;
+    std::vector<std::string> outputs;
+    UffInput uffInputs;
+
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+struct BuildOptions : public Options
+{
+    int maxBatch{defaultMaxBatch}; // Parsing sets maxBatch to 0 if explicitBatch is true
+    int workspace{defaultWorkspace};
+    int minTiming{defaultMinTiming};
+    int avgTiming{defaultAvgTiming};
+    bool tf32{true};
+    bool fp16{false};
+    bool int8{false};
+    bool safe{false};
+    bool save{false};
+    bool load{false};
+    bool refittable{false};
+    bool explicitPrecision{false};
+    SparsityFlag sparsity{SparsityFlag::kDISABLE};
+    nvinfer1::ProfilingVerbosity nvtxMode{nvinfer1::ProfilingVerbosity::kDEFAULT};
+    std::string engine;
+    std::string calibration;
+    std::unordered_map<std::string, ShapeRange> shapes;
+    std::unordered_map<std::string, ShapeRange> shapesCalib;
+    std::vector<IOFormat> inputFormats;
+    std::vector<IOFormat> outputFormats;
+    nvinfer1::TacticSources enabledTactics{0};
+    nvinfer1::TacticSources disabledTactics{0};
+    TimingCacheMode timingCacheMode{TimingCacheMode::kLOCAL};
+    std::string timingCacheFile{};
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+struct SystemOptions : public Options
+{
+    int device{defaultDevice};
+    int DLACore{-1};
+    bool fallback{false};
+    std::vector<std::string> plugins;
+
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+struct InferenceOptions : public Options
+{
+    int batch{defaultBatch}; // Parsing sets batch to 0 if shapes is not empty
+    int iterations{defaultIterations};
+    int warmup{defaultWarmUp};
+    int duration{defaultDuration};
+    int sleep{defaultSleep};
+    int streams{defaultStreams};
+    bool overlap{true};
+    bool skipTransfers{false};
+    bool spin{false};
+    bool threads{false};
+    bool graph{false};
+    bool skip{false};
+    bool rerun{false};
+    bool timeDeserialize{false};
+    bool timeRefit{false};
+    std::unordered_map<std::string, std::string> inputs;
+    std::unordered_map<std::string, std::vector<int>> shapes;
+
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+struct ReportingOptions : public Options
+{
+    bool verbose{false};
+    int avgs{defaultAvgRuns};
+    float percentile{defaultPercentile};
+    bool refit{false};
+    bool output{false};
+    bool profile{false};
+    std::string exportTimes;
+    std::string exportOutput;
+    std::string exportProfile;
+
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+struct SafeBuilderOptions : public Options
+{
+    std::string serialized{};
+    std::string onnxModelFile{};
+    bool help{false};
+    bool verbose{false};
+    std::vector<IOFormat> inputFormats;
+    std::vector<IOFormat> outputFormats;
+    bool int8{false};
+    std::string calibFile{};
+    std::vector<std::string> plugins;
+
+    void parse(Arguments& arguments) override;
+
+    static void printHelp(std::ostream& out);
+};
+
+struct AllOptions : public Options
+{
+    ModelOptions model;
+    BuildOptions build;
+    SystemOptions system;
+    InferenceOptions inference;
+    ReportingOptions reporting;
+    bool helps{false};
+
+    void parse(Arguments& arguments) override;
+
+    static void help(std::ostream& out);
+};
+
+Arguments argsToArgumentsMap(int argc, char* argv[]);
+
+bool parseHelp(Arguments& arguments);
+
+void helpHelp(std::ostream& out);
+
+// Functions to print options
+
+std::ostream& operator<<(std::ostream& os, const BaseModelOptions& options);
+
+std::ostream& operator<<(std::ostream& os, const UffInput& input);
+
+std::ostream& operator<<(std::ostream& os, const IOFormat& format);
+
+std::ostream& operator<<(std::ostream& os, const ShapeRange& dims);
+
+std::ostream& operator<<(std::ostream& os, const ModelOptions& options);
+
+std::ostream& operator<<(std::ostream& os, const BuildOptions& options);
+
+std::ostream& operator<<(std::ostream& os, const SystemOptions& options);
+
+std::ostream& operator<<(std::ostream& os, const InferenceOptions& options);
+
+std::ostream& operator<<(std::ostream& os, const ReportingOptions& options);
+
+std::ostream& operator<<(std::ostream& os, const AllOptions& options);
+
+std::ostream& operator<<(std::ostream& os, const SafeBuilderOptions& options);
+
+} // namespace sample
+
+#endif // TRT_SAMPLE_OPTIONS_H

+ 415 - 0
src/detection/CenterPoint-master/include/common/sampleReporting.cpp

@@ -0,0 +1,415 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <exception>
+#include <fstream>
+#include <iomanip>
+#include <iostream>
+#include <numeric>
+#include <utility>
+
+#include "sampleInference.h"
+#include "sampleOptions.h"
+#include "sampleReporting.h"
+
+using namespace nvinfer1;
+
+namespace sample
+{
+
+namespace
+{
+
+//!
+//! \brief Find percentile in an ascending sequence of timings
+//! \note percentile must be in [0, 100]. Otherwise, an exception is thrown.
+//!
+template <typename T>
+float findPercentile(float percentile, const std::vector<InferenceTime>& timings, const T& toFloat)
+{
+    const int all = static_cast<int>(timings.size());
+    const int exclude = static_cast<int>((1 - percentile / 100) * all);
+    if (timings.empty())
+    {
+        return std::numeric_limits<float>::infinity();
+    }
+    if (percentile < 0.0f || percentile > 100.0f)
+    {
+        throw std::runtime_error("percentile is not in [0, 100]!");
+    }
+    return toFloat(timings[std::max(all - 1 - exclude, 0)]);
+}
+
+//!
+//! \brief Find median in a sorted sequence of timings
+//!
+template <typename T>
+float findMedian(const std::vector<InferenceTime>& timings, const T& toFloat)
+{
+    if (timings.empty())
+    {
+        return std::numeric_limits<float>::infinity();
+    }
+
+    const int m = timings.size() / 2;
+    if (timings.size() % 2)
+    {
+        return toFloat(timings[m]);
+    }
+
+    return (toFloat(timings[m - 1]) + toFloat(timings[m])) / 2;
+}
+
+inline InferenceTime traceToTiming(const InferenceTrace& a)
+{
+    return InferenceTime((a.enqEnd - a.enqStart), (a.h2dEnd - a.h2dStart), (a.computeEnd - a.computeStart),
+        (a.d2hEnd - a.d2hStart), (a.d2hEnd - a.h2dStart));
+}
+
+} // namespace
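To make the index arithmetic in findPercentile above concrete, here is a small standalone re-derivation specialized to plain floats (an illustrative sketch only; the helper itself is templated over InferenceTime and a metric getter):

#include <algorithm>
#include <cassert>
#include <vector>

// Same selection rule as findPercentile: drop the top (1 - p/100) fraction of an
// ascending sequence and return the largest remaining element, clamped to index 0.
float percentileOfSorted(float percentile, const std::vector<float>& ascending)
{
    const int all = static_cast<int>(ascending.size());
    const int exclude = static_cast<int>((1 - percentile / 100) * all);
    return ascending[std::max(all - 1 - exclude, 0)];
}

int main()
{
    const std::vector<float> ms{1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
    assert(percentileOfSorted(99, ms) == 10); // exclude = int(0.01 * 10) = 0 -> last element
    assert(percentileOfSorted(50, ms) == 5);  // exclude = int(0.50 * 10) = 5 -> element at index 4
    assert(percentileOfSorted(0, ms) == 1);   // everything excluded -> clamped to the first element
    return 0;
}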
+
+void printProlog(int warmups, int timings, float warmupMs, float benchTimeMs, std::ostream& os)
+{
+    os << "Warmup completed " << warmups << " queries over " << warmupMs << " ms" << std::endl;
+    os << "Timing trace has " << timings << " queries over " << benchTimeMs / 1000 << " s" << std::endl;
+}
+
+void printTiming(const std::vector<InferenceTime>& timings, int runsPerAvg, std::ostream& os)
+{
+    int count = 0;
+    InferenceTime sum;
+
+    os << std::endl;
+    os << "=== Trace details ===" << std::endl;
+    os << "Trace averages of " << runsPerAvg << " runs:" << std::endl;
+    for (const auto& t : timings)
+    {
+        sum += t;
+
+        if (++count == runsPerAvg)
+        {
+            // clang-format off
+            os << "Average on " << runsPerAvg << " runs - GPU latency: " << sum.compute / runsPerAvg
+               << " ms - Host latency: " << sum.latency() / runsPerAvg << " ms (end to end " << sum.e2e / runsPerAvg
+               << " ms, enqueue " << sum.enq / runsPerAvg << " ms)" << std::endl;
+            // clang-format on
+            count = 0;
+            sum.enq = 0;
+            sum.h2d = 0;
+            sum.compute = 0;
+            sum.d2h = 0;
+            sum.e2e = 0;
+        }
+    }
+}
+
+void printMetricExplanations(std::ostream& os)
+{
+    os << std::endl;
+    os << "=== Explanations of the performance metrics ===" << std::endl;
+    os << "Total Host Walltime: the host walltime from when the first query (after warmups) is enqueued to when the "
+          "last query is completed."
+       << std::endl;
+    os << "GPU Compute Time: the GPU latency to execute the kernels for a query." << std::endl;
+    os << "Total GPU Compute Time: the summation of the GPU Compute Time of all the queries. If this is significantly "
+          "shorter than Total Host Walltime, the GPU may be under-utilized because of host-side overheads or data "
+          "transfers."
+       << std::endl;
+    os << "Throughput: the observed throughput computed by dividing the number of queries by the Total Host Walltime. "
+          "If this is significantly lower than the reciprocal of GPU Compute Time, the GPU may be under-utilized "
+          "because of host-side overheads or data transfers."
+       << std::endl;
+    os << "Enqueue Time: the host latency to enqueue a query. If this is longer than GPU Compute Time, the GPU may be "
+          "under-utilized."
+       << std::endl;
+    os << "H2D Latency: the latency for host-to-device data transfers for input tensors of a single query."
+       << std::endl;
+    os << "D2H Latency: the latency for device-to-host data transfers for output tensors of a single query."
+       << std::endl;
+    os << "Latency: the summation of H2D Latency, GPU Compute Time, and D2H Latency. This is the latency to infer a "
+          "single query."
+       << std::endl;
+    os << "End-to-End Host Latency: the duration from when the H2D of a query is called to when the D2H of the same "
+          "query is completed, which includes the latency to wait for the completion of the previous query. This is "
+          "the latency of a query if multiple queries are enqueued consecutively."
+       << std::endl;
+}
+
+PerformanceResult getPerformanceResult(const std::vector<InferenceTime>& timings,
+    std::function<float(const InferenceTime&)> metricGetter, float percentile)
+{
+    const auto metricComparator
+        = [metricGetter](const InferenceTime& a, const InferenceTime& b) { return metricGetter(a) < metricGetter(b); };
+    const auto metricAccumulator = [metricGetter](float acc, const InferenceTime& a) { return acc + metricGetter(a); };
+    std::vector<InferenceTime> newTimings = timings;
+    std::sort(newTimings.begin(), newTimings.end(), metricComparator);
+    PerformanceResult result;
+    result.min = metricGetter(newTimings.front());
+    result.max = metricGetter(newTimings.back());
+    result.mean = std::accumulate(newTimings.begin(), newTimings.end(), 0.0f, metricAccumulator) / newTimings.size();
+    result.median = findMedian(newTimings, metricGetter);
+    result.percentile = findPercentile(percentile, newTimings, metricGetter);
+    return result;
+}
+
+void printEpilog(const std::vector<InferenceTime>& timings, float walltimeMs, float percentile, int batchSize,
+    std::ostream& osInfo, std::ostream& osWarning, std::ostream& osVerbose)
+{
+    const float throughput = batchSize * timings.size() / walltimeMs * 1000;
+
+    const auto getLatency = [](const InferenceTime& t) { return t.latency(); };
+    const auto latencyResult = getPerformanceResult(timings, getLatency, percentile);
+
+    const auto getEndToEnd = [](const InferenceTime& t) { return t.e2e; };
+    const auto e2eLatencyResult = getPerformanceResult(timings, getEndToEnd, percentile);
+
+    const auto getEnqueue = [](const InferenceTime& t) { return t.enq; };
+    const auto enqueueResult = getPerformanceResult(timings, getEnqueue, percentile);
+
+    const auto getH2d = [](const InferenceTime& t) { return t.h2d; };
+    const auto h2dResult = getPerformanceResult(timings, getH2d, percentile);
+
+    const auto getCompute = [](const InferenceTime& t) { return t.compute; };
+    const auto gpuComputeResult = getPerformanceResult(timings, getCompute, percentile);
+
+    const auto getD2h = [](const InferenceTime& t) { return t.d2h; };
+    const auto d2hResult = getPerformanceResult(timings, getD2h, percentile);
+
+    const auto toPerfString = [percentile](const PerformanceResult& r) {
+        std::stringstream s;
+        s << "min = " << r.min << " ms, max = " << r.max << " ms, mean = " << r.mean << " ms, "
+          << "median = " << r.median << " ms, percentile(" << percentile << "%) = " << r.percentile << " ms";
+        return s.str();
+    };
+
+    osInfo << std::endl;
+    osInfo << "=== Performance summary ===" << std::endl;
+    osInfo << "Throughput: " << throughput << " qps" << std::endl;
+    osInfo << "Latency: " << toPerfString(latencyResult) << std::endl;
+    osInfo << "End-to-End Host Latency: " << toPerfString(e2eLatencyResult) << std::endl;
+    osInfo << "Enqueue Time: " << toPerfString(enqueueResult) << std::endl;
+    osInfo << "H2D Latency: " << toPerfString(h2dResult) << std::endl;
+    osInfo << "GPU Compute Time: " << toPerfString(gpuComputeResult) << std::endl;
+    osInfo << "D2H Latency: " << toPerfString(d2hResult) << std::endl;
+    osInfo << "Total Host Walltime: " << walltimeMs / 1000 << " s" << std::endl;
+    osInfo << "Total GPU Compute Time: " << gpuComputeResult.mean * timings.size() / 1000 << " s" << std::endl;
+
+    // Report warnings if the throughput is bound by other factors than GPU
+    // Compute Time.
+    constexpr float enqueueBoundReportingThreshold{0.8f};
+    if (enqueueResult.median > enqueueBoundReportingThreshold * gpuComputeResult.median)
+    {
+        osWarning
+            << "* Throughput may be bound by Enqueue Time rather than GPU Compute and the GPU may be under-utilized."
+            << std::endl;
+        osWarning << "  If not already in use, --useCudaGraph (utilize CUDA graphs where possible) may increase the "
+                     "throughput."
+                  << std::endl;
+    }
+    if (h2dResult.median >= gpuComputeResult.median)
+    {
+        osWarning << "* Throughput may be bound by host-to-device transfers for the inputs rather than GPU Compute and "
+                     "the GPU may be under-utilized."
+                  << std::endl;
+        osWarning << "  Add --noDataTransfers flag to disable data transfers." << std::endl;
+    }
+    if (d2hResult.median >= gpuComputeResult.median)
+    {
+        osWarning << "* Throughput may be bound by device-to-host transfers for the outputs rather than GPU Compute "
+                     "and the GPU may be under-utilized."
+                  << std::endl;
+        osWarning << "  Add --noDataTransfers flag to disable data transfers." << std::endl;
+    }
+
+    // Explain what the metrics mean.
+    osInfo << "Explanations of the performance metrics are printed in the verbose logs." << std::endl;
+    printMetricExplanations(osVerbose);
+
+    osInfo << std::endl;
+}
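As a quick sanity check on the throughput formula at the top of printEpilog: with batchSize = 4, 1000 timed queries, and walltimeMs = 2000, the reported value is 4 * 1000 / 2000 * 1000 = 2000 qps (illustrative numbers only).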
+
+void printPerformanceReport(const std::vector<InferenceTrace>& trace, const ReportingOptions& reporting, float warmupMs,
+    int batchSize, std::ostream& osInfo, std::ostream& osWarning, std::ostream& osVerbose)
+{
+    const auto isNotWarmup = [&warmupMs](const InferenceTrace& a) { return a.computeStart >= warmupMs; };
+    const auto noWarmup = std::find_if(trace.begin(), trace.end(), isNotWarmup);
+    const int warmups = noWarmup - trace.begin();
+    const float benchTime = trace.back().d2hEnd - noWarmup->h2dStart;
+    // when implicit batch used, batchSize = options.inference.batch, which is parsed through --batch
+    // when explicit batch used, batchSize = options.inference.batch = 0
+    // treat inference with explicit batch as a single query and report the throughput
+    batchSize = batchSize ? batchSize : 1;
+    printProlog(warmups * batchSize, (trace.size() - warmups) * batchSize, warmupMs, benchTime, osInfo);
+
+    std::vector<InferenceTime> timings(trace.size() - warmups);
+    std::transform(noWarmup, trace.end(), timings.begin(), traceToTiming);
+    printTiming(timings, reporting.avgs, osInfo);
+    printEpilog(timings, benchTime, reporting.percentile, batchSize, osInfo, osWarning, osVerbose);
+
+    if (!reporting.exportTimes.empty())
+    {
+        exportJSONTrace(trace, reporting.exportTimes);
+    }
+}
+
+//! Printed format:
+//! [ value, ...]
+//! value ::= { "startEnqMs" : time, "endEnqMs" : time, "startH2dMs" : time, "endH2dMs" : time,
+//!             "startComputeMs" : time, "endComputeMs" : time, "startD2hMs" : time, "endD2hMs" : time,
+//!             "h2dMs" : time, "computeMs" : time, "d2hMs" : time, "latencyMs" : time, "endToEndMs" : time }
+//!
+void exportJSONTrace(const std::vector<InferenceTrace>& trace, const std::string& fileName)
+{
+    std::ofstream os(fileName, std::ofstream::trunc);
+    os << "[" << std::endl;
+    const char* sep = "  ";
+    for (const auto& t : trace)
+    {
+        const InferenceTime it(traceToTiming(t));
+        os << sep << "{ ";
+        sep = ", ";
+        // clang-format off
+        os << "\"startEnqMs\" : "     << t.enqStart     << sep << "\"endEnqMs\" : "     << t.enqEnd     << sep
+           << "\"startH2dMs\" : "     << t.h2dStart     << sep << "\"endH2dMs\" : "     << t.h2dEnd     << sep
+           << "\"startComputeMs\" : " << t.computeStart << sep << "\"endComputeMs\" : " << t.computeEnd << sep
+           << "\"startD2hMs\" : "     << t.d2hStart     << sep << "\"endD2hMs\" : "     << t.d2hEnd     << sep
+           << "\"h2dMs\" : "          << it.h2d         << sep << "\"computeMs\" : "    << it.compute   << sep
+           << "\"d2hMs\" : "          << it.d2h         << sep << "\"latencyMs\" : "    << it.latency() << sep
+           << "\"endToEndMs\" : "     << it.e2e         << " }"                                         << std::endl;
+        // clang-format on
+    }
+    os << "]" << std::endl;
+}
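For orientation, a single-entry trace exported by exportJSONTrace would look roughly like the record below (all values are made up; the derived fields follow traceToTiming, e.g. latencyMs = h2dMs + computeMs + d2hMs and endToEndMs = endD2hMs - startH2dMs):

[
  { "startEnqMs" : 12.1, "endEnqMs" : 12.4, "startH2dMs" : 12.4, "endH2dMs" : 13.0, "startComputeMs" : 13.0, "endComputeMs" : 18.2, "startD2hMs" : 18.2, "endD2hMs" : 18.6, "h2dMs" : 0.6, "computeMs" : 5.2, "d2hMs" : 0.4, "latencyMs" : 6.2, "endToEndMs" : 6.2 }
]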
+
+void Profiler::reportLayerTime(const char* layerName, float timeMs) noexcept
+{
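+    // Note: this accounting assumes the profiler reports layers in the same order on every
+    // iteration. mIterator reaches mLayers.end() both while the first pass is still discovering
+    // layers (each new name is appended) and after the last known layer of a completed pass;
+    // seeing the first layer's name again starts a new pass and increments mUpdatesCount
+    // (the very first recorded layer also counts, so mUpdatesCount equals the iteration count).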
+    if (mIterator == mLayers.end())
+    {
+        const bool first = !mLayers.empty() && mLayers.begin()->name == layerName;
+        mUpdatesCount += mLayers.empty() || first;
+        if (first)
+        {
+            mIterator = mLayers.begin();
+        }
+        else
+        {
+            mLayers.emplace_back();
+            mLayers.back().name = layerName;
+            mIterator = mLayers.end() - 1;
+        }
+    }
+
+    mIterator->timeMs += timeMs;
+    ++mIterator;
+}
+
+void Profiler::print(std::ostream& os) const noexcept
+{
+    const std::string nameHdr("Layer");
+    const std::string timeHdr("   Time (ms)");
+    const std::string avgHdr("   Avg. Time (ms)");
+    const std::string percentageHdr("   Time %");
+
+    const float totalTimeMs = getTotalTime();
+
+    const auto cmpLayer = [](const LayerProfile& a, const LayerProfile& b)
+    {
+        return a.name.size() < b.name.size();
+    };
+    const auto longestName = std::max_element(mLayers.begin(), mLayers.end(), cmpLayer);
+    const auto nameLength = std::max(longestName->name.size() + 1, nameHdr.size());
+    const auto timeLength = timeHdr.size();
+    const auto avgLength = avgHdr.size();
+    const auto percentageLength = percentageHdr.size();
+
+    os << std::endl
+       << "=== Profile (" << mUpdatesCount << " iterations ) ===" << std::endl
+       << std::setw(nameLength) << nameHdr << timeHdr << avgHdr << percentageHdr << std::endl;
+
+    for (const auto& p : mLayers)
+    {
+        // clang-format off
+        os << std::setw(nameLength) << p.name << std::setw(timeLength) << std::fixed << std::setprecision(2) << p.timeMs
+           << std::setw(avgLength) << std::fixed << std::setprecision(4) << p.timeMs / mUpdatesCount
+           << std::setw(percentageLength) << std::fixed << std::setprecision(1) << p.timeMs / totalTimeMs * 100
+           << std::endl;
+    }
+    {
+        os << std::setw(nameLength) << "Total" << std::setw(timeLength) << std::fixed << std::setprecision(2)
+           << totalTimeMs << std::setw(avgLength) << std::fixed << std::setprecision(4) << totalTimeMs / mUpdatesCount
+           << std::setw(percentageLength) << std::fixed << std::setprecision(1) << 100.0 << std::endl;
+        // clang-format on
+    }
+    os << std::endl;
+}
+
+void Profiler::exportJSONProfile(const std::string& fileName) const noexcept
+{
+    std::ofstream os(fileName, std::ofstream::trunc);
+    os << "[" << std::endl << "  { \"count\" : " << mUpdatesCount << " }" << std::endl;
+
+    const auto totalTimeMs = getTotalTime();
+
+    for (const auto& l : mLayers)
+    {
+        // clang-format off
+        os << ", {" << " \"name\" : \""      << l.name << "\""
+                       ", \"timeMs\" : "     << l.timeMs
+           <<          ", \"averageMs\" : "  << l.timeMs / mUpdatesCount
+           <<          ", \"percentage\" : " << l.timeMs / totalTimeMs * 100
+           << " }"  << std::endl;
+        // clang-format on
+    }
+    os << "]" << std::endl;
+}
+
+void dumpInputs(const nvinfer1::IExecutionContext& context, const Bindings& bindings, std::ostream& os)
+{
+    os << "Input Tensors:" << std::endl;
+    bindings.dumpInputs(context, os);
+}
+
+void dumpOutputs(const nvinfer1::IExecutionContext& context, const Bindings& bindings, std::ostream& os)
+{
+    os << "Output Tensors:" << std::endl;
+    bindings.dumpOutputs(context, os);
+}
+
+void exportJSONOutput(
+    const nvinfer1::IExecutionContext& context, const Bindings& bindings, const std::string& fileName, int32_t batch)
+{
+    std::ofstream os(fileName, std::ofstream::trunc);
+    std::string sep = "  ";
+    const auto output = bindings.getOutputBindings();
+    os << "[" << std::endl;
+    for (const auto& binding : output)
+    {
+        // clang-format off
+        os << sep << "{ \"name\" : \"" << binding.first << "\"" << std::endl;
+        sep = ", ";
+        os << "  " << sep << "\"dimensions\" : \"";
+        bindings.dumpBindingDimensions(binding.second, context, os);
+        os << "\"" << std::endl;
+        os << "  " << sep << "\"values\" : [ ";
+        bindings.dumpBindingValues(context, binding.second, os, sep, batch);
+        os << " ]" << std::endl << "  }" << std::endl;
+        // clang-format on
+    }
+    os << "]" << std::endl;
+}
+
+} // namespace sample
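A minimal sketch of how the Profiler implemented above is typically attached to a TensorRT run; the function name profileRun, the context pointer and the buffers array are placeholders, not part of this commit:

    #include <iostream>
    #include "sampleReporting.h"

    void profileRun(nvinfer1::IExecutionContext* context, void** buffers)
    {
        sample::Profiler profiler;
        context->setProfiler(&profiler);       // TensorRT now calls reportLayerTime() once per layer
        for (int i = 0; i < 10; ++i)
        {
            context->executeV2(buffers);       // synchronous execution, so per-layer times get recorded
        }
        profiler.print(std::cout);             // Layer / Time (ms) / Avg. Time (ms) / Time % table
        profiler.exportJSONProfile("profile.json");
    }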

+ 221 - 0
src/detection/CenterPoint-master/include/common/sampleReporting.h

@@ -0,0 +1,221 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TRT_SAMPLE_REPORTING_H
+#define TRT_SAMPLE_REPORTING_H
+
+#include <functional>
+#include <iostream>
+
+#include "NvInfer.h"
+
+#include "sampleOptions.h"
+#include "sampleUtils.h"
+
+namespace sample
+{
+
+//!
+//! \struct InferenceTime
+//! \brief Measurement times in milliseconds
+//!
+struct InferenceTime
+{
+    InferenceTime(float q, float i, float c, float o, float e)
+        : enq(q)
+        , h2d(i)
+        , compute(c)
+        , d2h(o)
+        , e2e(e)
+    {
+    }
+
+    InferenceTime() = default;
+    InferenceTime(const InferenceTime&) = default;
+    InferenceTime(InferenceTime&&) = default;
+    InferenceTime& operator=(const InferenceTime&) = default;
+    InferenceTime& operator=(InferenceTime&&) = default;
+    ~InferenceTime() = default;
+
+    float enq{0};     // Enqueue
+    float h2d{0};     // Host to Device
+    float compute{0}; // Compute
+    float d2h{0};     // Device to Host
+    float e2e{0};     // end to end
+
+    // ideal latency
+    float latency() const
+    {
+        return h2d + compute + d2h;
+    }
+};
+
+//!
+//! \struct InferenceTrace
+//! \brief Measurement points in milliseconds
+//!
+struct InferenceTrace
+{
+    InferenceTrace(int s, float es, float ee, float is, float ie, float cs, float ce, float os, float oe)
+        : stream(s)
+        , enqStart(es)
+        , enqEnd(ee)
+        , h2dStart(is)
+        , h2dEnd(ie)
+        , computeStart(cs)
+        , computeEnd(ce)
+        , d2hStart(os)
+        , d2hEnd(oe)
+    {
+    }
+
+    InferenceTrace() = default;
+    InferenceTrace(const InferenceTrace&) = default;
+    InferenceTrace(InferenceTrace&&) = default;
+    InferenceTrace& operator=(const InferenceTrace&) = default;
+    InferenceTrace& operator=(InferenceTrace&&) = default;
+    ~InferenceTrace() = default;
+
+    int stream{0};
+    float enqStart{0};
+    float enqEnd{0};
+    float h2dStart{0};
+    float h2dEnd{0};
+    float computeStart{0};
+    float computeEnd{0};
+    float d2hStart{0};
+    float d2hEnd{0};
+};
+
+inline InferenceTime operator+(const InferenceTime& a, const InferenceTime& b)
+{
+    return InferenceTime(a.enq + b.enq, a.h2d + b.h2d, a.compute + b.compute, a.d2h + b.d2h, a.e2e + b.e2e);
+}
+
+inline InferenceTime operator+=(InferenceTime& a, const InferenceTime& b)
+{
+    return a = a + b;
+}
+
+//!
+//! \struct PerformanceResult
+//! \brief Performance result of a performance metric
+//!
+struct PerformanceResult
+{
+    float min{0};
+    float max{0};
+    float mean{0};
+    float median{0};
+    float percentile{0};
+};
+
+//!
+//! \brief Print benchmarking time and number of traces collected
+//!
+void printProlog(int warmups, int timings, float warmupMs, float walltime, std::ostream& os);
+
+//!
+//! \brief Print a timing trace
+//!
+void printTiming(const std::vector<InferenceTime>& timings, int runsPerAvg, std::ostream& os);
+
+//!
+//! \brief Print the performance summary of a trace
+//!
+void printEpilog(const std::vector<InferenceTime>& timings, float percentile, int batchSize, std::ostream& osInfo,
+    std::ostream& osWarning, std::ostream& osVerbose);
+
+//!
+//! \brief Get the result of a specific performance metric from a trace
+//!
+PerformanceResult getPerformanceResult(const std::vector<InferenceTime>& timings,
+    std::function<float(const InferenceTime&)> metricGetter, float percentile);
+
+//!
+//! \brief Print the explanations of the performance metrics printed in printEpilog() function.
+//!
+void printMetricExplanations(std::ostream& os);
+
+//!
+//! \brief Print and summarize a timing trace
+//!
+void printPerformanceReport(const std::vector<InferenceTrace>& trace, const ReportingOptions& reporting, float warmupMs,
+    int batchSize, std::ostream& osInfo, std::ostream& osWarning, std::ostream& osVerbose);
+
+//!
+//! \brief Export a timing trace to JSON file
+//!
+void exportJSONTrace(const std::vector<InferenceTrace>& trace, const std::string& fileName);
+
+//!
+//! \brief Print input tensors to stream
+//!
+void dumpInputs(const nvinfer1::IExecutionContext& context, const Bindings& bindings, std::ostream& os);
+
+//!
+//! \brief Print output tensors to stream
+//!
+void dumpOutputs(const nvinfer1::IExecutionContext& context, const Bindings& bindings, std::ostream& os);
+
+//!
+//! \brief Export output tensors to JSON file
+//!
+void exportJSONOutput(
+    const nvinfer1::IExecutionContext& context, const Bindings& bindings, const std::string& fileName, int32_t batch);
+
+//!
+//! \struct LayerProfile
+//! \brief Layer profile information
+//!
+struct LayerProfile
+{
+    std::string name;
+    float timeMs{0};
+};
+
+//!
+//! \class Profiler
+//! \brief Collect per-layer profile information, assuming times are reported in the same order
+//!
+class Profiler : public nvinfer1::IProfiler
+{
+
+public:
+    void reportLayerTime(const char* layerName, float timeMs) noexcept override;
+
+    void print(std::ostream& os) const noexcept;
+
+    //!
+    //! \brief Export a profile to JSON file
+    //!
+    void exportJSONProfile(const std::string& fileName) const noexcept;
+
+private:
+    float getTotalTime() const noexcept
+    {
+        const auto plusLayerTime = [](float accumulator, const LayerProfile& lp) { return accumulator + lp.timeMs; };
+        return std::accumulate(mLayers.begin(), mLayers.end(), 0.0, plusLayerTime);
+    }
+
+    std::vector<LayerProfile> mLayers;
+    std::vector<LayerProfile>::iterator mIterator{mLayers.begin()};
+    int mUpdatesCount{0};
+};
+
+} // namespace sample
+
+#endif // TRT_SAMPLE_REPORTING_H
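The InferenceTime operators above make per-iteration timings additive, and latency() isolates H2D + compute + D2H from the measured end-to-end time. A short aggregation sketch; the helper name and the assumption that the vector was filled by the benchmarking harness are illustrative:

    #include <vector>
    #include "sampleReporting.h"

    float meanLatencyMs(const std::vector<sample::InferenceTime>& timings)
    {
        sample::InferenceTime total;
        for (const auto& t : timings)
        {
            total += t;   // operator+= declared above sums the enq/h2d/compute/d2h/e2e fields
        }
        return timings.empty() ? 0.0F : total.latency() / timings.size();
    }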

+ 587 - 0
src/detection/CenterPoint-master/include/common/sampleUtils.h

@@ -0,0 +1,587 @@
+/*
+ * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef TRT_SAMPLE_UTILS_H
+#define TRT_SAMPLE_UTILS_H
+
+#include <fstream>
+#include <iostream>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <unordered_map>
+#include <vector>
+
+#include <cuda.h>
+#if CUDA_VERSION < 10000
+#include <half.h>
+#else
+#include <cuda_fp16.h>
+#endif
+
+#include "NvInfer.h"
+
+#include "common.h"
+#include "logger.h"
+#include "sampleDevice.h"
+#include "sampleOptions.h"
+
+namespace sample
+{
+
+inline int dataTypeSize(nvinfer1::DataType dataType)
+{
+    switch (dataType)
+    {
+    case nvinfer1::DataType::kINT32:
+    case nvinfer1::DataType::kFLOAT: return 4;
+    case nvinfer1::DataType::kHALF: return 2;
+    case nvinfer1::DataType::kBOOL:
+    case nvinfer1::DataType::kINT8: return 1;
+    }
+    return 0;
+}
+
+template <typename T>
+inline T roundUp(T m, T n)
+{
+    return ((m + n - 1) / n) * n;
+}
+
+inline int volume(const nvinfer1::Dims& d)
+{
+    return std::accumulate(d.d, d.d + d.nbDims, 1, std::multiplies<int>());
+}
+
+//! comps is the number of components in a vector. Ignored if vecDim < 0.
+inline int64_t volume(const nvinfer1::Dims& dims, const nvinfer1::Dims& strides, int vecDim, int comps, int batch)
+{
+    int maxNbElems = 1;
+    for (int i = 0; i < dims.nbDims; ++i)
+    {
+        // Get effective length of axis.
+        int d = dims.d[i];
+        // Any dimension is 0, it is an empty tensor.
+        if (d == 0)
+        {
+            return 0;
+        }
+        if (i == vecDim)
+        {
+            d = samplesCommon::divUp(d, comps);
+        }
+        maxNbElems = std::max(maxNbElems, d * strides.d[i]);
+    }
+    return static_cast<int64_t>(maxNbElems) * batch * (vecDim < 0 ? 1 : comps);
+}
+
+inline int64_t volume(nvinfer1::Dims dims, int vecDim, int comps, int batch)
+{
+    if (vecDim != -1)
+    {
+        dims.d[vecDim] = roundUp(dims.d[vecDim], comps);
+    }
+    return volume(dims) * std::max(batch, 1);
+}
+
+inline std::ostream& operator<<(std::ostream& os, const nvinfer1::Dims& dims)
+{
+    for (int i = 0; i < dims.nbDims; ++i)
+    {
+        os << (i ? "x" : "") << dims.d[i];
+    }
+    return os;
+}
+inline std::ostream& operator<<(std::ostream& os, const nvinfer1::WeightsRole role)
+{
+    switch (role)
+    {
+    case nvinfer1::WeightsRole::kKERNEL:
+    {
+        os << "Kernel";
+        break;
+    }
+    case nvinfer1::WeightsRole::kBIAS:
+    {
+        os << "Bias";
+        break;
+    }
+    case nvinfer1::WeightsRole::kSHIFT:
+    {
+        os << "Shift";
+        break;
+    }
+    case nvinfer1::WeightsRole::kSCALE:
+    {
+        os << "Scale";
+        break;
+    }
+    case nvinfer1::WeightsRole::kCONSTANT:
+    {
+        os << "Constant";
+        break;
+    }
+    case nvinfer1::WeightsRole::kANY:
+    {
+        os << "Any";
+        break;
+    }
+    }
+
+    return os;
+}
+
+inline std::ostream& operator<<(std::ostream& os, const std::vector<int>& vec)
+{
+    for (int i = 0, e = static_cast<int>(vec.size()); i < e; ++i)
+    {
+        os << (i ? "x" : "") << vec[i];
+    }
+    return os;
+}
+
+inline nvinfer1::Dims toDims(const std::vector<int>& vec)
+{
+    int limit = static_cast<int>(nvinfer1::Dims::MAX_DIMS);
+    if (static_cast<int>(vec.size()) > limit)
+    {
+        sample::gLogWarning << "Vector too long, only first 8 elements are used in dimension." << std::endl;
+    }
+    // Pick first nvinfer1::Dims::MAX_DIMS elements
+    nvinfer1::Dims dims{std::min(static_cast<int>(vec.size()), limit), {}};
+    std::copy_n(vec.begin(), dims.nbDims, std::begin(dims.d));
+    return dims;
+}
+
+template <typename T>
+inline void fillBuffer(void* buffer, int64_t volume, T min, T max)
+{
+    T* typedBuffer = static_cast<T*>(buffer);
+    std::default_random_engine engine;
+    if (std::is_integral<T>::value)
+    {
+        std::uniform_int_distribution<int> distribution(min, max);
+        auto generator = [&engine, &distribution]() { return static_cast<T>(distribution(engine)); };
+        std::generate(typedBuffer, typedBuffer + volume, generator);
+    }
+    else
+    {
+        std::uniform_real_distribution<float> distribution(min, max);
+        auto generator = [&engine, &distribution]() { return static_cast<T>(distribution(engine)); };
+        std::generate(typedBuffer, typedBuffer + volume, generator);
+    }
+}
+
+// Specialization needed for custom type __half
+template <typename H>
+inline void fillBufferHalf(void* buffer, int64_t volume, H min, H max)
+{
+    H* typedBuffer = static_cast<H*>(buffer);
+    std::default_random_engine engine;
+    std::uniform_real_distribution<float> distribution(min, max);
+    auto generator = [&engine, &distribution]() { return static_cast<H>(distribution(engine)); };
+    std::generate(typedBuffer, typedBuffer + volume, generator);
+}
+template <>
+#if CUDA_VERSION < 10000
+inline void fillBuffer<half_float::half>(void* buffer, int64_t volume, half_float::half min, half_float::half max)
+#else
+inline void fillBuffer<__half>(void* buffer, int64_t volume, __half min, __half max)
+#endif
+{
+    fillBufferHalf(buffer, volume, min, max);
+}
+
+template <typename T>
+inline void dumpBuffer(const void* buffer, const std::string& separator, std::ostream& os, const Dims& dims,
+    const Dims& strides, int32_t vectorDim, int32_t spv)
+{
+    const int64_t volume = std::accumulate(dims.d, dims.d + dims.nbDims, 1, std::multiplies<int64_t>());
+    const T* typedBuffer = static_cast<const T*>(buffer);
+    std::string sep;
+    for (int64_t v = 0; v < volume; ++v)
+    {
+        int64_t curV = v;
+        int32_t dataOffset = 0;
+        for (int32_t dimIndex = dims.nbDims - 1; dimIndex >= 0; --dimIndex)
+        {
+            int32_t dimVal = curV % dims.d[dimIndex];
+            if (dimIndex == vectorDim)
+            {
+                dataOffset += (dimVal / spv) * strides.d[dimIndex] * spv + dimVal % spv;
+            }
+            else
+            {
+                dataOffset += dimVal * strides.d[dimIndex] * (vectorDim == -1 ? 1 : spv);
+            }
+            curV /= dims.d[dimIndex];
+            ASSERT(curV >= 0);
+        }
+
+        os << sep << typedBuffer[dataOffset];
+        sep = separator;
+    }
+}
+
+struct Binding
+{
+    bool isInput{false};
+    MirroredBuffer buffer;
+    int64_t volume{0};
+    nvinfer1::DataType dataType{nvinfer1::DataType::kFLOAT};
+
+    void fill(const std::string& fileName)
+    {
+        std::ifstream file(fileName, std::ios::in | std::ios::binary);
+        if (file.is_open())
+        {
+            file.read(static_cast<char*>(buffer.getHostBuffer()), buffer.getSize());
+            file.close();
+        }
+    }
+
+    void fill()
+    {
+        switch (dataType)
+        {
+        case nvinfer1::DataType::kBOOL:
+        {
+            fillBuffer<bool>(buffer.getHostBuffer(), volume, 0, 1);
+            break;
+        }
+        case nvinfer1::DataType::kINT32:
+        {
+            fillBuffer<int32_t>(buffer.getHostBuffer(), volume, -128, 127);
+            break;
+        }
+        case nvinfer1::DataType::kINT8:
+        {
+            fillBuffer<int8_t>(buffer.getHostBuffer(), volume, -128, 127);
+            break;
+        }
+        case nvinfer1::DataType::kFLOAT:
+        {
+            fillBuffer<float>(buffer.getHostBuffer(), volume, -1.0, 1.0);
+            break;
+        }
+        case nvinfer1::DataType::kHALF:
+        {
+#if CUDA_VERSION < 10000
+            fillBuffer<half_float::half>(buffer.getHostBuffer(), volume, static_cast<half_float::half>(-1.0),
+                static_cast<half_float::half>(1.0));
+#else
+            fillBuffer<__half>(buffer.getHostBuffer(), volume, -1.0, 1.0);
+#endif
+            break;
+        }
+        }
+    }
+
+    void dump(std::ostream& os, Dims dims, Dims strides, int32_t vectorDim, int32_t spv,
+        const std::string separator = " ") const
+    {
+        switch (dataType)
+        {
+        case nvinfer1::DataType::kBOOL:
+        {
+            dumpBuffer<bool>(buffer.getHostBuffer(), separator, os, dims, strides, vectorDim, spv);
+            break;
+        }
+        case nvinfer1::DataType::kINT32:
+        {
+            dumpBuffer<int32_t>(buffer.getHostBuffer(), separator, os, dims, strides, vectorDim, spv);
+            break;
+        }
+        case nvinfer1::DataType::kINT8:
+        {
+            dumpBuffer<int8_t>(buffer.getHostBuffer(), separator, os, dims, strides, vectorDim, spv);
+            break;
+        }
+        case nvinfer1::DataType::kFLOAT:
+        {
+            dumpBuffer<float>(buffer.getHostBuffer(), separator, os, dims, strides, vectorDim, spv);
+            break;
+        }
+        case nvinfer1::DataType::kHALF:
+        {
+#if CUDA_VERSION < 10000
+            dumpBuffer<half_float::half>(buffer.getHostBuffer(), separator, os, dims, strides, vectorDim, spv);
+#else
+            dumpBuffer<__half>(buffer.getHostBuffer(), separator, os, dims, strides, vectorDim, spv);
+#endif
+            break;
+        }
+        }
+    }
+};
+
+class Bindings
+{
+public:
+    void addBinding(int b, const std::string& name, bool isInput, int64_t volume, nvinfer1::DataType dataType,
+        const std::string& fileName = "")
+    {
+        while (mBindings.size() <= static_cast<size_t>(b))
+        {
+            mBindings.emplace_back();
+            mDevicePointers.emplace_back();
+        }
+        mNames[name] = b;
+        mBindings[b].isInput = isInput;
+        // Some memory allocators return nullptr when allocating zero bytes, but TensorRT requires a non-null ptr
+        // even for empty tensors, so allocate a dummy byte.
+        if (volume == 0)
+        {
+            mBindings[b].buffer.allocate(1);
+        }
+        else
+        {
+            mBindings[b].buffer.allocate(static_cast<size_t>(volume) * static_cast<size_t>(dataTypeSize(dataType)));
+        }
+        mBindings[b].volume = volume;
+        mBindings[b].dataType = dataType;
+        mDevicePointers[b] = mBindings[b].buffer.getDeviceBuffer();
+        if (isInput)
+        {
+            if (fileName.empty())
+            {
+                fill(b);
+            }
+            else
+            {
+                fill(b, fileName);
+            }
+        }
+    }
+
+    void** getDeviceBuffers()
+    {
+        return mDevicePointers.data();
+    }
+
+    void transferInputToDevice(TrtCudaStream& stream)
+    {
+        for (auto& b : mNames)
+        {
+            if (mBindings[b.second].isInput)
+            {
+                mBindings[b.second].buffer.hostToDevice(stream);
+            }
+        }
+    }
+
+    void transferOutputToHost(TrtCudaStream& stream)
+    {
+        for (auto& b : mNames)
+        {
+            if (!mBindings[b.second].isInput)
+            {
+                mBindings[b.second].buffer.deviceToHost(stream);
+            }
+        }
+    }
+
+    void fill(int binding, const std::string& fileName)
+    {
+        mBindings[binding].fill(fileName);
+    }
+
+    void fill(int binding)
+    {
+        mBindings[binding].fill();
+    }
+
+    void dumpBindingDimensions(int binding, const nvinfer1::IExecutionContext& context, std::ostream& os) const
+    {
+        const auto dims = context.getBindingDimensions(binding);
+        // Do not add a newline terminator, because the caller may be outputting a JSON string.
+        os << dims;
+    }
+
+    void dumpBindingValues(const nvinfer1::IExecutionContext& context, int binding, std::ostream& os,
+        const std::string& separator = " ", int32_t batch = 1) const
+    {
+        Dims dims = context.getBindingDimensions(binding);
+        Dims strides = context.getStrides(binding);
+        int32_t vectorDim = context.getEngine().getBindingVectorizedDim(binding);
+        const int32_t spv = context.getEngine().getBindingComponentsPerElement(binding);
+
+        if (context.getEngine().hasImplicitBatchDimension())
+        {
+            auto insertN = [](Dims& d, int32_t bs) {
+                const int32_t nbDims = d.nbDims;
+                ASSERT(nbDims < Dims::MAX_DIMS);
+                std::copy_backward(&d.d[0], &d.d[nbDims], &d.d[nbDims + 1]);
+                d.d[0] = bs;
+                d.nbDims = nbDims + 1;
+            };
+            int32_t batchStride = 0;
+            for (int32_t i = 0; i < strides.nbDims; ++i)
+            {
+                if (strides.d[i] * dims.d[i] > batchStride)
+                {
+                    batchStride = strides.d[i] * dims.d[i];
+                }
+            }
+            insertN(dims, batch);
+            insertN(strides, batchStride);
+            vectorDim = (vectorDim == -1) ? -1 : vectorDim + 1;
+        }
+
+        mBindings[binding].dump(os, dims, strides, vectorDim, spv, separator);
+    }
+
+    void dumpInputs(const nvinfer1::IExecutionContext& context, std::ostream& os) const
+    {
+        auto isInput = [](const Binding& b) { return b.isInput; };
+        dumpBindings(context, isInput, os);
+    }
+
+    void dumpOutputs(const nvinfer1::IExecutionContext& context, std::ostream& os) const
+    {
+        auto isOutput = [](const Binding& b) { return !b.isInput; };
+        dumpBindings(context, isOutput, os);
+    }
+
+    void dumpBindings(const nvinfer1::IExecutionContext& context, std::ostream& os) const
+    {
+        auto all = [](const Binding& b) { return true; };
+        dumpBindings(context, all, os);
+    }
+
+    void dumpBindings(
+        const nvinfer1::IExecutionContext& context, bool (*predicate)(const Binding& b), std::ostream& os) const
+    {
+        for (const auto& n : mNames)
+        {
+            const auto binding = n.second;
+            if (predicate(mBindings[binding]))
+            {
+                os << n.first << ": (";
+                dumpBindingDimensions(binding, context, os);
+                os << ")" << std::endl;
+
+                dumpBindingValues(context, binding, os);
+                os << std::endl;
+            }
+        }
+    }
+
+    std::unordered_map<std::string, int> getInputBindings() const
+    {
+        auto isInput = [](const Binding& b) { return b.isInput; };
+        return getBindings(isInput);
+    }
+
+    std::unordered_map<std::string, int> getOutputBindings() const
+    {
+        auto isOutput = [](const Binding& b) { return !b.isInput; };
+        return getBindings(isOutput);
+    }
+
+    std::unordered_map<std::string, int> getBindings() const
+    {
+        auto all = [](const Binding& b) { return true; };
+        return getBindings(all);
+    }
+
+    std::unordered_map<std::string, int> getBindings(bool (*predicate)(const Binding& b)) const
+    {
+        std::unordered_map<std::string, int> bindings;
+        for (const auto& n : mNames)
+        {
+            const auto binding = n.second;
+            if (predicate(mBindings[binding]))
+            {
+                bindings.insert(n);
+            }
+        }
+        return bindings;
+    }
+
+private:
+    std::unordered_map<std::string, int> mNames;
+    std::vector<Binding> mBindings;
+    std::vector<void*> mDevicePointers;
+};
+
+template <typename T>
+struct TrtDestroyer
+{
+    void operator()(T* t)
+    {
+        t->destroy();
+    }
+};
+
+template <typename T>
+using TrtUniquePtr = std::unique_ptr<T, TrtDestroyer<T>>;
+
+inline bool broadcastIOFormats(const std::vector<IOFormat>& formats, size_t nbBindings, bool isInput = true)
+{
+    bool broadcast = formats.size() == 1;
+    bool validFormatsCount = broadcast || (formats.size() == nbBindings);
+    if (!formats.empty() && !validFormatsCount)
+    {
+        if (isInput)
+        {
+            throw std::invalid_argument(
+                "The number of inputIOFormats must match network's inputs or be one for broadcasting.");
+        }
+        else
+        {
+            throw std::invalid_argument(
+                "The number of outputIOFormats must match network's outputs or be one for broadcasting.");
+        }
+    }
+    return broadcast;
+}
+
+inline std::vector<char> loadTimingCacheFile(const std::string inFileName)
+{
+    std::ifstream iFile(inFileName, std::ios::in | std::ios::binary);
+    if (!iFile)
+    {
+        sample::gLogWarning << "Could not read timing cache from: " << inFileName
+                            << ". A new timing cache will be generated and written." << std::endl;
+        return std::vector<char>();
+    }
+    iFile.seekg(0, std::ifstream::end);
+    size_t fsize = iFile.tellg();
+    iFile.seekg(0, std::ifstream::beg);
+    std::vector<char> content(fsize);
+    iFile.read(content.data(), fsize);
+    iFile.close();
+    sample::gLogInfo << "Loaded " << fsize << " bytes of timing cache from " << inFileName << std::endl;
+    return content;
+}
+
+inline void saveTimingCacheFile(const std::string outFileName, const IHostMemory* blob)
+{
+    std::ofstream oFile(outFileName, std::ios::out | std::ios::binary);
+    if (!oFile)
+    {
+        sample::gLogWarning << "Could not write timing cache to: " << outFileName << std::endl;
+        return;
+    }
+    oFile.write((char*) blob->data(), blob->size());
+    oFile.close();
+    sample::gLogInfo << "Saved " << blob->size() << " bytes of timing cache to " << outFileName << std::endl;
+}
+
+} // namespace sample
+
+#endif // TRT_SAMPLE_UTILS_H
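The Bindings class above keeps one MirroredBuffer per engine binding and random-fills the inputs in addBinding(). A sketch of populating it from a deserialized engine with the TensorRT 7-style binding API; the helper name and the already-created engine, context and stream objects are assumptions:

    #include "sampleUtils.h"

    void setupBindings(const nvinfer1::ICudaEngine& engine, const nvinfer1::IExecutionContext& context,
                       sample::Bindings& bindings, sample::TrtCudaStream& stream)
    {
        for (int b = 0; b < engine.getNbBindings(); ++b)
        {
            const nvinfer1::Dims dims = context.getBindingDimensions(b);
            bindings.addBinding(b, engine.getBindingName(b), engine.bindingIsInput(b),
                                sample::volume(dims), engine.getBindingDataType(b));
        }
        bindings.transferInputToDevice(stream);   // async H2D copy for every input binding
        // ... enqueue the network here, passing bindings.getDeviceBuffers() ...
        bindings.transferOutputToHost(stream);    // async D2H copy for every output binding
    }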

+ 568 - 0
src/detection/CenterPoint-master/include/common/windows/getopt.c

@@ -0,0 +1,568 @@
+/*	$OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $	*/
+/*	$NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $	*/
+
+/*
+ * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F39502-99-1-0512.
+ */
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dieter Baron and Thomas Klausner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <getopt.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <windows.h>
+
+#define REPLACE_GETOPT /* use this getopt as the system getopt(3) */
+
+#ifdef REPLACE_GETOPT
+int opterr = 1;   /* if error message should be printed */
+int optind = 1;   /* index into parent argv vector */
+int optopt = '?'; /* character checked for validity */
+#undef optreset   /* see getopt.h */
+#define optreset __mingw_optreset
+int optreset; /* reset getopt */
+char* optarg; /* argument associated with option */
+#endif
+
+#define PRINT_ERROR ((opterr) && (*options != ':'))
+
+#define FLAG_PERMUTE 0x01  /* permute non-options to the end of argv */
+#define FLAG_ALLARGS 0x02  /* treat non-options as args to option "-1" */
+#define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */
+
+/* return values */
+#define BADCH (int) '?'
+#define BADARG ((*options == ':') ? (int) ':' : (int) '?')
+#define INORDER (int) 1
+
+#ifndef __CYGWIN__
+#define __progname __argv[0]
+#else
+extern char __declspec(dllimport) * __progname;
+#endif
+
+#ifdef __CYGWIN__
+static char EMSG[] = "";
+#else
+#define EMSG ""
+#endif
+
+static int getopt_internal(int, char* const*, const char*, const struct option*, int*, int);
+static int parse_long_options(char* const*, const char*, const struct option*, int*, int);
+static int gcd(int, int);
+static void permute_args(int, int, int, char* const*);
+
+static char* place = EMSG; /* option letter processing */
+
+/* XXX: set optreset to 1 rather than these two */
+static int nonopt_start = -1; /* first non option argument (for permute) */
+static int nonopt_end = -1;   /* first option after non options (for permute) */
+
+/* Error messages */
+static const char recargchar[] = "option requires an argument -- %c";
+static const char recargstring[] = "option requires an argument -- %s";
+static const char ambig[] = "ambiguous option -- %.*s";
+static const char noarg[] = "option doesn't take an argument -- %.*s";
+static const char illoptchar[] = "unknown option -- %c";
+static const char illoptstring[] = "unknown option -- %s";
+
+static void _vwarnx(const char* fmt, va_list ap)
+{
+    (void) fprintf(stderr, "%s: ", __progname);
+    if (fmt != NULL)
+        (void) vfprintf(stderr, fmt, ap);
+    (void) fprintf(stderr, "\n");
+}
+
+static void warnx(const char* fmt, ...)
+{
+    va_list ap;
+    va_start(ap, fmt);
+    _vwarnx(fmt, ap);
+    va_end(ap);
+}
+
+/*
+ * Compute the greatest common divisor of a and b.
+ */
+static int gcd(int a, int b)
+{
+    int c;
+
+    c = a % b;
+    while (c != 0)
+    {
+        a = b;
+        b = c;
+        c = a % b;
+    }
+
+    return (b);
+}
+
+/*
+ * Exchange the block from nonopt_start to nonopt_end with the block
+ * from nonopt_end to opt_end (keeping the same order of arguments
+ * in each block).
+ */
+static void permute_args(int panonopt_start, int panonopt_end, int opt_end, char* const* nargv)
+{
+    int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos;
+    char* swap;
+
+    /*
+     * compute lengths of blocks and number and size of cycles
+     */
+    nnonopts = panonopt_end - panonopt_start;
+    nopts = opt_end - panonopt_end;
+    ncycle = gcd(nnonopts, nopts);
+    cyclelen = (opt_end - panonopt_start) / ncycle;
+
+    for (i = 0; i < ncycle; i++)
+    {
+        cstart = panonopt_end + i;
+        pos = cstart;
+        for (j = 0; j < cyclelen; j++)
+        {
+            if (pos >= panonopt_end)
+                pos -= nnonopts;
+            else
+                pos += nopts;
+            swap = nargv[pos];
+            /* LINTED const cast */
+            ((char**) nargv)[pos] = nargv[cstart];
+            /* LINTED const cast */
+            ((char**) nargv)[cstart] = swap;
+        }
+    }
+}
+
+/*
+ * parse_long_options --
+ *	Parse long options in argc/argv argument vector.
+ * Returns -1 if short_too is set and the option does not match long_options.
+ */
+static int parse_long_options(
+    char* const* nargv, const char* options, const struct option* long_options, int* idx, int short_too)
+{
+    char *current_argv, *has_equal;
+    size_t current_argv_len;
+    int i, ambiguous, match;
+
+#define IDENTICAL_INTERPRETATION(_x, _y)                                                                               \
+    (long_options[(_x)].has_arg == long_options[(_y)].has_arg && long_options[(_x)].flag == long_options[(_y)].flag    \
+        && long_options[(_x)].val == long_options[(_y)].val)
+
+    current_argv = place;
+    match = -1;
+    ambiguous = 0;
+
+    optind++;
+
+    if ((has_equal = strchr(current_argv, '=')) != NULL)
+    {
+        /* argument found (--option=arg) */
+        current_argv_len = has_equal - current_argv;
+        has_equal++;
+    }
+    else
+        current_argv_len = strlen(current_argv);
+
+    for (i = 0; long_options[i].name; i++)
+    {
+        /* find matching long option */
+        if (strncmp(current_argv, long_options[i].name, current_argv_len))
+            continue;
+
+        if (strlen(long_options[i].name) == current_argv_len)
+        {
+            /* exact match */
+            match = i;
+            ambiguous = 0;
+            break;
+        }
+        /*
+         * If this is a known short option, don't allow
+         * a partial match of a single character.
+         */
+        if (short_too && current_argv_len == 1)
+            continue;
+
+        if (match == -1) /* partial match */
+            match = i;
+        else if (!IDENTICAL_INTERPRETATION(i, match))
+            ambiguous = 1;
+    }
+    if (ambiguous)
+    {
+        /* ambiguous abbreviation */
+        if (PRINT_ERROR)
+            warnx(ambig, (int) current_argv_len, current_argv);
+        optopt = 0;
+        return (BADCH);
+    }
+    if (match != -1)
+    { /* option found */
+        if (long_options[match].has_arg == no_argument && has_equal)
+        {
+            if (PRINT_ERROR)
+                warnx(noarg, (int) current_argv_len, current_argv);
+            /*
+             * XXX: GNU sets optopt to val regardless of flag
+             */
+            if (long_options[match].flag == NULL)
+                optopt = long_options[match].val;
+            else
+                optopt = 0;
+            return (BADARG);
+        }
+        if (long_options[match].has_arg == required_argument || long_options[match].has_arg == optional_argument)
+        {
+            if (has_equal)
+                optarg = has_equal;
+            else if (long_options[match].has_arg == required_argument)
+            {
+                /*
+                 * optional argument doesn't use next nargv
+                 */
+                optarg = nargv[optind++];
+            }
+        }
+        if ((long_options[match].has_arg == required_argument) && (optarg == NULL))
+        {
+            /*
+             * Missing argument; leading ':' indicates no error
+             * should be generated.
+             */
+            if (PRINT_ERROR)
+                warnx(recargstring, current_argv);
+            /*
+             * XXX: GNU sets optopt to val regardless of flag
+             */
+            if (long_options[match].flag == NULL)
+                optopt = long_options[match].val;
+            else
+                optopt = 0;
+            --optind;
+            return (BADARG);
+        }
+    }
+    else
+    { /* unknown option */
+        if (short_too)
+        {
+            --optind;
+            return (-1);
+        }
+        if (PRINT_ERROR)
+            warnx(illoptstring, current_argv);
+        optopt = 0;
+        return (BADCH);
+    }
+    if (idx)
+        *idx = match;
+    if (long_options[match].flag)
+    {
+        *long_options[match].flag = long_options[match].val;
+        return (0);
+    }
+    else
+        return (long_options[match].val);
+#undef IDENTICAL_INTERPRETATION
+}
+
+/*
+ * getopt_internal --
+ *	Parse argc/argv argument vector.  Called by user level routines.
+ */
+static int getopt_internal(
+    int nargc, char* const* nargv, const char* options, const struct option* long_options, int* idx, int flags)
+{
+    const char* oli; /* option letter list index */
+    int optchar, short_too;
+    static int posixly_correct = -1;
+
+    if (options == NULL)
+        return (-1);
+
+    /*
+     * XXX Some GNU programs (like cvs) set optind to 0 instead of
+     * XXX using optreset.  Work around this braindamage.
+     */
+    if (optind == 0)
+        optind = optreset = 1;
+
+    /*
+     * Disable GNU extensions if POSIXLY_CORRECT is set or options
+     * string begins with a '+'.
+     *
+     * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or
+     *                 optreset != 0 for GNU compatibility.
+     */
+    if (posixly_correct == -1 || optreset != 0)
+        posixly_correct = (getenv("POSIXLY_CORRECT") != NULL);
+    if (*options == '-')
+        flags |= FLAG_ALLARGS;
+    else if (posixly_correct || *options == '+')
+        flags &= ~FLAG_PERMUTE;
+    if (*options == '+' || *options == '-')
+        options++;
+
+    optarg = NULL;
+    if (optreset)
+        nonopt_start = nonopt_end = -1;
+start:
+    if (optreset || !*place)
+    { /* update scanning pointer */
+        optreset = 0;
+        if (optind >= nargc)
+        { /* end of argument vector */
+            place = EMSG;
+            if (nonopt_end != -1)
+            {
+                /* do permutation, if we have to */
+                permute_args(nonopt_start, nonopt_end, optind, nargv);
+                optind -= nonopt_end - nonopt_start;
+            }
+            else if (nonopt_start != -1)
+            {
+                /*
+                 * If we skipped non-options, set optind
+                 * to the first of them.
+                 */
+                optind = nonopt_start;
+            }
+            nonopt_start = nonopt_end = -1;
+            return (-1);
+        }
+        if (*(place = nargv[optind]) != '-' || (place[1] == '\0' && strchr(options, '-') == NULL))
+        {
+            place = EMSG; /* found non-option */
+            if (flags & FLAG_ALLARGS)
+            {
+                /*
+                 * GNU extension:
+                 * return non-option as argument to option 1
+                 */
+                optarg = nargv[optind++];
+                return (INORDER);
+            }
+            if (!(flags & FLAG_PERMUTE))
+            {
+                /*
+                 * If no permutation wanted, stop parsing
+                 * at first non-option.
+                 */
+                return (-1);
+            }
+            /* do permutation */
+            if (nonopt_start == -1)
+                nonopt_start = optind;
+            else if (nonopt_end != -1)
+            {
+                permute_args(nonopt_start, nonopt_end, optind, nargv);
+                nonopt_start = optind - (nonopt_end - nonopt_start);
+                nonopt_end = -1;
+            }
+            optind++;
+            /* process next argument */
+            goto start;
+        }
+        if (nonopt_start != -1 && nonopt_end == -1)
+            nonopt_end = optind;
+
+        /*
+         * If we have "-" do nothing, if "--" we are done.
+         */
+        if (place[1] != '\0' && *++place == '-' && place[1] == '\0')
+        {
+            optind++;
+            place = EMSG;
+            /*
+             * We found an option (--), so if we skipped
+             * non-options, we have to permute.
+             */
+            if (nonopt_end != -1)
+            {
+                permute_args(nonopt_start, nonopt_end, optind, nargv);
+                optind -= nonopt_end - nonopt_start;
+            }
+            nonopt_start = nonopt_end = -1;
+            return (-1);
+        }
+    }
+
+    /*
+     * Check long options if:
+     *  1) we were passed some
+     *  2) the arg is not just "-"
+     *  3) either the arg starts with -- we are getopt_long_only()
+     */
+    if (long_options != NULL && place != nargv[optind] && (*place == '-' || (flags & FLAG_LONGONLY)))
+    {
+        short_too = 0;
+        if (*place == '-')
+            place++; /* --foo long option */
+        else if (*place != ':' && strchr(options, *place) != NULL)
+            short_too = 1; /* could be short option too */
+
+        optchar = parse_long_options(nargv, options, long_options, idx, short_too);
+        if (optchar != -1)
+        {
+            place = EMSG;
+            return (optchar);
+        }
+    }
+
+    if ((optchar = (int) *place++) == (int) ':' || (optchar == (int) '-' && *place != '\0')
+        || (oli = strchr(options, optchar)) == NULL)
+    {
+        /*
+         * If the user specified "-" and  '-' isn't listed in
+         * options, return -1 (non-option) as per POSIX.
+         * Otherwise, it is an unknown option character (or ':').
+         */
+        if (optchar == (int) '-' && *place == '\0')
+            return (-1);
+        if (!*place)
+            ++optind;
+        if (PRINT_ERROR)
+            warnx(illoptchar, optchar);
+        optopt = optchar;
+        return (BADCH);
+    }
+    if (long_options != NULL && optchar == 'W' && oli[1] == ';')
+    {
+        /* -W long-option */
+        if (*place) /* no space */
+            /* NOTHING */;
+        else if (++optind >= nargc)
+        { /* no arg */
+            place = EMSG;
+            if (PRINT_ERROR)
+                warnx(recargchar, optchar);
+            optopt = optchar;
+            return (BADARG);
+        }
+        else /* white space */
+            place = nargv[optind];
+        optchar = parse_long_options(nargv, options, long_options, idx, 0);
+        place = EMSG;
+        return (optchar);
+    }
+    if (*++oli != ':')
+    { /* doesn't take argument */
+        if (!*place)
+            ++optind;
+    }
+    else
+    { /* takes (optional) argument */
+        optarg = NULL;
+        if (*place) /* no white space */
+            optarg = place;
+        else if (oli[1] != ':')
+        { /* arg not optional */
+            if (++optind >= nargc)
+            { /* no arg */
+                place = EMSG;
+                if (PRINT_ERROR)
+                    warnx(recargchar, optchar);
+                optopt = optchar;
+                return (BADARG);
+            }
+            else
+                optarg = nargv[optind];
+        }
+        place = EMSG;
+        ++optind;
+    }
+    /* dump back option letter */
+    return (optchar);
+}
+
+#ifdef REPLACE_GETOPT
+/*
+ * getopt --
+ *	Parse argc/argv argument vector.
+ *
+ * [eventually this will replace the BSD getopt]
+ */
+int getopt(int nargc, char* const* nargv, const char* options)
+{
+
+    /*
+     * We don't pass FLAG_PERMUTE to getopt_internal() since
+     * the BSD getopt(3) (unlike GNU) has never done this.
+     *
+     * Furthermore, since many privileged programs call getopt()
+     * before dropping privileges it makes sense to keep things
+     * as simple (and bug-free) as possible.
+     */
+    return (getopt_internal(nargc, nargv, options, NULL, NULL, 0));
+}
+#endif /* REPLACE_GETOPT */
+
+/*
+ * getopt_long --
+ *	Parse argc/argv argument vector.
+ */
+int getopt_long(int nargc, char* const* nargv, const char* options, const struct option* long_options, int* idx)
+{
+
+    return (getopt_internal(nargc, nargv, options, long_options, idx, FLAG_PERMUTE));
+}
+
+/*
+ * getopt_long_only --
+ *	Parse argc/argv argument vector.
+ */
+int getopt_long_only(int nargc, char* const* nargv, const char* options, const struct option* long_options, int* idx)
+{
+
+    return (getopt_internal(nargc, nargv, options, long_options, idx, FLAG_PERMUTE | FLAG_LONGONLY));
+}
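The three entry points above implement the standard getopt(3)/getopt_long(3) contract. A sketch of the calling convention; the option table is illustrative only:

    #include <getopt.h>
    #include <stdio.h>

    int main(int argc, char* const argv[])
    {
        static const struct option longOpts[] = {
            {"model",   required_argument, NULL, 'm'},
            {"verbose", no_argument,       NULL, 'v'},
            {NULL, 0, NULL, 0}};

        int ch;
        while ((ch = getopt_long(argc, argv, "m:v", longOpts, NULL)) != -1)
        {
            switch (ch)
            {
            case 'm': printf("model = %s\n", optarg); break;  /* optarg points at the value */
            case 'v': printf("verbose\n"); break;
            default:  return 1;  /* '?': a diagnostic was already printed when opterr != 0 */
            }
        }
        return 0;
    }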

+ 107 - 0
src/detection/CenterPoint-master/include/common/windows/getopt.h

@@ -0,0 +1,107 @@
+#ifndef __GETOPT_H__
+/**
+ * DISCLAIMER
+ * This file has no copyright assigned and is placed in the Public Domain.
+ * This file is a part of the w64 mingw-runtime package.
+ *
+ * The w64 mingw-runtime package and its code is distributed in the hope that it
+ * will be useful but WITHOUT ANY WARRANTY.  ALL WARRANTIES, EXPRESSED OR
+ * IMPLIED ARE HEREBY DISCLAIMED.  This includes but is not limited to
+ * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#define __GETOPT_H__
+
+/* All the headers include this file. */
+#include <crtdefs.h>
+
+#if defined(WINGETOPT_SHARED_LIB)
+#if defined(BUILDING_WINGETOPT_DLL)
+#define WINGETOPT_API __declspec(dllexport)
+#else
+#define WINGETOPT_API __declspec(dllimport)
+#endif
+#else
+#define WINGETOPT_API
+#endif
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+    WINGETOPT_API extern int optind; /* index of first non-option in argv      */
+    WINGETOPT_API extern int optopt; /* single option character, as parsed     */
+    WINGETOPT_API extern int opterr; /* flag to enable built-in diagnostics... */
+    /* (user may set to zero, to suppress)    */
+
+    WINGETOPT_API extern char* optarg; /* pointer to argument of current option  */
+
+    extern int getopt(int nargc, char* const* nargv, const char* options);
+
+#ifdef _BSD_SOURCE
+/*
+ * BSD adds the non-standard `optreset' feature, for reinitialisation
+ * of `getopt' parsing.  We support this feature, for applications which
+ * proclaim their BSD heritage, before including this header; however,
+ * to maintain portability, developers are advised to avoid it.
+ */
+#define optreset __mingw_optreset
+    extern int optreset;
+#endif
+#ifdef __cplusplus
+}
+#endif
+/*
+ * POSIX requires the `getopt' API to be specified in `unistd.h';
+ * thus, `unistd.h' includes this header.  However, we do not want
+ * to expose the `getopt_long' or `getopt_long_only' APIs, when
+ * included in this manner.  Thus, close the standard __GETOPT_H__
+ * declarations block, and open an additional __GETOPT_LONG_H__
+ * specific block, only when *not* __UNISTD_H_SOURCED__, in which
+ * to declare the extended API.
+ */
+#endif /* !defined(__GETOPT_H__) */
+
+#if !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__)
+#define __GETOPT_LONG_H__
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+    struct option /* specification for a long form option...	*/
+    {
+        const char* name; /* option name, without leading hyphens */
+        int has_arg;      /* does it take an argument?		*/
+        int* flag;        /* where to save its status, or NULL	*/
+        int val;          /* its associated status value		*/
+    };
+
+    enum /* permitted values for its `has_arg' field...	*/
+    {
+        no_argument = 0,   /* option never takes an argument	*/
+        required_argument, /* option always requires an argument	*/
+        optional_argument  /* option may take an argument		*/
+    };
+
+    extern int getopt_long(
+        int nargc, char* const* nargv, const char* options, const struct option* long_options, int* idx);
+    extern int getopt_long_only(
+        int nargc, char* const* nargv, const char* options, const struct option* long_options, int* idx);
+/*
+ * Previous MinGW implementation had...
+ */
+#ifndef HAVE_DECL_GETOPT
+/*
+ * ...for the long form API only; keep this for compatibility.
+ */
+#define HAVE_DECL_GETOPT 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__) */

+ 119 - 0
src/detection/CenterPoint-master/include/config.h

@@ -0,0 +1,119 @@
+#ifndef __CENTERNET_CONFIG_H__
+#define __CENTERNET_CONFIG_H__
+
+// ========================================WAYMO CENTERPOINT CONFIG======================================== 
+// point size
+#define MAX_POINTS 220000
+#define POINT_DIM 5
+
+// pillar size
+#define X_STEP 0.32f
+#define Y_STEP 0.32f
+#define X_MIN -74.88f
+#define X_MAX 74.88f
+#define Y_MIN -74.88f
+#define Y_MAX 74.88f
+#define Z_MIN -2.0f
+#define Z_MAX 4.0f
+
+//#define X_CENTER_MIN -80.0f
+//#define X_CENTER_MAX 80.0f
+//#define Y_CENTER_MIN -80.0f
+//#define Y_CENTER_MAX 80.0f
+//#define Z_CENTER_MIN -10.0f
+//#define Z_CENTER_MAX 10.0f
+#define X_CENTER_MIN -10.0f
+#define X_CENTER_MAX 10.0f
+#define Y_CENTER_MIN -10.0f
+#define Y_CENTER_MAX 10.0f
+#define Z_CENTER_MIN -5.0f
+#define Z_CENTER_MAX 5.0f
+
+#define PI 3.141592653f
+// parameters for preprocess
+#define BEV_W 468
+#define BEV_H 468
+#define MAX_PILLARS 32000 //20000 //32000
+#define MAX_PIONT_IN_PILLARS 20
+#define FEATURE_NUM 10
+#define PFE_OUTPUT_DIM 64
+#define THREAD_NUM 4
+// parameters for postprocess
+#define SCORE_THREAHOLD 0.1f
+#define NMS_THREAHOLD 0.1f
+//#define INPUT_NMS_MAX_SIZE 4096
+#define INPUT_NMS_MAX_SIZE 1000
+#define OUTPUT_NMS_MAX_SIZE 500
+// #define THREADS_PER_BLOCK_NMS  sizeof(unsigned long long) * 8
+const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
+
+// OUT_SIZE_FACTOR * OUTPUT_H  * Y_STEP = Y_MAX - Y_MIN
+#define OUT_SIZE_FACTOR 1.0f
+
+#define TASK_NUM 1
+#define REG_CHANNEL 2
+#define HEIGHT_CHANNEL 1
+#define ROT_CHANNEL 2
+// #define VEL_CHANNEL 2 //don't defined in waymo
+#define DIM_CHANNEL 3
+
+// spatial output size of rpn
+#define OUTPUT_H 468
+#define OUTPUT_W 468
+#endif
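The relation noted above does hold for this Waymo block: OUT_SIZE_FACTOR * OUTPUT_H * Y_STEP = 1.0 * 468 * 0.32 = 149.76 = 74.88 - (-74.88) = Y_MAX - Y_MIN, and the BEV grid covers the crop the same way in X (BEV_W * X_STEP = 468 * 0.32 = X_MAX - X_MIN). A stand-alone check of that arithmetic; it is illustrative only, not part of the build:

    #include <cassert>
    #include <cmath>
    #include "config.h"

    int main()
    {
        assert(std::fabs(OUT_SIZE_FACTOR * OUTPUT_H * Y_STEP - (Y_MAX - Y_MIN)) < 1e-3f);
        assert(std::fabs(BEV_W * X_STEP - (X_MAX - X_MIN)) < 1e-3f);
        return 0;
    }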
+
+
+
+
+
+
+// ========================================NUSCENES CENTERPOINT CONFIG======================================== 
+
+ // pillar size
+// #define X_STEP 0.2f
+// #define Y_STEP 0.2f
+// #define X_MIN -51.2f
+// #define X_MAX 51.2f
+// #define Y_MIN -51.2f
+// #define Y_MAX 51.2f
+// #define Z_MIN -5.0f
+// #define Z_MAX 3.0f
+// #define PI 3.141592653f
+// // parameters for preprocess
+// #define BEV_W 512
+// #define BEV_H 512
+// #define MAX_PILLARS 30000
+// #define MAX_PIONT_IN_PILLARS 20
+// #define FEATURE_NUM 10
+// #define THREAD_NUM 2
+// // parameters for postprocess
+// #define SCORE_THREAHOLD 0.1f
+// #define NMS_THREAHOLD 0.2f
+// #define INPUT_NMS_MAX_SIZE 1000
+// #define OUT_SIZE_FACTOR 4.0f
+// #define TASK_NUM 6
+// #define REG_CHANNEL 2
+// #define HEIGHT_CHANNEL 1
+// #define ROT_CHANNEL 2
+// #define VEL_CHANNEL 2
+// #define DIM_CHANNEL 3
+// #define OUTPUT_H 128
+// #define OUTPUT_W 128
+// #endif
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+

+ 6 - 0
src/detection/CenterPoint-master/include/iou3d_nms.h

@@ -0,0 +1,6 @@
+#include <vector>
+#include <cuda.h>
+#include <cuda_runtime_api.h>
+
+int nms_gpu(const float* boxes_data, long* keep_data, int boxs_num,  float nms_overlap_thresh);
+int raw_nms_gpu(const float* reg,  const float* height, const float* dim , const float* rot, const int* indexs, long* keep_data, int boxes_num,  float nms_overlap_thresh);

+ 57 - 0
src/detection/CenterPoint-master/include/postprocess.h

@@ -0,0 +1,57 @@
+#ifndef __CENTERPOINT_POSTPROCESS__
+#define __CENTERPOINT_POSTPROCESS__
+
+#include "buffers.h"
+#include "common.h"
+#include "config.h"
+#include <math.h>
+#include <stdint.h>
+#include <thrust/sort.h>
+#include <thrust/sequence.h>
+#include <thrust/execution_policy.h>
+#include <thrust/host_vector.h>
+
+struct Box{
+    float x;
+    float y;
+    float z;
+    float l;
+    float h;
+    float w;
+    float velX;
+    float velY;
+    float theta;
+
+    float score;
+    int cls;
+    bool isDrop; // for nms
+};
+
+int _raw_nms_gpu(const float* reg, const float* height, const float* dim, const float* rot,
+                 const int* indexs, long* dev_keep_data, unsigned long long* mask_cpu, unsigned long long* remv_gpu,
+                 int boxes_num, float nms_overlap_thresh);
+
+void _sort_by_key(float* keys, int* values, int size);
+
+void _gather_all(float* host_boxes, int* host_label,
+                 float* reg, float* height, float* dim, float* rot, float* sorted_score, int32_t* label,
+                 int* dev_indexs, long* host_keep_indexs, int boxSizeBef, int boxSizeAft);
+
+void _box_assign_launcher(float* reg, float* height, float* dim, float* rot, float* boxes, float* score, int* label,
+                          float* out_score, int* out_label, int* validIndexs, int boxSize, int output_h, int output_w);
+void _index_assign_launcher(int* indexs, int output_h, int output_w);
+int _find_valid_score_num(float* score, float thre, int output_h, int output_w);
+// void _find_valid_score_num(float* score, float thre, int output_h, int output_w, int* box_size); //,  thrust::host_vector<int>  host_box_size);
+void postprocessGPU(samplesCommon::BufferManager* buffers,
+                    std::vector<Box>& predResult,
+                    std::map<std::string, std::vector<std::string>> rpnOutputTensorNames,
+                    int* dev_score_indexs,
+                    unsigned long long* mask_cpu,
+                    unsigned long long* remv_cpu,
+                    int* host_score_indexs,
+                    long* host_keep_data,
+                    float* host_boxes,
+                    int* host_label);
+void postprocess(samplesCommon::BufferManager* buffers, std::vector<Box>& predResult);
+
+#endif
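postprocessGPU()/postprocess() fill predResult with the Box records declared above. A sketch of consuming that output; dumpBoxes is an illustrative helper name, not part of this commit:

    #include <cstdio>
    #include <vector>
    #include "postprocess.h"

    void dumpBoxes(const std::vector<Box>& predResult)
    {
        for (const Box& b : predResult)
        {
            if (b.isDrop)
            {
                continue;   // suppressed during NMS
            }
            std::printf("cls=%d score=%.2f xyz=(%.2f, %.2f, %.2f) lwh=(%.2f, %.2f, %.2f) yaw=%.2f\n",
                        b.cls, b.score, b.x, b.y, b.z, b.l, b.w, b.h, b.theta);
        }
    }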

+ 42 - 0
src/detection/CenterPoint-master/include/preprocess.h

@@ -0,0 +1,42 @@
+#ifndef __CENTERPOINT_PREPROCESS__
+#define __CENTERPOINT_PREPROCESS__
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include "config.h"
+#include "buffers.h"
+#include "common.h"
+#include "logger.h"
+#include "NvInfer.h"
+#include <cuda_runtime_api.h>
+using namespace std;
+#define GPU_CHECK(ans)                                                                                                \
+  {                                                                                                                   \
+    GPUAssert((ans), __FILE__, __LINE__);                                                                             \
+  }
+
+inline void GPUAssert(cudaError_t code, const char* file, int line, bool abort = true)
+{
+  if (code != cudaSuccess)
+  {
+    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
+    if (abort)
+      exit(code);
+  }
+}
+
+
+void _preprocess_gpu(float* points, float* feature, int* indices,
+                     bool* p_mask, int* p_bev_idx, int* p_point_num_assigned, int* bev_voxel_idx,
+                     float* v_point_sum, int* v_range, int* v_point_num, int pointNum);
+
+void preprocessGPU(float* points, float* feature, int* indices,
+                   bool* p_mask, int* p_bev_idx, int* p_point_num_assigned, int* bev_voxel_idx,
+                   float* v_point_sum, int* v_range, int* v_point_num, int pointNum, int pointDim);
+
+
+void preprocess(float* points, float* feature, int* indices, int pointNum, int pointDim);
+
+bool readBinFile(std::string& filename, void*& bufPtr, int& pointNum, int pointDim );
+
+#endif
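GPU_CHECK above wraps a CUDA runtime call and aborts with the file and line of the failing call. A sketch of the intended usage; the helper name and the buffer are placeholders, sized from the config.h constants that preprocess.h pulls in:

    #include <cuda_runtime_api.h>
    #include "preprocess.h"

    void allocatePointBuffer(float** devPoints)
    {
        GPU_CHECK(cudaMalloc(reinterpret_cast<void**>(devPoints),
                             MAX_POINTS * POINT_DIM * sizeof(float)));
        GPU_CHECK(cudaMemset(*devPoints, 0, MAX_POINTS * POINT_DIM * sizeof(float)));
    }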

+ 58 - 0
src/detection/CenterPoint-master/include/scatter_cuda.h

@@ -0,0 +1,58 @@
+/*
+ * Copyright 2018-2019 Autoware Foundation. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+* @file scatter_cuda.h
+* @brief CUDA code for scatter operation
+* @author Kosuke Murakami
+* @date 2019/02/26
+*/
+
+#ifndef SCATTERCUDA_H
+#define SCATTERCUDA_H
+
+class ScatterCuda
+{
+private:
+  const int NUM_THREADS_;
+  const int FEATURE_NUM_;
+  const int GRID_X_SIZE_;
+  const int GRID_Y_SIZE_;
+
+public:
+  /**
+  * @brief Constructor
+  * @param[in] NUM_THREADS The number of threads used to launch the CUDA kernel
+  * @param[in] MAX_NUM_PILLARS Maximum number of pillars
+  * @param[in] GRID_X_SIZE Number of pillars in x-coordinate
+  * @param[in] GRID_Y_SIZE Number of pillars in y-coordinate
+  * @details Capitalized variables never change after construction
+  */
+  ScatterCuda(const int NUM_THREADS, const int MAX_NUM_PILLARS, const int GRID_X_SIZE, const int GRID_Y_SIZE);
+
+  /**
+  * @brief Call scatter cuda kernel
+  * @param[in] pillar_count The valid number of pillars
+  * @param[in] coors X- and Y-coordinate indexes of the corresponding pillars
+  * @param[in] pfe_output Output from the Pillar Feature Extractor
+  * @param[out] scattered_feature Gridmap representation of the pillars' features
+  * @details Places each pillar in the gridmap based on its index (coordinate) information
+  */
+  void doScatterCuda(const int pillar_count, int* coors, float* pfe_output, float* scattered_feature);
+};
+
+#endif  // SCATTERCUDA_H
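This header only declares the interface; the CUDA kernel itself lives in the corresponding .cu source, which is not part of this hunk. As a rough, hypothetical sketch of what doScatterCuda dispatches to, assuming coors packs an (x, y) index pair per pillar and the output canvas is laid out as [feature][y][x] (the usual PointPillars convention, not confirmed by this diff):

    // Hypothetical sketch only -- not the kernel shipped in this commit.
    __global__ void scatter_kernel(int pillar_count, int feature_num,
                                   int grid_x_size, int grid_y_size,
                                   const int* coors, const float* pfe_output,
                                   float* scattered_feature)
    {
      int pillar = blockIdx.x;    // one block per valid pillar
      int feat   = threadIdx.x;   // one thread per feature channel
      if (pillar >= pillar_count || feat >= feature_num)
        return;
      int x = coors[pillar * 2 + 0];   // assumed packing: (x, y) per pillar
      int y = coors[pillar * 2 + 1];
      // Copy this pillar's feature value into its cell of the BEV canvas.
      scattered_feature[(feat * grid_y_size + y) * grid_x_size + x] =
          pfe_output[pillar * feature_num + feat];
    }

    // doScatterCuda(pillar_count, coors, pfe_output, scattered_feature) would then
    // launch roughly: scatter_kernel<<<pillar_count, FEATURE_NUM_>>>(pillar_count,
    //     FEATURE_NUM_, GRID_X_SIZE_, GRID_Y_SIZE_, coors, pfe_output, scattered_feature);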

+ 27 - 0
src/detection/CenterPoint-master/include/utils.h

@@ -0,0 +1,27 @@
+#pragma once
+#include <string>
+#include <vector>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <glob.h>
+
+// Defined in a header, so keep it inline to avoid multiple-definition link errors.
+inline std::vector<std::string> glob(const std::string pattern)
+{
+    std::vector<std::string> filenames;
+    using namespace std;
+    glob_t glob_result;
+    memset(&glob_result, 0, sizeof(glob_result));
+    int return_value = glob(pattern.c_str(), GLOB_TILDE, NULL, &glob_result);
+    if(return_value != 0){
+        globfree(&glob_result);
+        return filenames;
+    }
+    for(size_t idx = 0; idx < glob_result.gl_pathc; idx++){
+        filenames.push_back(string(glob_result.gl_pathv[idx]));
+    }
+    globfree(&glob_result);
+    return filenames;
+}
+
+
+
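A typical call site for the helper above, e.g. to iterate over the sample point clouds shipped under lidars/ (the relative path here is illustrative):

    #include "utils.h"

    // Illustrative only: list every .bin point cloud in the lidars directory.
    int main()
    {
      std::vector<std::string> files = glob("../lidars/*.bin");
      for (const auto& f : files)
        std::cout << f << std::endl;
      return 0;
    }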

+ 0 - 0
src/detection/CenterPoint-master/lidars/0a0d6b8c2e884134a3b48df43d54c36a.bin.txt


BIN
src/detection/CenterPoint-master/lidars/seq_0_frame_100.bin


BIN
src/detection/CenterPoint-master/lidars/seq_0_frame_101.bin


+ 7 - 0
src/detection/CenterPoint-master/lidars/test.py

@@ -0,0 +1,7 @@
+import numpy as np
+
+# Each point is stored as 5 consecutive float32 values; the first three are x, y, z.
+data = np.fromfile("./seq_0_frame_100.bin", dtype=np.float32)
+data_reshape = data.reshape(-1, 5)
+xyz = data_reshape[:, :3]
+
+print("xyz", xyz[0][0])

BIN
src/detection/CenterPoint-master/models/pfe_baseline32000.onnx


BIN
src/detection/CenterPoint-master/models/pfe_fp.engine


BIN
src/detection/CenterPoint-master/models/rpn_baseline.onnx


BIN
src/detection/CenterPoint-master/models/rpn_fp.engine


+ 543 - 0
src/detection/CenterPoint-master/requirements.txt

@@ -0,0 +1,543 @@
+absl-py==0.13.0
+actionlib==1.12.1
+addict==2.4.0
+aiohttp==3.7.4.post0
+alabaster @ file:///home/ktietz/src/ci/alabaster_1611921544520/work
+anaconda-client==1.7.2
+anaconda-navigator==2.0.3
+anaconda-project @ file:///tmp/build/80754af9/anaconda-project_1610472525955/work
+angles==1.9.12
+antlr4-python3-runtime==4.8
+anyio @ file:///tmp/build/80754af9/anyio_1617783275907/work/dist
+appdirs==1.4.4
+apptools==5.1.0
+argcomplete==1.12.3
+argh==0.26.2
+argon2-cffi @ file:///tmp/build/80754af9/argon2-cffi_1613037097816/work
+arrow==1.2.1
+as==0.1
+asn1crypto @ file:///tmp/build/80754af9/asn1crypto_1596577642040/work
+astroid @ file:///tmp/build/80754af9/astroid_1613500854201/work
+astropy @ file:///tmp/build/80754af9/astropy_1617745353437/work
+astunparse==1.6.3
+async-generator @ file:///home/ktietz/src/ci/async_generator_1611927993394/work
+async-timeout==3.0.1
+atomicwrites==1.4.0
+attrs @ file:///tmp/build/80754af9/attrs_1604765588209/work
+autobahn==21.3.1
+Automat==20.2.0
+autopep8 @ file:///tmp/build/80754af9/autopep8_1615918855173/work
+Babel @ file:///tmp/build/80754af9/babel_1607110387436/work
+backcall @ file:///home/ktietz/src/ci/backcall_1611930011877/work
+backports.functools-lru-cache @ file:///tmp/build/80754af9/backports.functools_lru_cache_1618170165463/work
+backports.shutil-get-terminal-size @ file:///tmp/build/80754af9/backports.shutil_get_terminal_size_1608222128777/work
+backports.tempfile @ file:///home/linux1/recipes/ci/backports.tempfile_1610991236607/work
+backports.weakref==1.0.post1
+bagpy==0.4.7
+beautifulsoup4 @ file:///home/linux1/recipes/ci/beautifulsoup4_1610988766420/work
+bitarray @ file:///tmp/build/80754af9/bitarray_1620827551536/work
+bitstring==3.1.9
+bkcharts==0.2
+black==21.4b2
+bleach @ file:///tmp/build/80754af9/bleach_1612211392645/work
+bokeh @ file:///tmp/build/80754af9/bokeh_1620779595936/work
+bondpy==1.8.5
+boto==2.49.0
+Bottleneck==1.3.2
+brotlipy==0.7.0
+cachetools==4.2.2
+cairocffi==1.3.0
+camera_calibration==1.15.0
+camera_calibration_parsers==1.11.13
+catkin==0.7.29
+catkin-pkg==0.4.24
+ccimport==0.3.6
+certifi==2020.12.5
+cffi @ file:///tmp/build/80754af9/cffi_1613246945912/work
+chardet @ file:///tmp/build/80754af9/chardet_1607706746162/work
+clang==5.0
+click @ file:///home/linux1/recipes/ci/click_1610990599742/work
+cloudpickle @ file:///tmp/build/80754af9/cloudpickle_1598884132938/work
+clyent==1.2.2
+colorama @ file:///tmp/build/80754af9/colorama_1607707115595/work
+commonmark==0.9.1
+conda==4.10.3
+conda-build==3.21.4
+conda-content-trust @ file:///tmp/build/80754af9/conda-content-trust_1617045594566/work
+conda-package-handling @ file:///tmp/build/80754af9/conda-package-handling_1618262148928/work
+conda-repo-cli @ file:///tmp/build/80754af9/conda-repo-cli_1620168426516/work
+conda-token @ file:///tmp/build/80754af9/conda-token_1620076980546/work
+conda-verify==3.4.2
+configobj==5.0.6
+constantly==15.1.0
+contextlib2==0.6.0.post1
+controller_manager==0.18.4
+controller_manager_msgs==0.18.4
+cryptography @ file:///tmp/build/80754af9/cryptography_1616769286105/work
+cumm-cu102==0.2.5
+cumm-cu111==0.2.5
+cumm-cu113==0.2.5
+cv_bridge==1.13.0
+cycler==0.10.0
+Cython @ file:///tmp/build/80754af9/cython_1618435160151/work
+cytoolz==0.11.0
+dask @ file:///tmp/build/80754af9/dask-core_1617390489108/work
+dataclasses==0.6
+-e git+https://github.com/jinfagang/DCNv2_latest.git@fa9b2fd740ced2a22e0e7e913c3bf3934fd08098#egg=DCNv2
+decorator @ file:///tmp/build/80754af9/decorator_1617916966915/work
+defusedxml @ file:///tmp/build/80754af9/defusedxml_1615228127516/work
+deprecation==2.1.0
+descartes==1.1.0
+-e git+https://github.com/facebookresearch/detectron2.git@ef2c3abbd36d4093a604f874243037691f634c2f#egg=detectron2
+diagnostic_analysis==1.9.7
+diagnostic_common_diagnostics==1.9.7
+diagnostic_updater==1.9.7
+diff-match-patch @ file:///tmp/build/80754af9/diff-match-patch_1594828741838/work
+distributed @ file:///tmp/build/80754af9/distributed_1620902833129/work
+distro==1.6.0
+docker-pycreds==0.4.0
+docutils @ file:///tmp/build/80754af9/docutils_1620827984873/work
+dynamic_reconfigure==1.6.3
+easydict==1.9
+entrypoints==0.3
+envisage==6.0.1
+et-xmlfile==1.0.1
+evo==1.16.0
+fastcache==1.1.0
+filelock @ file:///home/linux1/recipes/ci/filelock_1610993975404/work
+fire==0.4.0
+flake8 @ file:///tmp/build/80754af9/flake8_1615834841867/work
+Flask @ file:///home/ktietz/src/ci/flask_1611932660458/work
+flatbuffers==1.12
+fonttools==4.29.1
+fsspec @ file:///tmp/build/80754af9/fsspec_1617959894824/work
+future==0.18.2
+fvcore==0.1.5.post20220119
+gast==0.4.0
+gazebo_plugins==2.8.7
+gazebo_ros==2.8.7
+gencpp==0.6.5
+geneus==2.2.6
+genlisp==0.4.16
+genmsg==0.5.16
+gennodejs==2.0.1
+genpy==0.6.16
+gevent @ file:///tmp/build/80754af9/gevent_1616770671827/work
+ghp-import==2.0.2
+gitdb==4.0.9
+GitPython==3.1.26
+glob2 @ file:///home/linux1/recipes/ci/glob2_1610991677669/work
+gmpy2==2.0.8
+gnupg==2.3.1
+google-auth==1.35.0
+google-auth-oauthlib==0.4.6
+google-pasta==0.2.0
+gps_common==0.3.1
+gql==2.0.0
+graphql-core==2.3.2
+graphviz==0.17
+greenlet @ file:///tmp/build/80754af9/greenlet_1611957705398/work
+grpcio==1.40.0
+h5py==3.1.0
+halo==0.0.31
+HeapDict==1.0.1
+html5lib @ file:///tmp/build/80754af9/html5lib_1593446221756/work
+hydra-core==1.1.1
+hyperlink==21.0.0
+idna @ file:///home/linux1/recipes/ci/idna_1610986105248/work
+image_geometry==1.13.0
+imagecorruptions==1.1.2
+imageio @ file:///tmp/build/80754af9/imageio_1617700267927/work
+imagesize @ file:///home/ktietz/src/ci/imagesize_1611921604382/work
+importlib-metadata @ file:///tmp/build/80754af9/importlib-metadata_1617874469820/work
+importlib-resources==5.2.2
+incremental==21.3.0
+iniconfig @ file:///home/linux1/recipes/ci/iniconfig_1610983019677/work
+interactive-markers==1.11.5
+intervaltree @ file:///tmp/build/80754af9/intervaltree_1598376443606/work
+iopath==0.1.9
+iou3d-nms==0.0.0
+ipykernel @ file:///tmp/build/80754af9/ipykernel_1596207638929/work/dist/ipykernel-5.3.4-py3-none-any.whl
+ipython @ file:///tmp/build/80754af9/ipython_1617120885885/work
+ipython-genutils @ file:///tmp/build/80754af9/ipython_genutils_1606773439826/work
+ipywidgets @ file:///tmp/build/80754af9/ipywidgets_1610481889018/work
+isort @ file:///tmp/build/80754af9/isort_1616355431277/work
+itsdangerous @ file:///home/ktietz/src/ci/itsdangerous_1611932585308/work
+jdcal==1.4.1
+jedi @ file:///tmp/build/80754af9/jedi_1606932564285/work
+jeepney @ file:///tmp/build/80754af9/jeepney_1606148855031/work
+Jinja2 @ file:///tmp/build/80754af9/jinja2_1612213139570/work
+joblib @ file:///tmp/build/80754af9/joblib_1613502643832/work
+joint_state_publisher==1.12.15
+jsk-recognition-utils==1.2.15
+jsk_rviz_plugins==2.1.7
+jsk_topic_tools==2.2.11
+json5==0.9.5
+jsonschema @ file:///tmp/build/80754af9/jsonschema_1602607155483/work
+jupyter==1.0.0
+jupyter-client @ file:///tmp/build/80754af9/jupyter_client_1616770841739/work
+jupyter-console @ file:///tmp/build/80754af9/jupyter_console_1616615302928/work
+jupyter-core @ file:///tmp/build/80754af9/jupyter_core_1612213311222/work
+jupyter-packaging==0.11.1
+jupyter-server @ file:///tmp/build/80754af9/jupyter_server_1616083640759/work
+jupyterlab @ file:///tmp/build/80754af9/jupyterlab_1619133235951/work
+jupyterlab-pygments @ file:///tmp/build/80754af9/jupyterlab_pygments_1601490720602/work
+jupyterlab-server @ file:///tmp/build/80754af9/jupyterlab_server_1617134334258/work
+jupyterlab-widgets @ file:///tmp/build/80754af9/jupyterlab_widgets_1609884341231/work
+kdl_parser_py==1.13.1
+keras==2.6.0
+Keras-Preprocessing==1.1.2
+keyring @ file:///tmp/build/80754af9/keyring_1614616740399/work
+kiwisolver @ file:///tmp/build/80754af9/kiwisolver_1612282420641/work
+kornia==0.5.2
+laser_geometry==1.6.7
+lazy-object-proxy @ file:///tmp/build/80754af9/lazy-object-proxy_1616526917483/work
+libarchive-c @ file:///tmp/build/80754af9/python-libarchive-c_1617780486945/work
+llvmlite==0.36.0
+locket==0.2.1
+log-symbols==0.0.14
+lxml @ file:///tmp/build/80754af9/lxml_1616443220220/work
+m2r2==0.3.2
+Mako==1.2.0
+Markdown==3.3.4
+MarkupSafe==1.1.1
+matplotlib==3.2.1
+mayavi==4.7.4
+mccabe==0.6.1
+mergedeep==1.3.4
+message_filters==1.14.11
+mistune==0.8.4
+mkdocs==1.2.3
+mkl-fft==1.3.0
+mkl-random @ file:///tmp/build/80754af9/mkl_random_1618853849286/work
+mkl-service==2.3.0
+mock @ file:///tmp/build/80754af9/mock_1607622725907/work
+more-itertools @ file:///tmp/build/80754af9/more-itertools_1613676688952/work
+motmetrics==1.1.3
+mpi4py==3.1.3
+mpmath==1.2.1
+msgpack @ file:///tmp/build/80754af9/msgpack-python_1612287151062/work
+multidict==5.1.0
+multipledispatch==0.6.0
+munkres==1.1.4
+mypy-extensions==0.4.3
+natsort==8.0.2
+navigator-updater==0.2.1
+nbclassic @ file:///tmp/build/80754af9/nbclassic_1616085367084/work
+nbclient @ file:///tmp/build/80754af9/nbclient_1614364831625/work
+nbconvert @ file:///tmp/build/80754af9/nbconvert_1601914830498/work
+nbformat @ file:///tmp/build/80754af9/nbformat_1617383369282/work
+nest-asyncio @ file:///tmp/build/80754af9/nest-asyncio_1613680548246/work
+networkx @ file:///tmp/build/80754af9/networkx_1598376031484/work
+ninja==1.10.2
+nltk @ file:///tmp/build/80754af9/nltk_1618327084230/work
+nmea_navsat_driver==0.5.2
+nose @ file:///tmp/build/80754af9/nose_1606773131901/work
+notebook @ file:///tmp/build/80754af9/notebook_1616443462982/work
+np==1.0.2
+numba @ file:///tmp/build/80754af9/numba_1616774046117/work
+numexpr @ file:///tmp/build/80754af9/numexpr_1618856167419/work
+numpy==1.19.5
+numpydoc @ file:///tmp/build/80754af9/numpydoc_1605117425582/work
+nuscenes-devkit==1.0.5
+nvidia-cublas==11.5.1.101
+nvidia-cuda-nvrtc==11.3.58
+nvidia-cuda-runtime==11.3.58
+nvidia-cudnn==8.2.0.51
+nvidia-pyindex==1.0.9
+nvidia-tensorrt @ file:///home/wanghao/Downloads/nvidia_tensorrt-8.0.1.6-cp38-none-linux_x86_64.whl
+oauthlib==3.1.1
+objgraph==3.5.0
+olefile==0.46
+omegaconf==2.1.1
+onnx==1.10.1
+onnx-simplifier==0.3.6
+onnxoptimizer==0.2.6
+onnxruntime==1.9.0
+open3d==0.14.1
+open3d-python==0.3.0.0
+opencv-contrib-python==4.5.3.56
+opencv-python==4.5.3.56
+openpyxl @ file:///tmp/build/80754af9/openpyxl_1615411699337/work
+opt-einsum==3.3.0
+packaging @ file:///tmp/build/80754af9/packaging_1611952188834/work
+pandas==1.2.4
+pandocfilters @ file:///tmp/build/80754af9/pandocfilters_1605120460739/work
+parso==0.7.0
+partd @ file:///tmp/build/80754af9/partd_1618000087440/work
+path @ file:///tmp/build/80754af9/path_1614022220526/work
+pathlib==1.0.1
+pathlib2 @ file:///tmp/build/80754af9/pathlib2_1607024983162/work
+pathspec==0.9.0
+pathtools==0.1.2
+patsy==0.5.1
+pccm==0.2.21
+# Editable install with no version control (pcdet==0.3.0+0)
+-e /home/wanghao/anaconda3/lib/python3.8/site-packages
+pep8==1.7.1
+pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
+pickleshare @ file:///tmp/build/80754af9/pickleshare_1606932040724/work
+Pillow==9.0.1
+pkginfo==1.7.0
+platformdirs==2.5.2
+pluggy @ file:///tmp/build/80754af9/pluggy_1615976321666/work
+ply==3.11
+portalocker==2.3.2
+progress==1.6
+prometheus-client @ file:///tmp/build/80754af9/prometheus_client_1618088486455/work
+promise==2.3
+prompt-toolkit @ file:///tmp/build/80754af9/prompt-toolkit_1616415428029/work
+protobuf==3.18.0
+psutil @ file:///tmp/build/80754af9/psutil_1612298023621/work
+ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
+py @ file:///tmp/build/80754af9/py_1607971587848/work
+py3rosmsgs==1.18.1
+pyasn1==0.4.8
+pyasn1-modules==0.2.8
+pybind11==2.7.1
+pycairo==1.20.1
+pycocotools @ git+https://github.com/cocodataset/cocoapi.git@8c9bcc3cf640524c4c20a9c40e89cb6a2f2fa0e9#subdirectory=PythonAPI
+pycodestyle @ file:///home/ktietz/src/ci_mi/pycodestyle_1612807597675/work
+pycosat==0.6.3
+pycparser @ file:///tmp/build/80754af9/pycparser_1594388511720/work
+pycryptodomex==3.12.0
+pycuda==2021.1
+pyculib==1.0.1
+pycurl==7.43.0.6
+pydash==5.1.0
+pydocstyle @ file:///tmp/build/80754af9/pydocstyle_1616182067796/work
+pydot==1.4.2
+pyerfa @ file:///tmp/build/80754af9/pyerfa_1619390903914/work
+pyface==7.3.0
+pyflakes @ file:///home/ktietz/src/ci_ipy2/pyflakes_1612551159640/work
+Pygments @ file:///tmp/build/80754af9/pygments_1615143339740/work
+pylint @ file:///tmp/build/80754af9/pylint_1617135829881/work
+pyls-black @ file:///tmp/build/80754af9/pyls-black_1607553132291/work
+pyls-spyder @ file:///tmp/build/80754af9/pyls-spyder_1613849700860/work
+pymdown-extensions==9.1
+pyodbc===4.0.0-unsupported
+pyOpenSSL @ file:///tmp/build/80754af9/pyopenssl_1608057966937/work
+pyparsing @ file:///home/linux1/recipes/ci/pyparsing_1610983426697/work
+PyQt5==5.15.4
+PyQt5-Qt5==5.15.2
+PyQt5-sip==12.9.0
+pyquaternion==0.9.9
+pyrsistent @ file:///tmp/build/80754af9/pyrsistent_1600141720057/work
+pyserial==3.5
+PySocks @ file:///tmp/build/80754af9/pysocks_1605305779399/work
+pytest==6.2.3
+pytest-runner==5.3.1
+python-dateutil @ file:///home/ktietz/src/ci/python-dateutil_1611928101742/work
+python-jsonrpc-server @ file:///tmp/build/80754af9/python-jsonrpc-server_1600278539111/work
+python-language-server @ file:///tmp/build/80754af9/python-language-server_1607972495879/work
+python_qt_binding==0.4.4
+pytools==2022.1.6
+pytorch3d==0.3.0
+pytz @ file:///tmp/build/80754af9/pytz_1612215392582/work
+PyWavelets @ file:///tmp/build/80754af9/pywavelets_1601658317819/work
+pyxdg @ file:///tmp/build/80754af9/pyxdg_1603822279816/work
+PyYAML==5.4.1
+pyyaml_env_tag==0.1
+pyzmq==20.0.0
+QDarkStyle==2.8.1
+qt-dotgraph==0.4.2
+qt-gui==0.4.2
+qt-gui-cpp==0.4.2
+qt-gui-py-common==0.4.2
+QtAwesome @ file:///tmp/build/80754af9/qtawesome_1615991616277/work
+qtconsole @ file:///tmp/build/80754af9/qtconsole_1616775094278/work
+QtPy==1.9.0
+recommonmark==0.7.1
+regex @ file:///tmp/build/80754af9/regex_1617569202463/work
+requests @ file:///tmp/build/80754af9/requests_1608241421344/work
+requests-oauthlib==1.3.0
+resource_retriever==1.12.6
+rinoh-typeface-dejavuserif==0.1.3
+rinoh-typeface-texgyrecursor==0.1.1
+rinoh-typeface-texgyreheros==0.1.1
+rinoh-typeface-texgyrepagella==0.1.1
+rinohtype==0.5.3
+rope @ file:///tmp/build/80754af9/rope_1602264064449/work
+rosapi==0.11.13
+rosbag==1.14.11
+rosboost-cfg==1.14.9
+rosbridge_library==0.11.13
+rosbridge_server==0.11.13
+rosclean==1.14.9
+roscreate==1.14.9
+rosgraph==1.14.11
+roslaunch==1.14.11
+roslib==1.14.9
+roslint==0.11.2
+roslz4==1.14.11
+rosmake==1.14.9
+rosmaster==1.14.11
+rosmsg==1.14.11
+rosnode==1.14.11
+rosparam==1.14.11
+rospkg==1.3.0
+rospy==1.14.11
+rosservice==1.14.11
+rostest==1.14.11
+rostopic==1.14.11
+rosunit==1.14.9
+roswtf==1.14.11
+rqt-moveit==0.5.10
+rqt-reconfigure==0.5.4
+rqt-robot-monitor==0.5.13
+rqt-rviz==0.7.0
+rqt_action==0.4.9
+rqt_bag==0.5.1
+rqt_bag_plugins==0.5.1
+rqt_console==0.4.9
+rqt_dep==0.4.9
+rqt_graph==0.4.11
+rqt_gui==0.5.2
+rqt_gui_py==0.5.2
+rqt_image_view==0.4.16
+rqt_launch==0.4.8
+rqt_logger_level==0.4.8
+rqt_msg==0.4.8
+rqt_nav_view==0.5.7
+rqt_plot==0.4.13
+rqt_pose_view==0.5.8
+rqt_publisher==0.4.8
+rqt_py_common==0.5.2
+rqt_py_console==0.4.8
+rqt_robot_dashboard==0.5.7
+rqt_robot_steering==0.5.10
+rqt_runtime_monitor==0.5.7
+rqt_service_caller==0.4.8
+rqt_shell==0.4.9
+rqt_srv==0.4.8
+rqt_tf_tree==0.6.0
+rqt_top==0.4.8
+rqt_topic==0.4.11
+rqt_web==0.4.8
+rsa==4.7.2
+Rtree @ file:///tmp/build/80754af9/rtree_1618420845272/work
+ruamel-yaml-conda @ file:///tmp/build/80754af9/ruamel_yaml_1616016699510/work
+rviz==1.13.18
+Rx==1.6.1
+scikit-image==0.18.1
+scikit-learn @ file:///tmp/build/80754af9/scikit-learn_1614446682169/work
+scipy==1.8.0
+seaborn @ file:///tmp/build/80754af9/seaborn_1608578541026/work
+SecretStorage @ file:///tmp/build/80754af9/secretstorage_1614022784285/work
+Send2Trash @ file:///tmp/build/80754af9/send2trash_1607525499227/work
+sensor-msgs==1.12.8
+sentry-sdk==1.5.5
+Shapely==1.7.1
+shortuuid==1.0.8
+simplegeneric==0.8.1
+singledispatch @ file:///tmp/build/80754af9/singledispatch_1614366001199/work
+sip==4.19.13
+six @ file:///tmp/build/80754af9/six_1605205327372/work
+smach==2.0.1
+smach_ros==2.0.1
+smclib==1.8.5
+smmap==5.0.0
+sniffio @ file:///tmp/build/80754af9/sniffio_1614030475067/work
+snowballstemmer @ file:///tmp/build/80754af9/snowballstemmer_1611258885636/work
+sortedcollections @ file:///tmp/build/80754af9/sortedcollections_1611172717284/work
+sortedcontainers @ file:///tmp/build/80754af9/sortedcontainers_1606865132123/work
+sound-play==0.3.11
+soupsieve @ file:///tmp/build/80754af9/soupsieve_1616183228191/work
+-e git+https://github.com/facebookresearch/SparseConvNet.git@89818ebd2a508bb05e552168c83d6b60add8a051#egg=sparseconvnet
+spconv==1.0
+Sphinx==3.2.1
+sphinx-autodoc-typehints==1.4.0
+sphinx-bootstrap-theme==0.8.0
+sphinx-markdown-parser==0.2.4
+sphinx-rtd-theme==1.0.0
+sphinxcontrib-applehelp @ file:///home/ktietz/src/ci/sphinxcontrib-applehelp_1611920841464/work
+sphinxcontrib-devhelp @ file:///home/ktietz/src/ci/sphinxcontrib-devhelp_1611920923094/work
+sphinxcontrib-htmlhelp @ file:///home/ktietz/src/ci/sphinxcontrib-htmlhelp_1611920974801/work
+sphinxcontrib-jsmath @ file:///home/ktietz/src/ci/sphinxcontrib-jsmath_1611920942228/work
+sphinxcontrib-qthelp @ file:///home/ktietz/src/ci/sphinxcontrib-qthelp_1611921055322/work
+sphinxcontrib-serializinghtml @ file:///home/ktietz/src/ci/sphinxcontrib-serializinghtml_1611920755253/work
+sphinxcontrib-websupport @ file:///tmp/build/80754af9/sphinxcontrib-websupport_1597081412696/work
+spinners==0.0.24
+spyder @ file:///tmp/build/80754af9/spyder_1616775618138/work
+spyder-kernels @ file:///tmp/build/80754af9/spyder-kernels_1614030590686/work
+SQLAlchemy @ file:///tmp/build/80754af9/sqlalchemy_1620712430742/work
+statsmodels @ file:///tmp/build/80754af9/statsmodels_1614023746358/work
+sympy @ file:///tmp/build/80754af9/sympy_1618252284338/work
+tables==3.6.1
+tabulate==0.8.9
+tblib @ file:///tmp/build/80754af9/tblib_1597928476713/work
+tensorboard==2.6.0
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.0
+tensorboardX==2.4
+tensorflow==2.6.0
+tensorflow-estimator==2.6.0
+tensorflow-gpu==2.6.0
+tensorrt==0.0.1
+termcolor==1.1.0
+terminado==0.9.4
+terminaltables==3.1.0
+testpath @ file:///home/ktietz/src/ci/testpath_1611930608132/work
+textdistance @ file:///tmp/build/80754af9/textdistance_1612461398012/work
+tf==1.12.1
+tf2_geometry_msgs==0.6.5
+tf2_kdl==0.6.5
+tf2_py==0.6.5
+tf2_ros==0.6.5
+tf_conversions==1.12.1
+thop==0.0.31.post2005241907
+threadpoolctl @ file:///tmp/tmp9twdgx9k/threadpoolctl-2.1.0-py3-none-any.whl
+three-merge @ file:///tmp/build/80754af9/three-merge_1607553261110/work
+tifffile==2020.10.1
+toml @ file:///tmp/build/80754af9/toml_1616166611790/work
+tomlkit==0.9.2
+toolz @ file:///home/linux1/recipes/ci/toolz_1610987900194/work
+topic_tools==1.14.11
+torch==1.8.0+cu111
+torchaudio==0.8.0
+torchvision==0.9.0+cu111
+tornado @ file:///tmp/build/80754af9/tornado_1606942300299/work
+tqdm @ file:///tmp/build/80754af9/tqdm_1615925068909/work
+traitlets @ file:///home/ktietz/src/ci/traitlets_1611929699868/work
+traits==6.2.0
+traitsui==7.2.1
+transform==0.0.60
+transform-tools==0.0.23
+Twisted==21.7.0
+txaio==21.2.1
+typed-ast @ file:///tmp/build/80754af9/typed-ast_1610484547928/work
+typing_extensions==4.2.0
+ujson @ file:///tmp/build/80754af9/ujson_1611259522456/work
+unicodecsv==0.14.1
+unify==0.5
+untokenize==0.1.1
+update-checker==0.18.0
+urdfdom-py==0.4.5
+urllib3 @ file:///tmp/build/80754af9/urllib3_1615837158687/work
+validators==0.18.2
+vtk==9.0.3
+wandb==0.12.10
+watchdog==2.1.6
+waymo-open-dataset-tf-2-3-0==1.3.1
+waymo-open-dataset-tf-2-6-0==1.4.1
+wcwidth @ file:///tmp/build/80754af9/wcwidth_1593447189090/work
+webencodings==0.5.1
+Werkzeug @ file:///home/ktietz/src/ci/werkzeug_1611932622770/work
+widgetsnbextension==3.5.1
+wrapt==1.12.1
+wslink==1.0.7
+wurlitzer @ file:///tmp/build/80754af9/wurlitzer_1617224664226/work
+xacro==1.13.12
+xlrd @ file:///tmp/build/80754af9/xlrd_1608072521494/work
+XlsxWriter @ file:///tmp/build/80754af9/xlsxwriter_1617224712951/work
+xlwt==1.3.0
+xmltodict==0.12.0
+yacs==0.1.8
+yapf @ file:///tmp/build/80754af9/yapf_1615749224965/work
+yarl==1.6.3
+yaspin==2.1.0
+ytsphinx==1.2.1.dev20200430
+zict==2.0.0
+zipp @ file:///tmp/build/80754af9/zipp_1615904174917/work
+zope.event==4.5.0
+zope.interface @ file:///tmp/build/80754af9/zope.interface_1616357211867/work

+ 500 - 0
src/detection/CenterPoint-master/results1/seq_0_frame_100.bin.txt

@@ -0,0 +1,500 @@
+7.54047 16.3822 0.704102 2.12321 4.71515 1.73207 133637 0 0.00658527 0.939137 0
+30.5006 3.95125 0.854492 2.54736 6.07807 2.6398 115457 0 1.56783 0.929312 0
+31.2044 -6.29321 0.0266113 2.1036 5.13828 1.87739 100483 0 -1.61179 0.928409 0
+4.07906 16.1288 0.739746 2.20241 4.79877 1.77661 133158 0 -0.0362901 0.923868 0
+43.7084 26.3845 0.280762 2.24475 5.16343 1.91629 148258 0 1.49931 0.921358 0
+-33.1467 -6.92265 0.629883 2.01611 4.52567 1.50448 99346 0 -1.58718 0.917006 0
+-25.7158 -6.64164 0.603516 1.98389 4.3608 1.50375 99837 0 -1.64766 0.904313 0
+18.1466 3.25422 0.459961 2.13152 4.46422 1.91816 114482 0 1.54757 0.894052 0
+-22.1566 6.32125 0.644531 1.86096 4.07664 1.58285 118568 0 1.54263 0.893681 0
+1.09476 20.9287 0.658203 2.33759 5.35864 1.92379 140169 0 -0.00169903 0.891999 0
+-36.9688 6.01485 0.640625 1.96653 4.57455 1.60347 118054 0 1.52928 0.890294 0
+-10.6372 6.32984 0.640625 2.02994 4.69677 1.48695 118604 0 1.5664 0.889912 0
+-16.5344 6.36719 0.791016 2.03689 4.58349 1.79843 118586 0 1.54951 0.878835 0
+53.6348 11.017 0.198608 0.823381 0.931193 1.77141 125825 0 1.46522 0.875148 1
+49.2053 14.5508 0.297852 2.11597 4.85534 1.82319 130959 0 -1.58695 0.874399 0
+24.905 17.3769 0.197021 2.0359 4.57902 1.6755 135095 0 0.768802 0.869825 0
+27.7753 13.9575 0.200195 2.09745 4.43814 1.72111 129956 0 0.922817 0.863046 0
+13.0522 12.0305 0.527832 0.832071 0.987385 1.6837 127102 0 1.6037 0.861538 1
+18.5212 33.4448 0.0800781 1.92943 4.51684 1.53603 158475 0 3.09528 0.849847 0
+40.4738 41.3945 0.192993 1.96557 4.20194 1.68947 170244 0 -1.83203 0.848972 0
+39.8872 31.8586 0.123657 2.04586 4.81285 1.52036 156202 0 -0.0619305 0.842215 0
+-50.7295 5.965 0.893555 2.34445 5.37962 2.04586 118011 0 1.53469 0.840913 0
+18.0607 56.7806 -0.015625 1.98874 4.53895 1.53415 192638 0 3.11903 0.835349 0
+33.2439 43.9887 0.135986 1.96461 4.50363 1.51517 173965 0 3.13422 0.832509 0
+-45.5522 -6.96652 0.661133 2.0171 4.61493 1.57399 99307 0 -1.59222 0.824603 0
+18.5144 28.0919 0.234131 1.97133 4.32264 1.73715 150519 0 3.12764 0.824603 0
+28.537 -9.76047 0.0368652 0.873707 0.94148 1.76882 95327 0 -1.63774 0.823328 1
+40.5541 3.46344 0.140259 1.96461 4.4079 1.47538 114552 0 1.5613 0.82176 0
+53.9364 11.6341 0.223022 0.802934 0.9181 1.81785 126762 0 1.51522 0.820182 1
+39.2603 12.2402 0.307617 0.856181 0.891157 1.81165 127652 0 1.51946 0.818737 1
+35.7697 44.2941 0.336182 1.94647 4.42083 1.81697 174441 0 3.11399 0.815085 0
+4.11313 23.4572 0.49292 1.96941 4.45116 1.52259 143922 0 -0.0234561 0.814496 0
+17.6928 10.1544 0.342285 0.929206 1.00342 1.72532 124309 0 1.34952 0.809878 1
+38.3369 -6.12469 -0.0686035 1.90975 4.30159 1.50963 100505 0 -1.55794 0.807461 0
+-39.8253 -7.10234 0.620117 2.09029 4.67846 1.52855 98857 0 -1.63388 0.804866 0
+18.3523 49.4994 0.12146 1.91816 4.04887 1.63829 181875 0 3.05837 0.804866 0
+-31.2898 10.3928 0.693359 0.821373 0.921468 1.6796 124624 0 1.52138 0.803482 1
+51.8969 -9.77031 0.0256348 0.799999 0.880345 1.6196 95400 0 1.57244 0.802865 1
+-18.7359 11.1142 0.833008 0.942457 0.95188 1.72616 125599 0 -1.65346 0.802246 1
+-45.6087 9.98473 0.814453 0.830853 0.928695 1.76968 124111 0 1.52444 0.801936 1
+30.8233 44.3037 0.309082 2.00825 4.42948 1.73885 174426 0 -3.10336 0.801936 0
+56.5044 -5.81171 -0.0341797 1.95027 4.33533 1.65194 101030 0 -1.58554 0.796134 0
+22.5531 6.01625 0.346436 2.32393 5.01925 2.03789 118240 0 1.58254 0.794066 0
+19.7559 12.5547 0.209351 0.801563 0.946088 1.66816 128059 0 1.15083 0.791176 1
+49.777 24.2691 0.144653 1.84288 3.96281 1.53191 145001 0 -1.56251 0.785144 0
+49.1756 19.3922 0.233521 1.8339 3.96281 1.52221 137979 0 -1.58425 0.784814 0
+43.9469 -6.17078 0.0209961 2.04686 4.3993 1.77748 100523 0 -1.60985 0.78016 0
+-64.2061 -11.747 0.786133 0.805487 0.809429 1.71943 92229 0 -1.55833 0.779658 1
+-58.4728 -11.6008 0.804688 0.780323 0.809825 1.75763 92247 0 -1.6311 0.776962 1
+19.6282 10.9273 0.199585 0.806864 0.894645 1.61014 125719 0 1.43762 0.776623 1
+-22.5253 -0.285706 0.973633 2.25794 4.80815 1.7431 109207 0 -1.57214 0.762601 0
+19.3789 11.695 0.129395 0.745135 0.849103 1.60073 126654 0 1.51662 0.761716 1
+-59.3027 -12.3886 0.841797 0.763178 0.768976 1.78792 91308 0 1.59921 0.761006 1
+53.8264 12.3359 0.252441 0.854093 0.957006 1.77835 127698 0 1.57307 0.759582 1
+-45.3341 9.22453 0.794922 0.834512 0.930738 1.75591 122708 0 1.52223 0.755637 1
+39.4246 11.4294 0.124878 0.793386 0.847447 1.67632 126249 0 1.49003 0.754915 1
+-48.6966 -10.0947 0.659668 0.750063 0.771232 1.66003 94617 0 -1.5837 0.753829 1
+-48.1164 -3.67468 0.703125 2.0609 4.65567 1.50853 103979 0 -1.5761 0.750918 0
+-48.478 -10.9187 0.71582 0.795325 0.835531 1.70938 93214 0 -1.56428 0.749087 1
+-33.6762 -3.5525 0.713867 2.0609 4.57902 1.51332 104024 0 -1.60101 0.741674 0
+49.1403 26.6996 0.329102 1.91723 4.33109 1.64631 148743 0 -1.60481 0.738858 0
+-59.6156 -11.6098 0.828125 0.793967 0.766727 1.79054 92243 0 1.38979 0.735262 1
+52.617 3.62664 0.156128 2.03988 4.46858 1.46963 115058 0 1.61622 0.732783 0
+-69.5938 2.34297 0.889648 2.09438 4.56116 1.72111 112804 0 1.58577 0.718002 0
+17.9756 67.3523 -0.000488281 1.93415 4.28064 1.45109 208082 0 -3.13305 0.714827 0
+-43.6997 5.85211 1.40527 2.85709 6.73439 2.93057 118033 0 1.54497 0.708818 0
+15.9397 9.64953 0.380371 0.80588 0.966162 1.66084 123835 0 1.54753 0.708717 1
+-46.8544 9.86031 0.757812 0.830244 0.909622 1.72027 123639 0 1.5755 0.702428 1
+70.5642 -5.595 0.0544434 2.0112 4.47294 1.69608 101542 0 1.56063 0.695958 0
+18.0038 72.1097 0.0587158 2.05287 4.43814 1.7414 215102 0 3.13296 0.684053 0
+-57.5087 5.80961 0.694824 1.93604 4.18965 1.55337 117990 0 1.55598 0.64646 0
+-46.9812 10.6391 0.766602 0.813193 0.87777 1.66816 125043 0 1.55513 0.642769 1
+16.9863 11.1611 0.32373 0.878413 0.952112 1.6319 125711 0 1.3204 0.622689 1
+1.74102 -4.0693 1.7373 3.25336 13.9128 3.57661 103667 0 -1.66959 0.617916 0
+36.0353 38.2073 0.240967 0.856599 0.95188 1.8339 165550 0 -0.187779 0.617859 1
+-52.0913 10.8013 0.876953 0.843113 0.912959 1.74395 125027 0 1.50806 0.61451 1
+55.1735 9.7293 0.134766 1.00947 1.08814 1.82675 123958 0 -1.55636 0.606382 1
+-50.0964 -7.6386 0.907227 0.840647 0.894208 1.74907 98357 0 -1.48828 0.59843 1
+0.801567 32.1571 0.609375 2.06292 4.59245 1.6994 156548 0 -0.0977699 0.57684 0
+-11.3695 0.0128174 0.80957 2.11493 4.83169 1.56632 109710 0 -1.57798 0.571348 0
+49.4556 17.1616 0.254395 2.08011 4.66022 1.71273 134704 0 -1.55537 0.570391 0
+-59.29 -12.5177 0.851562 0.792612 0.798048 1.80635 90840 0 1.63382 0.569494 1
+-65.5541 -9.66125 0.837891 0.796297 0.812201 1.75505 95033 0 -1.49177 0.562777 1
+-59.4872 -11.6425 0.854492 0.77444 0.773495 1.78618 92244 0 -0.578898 0.560614 1
+17.5347 10.3575 0.327881 0.874134 0.941709 1.65033 124776 0 1.43525 0.551936 1
+0.735466 42.0952 0.666992 2.10258 4.75678 1.84288 171056 0 0.0718126 0.547162 0
+41.2284 36.2484 0.306641 0.866062 0.855137 1.68288 162758 0 0.238893 0.54153 1
+-59.6527 -11.4825 0.810547 0.770103 0.744226 1.73885 92711 0 1.56348 0.520009 1
+-18.6962 11.2212 0.844727 0.943263 0.963571 1.75334 126067 0 2.21858 0.515864 1
+22.6047 -8.27172 -0.357178 0.790679 0.818171 1.62078 97648 0 2.75149 0.505432 1
+-71.5245 5.5761 0.643066 1.94077 4.32687 1.54807 117478 0 1.57329 0.50177 0
+27.3095 -10.4241 -0.17041 0.807456 0.889418 1.63909 94387 0 1.71242 0.497253 1
+59.6991 3.62086 0.150146 1.90696 4.17332 1.48296 115080 0 1.58146 0.49347 0
+-72.1366 -4.02469 0.578125 2.01907 4.47731 1.5511 103436 0 -1.56094 0.465995 0
+67.6605 3.4314 0.213257 1.98292 4.38215 1.67796 114637 0 1.66281 0.461077 0
+-46.6798 10.0099 0.770508 0.81518 0.888333 1.70938 124108 0 1.60195 0.455865 1
+0.521873 47.8555 0.699219 2.00238 4.42948 1.72279 179479 0 0.0269876 0.455229 0
+24.4172 36.9603 0.113037 2.07099 4.76608 1.65356 163642 0 3.13345 0.454412 0
+53.7913 10.9969 0.148682 0.830853 0.963336 1.77228 125826 0 1.50767 0.438184 1
+25.7163 35.7961 0.346191 0.839211 0.764857 1.76709 161774 0 -2.05678 0.433981 1
+-65.6375 -9.56039 0.810547 0.794937 0.823381 1.76278 95500 0 -1.46727 0.433262 1
+54.9845 9.7914 0.118652 1.01733 1.50853 1.81874 123957 0 -1.52597 0.417806 2
+19.3844 11.8755 0.178467 0.823381 0.930284 1.65517 127122 0 1.49927 0.417746 1
+-59.1791 -12.3889 0.864258 0.799609 0.80588 1.81077 91309 0 1.57655 0.407569 1
+-50.6209 -8.84093 0.700195 0.772363 0.768226 1.64953 96483 0 -1.57627 0.400105 1
+37.5553 9.21219 0.834961 0.776902 0.832274 1.65841 122967 0 1.53029 0.396828 1
+-52.1975 10.9238 0.911133 0.843525 0.90829 1.7363 125494 0 1.50635 0.394434 1
+-65.4269 5.91172 0.605469 1.95218 4.42083 1.52407 117965 0 1.5781 0.392104 0
+19.551 11.6858 0.162109 0.781086 0.899903 1.63909 126655 0 1.5437 0.38122 1
+67.9714 -5.60992 -0.116211 0.801955 1.9191 1.43278 101534 0 -3.11269 0.378861 0
+59.4081 3.44594 0.152222 1.6192 3.48353 1.43173 114611 0 1.60127 0.372962 0
+-71.8044 5.32125 0.673828 2.10155 4.71976 1.61132 117009 0 1.53556 0.369315 0
+-46.6738 9.84906 0.756836 0.833697 0.9114 1.72447 123640 0 1.59735 0.362743 1
+-45.4914 9.3534 0.773438 0.840441 0.911622 1.75249 123175 0 1.55942 0.352648 1
+24.2509 36.3341 0.134399 2.04487 4.59245 1.62158 162705 0 -0.0388693 0.350199 0
+-18.9009 11.0481 0.822266 0.977789 0.9631 1.73545 125598 0 -1.65713 0.339718 1
+38.8853 35.9541 0.122559 0.792418 0.825797 1.6192 162283 0 1.47138 0.337094 1
+62.6213 14.928 0.631836 0.800195 1.28716 1.76278 131469 0 1.56005 0.336549 2
+27.3104 -10.6273 -0.194824 0.822979 0.934381 1.67878 93919 0 1.71989 0.332202 1
+-47.0759 10.5362 0.77832 0.835327 0.897708 1.67223 124574 0 1.53359 0.331336 1
+-71.5227 -7.56609 0.541992 1.83659 4.0966 1.48804 98290 0 -1.58745 0.329176 0
+27.1475 -10.4392 -0.152832 0.815778 0.928015 1.65114 94386 0 -1.42983 0.324664 1
+38.8942 35.7719 0.0991211 0.790679 0.816974 1.62912 161815 0 1.49238 0.323487 1
+52.4322 37.9523 1.15039 0.792612 0.814982 1.70771 165133 0 1.47019 0.322739 1
+66.4114 3.66187 0.193481 2.024 4.57455 1.72279 115101 0 1.66298 0.322526 0
+22.7549 -8.26688 -0.463135 0.823582 0.882497 1.69857 97649 0 2.85117 0.321673 1
+60.2542 3.67672 0.174316 2.15034 4.92698 1.56098 115082 0 1.58566 0.321247 0
+62.7758 14.92 0.61084 0.822176 1.80371 1.79931 131470 0 1.60196 0.319758 2
+25.7133 47.6406 1.06543 0.820973 0.773495 1.85009 179090 0 -2.85329 0.314366 1
+-64.4866 -7.21914 0.600098 1.87831 4.25147 1.49095 98780 0 -1.57661 0.31363 0
+-62.333 10.9657 0.799805 0.825192 0.906961 1.70189 125463 0 1.61058 0.31321 1
+39.3378 11.5644 0.20105 0.813789 0.833087 1.71607 126716 0 1.48999 0.309232 1
+-45.7781 9.99989 0.789062 0.870089 0.953275 1.77401 124110 0 1.53125 0.30767 1
+-59.3089 -12.976 0.900391 0.768976 0.791065 1.82319 90372 0 1.55296 0.307358 1
+-59.1727 -12.5537 0.866211 0.8262 0.816176 1.81519 90841 0 1.59275 0.303629 1
+53.8495 12.5193 0.268066 0.864794 0.953508 1.73207 128166 0 1.49496 0.302391 1
+53.9253 11.4731 0.201416 0.837369 0.97018 1.82854 126294 0 1.4381 0.298491 1
+-71.2652 5.3336 0.664062 1.69277 3.71182 1.48188 117011 0 1.557 0.297062 0
+-16.8508 66.3991 1.46387 1.99555 4.3736 1.63589 206569 0 -0.0600854 0.294824 0
+41.3384 36.1239 0.337402 0.931477 0.945395 1.73461 162291 0 0.138227 0.292696 1
+22.3059 64.1383 0.575195 0.830042 1.6431 1.81077 203415 0 2.97254 0.29209 2
+-59.3845 -12.0052 0.834961 0.770856 0.771986 1.80371 91776 0 1.4731 0.290779 1
+0.739685 48.437 0.689453 2.0609 4.63752 1.80283 180416 0 0.0204204 0.290176 0
+0.571716 32.7584 0.633789 2.06191 4.63299 1.66328 157483 0 -0.148226 0.288369 0
+1.12766 58.4014 0.794922 2.15034 4.81755 1.87739 194925 0 -0.109187 0.287568 0
+54.0987 11.6558 0.204834 0.863528 0.955606 1.77921 126763 0 1.55991 0.286469 1
+0.767426 43.3587 0.654297 2.12425 4.65113 1.84738 172928 0 -3.11995 0.28597 0
+56.8125 -17.1631 -0.125732 1.9936 4.48607 1.56594 84651 0 -1.48937 0.284377 0
+53.88 12.0281 0.22583 0.874775 0.950023 1.81431 127230 0 1.54723 0.283781 1
+0.471878 58.3841 0.798828 2.17994 4.85534 1.8519 194923 0 -0.0269807 0.278649 0
+-71.8117 5.84187 0.674805 2.16826 4.89342 1.65356 117945 0 1.56173 0.277865 0
+53.6494 10.868 0.196411 0.908013 1.00244 1.81697 125357 0 1.50104 0.275617 1
+19.4003 11.4834 0.192749 0.795908 0.871364 1.59566 126186 0 1.46689 0.272896 1
+53.6681 12.5461 0.277344 0.873281 0.93758 1.76709 128165 0 1.53925 0.269133 1
+-47.6164 -3.62406 0.714355 1.80195 3.70097 1.44367 103981 0 -1.57737 0.268941 0
+-14.835 -14.8377 0.172119 2.06191 4.66477 1.50705 87703 0 1.44034 0.26588 0
+-71.8774 -3.77289 0.533203 1.89304 4.1368 1.50705 103905 0 -1.52741 0.265309 0
+-0.496796 13.9281 0.761719 0.875843 2.17569 1.58711 129868 0 -0.188206 0.265309 0
+36.0153 38.4721 0.210449 0.885248 1.78182 1.8519 166018 0 -0.371824 0.265119 2
+0.16391 58.0923 0.827148 2.13986 4.77539 1.83569 194454 0 -0.0553633 0.263599 0
+0.811562 58.1088 0.775391 2.13778 4.76608 1.85733 194456 0 -0.030994 0.259638 0
+-65.0797 -7.19625 0.611328 2.08723 4.80815 1.58982 98778 0 -1.60138 0.256832 0
+-64.7725 -6.94226 0.573242 2.03292 4.62395 1.58672 99247 0 -1.53683 0.255715 0
+55.2037 9.56969 0.151245 0.955081 1.1149 1.82497 123490 0 -1.5542 0.250179 1
+36.1918 38.2002 0.20166 0.937237 1.17541 1.8339 165551 0 -0.231429 0.249813 1
+-59.4822 -11.4778 0.861328 0.806471 0.795325 1.78095 92712 0 -1.56717 0.24963 1
+-0.511955 13.2948 0.772461 0.884276 1.44755 1.79931 128932 0 -1.28451 0.24817 2
+-50.2909 -7.76172 0.935547 0.887954 1.33648 1.7888 97888 0 -1.4524 0.245447 1
+-71.5958 -4.01992 0.550781 1.72785 3.63648 1.47322 103438 0 -1.55984 0.245447 0
+-62.4873 10.8306 0.839844 0.873707 1.0632 1.7144 124994 0 1.67452 0.243283 1
+-71.2436 5.83367 0.666504 1.82675 3.95894 1.51665 117947 0 1.6316 0.243103 0
+-50.2709 -7.6243 0.928711 0.929432 1.19116 1.78095 98356 0 -1.44742 0.242564 1
+-31.2741 10.2219 0.6875 0.861212 0.945857 1.69195 124156 0 1.52594 0.242564 1
+-47.0716 9.81875 0.770508 0.841263 0.950254 1.71607 123638 0 1.59023 0.241847 1
+54.9069 10.0155 0.0805664 0.955314 1.27279 1.72616 124425 0 -1.44275 0.241489 1
+22.2351 65.7963 0.088623 0.814982 0.795714 1.61604 205755 0 -0.0529446 0.240239 1
+-47.0831 10.0176 0.772461 0.811606 0.895957 1.68042 124106 0 1.56963 0.240061 1
+67.8284 -5.62414 -0.0708008 0.812597 1.96845 1.42824 101533 0 -3.11745 0.239527 0
+25.5713 35.8898 0.322266 0.886438 0.835531 1.78095 162241 0 -1.71914 0.239349 1
+1.01398 32.7605 0.625488 2.08113 4.66477 1.69277 157485 0 -0.0178552 0.238107 0
+1.02625 31.5597 0.597656 2.07707 4.57008 1.72363 155613 0 -0.0864734 0.236869 0
+-20.8897 12.7139 0.991211 0.75153 0.699477 1.71105 127932 0 -0.055111 0.235988 1
+19.1891 11.697 0.155518 0.809825 0.909622 1.63989 126653 0 1.5287 0.23546 1
+-64.1836 -11.4684 0.744141 0.777091 0.799218 1.71105 92697 0 -1.55892 0.234407 1
+-58.5831 -11.4906 0.782227 0.806864 0.814982 1.76623 92714 0 -1.6057 0.233357 1
+51.9004 -9.93703 0.034668 0.842496 0.894645 1.64953 94932 0 1.57037 0.23231 1
+22.2853 12.218 0.397461 0.801759 0.954673 1.77055 127599 0 1.2259 0.231614 1
+1.11735 59.3436 0.864258 2.236 5.04875 1.92943 196329 0 0.038546 0.231093 0
+23.8686 -43.3794 1.6123 0.851595 1.64872 1.57245 46172 0 2.97888 0.229881 2
+-35.2903 12.3124 1.25195 0.843525 1.84018 1.75763 127419 0 2.59297 0.229708 2
+24.2509 37.5553 0.13562 2.08927 4.74286 1.69029 164577 0 3.12008 0.229535 0
+62.6094 15.0962 0.654297 0.830447 1.61841 1.77574 131937 0 1.57814 0.228501 2
+-59.8927 -11.4245 0.757812 0.699477 0.667446 1.55945 92710 0 1.60421 0.227813 1
+-45.429 10.0119 0.830078 0.855137 0.951415 1.77574 124112 0 1.85369 0.227126 1
+27.1387 -10.6305 -0.188721 0.827614 1.00981 1.68124 93918 0 2.60424 0.226784 1
+54.6312 10.0392 -0.0402832 0.811408 0.898585 1.50411 124424 0 -1.40565 0.224735 1
+-50.6297 -9.00078 0.726074 0.792999 0.820171 1.65356 96015 0 -1.56473 0.221687 1
+17.7301 9.87328 0.335938 0.857645 0.94355 1.67142 123841 0 1.3058 0.221519 1
+0.785233 33.4024 0.646973 2.07301 4.57902 1.69195 158420 0 -0.103955 0.219505 0
+-45.795 9.85687 0.78418 0.864583 0.95188 1.75163 123642 0 1.52646 0.219337 1
+0.841248 59.0359 0.848633 2.10772 4.66933 1.85733 195860 0 -0.0415607 0.218503 0
+13.0469 12.1745 0.513672 0.858274 0.996831 1.69691 127570 0 1.61434 0.218003 1
+55.1699 10.0211 0.107178 0.967224 1.18782 1.79054 124426 0 -1.51825 0.21767 1
+-0.727814 13.9047 0.760742 0.855554 1.96749 1.60425 129867 0 -0.243187 0.217005 0
+36.0569 38.0542 0.255127 0.941422 1.11408 1.83032 165082 0 -2.82938 0.21684 1
+39.3873 12.1128 0.262939 0.881851 0.913628 1.82943 127185 0 1.51713 0.21305 1
+-72.1328 -7.7875 0.646484 1.95027 4.39072 1.58169 97820 0 -1.57908 0.212232 0
+-71.2239 -7.7889 0.583008 1.92191 4.33533 1.52967 97823 0 -1.57934 0.211906 0
+-64.7569 -7.45969 0.588867 1.93699 4.46858 1.50264 98311 0 -1.58919 0.21158 0
+23.8191 -43.6673 1.49023 0.883305 1.15945 1.57976 45704 0 -3.07725 0.211091 2
+1.40492 57.7817 0.773438 2.13673 4.80815 1.8719 193990 0 -0.119021 0.210279 0
+54.1513 12.3297 0.217529 0.803719 0.912736 1.69691 127699 0 1.52145 0.210117 1
+-46.5798 6.19539 1.50195 3.11957 12.278 3.28849 118492 0 1.57064 0.209146 0
+49.4081 16.8798 0.259766 1.79229 3.7666 1.73207 134236 0 -1.56664 0.208985 0
+-18.8104 10.8156 0.799805 0.818571 0.861633 1.6192 125131 0 -1.6463 0.208179 1
+19.8679 12.4463 0.210449 0.891212 1.00957 1.7448 127592 0 1.23313 0.207215 1
+19.5946 11.3913 0.200684 0.82822 0.914074 1.62197 126187 0 1.49626 0.206254 1
+-0.122032 58.3961 0.830078 2.10772 4.71976 1.81874 194921 0 -0.107912 0.205775 0
+-4.6375 12.2733 0.760742 0.809429 0.870514 1.67223 127515 0 -1.68585 0.205615 1
+-71.5385 5.05813 0.663574 1.93038 4.27646 1.5511 116542 0 1.54208 0.204978 0
+1.73273 -4.51312 1.66113 3.08925 12.6678 3.38626 102731 0 -1.67098 0.204501 0
+17.467 10.6363 0.313477 0.789907 0.87777 1.54581 125244 0 1.43341 0.203708 1
+-62.8477 5.93844 0.713867 1.66734 3.665 1.51147 117973 0 1.43161 0.203549 0
+-45.1018 -6.88515 0.648438 1.78443 3.665 1.49569 99309 0 -1.63886 0.200871 0
+52.1044 3.60078 0.146606 1.82675 3.74094 1.4614 115056 0 1.61126 0.200871 0
+-59.8784 -11.6178 0.799805 0.760574 0.731973 1.69774 92242 0 1.51208 0.200401 1
+25.0832 35.0384 0.192749 0.773117 0.761503 1.51776 160836 0 -1.89736 0.200245 1
+53.6498 11.2988 0.229248 0.826402 0.924623 1.80371 126293 0 1.42398 0.199152 1
+27.324 -10.2019 -0.119873 0.870514 0.96263 1.70355 94855 0 -1.81456 0.198685 1
+51.922 -9.57867 0.0456543 0.872216 0.961691 1.66003 95868 0 1.56896 0.198375 1
+-59.1479 -12.9614 0.90332 0.78204 0.812201 1.8223 90373 0 1.52873 0.198064 1
+68.2483 3.6386 0.284668 2.25574 5.20901 1.83569 115107 0 1.63425 0.198064 0
+19.5025 10.8342 0.210327 0.847861 0.959814 1.5941 125250 0 1.5221 0.197444 1
+10.0748 -11.4252 0.487061 0.890668 0.996345 1.79404 92929 0 0.441633 0.196209 1
+66.0812 3.44781 0.126709 1.75078 4.04887 1.61644 114632 0 1.63872 0.196209 0
+-46.6219 10.3734 0.779297 0.812399 0.900782 1.69029 124576 0 1.61681 0.192843 1
+-0.509918 -16.7975 0.985352 2.06897 4.60593 1.809 84940 0 2.86797 0.192691 0
+38.6403 35.9674 0.132568 0.832884 0.874988 1.66246 162282 0 1.44759 0.192691 1
+-48.725 -9.88429 0.663086 0.801563 0.823381 1.68782 95085 0 -1.6194 0.191933 1
+37.8858 16.8538 0.798828 0.882066 1.61881 1.85371 134200 0 -2.30667 0.191328 2
+0.785858 33.738 0.657227 2.06897 4.53895 1.71105 158888 0 -2.91505 0.191177 0
+24.58 -10.3741 -0.112305 0.825797 0.975882 1.74395 94378 0 -1.63943 0.190724 1
+37.4006 9.20172 0.830078 0.804897 0.884006 1.71775 122966 0 1.49514 0.190724 1
+-46.8717 9.54859 0.743164 0.862054 0.926431 1.73376 123171 0 1.64517 0.189821 1
+-14.5403 39.8581 0.891602 2.06594 4.57008 1.69774 167732 0 2.79173 0.189671 0
+-20.6898 12.6691 0.944336 0.753735 0.69607 1.65275 127933 0 0.3171 0.188026 1
+1.03512 47.8617 0.652344 2.04287 4.50363 1.73461 179481 0 0.11122 0.188026 0
+-1.14304 66.0458 0.736328 0.893172 0.905633 1.77055 206150 0 -0.622016 0.188026 1
+-35.1518 12.3196 1.22754 0.824991 1.63589 1.75591 127420 0 2.00316 0.186836 2
+-1.37922 51.442 2.26367 0.830853 0.885302 1.81165 184621 0 0.95104 0.186836 1
+55.394 9.71593 0.148438 1.0239 1.18175 1.85642 123959 0 -1.52761 0.186539 1
+28.5611 -9.93656 0.0187988 0.907459 0.952577 1.77835 94859 0 -1.59579 0.185357 1
+-8.80539 -13.6081 0.391846 2.01513 4.50363 1.57591 89594 0 1.42962 0.184327 0
+-64.3406 -11.91 0.779297 0.780514 0.799609 1.68206 91760 0 -1.55941 0.183301 1
+0.453911 57.4533 0.775391 2.072 4.59245 1.83032 193519 0 -0.0416426 0.183155 0
+18.5807 49.7262 0.110229 2.12736 4.64205 1.69691 182344 0 3.06446 0.183009 0
+-0.500465 14.187 0.748047 0.899189 2.2801 1.60464 130336 0 -2.75269 0.182863 0
+49.6663 26.9384 0.309082 2.22076 5.255 1.78095 149213 0 -1.62211 0.182863 0
+1.13172 57.4583 0.748047 2.10155 4.74286 1.83928 193521 0 -0.0511808 0.181989 0
+-59.3295 -13.1927 0.932617 0.832274 0.880345 1.86551 89904 0 1.49923 0.181844 1
+-48.5864 -10.3303 0.699707 0.786828 0.806667 1.70771 94150 0 -1.63118 0.181553 1
+-72.4562 -7.56445 0.645508 1.96557 4.33533 1.60503 98287 0 -1.53404 0.181408 0
+24.4055 35.7453 0.189941 2.09029 4.46422 1.66084 161770 0 0.0126768 0.181408 0
+34.9747 30.3025 -0.0334473 0.644074 0.567561 1.14735 153847 0 -0.120547 0.180974 1
+-48.6989 -10.2697 0.659668 0.775764 0.788751 1.68864 94149 0 -1.62848 0.180107 1
+-31.265 10.5934 0.702148 0.917596 1.00049 1.70688 125092 0 1.48532 0.180107 1
+39.1113 35.957 0.137573 0.794355 0.863318 1.58208 162284 0 1.53372 0.179818 1
+-52.063 10.5013 0.826172 0.884384 0.976835 1.75249 124559 0 1.53444 0.179531 1
+22.02 57.4227 0.724121 0.854719 0.834308 1.81077 193586 0 0.263947 0.179531 1
+28.5512 -9.58063 0.0240479 0.940216 1.02122 1.77228 95795 0 -1.65787 0.179243 1
+46.5499 45.6123 1.25 2.24694 4.88387 1.93509 176347 0 -1.66423 0.178812 0
+48.5653 26.6855 0.313477 1.71022 3.73364 1.59488 148741 0 -1.63122 0.17824 0
+65.4795 3.71563 0.17749 1.89212 4.25563 1.62951 115098 0 1.64528 0.178097 0
+39.1009 35.7602 0.149048 0.786636 0.897927 1.58944 161816 0 1.59044 0.177811 1
+-3.69531 74.7656 0.300781 0.882066 0.992218 1.75591 218778 0 2.88427 0.177811 1
+-12.2422 -0.0367203 0.817383 2.07099 4.78473 1.55603 109239 0 -1.59428 0.177668 0
+-35.3119 12.5792 1.18359 0.847033 1.95599 1.76968 127887 0 3.02157 0.177241 2
+-23.5847 30.8498 -0.158936 0.812201 1.22478 1.73885 154600 0 -1.22463 0.176956 1
+27.0939 -10.1662 -0.11377 0.845587 0.944472 1.66165 94854 0 -1.45073 0.176388 1
+-69.0545 2.1825 0.799805 1.90882 3.97056 1.58363 112338 0 1.53968 0.175679 0
+-18.549 11.1378 0.821289 0.959287 0.955606 1.7542 125600 0 -1.73279 0.175538 1
+-0.500313 13.6009 0.77832 0.909566 2.06191 1.67305 129400 0 -0.527014 0.17441 0
+18.0578 72.7084 -0.0239258 2.3228 5.49645 1.95313 216038 0 3.10204 0.17427 0
+-23.7744 30.8278 -0.177734 0.789329 1.01302 1.73461 154599 0 3.07387 0.174129 1
+-48.2982 -10.9506 0.736816 0.809825 0.860372 1.70938 93215 0 -1.58324 0.173849 1
+34.3642 32.117 0.496582 0.739698 0.742411 1.76537 156653 0 -2.84383 0.17245 1
+-0.745934 13.6227 0.804688 0.869876 1.9191 1.66897 129399 0 -0.701605 0.1712 0
+10.0693 -11.5712 0.480713 0.904804 1.01203 1.79931 92461 0 0.443255 0.171061 1
+-70.9626 5.615 0.689453 1.6196 3.49717 1.45073 117480 0 1.62164 0.169543 0
+29.2878 6.84164 0.0352783 0.861002 1.71273 1.76106 119665 0 -0.735934 0.169131 1
+1.10516 27.3562 0.560547 1.94267 4.3608 1.56518 149529 0 -0.0557674 0.168583 0
+-70.594 -7.5518 0.602539 1.90045 4.21839 1.56594 98293 0 -1.55177 0.168173 0
+-0.751564 -17.0787 1.00195 2.12736 4.80815 1.83838 84471 0 2.90441 0.167491 0
+69.6692 -5.60757 -0.135498 1.80283 3.86723 1.56632 101539 0 1.57306 0.166811 0
+-35.3094 12.0939 1.26465 0.849933 1.81077 1.7958 126951 0 0.326359 0.166675 2
+0.272034 48.1365 0.662598 2.00042 4.50803 1.738 179946 0 -0.0436854 0.165998 0
+27.347 -9.10687 -0.15332 0.829029 0.865005 1.74736 96259 0 -1.52597 0.165863 1
+-71.8194 -7.25851 0.535645 1.85824 4.07266 1.52333 98757 0 -1.54444 0.165593 0
+0.738518 47.2628 0.552734 2.12944 4.66477 1.77661 178544 0 0.0802196 0.165054 0
+0.160469 59.018 0.84082 2.10772 4.68761 1.83749 195858 0 0.00641017 0.16492 0
+26.3366 -8.06672 0.0236816 0.876913 0.974691 1.76709 97660 0 1.67864 0.164785 1
+53.7431 11.642 0.221924 0.809627 0.891375 1.79755 126761 0 1.46795 0.164785 1
+24.8928 35.0336 0.200073 0.808245 0.820171 1.56365 160835 0 -0.37162 0.164785 1
+52.8786 3.89414 0.136719 2.28121 5.16848 1.5763 115527 0 1.57122 0.164651 0
+22.3519 -8.25242 -0.254639 0.846619 0.866273 1.67142 97647 0 2.74826 0.164516 1
+53.4219 11.0377 0.189453 0.837778 0.938955 1.77314 125824 0 1.46191 0.164516 1
+49.1597 16.4847 0.290771 0.935808 1.10339 1.69277 133767 0 -1.62788 0.16398 1
+-53.2937 -12.948 0.780273 0.781086 0.787212 1.67223 90391 0 -1.1708 0.16238 1
+-63.1839 5.94734 0.701172 1.39652 3.28528 1.48441 117972 0 1.27265 0.161982 0
+22.3388 -8.41812 -0.24292 0.81022 0.877341 1.64069 97179 0 2.6401 0.161849 1
+19.6011 11.9539 0.163208 0.833494 0.970654 1.67878 127123 0 1.48922 0.161849 1
+67.3951 3.14562 0.138306 1.79492 4.06472 1.59644 114168 0 1.6662 0.161188 0
+22.4372 64.1068 0.583496 0.836552 1.74821 1.76192 203416 0 3.01951 0.160529 2
+-66.0887 -7.21328 0.709961 1.94362 4.30579 1.6066 98775 0 -1.58813 0.160397 0
+39.4288 16.703 0.132446 0.795908 0.815778 1.62435 134205 0 2.69602 0.160266 1
+-63.5144 5.94516 0.644531 1.1268 2.62951 1.46999 117971 0 0.92995 0.158956 0
+17.8908 71.6269 0.0893555 1.6796 3.72635 1.62197 214165 0 3.10458 0.158565 0
+30.6588 43.7663 0.314453 1.91535 4.32264 1.72785 173489 0 3.10251 0.158044 0
+68.2394 -5.59985 -0.109619 0.843731 1.97712 1.42685 101535 0 -3.13513 0.157785 0
+-70.8917 -7.23421 0.529297 1.87373 4.22251 1.52445 98760 0 -1.56231 0.157525 0
+24.6755 37.2727 0.107666 2.09438 4.87434 1.72195 164111 0 3.13809 0.157008 0
+-17.0606 67.0095 1.45801 2.16932 4.75213 1.76365 207504 0 -0.0318208 0.156749 0
+67.6955 3.89879 0.295898 2.22946 4.79877 1.83569 115573 0 1.5954 0.154695 0
+-14.2754 40.1073 0.933594 2.1874 4.8982 1.77921 168201 0 2.91395 0.154695 0
+39.3187 16.7087 0.133789 0.812399 0.812994 1.66734 134204 0 2.679 0.154312 1
+39.4656 11.1553 0.139038 0.856599 0.894645 1.69525 125781 0 1.55321 0.154058 1
+-49.9028 -7.615 0.914062 0.895683 1.01972 1.78531 98358 0 -1.50606 0.153549 1
+-52.2522 10.4986 0.826172 0.879056 1.00489 1.74821 124558 0 1.61438 0.153549 1
+-62.7887 10.9921 0.842773 0.828624 1.70355 1.70855 125461 0 1.67236 0.152789 2
+-0.715935 13.2852 0.819336 0.861212 1.51037 1.7958 128931 0 -1.42352 0.152032 1
+-62.4959 -9.11234 0.679688 0.809034 0.794161 1.73291 95978 0 -2.42579 0.151278 1
+62.3494 14.9167 0.633789 0.842907 1.93604 1.8223 131468 0 1.47378 0.150778 2
+0.852188 30.9291 0.632812 2.09234 4.59694 1.6994 154676 0 -0.086558 0.150653 0
+27.5747 -10.4023 -0.167969 0.843731 0.898585 1.67796 94388 0 2.06364 0.150278 1
+-23.798 30.6445 -0.192627 0.793967 1.22882 1.72027 154131 0 -3.04903 0.150029 1
+0.526878 34.0448 0.691406 2.09643 4.66022 1.74395 159355 0 3.12174 0.150029 0
+0.586563 31.5728 0.618164 2.03292 4.49045 1.68617 155611 0 -0.114541 0.149283 0
+-0.235466 13.9333 0.741211 0.857645 2.15981 1.58247 129869 0 -0.555683 0.148911 0
+-62.3202 -9.12594 0.648438 0.80529 0.806667 1.71943 95979 0 -2.57941 0.148788 1
+-48.6359 -10.95 0.700684 0.821373 0.867967 1.73291 93213 0 -1.5626 0.148541 1
+-41.4394 -12.663 0.740234 0.788751 0.776522 1.62276 90896 0 3.03275 0.148294 1
+-48.6341 -10.8023 0.705078 0.802542 0.838801 1.7397 93681 0 -1.57478 0.148294 1
+38.6395 35.7548 0.0964355 0.820171 0.865428 1.67878 161814 0 1.4407 0.148294 1
+69.9747 -5.81125 -0.109131 2.02994 4.4036 1.67468 101072 0 1.53022 0.14817 0
+-64.2028 -12.2469 0.788086 0.732688 0.748965 1.64751 91293 0 -1.57709 0.148047 1
+-35.1175 12.5851 1.14355 0.832274 1.95886 1.74821 127888 0 2.76401 0.148047 2
+-0.460938 -17.4161 1.00684 2.31262 4.81285 1.94742 84004 0 3.06894 0.147924 0
+1.4314 73.2017 0.522461 0.830042 0.87179 1.70771 216454 0 0.0904457 0.147801 1
+0.789536 57.138 0.724609 2.10669 4.69677 1.82943 193052 0 -0.0372634 0.147678 0
+27.1053 -9.1332 -0.123047 0.79552 0.81022 1.69029 96258 0 -1.55951 0.147555 1
+-65.7106 -9.98781 0.849609 0.808047 0.836756 1.74225 94564 0 -1.46553 0.14731 1
+26.1427 -8.16078 -0.0241699 0.892627 0.953974 1.78356 97659 0 1.84725 0.14731 1
+24.4381 -14.2109 -0.864258 0.841673 0.949327 1.70106 88762 0 -2.82076 0.146575 1
+-65.4976 -9.97906 0.860352 0.822979 0.856181 1.73715 94565 0 -1.27129 0.146331 1
+-0.415314 58.722 0.831055 2.13569 4.72437 1.809 195388 0 -0.136933 0.144511 0
+34.7714 30.2995 0.0726318 0.750979 0.718166 1.43488 153846 0 -0.000317259 0.143908 1
+0.495316 49.7664 0.637695 2.00336 4.37787 1.70688 182287 0 3.12167 0.143668 0
+0.513283 59.3413 0.849609 2.17038 4.8982 1.88566 196327 0 0.0195326 0.143548 0
+-46.2687 10.0795 0.75293 0.819171 0.94309 1.73545 124109 0 1.56199 0.143188 1
+9.65141 50.8017 0.37793 0.835531 1.23363 1.71022 183720 0 0.726234 0.143188 1
+57.4334 -28.2559 -0.483154 0.876485 1.03427 1.84648 68273 0 -0.676293 0.142948 1
+24.5591 -10.1779 -0.0793457 0.866062 1.0022 1.74821 94846 0 -1.62403 0.142709 1
+-45.9455 29.9238 0.453369 1.98098 4.4338 1.62475 153126 0 1.57194 0.14259 0
+24.1339 -43.0505 1.51074 0.855345 1.11463 1.60347 46641 0 0.0473783 0.141519 1
+-59.1377 -12.0376 0.830078 0.7435 0.768226 1.78182 91777 0 -1.47595 0.141045 1
+1.85844 12.3366 0.303711 0.576218 0.643445 0.947475 127535 0 1.53402 0.141045 1
+-73.4255 -7.52469 0.765625 2.09848 4.63299 1.72869 98284 0 1.56091 0.140808 0
+0.180786 57.1525 0.768555 2.10053 4.67846 1.8223 193050 0 -0.0909711 0.14069 0
+-16.8038 65.82 1.33789 2.24694 5.17858 1.81697 205633 0 -0.112041 0.140572 0
+-70.5108 6.89375 0.648438 2.01022 4.48169 1.59099 119353 0 3.08012 0.140454 0
+-44.9181 5.97125 1.41895 2.94204 8.47159 3.15018 118029 0 1.58775 0.140218 0
+-72.1519 -4.54531 0.575195 2.10463 4.61944 1.57476 102500 0 -1.60672 0.139045 0
+26.1128 35.9616 0.294189 0.802151 0.794937 1.73122 162243 0 1.62451 0.138695 1
+-53.3113 -12.7227 0.821289 0.818971 0.84538 1.69195 90859 0 -1.6368 0.137532 1
+-59.5456 -12.3364 0.836914 0.778421 0.790679 1.78531 91307 0 1.55523 0.137532 1
+24.0978 -43.3244 1.56738 0.914241 1.58092 1.58517 46173 0 0.171325 0.136378 2
+8.39297 -2.76054 0.635742 0.816375 0.931647 1.72616 105560 0 1.72455 0.136148 1
+26.3509 -7.93797 0.118286 0.850971 0.911622 1.74565 98128 0 1.8426 0.135919 1
+1.41203 73.3409 0.537109 0.868179 0.954207 1.72785 216922 0 0.115402 0.135919 1
+46.2903 45.892 1.35645 2.21751 4.62395 1.85009 176814 0 -1.729 0.135575 0
+-35.1544 12.1047 1.25684 0.837778 1.4779 1.78967 126952 0 0.158771 0.135461 2
+-48.2986 -10.8137 0.771484 0.808442 0.850971 1.72279 93683 0 -1.63011 0.134321 1
+-16.5638 66.1003 1.40137 2.02302 4.56562 1.7144 206102 0 -0.0492488 0.133868 0
+-23.4356 -0.235313 0.991211 2.18954 5.01925 1.70855 109204 0 -1.55834 0.133528 0
+52.4108 38.1167 1.12891 0.861633 0.984496 1.75078 165601 0 1.54008 0.132515 1
+-59.6072 -11.9348 0.822266 0.798828 0.776143 1.80019 91775 0 1.46586 0.13229 1
+-20.3595 12.5892 0.74707 0.661282 0.635949 1.4116 127934 0 1.17844 0.132066 1
+26.0145 -8.41375 -0.276123 0.815778 0.894208 1.74992 97191 0 1.8087 0.131173 1
+39.2381 12.5373 0.30127 0.844555 0.901442 1.77055 128120 0 1.56013 0.131173 1
+-0.723747 14.2054 0.767578 0.981796 2.26346 1.6323 130335 0 0.396253 0.131062 0
+18.0585 56.2808 -0.000976562 1.60543 3.38296 1.42964 191702 0 3.12742 0.13084 0
+48.8911 17.1422 0.228516 1.85915 3.81472 1.54242 134702 0 -1.53766 0.130396 0
+68.2763 12.0506 -0.0842285 0.759832 0.860372 1.71691 127275 0 1.50342 0.130285 1
+-62.3073 5.90328 0.729492 1.8719 4.15299 1.56824 117975 0 1.4757 0.130064 0
+-1.09312 -42.3841 0.134521 2.07707 4.60593 1.72785 47498 0 2.97807 0.129954 0
+-63.7928 5.93687 0.657715 1.08967 2.47382 1.48152 117970 0 0.690797 0.129954 0
+68.268 12.2842 -0.207275 0.739156 0.795714 1.67142 127743 0 1.47959 0.129403 1
+-38.5966 23.8408 0.804688 1.98486 4.48169 1.58208 144257 0 -1.52957 0.128854 0
+27.0759 -9.39391 -0.0942383 0.838392 0.866273 1.66084 95790 0 -1.51791 0.128525 1
+34.3705 31.9516 0.29834 0.783377 0.781467 1.73885 156185 0 -2.88218 0.12787 1
+53.7297 12.0941 0.224854 0.857227 0.949791 1.81254 127229 0 1.56137 0.127653 1
+53.8088 10.8484 0.144409 0.89875 0.986903 1.78182 125358 0 1.53144 0.127435 1
+-38.2081 -9.39765 0.942383 0.827816 0.823381 1.85099 95586 0 1.59247 0.127218 1
+-18.7162 11.6816 0.739258 0.784334 0.849103 1.57476 126535 0 1.50014 0.127218 1
+-1.38031 51.5577 2.17578 0.839826 0.902543 1.79404 185089 0 0.925739 0.127218 1
+-66.3995 -7.45406 0.790039 2.05087 4.49045 1.64229 98306 0 -1.59188 0.12711 0
+17.8553 66.8169 -0.0522461 1.6706 3.6223 1.38059 207145 0 -3.09789 0.12711 0
+25.1048 35.2806 0.245972 0.8262 0.854928 1.65679 161304 0 -1.63322 0.127002 1
+-45.5903 10.3245 0.822266 0.843937 0.946781 1.73885 124579 0 1.766 0.125923 1
+54.6428 9.85609 0.0354004 0.849933 0.989074 1.65356 123956 0 -1.52157 0.125708 1
+-15.9005 65.3591 0.991211 0.804897 0.865428 1.6936 205168 0 -0.329285 0.125708 1
+0.477112 37.2745 0.679688 2.09643 4.56116 1.66084 164035 0 0.0294421 0.125386 0
+68.0075 12.2728 0.141357 0.803523 0.899463 1.77574 127742 0 1.46514 0.125279 1
+34.3497 30.3091 0.0749512 0.705652 0.69607 1.46211 153845 0 -0.137685 0.125279 1
+34.9702 30.4561 0.0998535 0.717991 0.685948 1.34335 154315 0 -0.405564 0.125279 1
+42.0542 46.553 -0.115967 0.84229 0.887033 1.74225 177737 0 -2.04865 0.125279 1
+-73.1453 -7.2414 0.72998 1.97037 4.27646 1.65841 98753 0 -1.37449 0.124959 0
+1.10133 28.3118 0.557617 1.9868 4.42948 1.56709 150933 0 -0.0363917 0.124426 0
+41.752 36.0497 0.299805 0.906684 0.938496 1.74395 162292 0 1.21614 0.124001 1
+-62.2858 -9.34515 0.70459 0.820171 0.832274 1.72111 95511 0 2.73924 0.123789 1
+-50.6356 -8.59312 0.770508 0.811804 0.888333 1.76192 96951 0 -1.51583 0.123577 1
+39.3153 16.5878 0.0872803 0.773684 0.777281 1.59371 133736 0 2.66578 0.123577 1
+25.0815 34.8169 0.243896 0.817373 0.833901 1.42337 160368 0 -1.1637 0.123577 1
+0.586563 42.6695 0.625977 2.18847 4.78473 1.90696 171991 0 0.0211073 0.123471 0
+37.6766 16.8572 0.895508 0.866273 0.980179 1.82141 134199 0 2.10184 0.123366 2
+-70.5637 -6.91438 0.55957 1.87373 4.20194 1.52631 99229 0 -1.57845 0.12326 0
+46.6675 -8.37781 0.695312 0.821775 0.826604 1.80283 97255 0 1.52753 0.123155 1
+-1.33063 66.0494 0.666016 0.843319 0.817772 1.72447 206149 0 -1.02942 0.123155 1
+-2.05235 -18.088 0.992188 2.21319 4.86958 1.88935 83063 0 1.37112 0.123049 0
+19.1566 11.9085 0.18689 0.815977 0.90497 1.6455 127121 0 1.56377 0.122944 1
+-71.8459 -6.91859 0.523438 1.87373 4.1247 1.54091 99225 0 -1.54266 0.122628 0
+0.756332 49.4436 0.618164 2.07099 4.51684 1.74907 181820 0 -3.11287 0.122628 0
+-18.9025 11.2535 0.819336 0.981497 1.05466 1.75677 126066 0 -2.25524 0.122313 1
+34.7634 34.0136 0.0913086 0.864372 0.896832 1.44437 159462 0 1.70762 0.121895 1
+24.4621 38.1848 0.195557 2.0599 4.66022 1.67468 165514 0 -3.13715 0.121373 0
+-73.1207 -7.77875 0.758789 2.01218 4.42948 1.65194 97817 0 -1.51913 0.120957 0
+35.8131 38.255 0.251221 0.907127 1.21317 1.84828 165549 0 -0.450766 0.120853 1
+-0.477814 66.0106 1.21875 1.13059 1.24149 2.02895 206152 0 -0.529328 0.120646 1
+-45.3933 9.84078 0.803711 0.818971 0.921243 1.74225 123644 0 1.55382 0.120439 1
+17.7661 62.2442 -0.195557 1.95218 4.27646 1.51591 200593 0 3.08124 0.120335 0
+27.5746 -10.6475 -0.185059 0.812201 0.895519 1.67468 93920 0 1.84876 0.120232 1
+-15.8997 65.2189 1.00586 0.80039 0.847033 1.67305 204700 0 -0.0598789 0.120232 1
+-58.2184 -11.6391 0.816406 0.782995 0.829434 1.7414 92248 0 -1.66072 0.120026 1
+5.88657 -2.98328 0.794922 0.811408 1.34302 1.77488 105084 0 1.36345 0.120026 2
+-46.6411 10.6756 0.788086 0.84538 0.96569 1.72447 125044 0 1.60915 0.119819 1
+62.8317 14.6636 0.620117 0.855137 2.0014 1.81077 131002 0 1.65434 0.119819 2
+62.3419 15.1386 0.668945 0.843113 2.0112 1.82052 131936 0 1.63833 0.119819 2
+9.46031 46.5939 -0.205811 0.945395 1.62356 1.85461 177635 0 1.88826 0.118998 1
+19.7161 59.9243 0.65918 0.872428 0.81538 1.91255 197323 0 -0.278617 0.118998 1
+-50.472 -8.58054 0.788086 0.802346 0.973264 1.75163 96952 0 -1.54305 0.118589 1
+-1.24976 51.4384 2.16406 0.857645 0.972789 1.86096 184622 0 1.10063 0.118589 1
+-0.16008 59.318 0.811523 2.22837 5.12826 1.92943 196325 0 -0.0636347 0.118589 0
+1.97914 12.3289 0.378418 0.580454 0.643131 0.992702 127536 0 -1.3469 0.118181 1
+54.147 12.5464 0.249756 0.813789 0.883359 1.65679 128167 0 1.50706 0.118181 1
+43.3891 43.5989 0.510742 0.838187 0.992218 1.7431 173529 0 0.924352 0.117978 1
+1.05804 48.7513 0.673828 2.10772 4.73361 1.78618 180885 0 0.0943208 0.117978 0
+-37.5492 25.115 0.681641 2.072 4.54338 1.57092 146132 0 -0.0667811 0.117775 0
+46.8529 45.3002 1.06348 2.2801 4.99969 1.95122 175880 0 -1.59814 0.117775 0
+17.9495 10.1191 0.329102 0.884816 0.997075 1.67878 124310 0 1.32704 0.117572 1
+0.880623 27.0581 0.543945 1.89212 4.28482 1.56518 149060 0 -0.0832224 0.117167 0
+9.84813 35.6259 0.742188 0.817373 0.899024 1.7397 161724 0 -0.889393 0.117167 1
+24.7136 36.3394 0.163208 2.05487 4.14489 1.65437 162707 0 -0.0781924 0.116563 0
+-13.967 39.8609 0.912109 2.01022 4.39501 1.64269 167734 0 3.00029 0.116161 0
+24.8094 -43.3668 1.62109 2.09131 4.63752 1.5906 46175 0 0.722248 0.115961 0
+-3.07063 -15.1903 0.564453 2.03193 4.61493 1.61447 87272 0 1.53421 0.115761 0
+-57.762 6.11742 0.700195 2.26457 4.9511 1.6447 118457 0 1.5658 0.115761 0
+-72.0841 5.03594 0.675781 2.24914 5.20393 1.71189 116540 0 1.53473 0.115561 0
+-20.9152 12.8465 1 0.734121 0.69675 1.71859 128400 0 0.175854 0.115561 1
+58.7634 3.63016 0.186523 1.76192 3.7082 1.65194 115077 0 1.57888 0.115361 0
+-18.7286 12.0042 0.719727 0.7486 0.752999 1.35554 127003 0 1.50961 0.115361 1
+-2.69812 15.8168 0.895508 0.797075 0.858903 1.72111 132669 0 0.0744968 0.115361 1
+-0.220001 -16.5233 0.978516 2.10155 4.61944 1.86551 85409 0 2.86563 0.115162 0
+-1.07562 -14.8222 0.817383 0.773873 0.795714 1.79142 87746 0 1.59986 0.115162 1
+-4.64094 12.1053 0.734375 0.800781 0.850348 1.63629 127047 0 -1.35636 0.115162 1
+49.3441 16.5 0.2771 1.00147 1.29821 1.72616 133768 0 -1.61397 0.115162 1
+-64.5055 -6.6575 0.571289 2.05588 4.61493 1.61289 99716 0 -1.4163 0.114963 0
+-45.0769 9.20125 0.800781 0.862475 0.953275 1.7602 122709 0 1.59618 0.114963 1
+23.9303 -43.1028 1.60645 0.874134 1.32835 1.60739 46640 0 2.55256 0.114765 2
+57.3739 -17.3981 0.0234375 2.39069 5.72657 1.88198 84185 0 -1.53177 0.114765 0
+-23.5916 30.6541 -0.145508 0.814584 1.5349 1.75163 154132 0 -0.665658 0.114765 1
+35.7072 43.8027 0.348633 1.69774 3.55224 1.73122 173505 0 -3.10157 0.114765 0
+46.7662 -8.27695 0.627441 0.806077 0.870939 1.78967 97724 0 1.4476 0.114567 1
+58.3306 11.3919 -0.0366211 0.910121 0.979223 1.66409 126308 0 2.10271 0.114567 1
+23.1677 -8.52836 -0.314209 0.847861 0.980658 1.77488 97182 0 2.8295 0.114171 1
+-70.2698 6.59235 0.647461 1.81608 3.89756 1.51628 118886 0 -3.09396 0.114171 0
+-59.7043 -11.1289 0.798828 0.75872 0.74459 1.72027 93179 0 1.56528 0.113776 1
+27.6679 -9.06656 -0.0751953 0.857436 0.94079 1.77228 96260 0 -1.53814 0.113776 1
+-71.532 6.16531 0.665039 2.13882 4.76143 1.6339 118414 0 1.61531 0.113776 0
+9.86625 -11.415 0.514648 0.935694 1.06216 1.82052 92928 0 0.0126676 0.113383 1
+59.4241 3.87547 0.148071 1.9579 4.01736 1.53041 115547 0 1.54739 0.113187 0
+58.1203 11.388 -0.00390625 0.920175 0.988832 1.6796 126307 0 -1.7062 0.113187 1
+19.4736 12.3731 0.250244 0.846206 0.946319 1.66979 127590 0 1.44063 0.112991 1
+23.9569 36.9499 0.100586 2.08011 4.8506 1.63469 163640 0 2.9939 0.112795 0
+-48.1464 -3.14758 0.724609 2.08723 4.71055 1.54581 104915 0 -1.49544 0.1126 0
+-31.4294 10.6109 0.683594 0.916924 0.976835 1.72447 125091 0 1.58699 0.1126 1
+23.9444 36.6634 0.115112 2.02697 4.69218 1.61683 163172 0 1.45277 0.1126 0
+-1.76031 -40.4758 0.0244141 0.810616 0.921468 1.66003 50304 0 2.42238 0.11221 1
+-0.460152 -17.7487 0.983398 2.23272 4.85534 1.91816 83536 0 -0.418643 0.112016 0
+-62.5058 -9.33797 0.734375 0.820572 0.831868 1.74992 95510 0 -2.40344 0.111822 1
+1.09977 30.2675 0.618164 2.08113 4.58797 1.64591 153741 0 -0.0894294 0.111628 0

+ 500 - 0
src/detection/CenterPoint-master/results1/seq_0_frame_101.bin.txt

@@ -0,0 +1,500 @@
+-11.4172 6.29781 0.601074 2.00629 4.59694 1.49022 118602 0 1.56324 0.933431 0
+26.9856 13.9747 0.229492 2.11597 4.56116 1.67142 129954 0 0.926614 0.914291 0
+-17.3837 6.35344 0.723633 2.02796 4.59694 1.76365 118583 0 1.54971 0.909586 0
+30.3231 -6.2711 0.159302 2.10053 5.05862 1.88198 100480 0 -1.58207 0.90764 0
+-34.0216 -6.93859 0.557129 2.03491 4.61493 1.48478 99343 0 -1.58798 0.90599 0
+17.6791 33.4413 0.0379639 2.05789 4.67389 1.51813 158473 0 3.07306 0.904313 0
+-26.6822 -6.67664 0.55957 1.99652 4.4338 1.48877 99834 0 -1.62233 0.901053 0
+42.9354 26.3628 0.334473 2.23491 5.40067 1.92661 148256 0 1.49172 0.898053 0
+37.6039 -6.15937 0.0343018 1.97519 4.49923 1.5334 100503 0 -1.55503 0.888566 0
+29.3322 3.91785 0.861328 2.62182 6.05438 2.98837 115453 0 1.54248 0.88642 0
+3.27453 16.1441 0.728516 2.20133 4.78941 1.80547 133156 0 -0.0274469 0.883236 0
+32.4152 43.9979 0.122559 1.99847 4.42948 1.56174 173963 0 3.12222 0.879043 0
+0.274689 20.7023 0.655273 2.20779 4.8791 1.89396 139698 0 0.0166325 0.877162 0
+17.7203 28.1003 0.178101 2.00531 4.289 1.75163 150517 0 3.11696 0.876951 0
+-22.9487 6.30516 0.583496 1.81874 3.99779 1.5443 118566 0 1.54476 0.874399 0
+52.6672 11.0217 0.346436 0.802934 0.91229 1.77574 125822 0 1.47956 0.873215 1
+12.0769 11.9847 0.551758 0.820572 0.971602 1.67305 127099 0 1.55286 0.871037 1
+6.68953 16.3768 0.666992 2.09438 4.70136 1.73122 133634 0 0.0043988 0.867598 0
+52.9808 11.6602 0.348633 0.795131 0.892464 1.76623 126759 0 1.57256 0.86681 1
+-37.8691 5.99563 0.518555 1.98292 4.64205 1.6435 118051 0 1.51185 0.865338 0
+48.5286 19.3766 0.285645 1.92285 4.27228 1.59099 137977 0 -1.58593 0.860486 0
+24.1384 17.4135 0.244141 2.07707 4.51684 1.69277 135093 0 0.765793 0.859782 0
+16.8873 3.24289 0.547852 2.09131 4.4338 1.87739 114478 0 1.55684 0.855972 0
+48.4983 17.1525 0.410156 2.03292 4.86483 1.79317 134701 0 -1.56942 0.854521 0
+39.5074 41.366 0.241699 1.93132 4.1006 1.67142 170241 0 -1.82348 0.845942 0
+17.1862 56.7948 -0.146484 1.95027 4.46858 1.553 192635 0 -3.10154 0.844152 0
+55.5695 -5.82906 0.184082 1.90882 4.21839 1.63111 101027 0 -1.56723 0.843895 0
+-46.3067 -6.95156 0.589844 2.00238 4.3608 1.61604 99305 0 -1.57542 0.832781 0
+43.0609 -6.15015 0.192383 2.01415 4.39072 1.77835 100520 0 -1.624 0.832373 0
+-49.1682 -10.7925 0.669434 0.744045 0.786443 1.68042 93680 0 -1.61471 0.829216 1
+-60.3575 -11.5769 0.6875 0.769352 0.77274 1.75505 92241 0 1.32008 0.824744 1
+17.4806 49.4686 0.00524902 1.89952 4.01736 1.59566 181872 0 3.05308 0.821474 0
+27.6792 -9.77797 0.162109 0.897325 0.984016 1.76796 95324 0 -1.56449 0.820757 1
+18.3809 11.7222 0.232666 0.714319 0.791838 1.58401 126651 0 1.56432 0.813757 1
+-19.4424 10.9742 0.775391 0.888496 0.88142 1.76192 125597 0 -1.55649 0.812867 1
+-49.3941 -10.0803 0.59082 0.770291 0.792612 1.62753 94615 0 -1.61761 0.808672 1
+-70.8827 2.35594 0.636719 2.0171 4.48607 1.54279 112800 0 1.59478 0.807764 0
+38.2883 12.2449 0.408203 0.857018 0.880345 1.78967 127649 0 1.45897 0.804406 1
+39.8224 3.57469 0.281006 2.1139 4.73823 1.53528 115018 0 1.57593 0.801781 0
+48.4105 14.5634 0.418701 2.10463 5.02907 1.81342 130957 0 -1.61685 0.801315 0
+-40.5709 -7.10046 0.506348 2.02697 4.49045 1.51037 98855 0 -1.63702 0.800848 0
+-60.2444 -12.3632 0.666992 0.769164 0.781467 1.7542 91305 0 1.58505 0.798187 1
+29.8985 44.0494 0.284668 1.97133 4.22664 1.77661 173955 0 3.12145 0.797714 0
+-32.2199 10.3799 0.591797 0.835735 0.943781 1.70688 124621 0 1.52808 0.783657 1
+-34.0198 -3.54938 0.639648 2.09029 4.54782 1.51776 104023 0 -1.57135 0.783492 0
+21.8833 6.03062 0.396973 2.34674 5.44835 2.05588 118238 0 1.54876 0.777469 0
+3.3025 23.2459 0.485107 2.05387 4.45986 1.57707 143452 0 -0.0229005 0.770637 0
+50.9237 -9.77641 0.193481 0.814186 0.909178 1.65922 95397 0 1.50348 0.770119 1
+-44.6378 5.85532 1.33594 2.8936 7.20381 3.15634 118030 0 1.58141 0.76856 0
+-46.3043 9.19891 0.6875 0.873494 0.966398 1.78095 122705 0 1.61019 0.76769 1
+38.4718 11.4341 0.210938 0.789329 0.821775 1.62832 126246 0 1.47668 0.763484 1
+14.9456 9.64703 0.385742 0.757794 0.932558 1.57476 123832 0 1.56252 0.76207 1
+51.6484 3.74375 0.34375 1.98195 4.4036 1.49861 115055 0 1.56457 0.760651 0
+-59.1588 -11.6498 0.706055 0.83329 0.861002 1.74992 92245 0 -1.6801 0.758868 1
+16.7463 10.1144 0.402588 0.840852 0.938955 1.6796 124306 0 0.55411 0.757077 1
+-48.1923 -3.66031 0.538086 2.03391 4.41221 1.51999 103979 0 -1.58215 0.756717 0
+48.4302 26.7786 0.380371 2.00531 4.50803 1.68288 148741 0 -1.64947 0.745213 0
+34.9516 44.3182 0.388184 1.98971 4.45116 1.84738 174439 0 -3.13692 0.744471 0
+18.7508 12.5439 0.209473 0.788751 0.8929 1.60974 128056 0 -0.254179 0.741674 1
+-58.4378 5.82883 0.493652 2.0112 4.39501 1.57284 117987 0 1.56433 0.735831 0
+-47.8686 9.84109 0.615234 0.81538 0.945857 1.70438 123636 0 1.60824 0.727879 1
+-64.9047 -11.7634 0.647949 0.753367 0.757054 1.67142 92227 0 -1.54503 0.715325 1
+-23.1953 -0.266251 0.901367 2.29126 4.82226 1.70023 109205 0 -1.57089 0.715225 0
+52.8416 12.3997 0.391357 0.850971 0.950023 1.77921 127695 0 1.61067 0.709724 1
+-0.166641 42.1078 0.688965 2.11493 4.68761 1.86278 171053 0 0.0476756 0.708616 0
+17.0224 72.1498 -0.0915527 2.03292 4.3993 1.69029 215099 0 3.10969 0.695752 0
+-46.5762 9.96906 0.682617 0.800781 0.933925 1.72616 124108 0 1.53245 0.687945 1
+18.6495 10.8494 0.250732 0.773684 0.866697 1.59099 125248 0 1.56092 0.661814 1
+49.0681 24.2752 0.220703 1.94267 4.34805 1.61762 144999 0 -1.53169 0.66061 0
+40.4172 36.2647 0.380127 0.768413 0.756685 1.65922 162756 0 -3.12372 0.656548 1
+16.5516 10.2975 0.385254 0.772551 0.849933 1.58285 124773 0 0.968086 0.652574 1
+-11.6545 0.0124207 0.780273 2.08011 4.53452 1.53228 109709 0 -1.59838 0.642881 0
+-47.8952 10.61 0.660156 0.838392 0.918997 1.69691 125040 0 1.66211 0.641647 1
+16.5816 10.1881 0.405762 0.817572 0.920344 1.66328 124305 0 -1.4575 0.637595 1
+-51.3765 5.87649 0.787109 2.30923 5.34297 2.11287 118009 0 1.56263 0.636579 0
+35.1416 38.2038 0.17749 0.839006 1.57476 1.77748 165547 0 -0.340459 0.629775 2
+16.043 11.1577 0.367676 0.864161 0.906075 1.62633 125708 0 1.30121 0.626124 1
+54.4946 9.80281 0.288574 0.992097 1.13177 1.7888 123956 0 -1.58909 0.622804 1
+69.6172 -5.54719 0.121826 1.96749 4.35655 1.48514 101539 0 -1.61301 0.621713 0
+38.9825 31.8586 0.203857 1.9723 4.61944 1.51037 156199 0 -0.0952059 0.619874 0
+17.0622 67.3975 -0.0673828 1.90975 4.25979 1.51258 208079 0 -3.12965 0.61607 0
+18.7378 12.4545 0.223755 0.755577 0.866273 1.60151 127588 0 0.448184 0.613815 1
+59.0263 3.66539 0.302246 2.02006 4.54338 1.54581 115078 0 1.55587 0.60679 0
+-66.377 -9.54242 0.648438 0.812002 0.810616 1.80723 95498 0 -1.45441 0.599954 1
+-53.0478 10.7489 0.738281 0.807258 0.890722 1.70355 125024 0 1.31249 0.592077 1
+23.4545 36.6833 0.142212 1.99457 4.52567 1.54997 163171 0 3.08312 0.57087 0
+-58.1755 -12.9377 0.711914 0.814186 0.852219 1.74821 90376 0 1.52282 0.550245 1
+35.2784 37.9784 0.182495 0.830244 1.7144 1.79843 165080 0 -0.228429 0.539105 2
+-43.3098 -24.13 1.00781 0.757979 0.780704 1.70938 74042 0 -2.30321 0.530723 1
+24.835 35.8902 0.298096 0.864161 0.813391 1.72953 162239 0 -1.69702 0.526708 1
+-51.1033 -7.63539 0.714844 0.859532 0.977074 1.76278 98354 0 1.75625 0.526099 1
+1.42235 -4.04 1.74805 3.24701 13.9673 3.64003 103666 0 -1.71477 0.513181 0
+-65.4225 -7.19375 0.449219 1.96749 4.4338 1.47142 98777 0 -1.54599 0.509215 0
+-0.203278 48.1595 0.711914 2.11081 4.70136 1.809 179945 0 0.00533976 0.502136 0
+-32.3834 10.3714 0.573242 0.852427 1.05389 1.69608 124620 0 1.58834 0.484746 1
+24.9116 -34.0627 -1.30957 0.808837 0.900782 1.74055 59747 0 -3.13425 0.478468 1
+-60.2497 -12.5211 0.65918 0.788944 0.798048 1.74821 90837 0 1.53793 0.474814 1
+-63.2736 10.9388 0.603516 0.786443 0.797659 1.6837 125460 0 1.07057 0.471588 1
+-63.2777 10.8147 0.639648 0.801759 0.82822 1.67305 124992 0 1.44249 0.468608 1
+61.8213 14.9386 0.77832 0.803523 1.81697 1.78356 131467 0 1.48824 0.466268 2
+18.3909 11.8761 0.241943 0.772929 0.859322 1.58944 127119 0 1.4728 0.453625 1
+40.2944 36.2681 0.385254 0.815778 0.807061 1.68288 162755 0 -2.99335 0.436082 1
+16.697 10.2868 0.388672 0.827816 0.891157 1.62237 124774 0 0.645111 0.431524 1
+26.3745 -10.6322 -0.0568848 0.79552 0.879915 1.49387 93916 0 1.66038 0.431105 1
+-72.4592 -4.27937 0.44751 2.04187 4.53895 1.61841 102967 0 -1.56973 0.429968 0
+-48.0372 9.82625 0.636719 0.832477 0.968997 1.71189 123635 0 1.60022 0.427038 1
+66.7427 3.73656 0.395996 2.06594 4.51684 1.69112 115102 0 1.56517 0.423756 0
+40.4167 36.1206 0.365723 0.812597 0.835531 1.66653 162288 0 -3.06355 0.422862 1
+-58.3231 -12.7411 0.731445 0.839416 0.883359 1.78095 90843 0 1.25951 0.4133 1
+-72.4869 5.60047 0.525879 2.0171 4.41221 1.67305 117475 0 1.56359 0.413063 0
+-0.162346 73.7712 0.397217 2.143 4.72899 1.74395 217385 0 -0.028857 0.406037 0
+52.7439 12.5128 0.421631 0.893936 0.952577 1.78182 128162 0 1.57157 0.403626 1
+-66.3942 5.65282 0.408936 1.94077 4.39072 1.54543 117494 0 1.57208 0.401805 0
+26.3777 -10.4855 -0.0249023 0.787596 0.850348 1.42894 94384 0 1.80829 0.398933 1
+52.8274 11.0152 0.338379 0.812597 0.936665 1.78443 125823 0 1.5081 0.392686 1
+-72.1877 -4.04054 0.407227 1.89767 4.14893 1.57053 103436 0 -1.5172 0.390825 0
+-58.1694 -12.7351 0.724121 0.833697 0.878198 1.77055 90844 0 1.51892 0.38914 1
+58.4259 47.867 0.298828 0.824186 0.856599 1.7448 179660 0 0.0792112 0.378976 1
+-59.2758 -11.4728 0.666992 0.792225 0.820171 1.76451 92712 0 -1.62237 0.378344 1
+-63.4172 10.7978 0.65332 0.837369 0.861002 1.6994 124991 0 1.2396 0.375821 1
+-60.4991 -11.4562 0.675781 0.744226 0.736634 1.71775 92708 0 -1.21314 0.371365 1
+36.6995 9.34047 0.882812 0.776902 0.815778 1.69277 123432 0 1.50552 0.367724 1
+-63.4003 10.9307 0.644531 0.807456 0.839621 1.69691 125459 0 0.763343 0.363647 1
+-72.1372 -7.55367 0.413818 1.96461 4.49484 1.63589 98288 0 -1.57648 0.35354 0
+-53.0344 10.9205 0.745605 0.830244 0.905412 1.71607 125492 0 0.770929 0.350421 1
+21.6731 -8.39437 0.020874 0.828422 0.959346 1.67468 97177 0 3.02872 0.345768 1
+-19.5666 10.8328 0.745117 0.799804 0.784909 1.70189 125128 0 -1.57769 0.344885 1
+-15.5612 -14.832 0.152832 1.94837 4.36933 1.4779 87701 0 1.4536 0.344003 0
+16.8066 9.87437 0.399658 0.799023 0.899903 1.69029 123838 0 0.488511 0.343672 1
+-48.0359 10.5214 0.651367 0.825797 0.939413 1.70438 124571 0 1.60704 0.341473 1
+18.5453 10.9125 0.281738 0.762992 0.833901 1.57938 125715 0 1.54344 0.337422 1
+-46.4862 9.35985 0.678711 0.862686 0.971128 1.77228 123172 0 1.56501 0.331985 1
+18.2225 11.7175 0.244263 0.775954 0.839621 1.64591 126650 0 1.581 0.330687 1
+-60.3012 -12.0406 0.675781 0.792999 0.793386 1.80107 91773 0 1.51485 0.330255 1
+-43.3236 -23.9505 1.01953 0.799218 0.91229 1.7431 74510 0 -2.65256 0.329284 1
+-60.4872 -11.5881 0.685547 0.782995 0.778801 1.75505 92240 0 1.52757 0.328422 1
+-65.166 -7.45891 0.430908 1.80107 3.88237 1.41817 98310 0 -1.57782 0.326379 0
+52.6687 10.8606 0.359131 0.884492 0.972552 1.78356 125354 0 1.52857 0.321673 1
+-43.1727 -24.1416 1.00293 0.780704 0.809034 1.72111 74043 0 -2.34878 0.316053 1
+61.8381 15.0981 0.791016 0.824387 1.92849 1.80547 131935 0 1.40473 0.309128 2
+22.6209 -51.9712 -0.851562 0.763924 1.38025 1.80371 33532 0 0.0787352 0.305076 2
+47.9516 26.7118 0.395264 1.71775 3.72271 1.61368 148739 0 -1.6558 0.301362 0
+27.673 -9.94156 0.148193 0.868391 0.950951 1.75505 94856 0 -1.58141 0.301156 1
+28.3369 6.56453 0.0358887 1.91442 4.35229 1.48913 119194 0 1.46763 0.297878 0
+-47.6074 9.81532 0.616211 0.837778 0.948169 1.72363 123637 0 1.58222 0.297062 1
+-60.1224 -11.6433 0.696289 0.7565 0.785676 1.76365 92242 0 -1.65441 0.296451 1
+58.4775 3.45281 0.319336 1.65922 3.63648 1.49715 114608 0 1.59217 0.294012 0
+-66.207 -9.51391 0.676758 0.827008 0.861633 1.80547 95499 0 1.73431 0.293607 1
+67.0037 -5.57844 0.131836 0.814584 1.93699 1.38464 101531 0 -2.87219 0.291787 0
+-48.0494 9.98852 0.643555 0.852427 0.990524 1.71943 124103 0 1.59668 0.289271 1
+21.8063 -8.26664 -0.0356445 0.821574 0.890722 1.64671 97646 0 3.11822 0.285472 1
+-72.7483 5.8607 0.5625 2.21968 4.98019 1.79317 117942 0 1.55909 0.285173 0
+-49.3994 -9.88508 0.587891 0.83268 0.864583 1.65679 95083 0 -1.59927 0.284178 1
+53.1331 11.6731 0.345703 0.869452 0.948632 1.75763 126760 0 1.63546 0.282691 1
+18.8949 12.5627 0.240967 0.849933 0.956539 1.66003 128057 0 -0.523239 0.280715 1
+-72.2133 5.86094 0.552246 1.93699 4.18556 1.61604 117944 0 1.63608 0.279042 0
+38.3722 11.5742 0.276367 0.811408 0.828624 1.66734 126713 0 1.51775 0.279042 1
+69.273 -16.443 0.0673828 2.00923 4.52125 1.58672 85626 0 -1.53616 0.278257 0
+-58.1689 -13.1691 0.739258 0.833697 0.87179 1.80195 89908 0 1.89967 0.270577 1
+25.0481 -33.8716 -1.30371 0.831461 1.21258 1.75505 60216 0 -3.06225 0.269037 2
+45.833 -8.26968 0.788086 0.808245 0.825394 1.81254 97721 0 -1.70134 0.267982 1
+12.0616 11.8252 0.536621 0.857018 0.99004 1.67632 126631 0 1.54405 0.266262 1
+-72.4586 -3.75906 0.419678 2.024 4.51684 1.59995 103903 0 -1.49994 0.263789 0
+-53.0359 10.5125 0.716797 0.873068 0.97018 1.72279 124556 0 1.47471 0.26322 1
+-51.2584 -7.75562 0.696289 0.899079 1.34565 1.80723 97885 0 1.67282 0.262842 1
+-58.3306 -13.1719 0.741211 0.840236 0.882066 1.79054 89907 0 2.3032 0.262653 1
+54.3019 9.9725 0.29248 0.958585 1.35918 1.78095 124423 0 -1.59604 0.262086 1
+17.1481 62.2561 -0.206787 2.03292 4.53452 1.59605 200591 0 3.13278 0.258513 0
+26.1941 -10.6441 -0.0546875 0.824387 0.955139 1.60503 93915 0 -1.81885 0.256459 1
+-63.5189 5.87555 0.5625 1.94932 4.18556 1.62713 117971 0 1.53554 0.256273 0
+-65.7181 -7.4625 0.462402 2.09131 4.78941 1.50154 98308 0 -1.59439 0.255901 0
+25.0426 35.9259 0.30249 0.907791 0.889418 1.77401 162240 0 -1.72006 0.254602 1
+-47.5037 5.96453 1.47266 3.08624 12.3743 3.3962 118021 0 1.55327 0.253676 0
+27.6827 -9.56531 0.197144 0.957065 1.0632 1.75078 95792 0 -1.56018 0.250913 1
+25.0058 35.7884 0.270752 0.863318 0.840031 1.70522 161772 0 -1.70142 0.250913 1
+24.8984 -33.8839 -1.33496 0.837165 1.19247 1.75591 60215 0 3.08374 0.249082 1
+-0.40937 48.7481 0.688477 2.19704 4.85534 1.83749 180880 0 -0.0719302 0.247442 0
+12.2632 -20.55 0.540039 0.871364 0.788751 1.70855 79364 0 0.0311184 0.24726 1
+-0.463127 58.4216 0.505371 1.95695 4.289 1.59371 194920 0 3.08782 0.246533 0
+-32.2213 10.2206 0.59668 0.870726 0.966162 1.7144 124153 0 1.49425 0.245808 1
+68.9984 -16.1985 0.0269775 1.8339 4.08062 1.52519 86093 0 -1.57468 0.244904 0
+41.193 36.2578 0.355469 0.815579 0.795714 1.61289 162758 0 1.87063 0.244724 1
+-39.1797 -9.36671 0.827148 0.806274 0.818171 1.76709 95583 0 1.56436 0.244363 1
+-47.5997 9.99707 0.638672 0.837983 0.960752 1.73122 124105 0 1.61137 0.243283 1
+69.9159 -5.80469 0.154663 2.23928 4.8982 1.54959 101072 0 -1.59078 0.241668 0
+18.5813 11.7027 0.267578 0.736994 0.857436 1.62395 126652 0 1.64737 0.239349 1
+8.71617 50.8206 0.439453 0.814982 1.08099 1.6431 183717 0 -1.46935 0.239349 1
+26.1859 -10.4812 -0.0134277 0.788366 0.893772 1.48586 94383 0 -1.45864 0.238994 1
+-21.8322 12.7014 0.966797 0.785292 0.765978 1.7888 127929 0 1.3334 0.238284 1
+50.8288 -9.94781 0.206909 0.818771 0.893336 1.6439 94928 0 1.47363 0.23793 1
+-21.6762 12.6897 0.921875 0.751897 0.745317 1.7448 127930 0 1.45272 0.236869 1
+-19.3859 11.2724 0.674805 0.785101 0.795714 1.72532 126065 0 -1.57004 0.236516 1
+-24.5586 30.806 -0.216553 0.806471 1.62832 1.7542 154597 0 -2.26258 0.234407 2
+-66.215 -9.66531 0.702637 0.825797 0.85639 1.80019 95031 0 1.59137 0.233357 1
+-72.2134 5.34063 0.541016 1.7958 3.86346 1.55641 117008 0 1.55752 0.232659 0
+-64.9837 -11.8786 0.650391 0.758905 0.767476 1.66246 91758 0 -1.5655 0.23144 1
+18.4062 11.4725 0.271973 0.768976 0.816974 1.58944 126183 0 1.58211 0.231267 1
+-0.178284 58.7127 0.5 1.99652 4.33533 1.61881 195389 0 -3.13233 0.231267 0
+-0.485703 55.8411 0.533203 2.01218 4.43814 1.60229 191176 0 -0.0110398 0.230573 0
+-53.1656 10.9148 0.74707 0.837369 0.913628 1.71859 125491 0 0.652215 0.229535 1
+-43.3216 -24.3769 1.00977 0.834512 1.07994 1.74736 73574 0 -2.4205 0.22919 1
+-71.5689 -7.78437 0.504395 1.98486 4.46422 1.66897 97822 0 -1.53911 0.227813 0
+22.7803 -51.9603 -0.862305 0.761689 1.52594 1.79229 33533 0 0.0115835 0.226784 2
+-0.385002 47.5719 0.710938 2.1139 4.60143 1.84468 179008 0 0.0386393 0.22627 0
+17.4273 62.5398 0.0140381 2.05287 4.60593 1.6796 201060 0 3.12402 0.225246 0
+24.8841 -34.2953 -1.31055 0.860792 1.44015 1.77228 59279 0 1.80126 0.225076 1
+56.1421 -17.1209 0.19873 2.22728 5.01925 1.79142 84649 0 1.54723 0.224565 0
+0.112892 74.3525 0.26709 2.35823 5.68756 1.85371 218322 0 0.107217 0.223886 0
+-51.406 -8.86578 0.453125 0.799218 0.81538 1.68864 96481 0 -1.52915 0.223716 1
+23.4784 35.7622 0.134888 1.97712 4.31842 1.5875 161767 0 -0.040157 0.223547 0
+27.8646 -9.76992 0.156738 0.914241 1.10272 1.79317 95325 0 -1.58763 0.223377 1
+67.2977 3.9325 0.43335 2.3081 5.32734 1.80723 115572 0 1.53704 0.221014 0
+-66.0092 -7.19453 0.477539 2.23382 5.19378 1.61092 98775 0 -1.5908 0.218669 0
+-46.7928 9.84406 0.660156 0.865428 1.28654 1.75591 123639 0 1.58576 0.218336 1
+-59.1107 -11.908 0.691406 0.827008 0.839211 1.74565 91777 0 -1.64416 0.216674 1
+27.5038 -9.7746 0.186279 0.944991 1.01823 1.78095 95323 0 -1.57999 0.216343 1
+-24.0689 -0.226173 0.916016 2.22185 5.07346 1.6837 109202 0 -1.5787 0.216343 0
+52.9702 11.4906 0.331543 0.82479 0.926883 1.77748 126291 0 1.4928 0.215351 1
+-0.168823 57.7717 0.454346 2.05889 4.59245 1.60503 193985 0 -1.38303 0.215021 0
+-72.148 -7.25242 0.406006 1.6319 3.19667 1.62078 98756 0 -1.58593 0.214691 0
+51.4209 3.48375 0.328125 1.79142 4.07266 1.47863 114586 0 1.56632 0.214198 0
+-1.45586 26.7875 0.54248 0.793192 0.888117 1.6649 148585 0 0.141355 0.213869 1
+25.0461 -34.2992 -1.29492 0.857436 1.43804 1.77921 59280 0 -2.03816 0.213377 1
+-53.1897 10.5159 0.710449 0.850141 0.993672 1.71607 124555 0 1.40154 0.212395 1
+-0.44828 57.1123 0.435059 2.05688 4.61043 1.59605 193048 0 0.00349649 0.212232 0
+41.2078 36.0625 0.357422 0.840031 0.801563 1.61565 162290 0 2.83202 0.212069 1
+67.0198 3.44813 0.403809 2.0995 4.73361 1.68617 114635 0 1.58572 0.21158 0
+36.8362 9.22516 0.875977 0.850556 0.94102 1.72111 122965 0 1.46104 0.210279 1
+68.7069 -5.5789 0.0643311 1.81431 3.909 1.48986 101536 0 -1.58231 0.209631 0
+58.4131 48.056 0.317627 0.828018 1.2206 1.77748 180128 0 -0.0121945 0.209308 2
+-71.8617 -7.25703 0.391113 2.00629 4.57008 1.63469 98757 0 -1.57023 0.208824 0
+-0.195312 57.4519 0.406006 2.02994 4.59694 1.59839 193517 0 0.0473541 0.208662 0
+54.2628 -16.4269 -0.0725098 0.802934 1.77574 1.99945 85579 0 -1.83713 0.207215 2
+19.7156 -7.76344 0.263672 0.933982 1.04595 1.84919 98107 0 3.11096 0.205934 1
+8.06554 -2.76453 0.777344 0.821373 1.10744 1.72616 105559 0 1.69392 0.205615 2
+-0.417183 74.3561 0.320312 2.33417 5.6213 1.84018 218320 0 -0.0335029 0.205456 0
+52.4569 11.018 0.322266 0.852427 0.999756 1.79054 125821 0 1.50674 0.204183 1
+52.935 11.9429 0.361572 0.874775 0.940331 1.78182 127227 0 1.58307 0.204025 1
+17.0678 66.812 -0.101562 1.65598 3.65785 1.48732 207143 0 -3.08978 0.202917 0
+-0.168518 42.9817 0.677734 2.25684 4.98019 1.92379 172457 0 -0.0402843 0.201656 0
+18.8112 59.9042 0.373535 0.902157 0.818571 1.8519 197320 0 2.81046 0.201185 1
+2.14047 -28.0002 0.300781 0.779371 0.94125 1.68288 68568 0 1.09742 0.200401 1
+2.34117 -27.9997 0.341309 0.794161 0.94309 1.70355 68569 0 0.129703 0.200401 1
+65.7966 3.73 0.37915 2.04686 4.48607 1.68617 115099 0 1.57055 0.200401 0
+50.9541 -9.57742 0.191284 0.862054 0.959814 1.67142 95865 0 1.49565 0.200245 1
+-70.5472 -3.71562 0.356445 0.755946 1.26258 1.41851 103909 0 -0.899796 0.19962 2
+-24.7175 30.792 -0.215576 0.821574 1.61565 1.77921 154596 0 -2.05885 0.198996 2
+22.5956 -52.2719 -0.688477 0.748965 1.67142 1.80459 33064 0 0.122395 0.198685 2
+-24.5677 30.6581 -0.253662 0.824387 1.59488 1.7363 154129 0 -2.90644 0.198064 2
+-49.395 -10.2956 0.602539 0.77444 0.793386 1.64792 94147 0 -1.64799 0.19729 1
+-51.0509 6.10641 0.850586 2.68137 6.41972 2.29798 118478 0 1.57692 0.196981 0
+28.3209 6.82234 0.0412598 1.65517 3.60818 1.49314 119662 0 1.52721 0.196981 0
+19.6848 -7.60387 0.258545 0.931932 1.0373 1.8339 98575 0 -3.04082 0.195594 1
+-66.4063 6.1482 0.462891 2.12632 4.83169 1.6196 118430 0 1.59084 0.194827 0
+-46.7825 9.97778 0.649414 0.84229 1.20977 1.73545 124107 0 1.56066 0.194368 1
+21.3169 -8.48203 0.0699463 0.868179 1.00957 1.738 97176 0 2.74586 0.194062 1
+-71.9228 5.61594 0.557129 1.69774 3.64003 1.51665 117477 0 1.61611 0.1933 0
+15.5544 -6.87297 0.383301 0.848482 0.997318 1.71607 99498 0 -1.76701 0.193147 1
+28.0175 6.84805 0.057251 1.95886 4.51684 1.50301 119661 0 1.51881 0.19163 0
+-59.1125 -14.293 0.59668 0.812002 0.818571 1.65275 88501 0 3.08468 0.190423 1
+18.936 59.9036 0.404297 0.877984 0.811012 1.83838 197321 0 3.06307 0.190423 1
+-73.1156 -7.55719 0.647461 1.98195 4.39072 1.66328 98285 0 -1.53626 0.189671 0
+-72.4816 6.16008 0.557617 2.18954 4.83169 1.7431 118411 0 1.63517 0.188175 0
+-70.3541 2.33437 0.569824 1.80723 3.86723 1.46676 112802 0 1.59627 0.187877 0
+41.3404 36.0395 0.392822 0.808245 0.792225 1.52929 162291 0 2.62125 0.187728 1
+-32.1904 10.6044 0.572266 0.939872 1.02772 1.72869 125089 0 1.53626 0.18743 1
+38.423 12.0928 0.359863 0.877984 0.885951 1.78356 127182 0 1.51966 0.18743 1
+18.5153 12.3742 0.249512 0.773306 0.872642 1.60817 127587 0 0.923337 0.187133 1
+-64.8712 -11.4438 0.637695 0.745499 0.751897 1.687 92695 0 -1.58752 0.185357 1
+-66.58 -9.51539 0.632812 0.842701 0.884222 1.82586 95497 0 -1.47837 0.185062 1
+58.6088 47.8613 0.302246 0.843731 1.21228 1.76623 179661 0 -0.228687 0.184474 1
+19.6389 13.0097 0.362305 0.833087 0.850556 1.66165 128527 0 -0.902982 0.183887 1
+-0.208595 55.5294 0.517578 1.98971 4.31 1.6066 190709 0 -0.00462059 0.18374 0
+15.0487 9.57641 0.368652 0.793386 0.970417 1.6196 123365 0 1.51616 0.182717 1
+26.5956 -10.6486 -0.0114746 0.812796 0.897051 1.46461 93917 0 1.73808 0.181263 1
+23.5981 -10.4077 0.0424805 0.915582 1.00489 1.81254 94375 0 1.57799 0.181263 1
+-1.4768 26.9419 0.533203 0.791065 0.897708 1.66571 149053 0 0.171049 0.178956 1
+-65.8184 5.86219 0.463135 1.89304 4.11665 1.55413 117964 0 1.55141 0.178812 0
+52.7903 11.6785 0.349609 0.800976 0.89007 1.78967 126758 0 1.50644 0.177811 1
+-72.2003 -4.54594 0.42627 1.98486 4.32264 1.59527 102500 0 -1.56787 0.177098 0
+34.7872 43.7966 0.389648 1.62991 3.4598 1.72027 173502 0 -3.09188 0.176672 0
+40.4341 36.5178 0.376465 0.860372 0.938496 1.71022 163224 0 0.713522 0.175821 1
+25.4814 34.7972 -0.0297852 0.665168 0.581021 1.07311 160369 0 -0.603602 0.175256 1
+18.9087 12.4259 0.270264 0.814982 0.906518 1.64792 127589 0 -0.204527 0.174692 1
+68.6981 -16.4338 0.0563965 1.64631 3.5906 1.42268 85624 0 1.57039 0.174551 0
+-60.0836 -11.4125 0.719238 0.79552 0.811804 1.76709 92710 0 -1.63541 0.173849 1
+48.2375 14.7747 0.429688 1.90975 3.72999 1.77921 131424 0 -1.59485 0.173708 0
+-0.746246 56.1406 0.522461 2.01316 4.4079 1.6066 191643 0 0.0559099 0.173428 0
+-15.8445 14.8255 0.795898 0.837165 1.84828 1.79317 131224 0 1.4907 0.173288 2
+16.9837 10.068 0.394775 0.803523 0.915861 1.66003 124307 0 0.671559 0.173009 1
+9.74414 -10.0373 0.605957 0.781849 0.816974 1.80019 94800 0 -3.06408 0.172172 1
+40.7349 36.2977 0.396484 0.836552 0.862475 1.67796 162757 0 -2.90591 0.171894 1
+-0.764374 58.1155 0.533691 1.90789 4.14893 1.57169 194451 0 2.92862 0.171755 0
+25.3619 -33.8118 -1.19043 0.846826 1.80283 1.79492 60217 0 -2.51561 0.171338 2
+-0.749527 55.5569 0.578125 2.08622 4.59694 1.6576 190707 0 -0.0964738 0.171338 0
+15.7586 -6.85844 0.374512 0.847654 1.0076 1.72616 99499 0 -1.71445 0.169681 1
+20.0178 12.5895 0.308594 0.7901 0.835531 1.62395 128060 0 0.94945 0.169681 1
+28.6116 6.32656 0.0505371 2.00042 4.4338 1.55679 118727 0 1.43159 0.169543 0
+25.4563 -8.07031 0.144043 0.932842 0.993187 1.74992 97657 0 -1.1364 0.169406 1
+-70.5923 5.52891 0.628906 0.825999 1.71105 1.58285 117481 0 0.115051 0.168583 2
+-66.5916 -9.6864 0.654297 0.824991 0.867543 1.80019 95029 0 -1.41547 0.168036 1
+59.6092 3.9168 0.345947 2.35133 5.73777 1.73715 115548 0 1.52948 0.168036 0
+-58.7251 6.11687 0.553711 2.33759 5.16848 1.70189 118454 0 1.58599 0.1679 0
+-38.878 23.8675 0.452148 1.99165 4.52567 1.66084 144256 0 1.59313 0.167355 0
+34.0532 30.3017 0.191528 0.688969 0.612483 1.58711 153844 0 0.0293467 0.167219 1
+15.5458 -6.65195 0.359863 0.856808 0.981137 1.67632 99966 0 -1.87803 0.166947 1
+38.5355 11.1562 0.237305 0.831868 0.862264 1.62554 125778 0 1.57808 0.166947 1
+-1.66594 26.9579 0.603516 0.792999 0.876485 1.69195 149052 0 0.328254 0.166947 1
+19.6588 60.6802 0.360107 0.848896 0.856181 1.89304 198259 0 3.13038 0.166947 1
+-46.2733 8.9225 0.678711 0.916252 0.989798 1.79492 122237 0 1.64095 0.166134 1
+-43.5722 -24.1419 0.988281 0.864372 1.12433 1.76623 74041 0 -2.87997 0.165323 1
+-2.72406 -4.35906 1.43359 2.67875 6.1919 2.47502 103185 0 -1.65654 0.165189 0
+-43.1334 -24.3866 1.0166 0.850971 1.07888 1.77488 73575 0 -2.55789 0.165054 1
+-47.6109 10.4375 0.655273 0.858693 1.02272 1.74992 124573 0 1.64716 0.164785 1
+26.4081 -10.9642 0.0843506 0.851595 1.14917 1.67387 93448 0 2.35483 0.164516 1
+25.4942 -7.92352 0.169434 0.844555 0.885951 1.72027 98125 0 0.714091 0.164516 1
+20.5934 -13.3258 -0.49292 0.843525 0.922143 1.63669 90154 0 -1.94347 0.164248 1
+-46.3798 9.99758 0.714844 0.834308 0.974453 1.75163 124109 0 1.48531 0.164248 1
+-21.3017 12.6345 0.726562 0.665493 0.660314 1.53865 127931 0 1.61385 0.16398 1
+-65.4239 -6.67937 0.435547 2.117 4.72437 1.56785 99713 0 -1.43674 0.163579 0
+-70.7969 5.30703 0.616211 0.82822 1.73545 1.56441 117012 0 -0.114524 0.162912 2
+-47.8398 9.55453 0.588867 0.865851 0.940101 1.65679 123168 0 1.61264 0.162912 1
+68.9895 -15.898 -0.000732422 1.89952 4.20605 1.56098 86561 0 -2.13294 0.162115 0
+58.64 48.0873 0.308105 0.829637 1.75677 1.78269 180129 0 -0.699575 0.162115 2
+-41.1412 5.82332 0.467041 0.756315 0.671368 1.64591 118041 0 1.79058 0.161849 1
+69.2655 -15.9098 0.0133057 2.0599 4.61043 1.6319 86562 0 -1.57593 0.161717 0
+48.2353 16.9289 0.424805 1.79404 4.25979 1.69525 134232 0 -1.55184 0.161717 0
+-0.220779 56.4834 0.448975 2.01316 4.37787 1.58324 192113 0 -0.0391337 0.161452 0
+17.0941 73.0282 -0.186768 2.35478 5.85665 2.02994 216503 0 3.10551 0.160135 0
+9.37172 -11.6356 0.560547 0.789136 0.774629 1.7958 92459 0 0.0162124 0.160003 1
+-3.01406 -22.8222 1.06738 0.825192 0.952112 1.72869 76040 0 3.12737 0.159741 1
+8.57766 50.9198 0.412598 0.83696 1.45003 1.67142 184184 0 -1.55218 0.159479 2
+-15.8108 -15.0887 0.228638 2.13152 4.91737 1.59605 87232 0 1.56842 0.159348 0
+-60.0981 -12.0319 0.686523 0.771797 0.798048 1.80371 91774 0 1.33467 0.159217 1
+-70.5688 5.26805 0.611328 0.659509 1.34861 1.4272 117013 0 -0.260746 0.159217 2
+-70.8547 5.54922 0.632812 0.92694 1.99068 1.64953 117480 0 0.061198 0.159217 2
+9.07617 16.5944 3.00195 0.793774 0.7486 1.67223 133642 0 0.0136284 0.158956 1
+20.9516 -8.73781 0.0631104 0.921637 1.03023 1.809 96707 0 2.07482 0.157914 1
+-0.163902 74.7281 0.247925 2.34559 5.55038 1.90882 218789 0 -0.210649 0.157785 0
+-10.4335 -7.56062 0.501953 1.95313 4.34805 1.46283 98481 0 -1.77732 0.157525 0
+28.9578 6.82687 0.0444336 1.9975 4.54338 1.52482 119664 0 1.47983 0.157525 0
+29.2478 6.595 0.0844727 2.06695 4.60593 1.56136 119197 0 1.47477 0.157008 0
+-41.1866 5.66578 0.521484 0.749148 0.665168 1.68535 117573 0 1.50511 0.15662 1
+-71.2256 -7.53984 0.477051 1.97519 4.35655 1.66003 98291 0 -1.55516 0.156234 0
+23.5491 -14.2826 -0.663086 0.859742 0.91229 1.69029 88759 0 0.0864392 0.156105 1
+19.9077 -7.78062 0.210693 0.891538 0.994158 1.78008 98108 0 3.03605 0.156105 1
+67.9586 -5.61977 0.0709229 1.02697 2.072 1.48586 101534 0 -2.68999 0.156105 0
+-49.6084 -10.0947 0.601562 0.815579 0.84538 1.67142 94614 0 -1.60338 0.155591 1
+-43.1665 -23.9544 1.01465 0.799414 0.897927 1.7542 74511 0 -2.46434 0.155335 1
+20.9664 -10.6712 0.267578 0.895574 0.967342 1.77314 93899 0 0.237867 0.155078 1
+35.0191 17.123 0.102295 0.857436 1.85552 1.48369 134659 0 1.55629 0.154823 0
+41.326 36.243 0.397461 0.813789 0.794549 1.60425 162759 0 2.42836 0.154823 1
+-0.201172 46.9303 0.609375 2.17144 4.63299 1.85009 178073 0 0.0219906 0.154695 0
+21.2528 -8.72594 0.0788574 0.895465 1.03401 1.76192 96708 0 2.20281 0.154312 1
+-54.2531 -12.9739 0.94043 0.798633 0.770856 1.84558 90388 0 -2.83628 0.153549 1
+35.2451 38.4256 0.178223 0.879271 1.90882 1.79229 166016 0 -0.250676 0.152032 2
+20.9416 -8.51555 0.109497 0.862054 0.96876 1.80019 97175 0 2.52623 0.151529 1
+-64.1012 5.89805 0.508789 1.79492 3.96281 1.59099 117969 0 1.38238 0.151404 0
+16.8581 61.9581 -0.306396 1.90882 4.21839 1.51554 200122 0 -3.13227 0.151153 0
+33.8073 34.028 0.0882568 0.852219 0.896175 1.4265 159459 0 1.88262 0.150528 1
+-73.0559 5.3375 0.601562 2.29798 5.26527 1.89952 117005 0 1.5242 0.150153 0
+-1.65094 26.7972 0.591797 0.819371 0.913182 1.69195 148584 0 0.196163 0.149283 1
+-49.2058 -10.4147 0.643066 0.801172 0.817373 1.68617 94148 0 -1.65453 0.149035 1
+-51.4013 -9.0125 0.510254 0.813987 0.849518 1.6796 96013 0 -1.54705 0.148788 1
+-63.4597 10.5056 0.709961 0.868603 0.927336 1.6936 124523 0 1.62912 0.148294 1
+18.2 11.8842 0.25293 0.760388 0.83065 1.6125 127118 0 1.59144 0.148294 1
+12.0541 12.1891 0.535645 0.84538 0.981377 1.67305 127567 0 1.60986 0.148294 1
+36.3328 28.2973 0.231689 0.824991 0.95281 1.62912 151043 0 1.5549 0.148294 1
+0.124146 59.0119 0.490723 2.0599 4.55671 1.6649 195858 0 -3.09214 0.148294 0
+-66.675 5.35062 0.416992 2.03888 4.71515 1.58401 117025 0 1.57654 0.14817 0
+-16.0786 14.8578 0.820312 0.876913 1.85371 1.80723 131223 0 1.26787 0.147801 2
+66.8634 -5.61422 0.162354 0.815778 1.96941 1.40096 101530 0 -3.01521 0.146942 0
+26.7297 -9.0625 0.135254 0.938324 1.10016 1.64872 96257 0 1.66405 0.14682 1
+-51.5647 -8.86633 0.507324 0.80039 0.807061 1.68947 96480 0 -1.52555 0.14682 1
+19.618 13.2575 0.403564 1.05148 1.05055 1.84738 128995 0 -1.53368 0.14682 1
+20.0584 12.3758 0.347168 0.787981 0.792612 1.72279 127592 0 0.924658 0.146087 1
+24.7732 -52.0092 -0.483643 2.07099 4.71515 1.70688 33539 0 -1.61212 0.145965 0
+26.6083 -10.4646 0.0201416 0.85451 0.950486 1.52743 94385 0 1.97255 0.145601 1
+67.267 -5.59296 0.124023 0.874775 1.95408 1.42476 101532 0 -2.80165 0.145237 0
+15.7873 -6.64339 0.337891 0.894481 1.05957 1.71524 99967 0 -0.787267 0.145115 1
+-66.9606 5.90539 0.547852 2.23709 5.19378 1.72279 117960 0 1.5896 0.145115 0
+-65.4105 -7.73421 0.460693 1.91068 4.18556 1.44015 97841 0 -1.60966 0.144994 0
+24.5173 -52.2972 -0.575195 1.97133 4.4338 1.65275 33070 0 -1.60991 0.144149 0
+-59.2666 -11.9056 0.679688 0.812201 0.863318 1.75591 91776 0 -1.62715 0.144149 1
+0.399376 74.0859 0.259277 2.08011 4.78941 1.77488 217855 0 0.129989 0.143788 0
+3.32453 -3.99906 1.46875 3.27248 11.9936 3.1379 103672 0 -1.64914 0.143548 0
+-66.3781 -9.20434 0.577148 0.826806 0.940331 1.78792 95966 0 -2.80423 0.143427 1
+49.7764 31.1466 0.62207 0.722034 0.644703 1.64953 155297 0 3.02963 0.143427 1
+35.0063 17.4191 0.104736 0.91838 2.14195 1.43034 135127 0 1.18551 0.143068 0
+66.127 3.43391 0.348389 1.92285 4.18556 1.6315 114632 0 1.56835 0.142829 0
+25.359 -34.0645 -1.28223 0.882281 1.81431 1.80195 59749 0 -2.40935 0.142709 2
+-59.2561 -14.2839 0.59082 0.850141 0.861212 1.6755 88500 0 3.11739 0.14247 1
+0.103752 58.085 0.448242 2.05087 4.51684 1.60268 194454 0 0.0866855 0.141756 0
+-60.3495 -11.1379 0.688965 0.753735 0.75484 1.727 93177 0 -1.34778 0.141519 1
+-65.0378 -11.4297 0.625 0.739517 0.736634 1.69112 92694 0 -1.5263 0.141281 1
+18.5409 12.5677 0.235229 0.82822 0.915414 1.65275 128055 0 -0.182852 0.141281 1
+21.6012 -8.70094 0.0693359 0.8846 1.05158 1.73038 96709 0 2.67774 0.141045 1
+9.205 -11.6352 0.592285 0.836552 0.863528 1.78792 92458 0 -0.0601492 0.140808 1
+-58.6194 -12.9366 0.750977 0.824588 0.882066 1.76968 90374 0 -1.53031 0.140572 1
+46.1496 -8.41047 0.673828 0.826806 1.79667 1.79317 97254 0 -1.60657 0.140101 2
+-46.8278 9.48563 0.649414 0.833494 1.02797 1.73715 123171 0 1.49684 0.139866 1
+61.3944 14.9161 0.818359 0.875629 1.94457 1.86825 131465 0 1.42357 0.139162 2
+23.543 -14.4723 -0.671875 0.831868 0.907404 1.6994 88291 0 0.0420743 0.138229 1
+20.2688 -1.16875 0.20874 0.723445 0.805093 1.61053 107937 0 1.46526 0.138229 1
+-46.3673 9.82375 0.692871 0.809825 0.93142 1.72953 123641 0 1.51791 0.138229 1
+26.7678 -8.82906 0.13562 0.934552 1.09802 1.70438 96725 0 1.63802 0.137996 1
+67.765 -5.62031 0.0749512 0.90602 1.89952 1.45428 101533 0 -2.84922 0.137532 0
+-63.2639 10.5091 0.693848 0.851595 0.928695 1.68535 124524 0 1.66703 0.137532 1
+34.738 17.4216 0.171875 0.85326 2.01907 1.37689 135126 0 1.33551 0.137185 0
+18.4977 -12.8756 -0.415283 0.869239 0.95958 1.65033 90615 0 -2.52627 0.13707 1
+26.3208 -11.687 0.415039 0.847033 0.913182 1.87739 92512 0 -2.30849 0.13707 1
+33.4133 34.025 0.0803223 0.852219 1.1111 1.44649 159458 0 2.72091 0.13707 1
+33.1803 34.0445 0.0740967 0.794937 0.840441 1.38836 159457 0 -3.03748 0.136839 1
+58.4141 47.6295 0.304443 0.850348 1.12351 1.7397 179192 0 0.0634539 0.136608 1
+0.0915604 57.7834 0.419434 2.024 4.47731 1.59761 193986 0 0.506053 0.136378 0
+0.0458603 47.5825 0.690918 2.06191 4.47294 1.79755 179010 0 0.0388531 0.136148 0
+35.2302 44.5676 0.412598 2.1725 5.01925 1.88014 174908 0 -3.11333 0.136033 0
+-59.1027 -14.4558 0.592773 0.824387 0.852219 1.69112 88033 0 3.09677 0.13569 1
+18.6325 -12.8798 -0.459717 0.862686 0.964041 1.63949 90616 0 -2.64743 0.13569 1
+18.6138 11.9727 0.26123 0.768226 0.896394 1.59956 127120 0 1.52297 0.135461 1
+66.7459 4.21984 0.452637 2.28791 4.97048 1.83749 116038 0 1.5014 0.135346 0
+17.6381 49.967 -0.0305176 2.18527 4.98506 1.66734 182809 0 3.07814 0.135004 0
+68.99 -5.81453 0.109253 2.13569 4.71976 1.59917 101069 0 -1.58621 0.134662 0
+-3.01375 -22.6448 1.02051 0.803523 0.94309 1.738 76508 0 3.03252 0.134321 2
+38.7503 11.4067 0.243042 0.849933 0.919895 1.70688 126247 0 1.47142 0.134321 1
+69.0134 -16.7225 0.0546875 1.87098 4.15705 1.53153 85157 0 -1.5551 0.134208 0
+55.883 -16.8684 0.133545 2.00434 4.4338 1.68617 85116 0 1.50065 0.133528 0
+67.0172 -5.87719 0.074707 0.795908 2.01218 1.42059 101063 0 -2.90099 0.133415 0
+-1.73438 14.5822 0.711914 0.780704 0.801172 1.68124 130800 0 -0.542857 0.133415 1
+34.0312 30.5038 0.225342 0.733762 0.699819 1.6837 154312 0 -0.296413 0.13319 1
+-72.4844 5.04938 0.499756 1.98874 4.31421 1.65679 116539 0 1.51288 0.133077 0
+3.35047 24.067 0.479004 2.26346 5.51257 1.61683 144856 0 -0.0345451 0.133077 0
+-70.5422 -3.91781 0.418457 0.982515 1.7448 1.53903 103441 0 -1.32688 0.132066 2
+-52.2389 5.92297 0.798828 2.41769 5.60485 2.17462 118006 0 1.56882 0.131731 0
+21.9174 -8.71875 0.0323486 0.870301 0.967815 1.738 96710 0 3.08628 0.131619 1
+30.1116 44.5317 0.32959 2.15349 4.90298 1.85009 174892 0 -3.08446 0.130951 0
+-0.732346 57.1123 0.473633 1.98874 4.3736 1.58092 193047 0 0.250947 0.13084 0
+24.6272 35.9283 0.288574 0.923664 0.862896 1.76451 162238 0 -1.64242 0.130729 1
+45.7353 -8.34797 0.783203 0.843319 0.866062 1.80812 97252 0 -1.7 0.130507 1
+-47.1537 9.79469 0.625977 0.848896 1.01525 1.71859 123638 0 1.54715 0.130285 1
+12.6775 -0.848824 0.546875 0.820171 1.23077 1.71943 108381 0 1.55699 0.130064 2
+-32.7309 27.2824 -0.0986328 0.834308 1.35223 1.75505 149423 0 -1.33806 0.129623 2
+25.1758 34.7859 -0.00463867 0.644231 0.579887 1.00858 160368 0 -1.53506 0.129623 1
+0.0571899 48.7459 0.663086 2.16192 4.80346 1.80371 180882 0 -0.0038722 0.129403 0
+15.5234 -7.14531 0.419922 0.854302 1.033 1.71943 99030 0 -1.82761 0.129183 1
+-36.2537 12.259 1.13086 0.836552 1.87739 1.77401 127416 0 1.55583 0.128744 2
+65.1172 3.73531 0.374023 1.93321 4.32687 1.63111 115097 0 1.59299 0.128525 0
+-71.9234 -3.7525 0.366699 1.81431 3.82218 1.54279 103905 0 -1.49454 0.128416 0
+20.4094 -13.3187 -0.445068 0.848068 0.928469 1.63071 90153 0 -1.78668 0.128307 1
+17.6923 62.2722 0.0057373 2.06191 4.66022 1.71691 200593 0 3.10721 0.128307 0
+0.094223 73.1847 0.392334 1.90045 4.15705 1.70355 216450 0 0.0566111 0.128197 0
+-47.6155 10.6762 0.683594 0.878842 1.04187 1.74651 125041 0 1.64472 0.128088 1
+33.4183 33.8509 0.0334473 0.793192 0.875202 1.41505 158990 0 -3.11185 0.12787 1
+48.1324 15.1722 0.426514 1.12584 1.36817 1.75677 131892 0 -1.64436 0.127653 1
+33.9894 34.0263 0.127808 0.810022 0.816176 1.3496 159460 0 1.72049 0.127653 1
+18.6352 -12.7481 -0.442627 0.877127 0.980179 1.61407 91084 0 0.438441 0.127435 1
+-1.44421 13.2703 0.667969 0.883521 0.887683 1.68782 128929 0 -1.67806 0.127218 1
+26.3849 -9.40515 0.0744629 0.901937 1.01055 1.58517 95788 0 -2.58186 0.127002 1
+22.7996 -52.2659 -0.774414 0.788751 1.61841 1.78705 33065 0 -0.0327326 0.126785 2
+19.6739 -10.098 -0.046875 0.916924 1.01624 1.727 94831 0 1.61542 0.126785 1
+34.4269 17.1684 0.227417 0.863107 1.87098 1.32867 134657 0 2.62385 0.126677 0
+19.3848 -10.738 -0.204834 0.899738 0.997075 1.68947 93894 0 1.4672 0.126569 1
+20.2834 -1.38609 0.195923 0.759461 0.908068 1.60229 107469 0 1.91219 0.126569 1
+61.8797 14.6672 0.783203 0.838392 1.97615 1.81697 130999 0 1.50493 0.126569 2
+33.8084 33.8439 0.0474854 0.788944 0.846206 1.36784 158991 0 2.55008 0.125923 1
+20.5956 -13.4909 -0.450928 0.851179 0.927109 1.6837 89686 0 -1.83065 0.125708 1
+1.16985 14.9614 0.484863 0.750613 0.81538 1.43139 131277 0 0.101215 0.125708 1
+-32.7466 27.1353 -0.101807 0.825394 1.34368 1.74055 148955 0 -1.57329 0.125708 1
+52.6894 11.2931 0.362305 0.820973 0.908512 1.80459 126290 0 1.4653 0.125493 1
+-47.6023 -3.5875 0.561523 1.90231 3.5871 1.49168 103981 0 -1.57393 0.125386 0
+17.5062 48.9133 0.000732422 1.69112 3.28849 1.56785 180936 0 3.09434 0.125386 0
+25.1456 -8.41203 0.0354004 0.871151 0.910288 1.68617 97188 0 -1.38857 0.125279 1
+58.6522 47.6045 0.343018 0.842496 1.63669 1.77228 179193 0 -0.255383 0.125279 2
+-2.78437 -22.8266 1.04688 0.853676 0.991976 1.75849 76041 0 3.14159 0.125065 1
+58.1828 47.8506 0.275391 0.864372 1.38194 1.75934 179659 0 0.330411 0.125065 1
+-38.9023 24.0918 0.517578 2.0954 4.65113 1.72785 144724 0 0.283818 0.124959 0
+19.6895 12.6719 0.279297 0.791451 0.839621 1.6121 128059 0 0.393607 0.124852 1
+36.3388 28.0622 0.218262 0.819571 0.986903 1.6339 150575 0 1.53191 0.124639 1
+-72.7803 -7.24641 0.496582 1.93793 4.32687 1.6066 98754 0 -1.50429 0.124532 0
+38.2603 12.5259 0.387695 0.820973 0.873921 1.72869 128117 0 1.51766 0.124213 1
+68.6822 -15.9017 0.0355225 1.71022 3.75558 1.47718 86560 0 1.5583 0.124107 0
+-15.8292 14.672 0.767578 0.844349 1.81785 1.81077 130756 0 1.45736 0.124001 2
+-72.1641 -6.93094 0.394531 1.45286 2.54239 1.62713 99224 0 -1.55487 0.123895 0
+16.8916 71.6428 -0.0310059 1.63869 3.60818 1.54997 214162 0 3.10683 0.123895 0
+-17.4641 58.0931 1.21875 2.54488 5.93148 2.27343 194399 0 -0.112199 0.123471 0
+25.1 -34.6484 -1.21094 0.823381 1.85915 1.79755 58812 0 -0.108761 0.123366 2
+-73.4552 -7.24226 0.680664 1.95599 4.25979 1.65356 98752 0 -1.46951 0.12326 0
+1.41797 -4.51344 1.69336 3.15943 12.6184 3.46318 102730 0 -1.70245 0.12326 0
+-62.9768 10.9837 0.65918 0.832071 1.06086 1.71273 125461 0 1.41205 0.123155 1
+-35.0166 27.3666 0.854492 2.143 4.81285 1.81697 149416 0 -1.64192 0.123155 0
+19.6534 -10.4069 -0.121826 0.918156 1.03225 1.6755 94363 0 1.37187 0.122523 1
+33.1762 33.848 0.0220947 0.735915 0.735915 1.3938 158989 0 -3.02312 0.122313 1
+-43.5983 -23.9002 1.03809 0.840031 1.58982 1.79229 74509 0 3.05407 0.122104 2
+-64.8306 -12.2566 0.640625 0.708414 0.720273 1.6192 91291 0 -1.51552 0.122104 1
+33.4853 32.1116 0.358643 0.775764 0.716765 1.77748 156650 0 -1.11583 0.122104 1
+-0.737656 57.4327 0.483154 1.98002 4.36933 1.57707 193515 0 0.563248 0.12179 0
+44.6106 43.567 0.217407 0.934894 0.985458 1.7958 173533 0 -1.56387 0.121686 1
+8.22282 50.7522 0.461426 0.869027 1.90882 1.69608 183715 0 -1.3796 0.121686 2
+-71.8625 -6.92117 0.386719 1.95982 4.3438 1.63589 99225 0 -1.54603 0.121373 0
+19.9016 -7.61539 0.224487 0.897106 1.00342 1.79229 98576 0 3.09932 0.121061 1
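Each row of these `seq_*_frame_*.bin.txt` result dumps encodes one detected 3D box. The column semantics are not stated in the diff itself; judging from the value ranges they appear to be centre position (x, y, z), box size (dx, dy, dz), an integer grid/point index, a reserved field that is always 0, heading angle in radians, confidence score (rows are sorted by it in descending order), and an integer class id. The sketch below shows how such an on-disk file could be parsed under that assumption; `Box3D`, `load_boxes`, and the column names are hypothetical and should be checked against the repository's post-processing code before being relied on.

```python
# Minimal parsing sketch for the whitespace-separated detection dumps above.
# Column meanings are assumptions inferred from the values, not taken from
# the repository; verify them against CenterPoint's post-processing code.
from dataclasses import dataclass
from typing import List


@dataclass
class Box3D:                 # hypothetical helper type
    x: float                 # centre position (m), assumed
    y: float
    z: float
    dx: float                # box size (m), assumed
    dy: float
    dz: float
    index: int               # integer field, assumed grid/point index
    reserved: int            # always 0 in the dumps shown here
    yaw: float               # assumed heading angle in radians
    score: float             # confidence; rows appear sorted by it, descending
    label: int               # assumed class id (0, 1, 2 in these files)


def load_boxes(path: str, score_thresh: float = 0.3) -> List[Box3D]:
    """Read one result file and keep boxes above a score threshold."""
    boxes: List[Box3D] = []
    with open(path) as f:
        for line in f:
            parts = line.split()
            if len(parts) != 11:   # skip blank or malformed rows
                continue
            v = [float(p) for p in parts]
            box = Box3D(v[0], v[1], v[2], v[3], v[4], v[5],
                        int(v[6]), int(v[7]), v[8], v[9], int(v[10]))
            if box.score >= score_thresh:
                boxes.append(box)
    return boxes


if __name__ == "__main__":
    dets = load_boxes("results1/seq_0_frame_101.bin.txt")
    print(f"{len(dets)} boxes above threshold")
```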

+ 500 - 0
src/detection/CenterPoint-master/results2/seq_0_frame_100.bin.txt

@@ -0,0 +1,500 @@
+7.54188 16.3843 0.704102 2.12114 4.70595 1.73038 133637 0 0.00683583 0.939359 0
+30.4992 3.9511 0.853516 2.54985 6.08401 2.6398 115457 0 1.5674 0.928149 0
+31.2072 -6.29609 0.0297852 2.10978 5.1433 1.87007 100483 0 -1.61197 0.927232 0
+4.07829 16.1288 0.735352 2.20456 4.80815 1.77661 133158 0 -0.0367768 0.923316 0
+43.7087 26.3844 0.28125 2.24475 5.16343 1.91723 148258 0 1.49943 0.921074 0
+-33.146 -6.92281 0.629883 2.01611 4.52567 1.50411 99346 0 -1.58712 0.917006 0
+-25.7158 -6.64188 0.603516 1.98389 4.3608 1.50375 99837 0 -1.64742 0.904987 0
+18.1466 3.2552 0.45874 2.13465 4.46422 1.92191 114482 0 1.54661 0.895523 0
+1.09664 20.9245 0.661133 2.34216 5.35341 1.92661 140169 0 -0.0026699 0.894605 0
+-22.155 6.32219 0.644531 1.86005 4.08461 1.57899 118568 0 1.54261 0.892187 0
+-10.6359 6.32844 0.640625 2.02499 4.67846 1.48152 118604 0 1.56846 0.890294 0
+-36.9685 6.01469 0.640625 1.96557 4.57455 1.60425 118054 0 1.52934 0.890103 0
+-16.5329 6.36562 0.792969 2.03491 4.57455 1.80019 118586 0 1.55096 0.882022 0
+53.635 11.0173 0.200195 0.823582 0.930965 1.77141 125825 0 1.46431 0.875574 1
+49.2047 14.5508 0.29834 2.11597 4.85534 1.82319 130959 0 -1.58713 0.874399 0
+24.9053 17.3773 0.194824 2.03888 4.58349 1.67796 135095 0 0.767419 0.869382 0
+27.7752 13.9578 0.19873 2.0954 4.43814 1.72279 129956 0 0.922817 0.863276 0
+13.0528 12.0309 0.527344 0.829434 0.984496 1.68206 127102 0 1.59783 0.861771 1
+18.5216 33.443 0.0793457 1.92849 4.51684 1.53603 158475 0 3.09536 0.850593 0
+40.4737 41.3944 0.193237 1.96557 4.19784 1.68947 170244 0 -1.83227 0.849347 0
+-50.7297 5.96438 0.893555 2.34445 5.37962 2.04586 118011 0 1.53442 0.840913 0
+39.8872 31.8587 0.12207 2.04986 4.81285 1.52333 156202 0 -0.06187 0.838812 0
+18.0609 56.7806 -0.0175781 1.98874 4.53452 1.53453 192638 0 3.11853 0.83602 0
+33.2438 43.9884 0.134155 1.96461 4.50363 1.51628 173965 0 3.1341 0.832917 0
+18.5144 28.0916 0.233154 1.97133 4.32264 1.738 150519 0 3.12776 0.825308 0
+-45.552 -6.96656 0.661133 2.0171 4.61043 1.57437 99307 0 -1.59201 0.824603 0
+40.5544 3.46344 0.139648 1.96461 4.4079 1.4761 114552 0 1.56111 0.821474 0
+39.2606 12.2408 0.308594 0.85639 0.891157 1.80988 127652 0 1.52005 0.819316 1
+53.9366 11.6341 0.223633 0.803523 0.9181 1.81608 126762 0 1.51463 0.819026 1
+4.11407 23.4577 0.494141 1.97037 4.45551 1.52259 143922 0 -0.0237004 0.815085 0
+35.7691 44.2936 0.336426 1.94457 4.42083 1.81785 174441 0 3.11365 0.815085 0
+17.6933 10.1556 0.344727 0.939872 1.00735 1.73461 124309 0 1.34795 0.812272 1
+-18.7346 11.1133 0.833984 0.937637 0.950951 1.72195 125599 0 -1.59814 0.810479 1
+38.3359 -6.12469 -0.0688477 1.90975 4.30159 1.50926 100505 0 -1.55785 0.808823 0
+18.3525 49.4994 0.122803 1.91816 4.05282 1.63869 181875 0 3.05886 0.804252 0
+28.5341 -9.76148 0.0402832 0.868179 0.944934 1.77314 95327 0 -1.63761 0.804098 1
+51.8968 -9.77 0.0217285 0.798633 0.878198 1.6192 95400 0 1.57231 0.803174 1
+-39.8252 -7.10188 0.620117 2.08927 4.67846 1.52892 98857 0 -1.6337 0.803019 0
+-31.2898 10.3927 0.694336 0.821574 0.920793 1.6796 124624 0 1.52241 0.802865 1
+-45.6089 9.98508 0.81543 0.830853 0.928469 1.77228 124111 0 1.52545 0.802246 1
+30.823 44.3037 0.307861 2.01022 4.42515 1.73885 174426 0 -3.10336 0.800848 0
+22.5501 6.01688 0.347412 2.32053 5.00457 2.03391 118240 0 1.58113 0.798344 0
+56.5042 -5.81094 -0.0354004 1.95122 4.33533 1.65356 101030 0 -1.58562 0.794544 0
+49.7803 24.2691 0.143433 1.84288 3.95894 1.53191 145001 0 -1.56316 0.787441 0
+19.7566 12.5546 0.208374 0.802542 0.947243 1.66734 128059 0 1.14486 0.78695 1
+49.1759 19.3928 0.233643 1.8348 3.96281 1.52407 137979 0 -1.58396 0.785144 0
+43.9473 -6.17125 0.0196533 2.04686 4.39501 1.77748 100523 0 -1.60846 0.780997 0
+-64.2063 -11.7469 0.787109 0.805487 0.809034 1.71943 92229 0 -1.56029 0.779322 1
+19.6288 10.9276 0.196777 0.806274 0.895738 1.61171 125719 0 1.44493 0.778649 1
+-58.4728 -11.6008 0.803711 0.780323 0.810616 1.75763 92247 0 -1.62976 0.776962 1
+-22.5255 -0.285545 0.974609 2.25904 4.80815 1.74225 109207 0 -1.57227 0.762954 0
+-59.3028 -12.3886 0.841797 0.762992 0.768976 1.78705 91308 0 1.59999 0.761006 1
+19.3788 11.6959 0.126343 0.747139 0.852011 1.60151 126654 0 1.52139 0.760651 1
+53.8265 12.3359 0.253418 0.854093 0.957006 1.77748 127698 0 1.57208 0.759225 1
+-45.3341 9.22437 0.793945 0.834716 0.930965 1.75677 122708 0 1.52151 0.755997 1
+39.4248 11.4294 0.125732 0.794549 0.849103 1.67714 126249 0 1.48942 0.754553 1
+-48.6969 -10.0944 0.660156 0.750246 0.771986 1.66003 94617 0 -1.58367 0.753829 1
+-48.1159 -3.67516 0.702637 2.05889 4.65567 1.50779 103979 0 -1.57653 0.751101 0
+-48.4775 -10.9187 0.714844 0.794549 0.834716 1.70771 93214 0 -1.565 0.749087 1
+-33.6759 -3.5525 0.713867 2.06292 4.58349 1.51369 104024 0 -1.60126 0.741674 0
+49.1413 26.6995 0.328613 1.92097 4.33109 1.64711 148743 0 -1.60419 0.740362 0
+-59.6156 -11.6098 0.828125 0.793967 0.766727 1.79054 92243 0 1.38776 0.734501 1
+52.6171 3.62609 0.155884 2.03988 4.46858 1.46963 115058 0 1.61587 0.732783 0
+-69.5939 2.34305 0.889648 2.09336 4.56562 1.71859 112804 0 1.58583 0.718002 0
+17.9755 67.352 -0.000488281 1.93415 4.27646 1.45109 208082 0 -3.13281 0.714429 0
+15.9397 9.64766 0.38208 0.808837 0.972552 1.66246 123835 0 1.55275 0.711331 1
+-43.6984 5.85242 1.40527 2.85709 6.73439 2.93343 118033 0 1.54482 0.708213 0
+-46.8544 9.86047 0.757812 0.830042 0.909622 1.72027 123639 0 1.57525 0.703244 1
+70.5644 -5.59484 0.0539551 2.0112 4.47294 1.69525 101542 0 1.55995 0.694717 0
+18.0038 72.1099 0.05896 2.05187 4.43814 1.7414 215102 0 3.13236 0.684053 0
+-57.5088 5.80961 0.694336 1.93604 4.18556 1.55262 117990 0 1.5562 0.64579 0
+-46.9813 10.6391 0.766602 0.813193 0.878198 1.66979 125043 0 1.55401 0.643441 1
+16.9877 11.157 0.32959 0.886438 0.957006 1.63949 125711 0 1.35647 0.635222 1
+36.0358 38.207 0.240601 0.856599 0.950486 1.8348 165550 0 -0.189054 0.615088 1
+-52.0912 10.8014 0.876953 0.842907 0.913182 1.7448 125027 0 1.50968 0.61451 1
+55.1736 9.72945 0.134766 1.00984 1.08708 1.82586 123958 0 -1.55341 0.606498 1
+-50.0963 -7.63891 0.908203 0.841468 0.894427 1.74992 98357 0 -1.48955 0.597256 1
+-11.374 0.0114822 0.8125 2.10669 4.7475 1.56365 109710 0 -1.57872 0.590602 0
+1.75594 -4.07265 1.73145 3.23119 13.7508 3.57311 103667 0 -1.67466 0.580887 0
+0.802345 32.157 0.608887 2.06191 4.58797 1.6994 156548 0 -0.0975264 0.577674 0
+49.4566 17.1616 0.255859 2.0791 4.66022 1.71273 134704 0 -1.55647 0.571946 0
+-59.2903 -12.518 0.851562 0.792418 0.798048 1.80547 90840 0 1.63328 0.569254 1
+-65.5541 -9.66125 0.837891 0.796491 0.812994 1.75505 95033 0 -1.49049 0.563858 1
+-59.4873 -11.6425 0.853516 0.774629 0.773873 1.78618 92244 0 -0.811373 0.560373 1
+0.735001 42.095 0.666992 2.10258 4.76143 1.84288 171056 0 0.071081 0.545709 0
+17.5316 10.3601 0.330811 0.881851 0.944934 1.65437 124776 0 1.44815 0.544922 1
+41.2288 36.2487 0.305664 0.866273 0.855345 1.68535 162758 0 0.238755 0.541045 1
+-59.6527 -11.4824 0.810547 0.77048 0.74459 1.73885 92711 0 1.56325 0.518546 1
+22.6055 -8.27187 -0.352295 0.789714 0.816176 1.61762 97648 0 2.75521 0.501465 1
+-71.5246 5.57648 0.642578 1.94172 4.32687 1.54807 117478 0 1.57329 0.500793 0
+27.3102 -10.4259 -0.181396 0.80313 0.884222 1.63869 94387 0 1.70377 0.49707 1
+59.6989 3.62109 0.149414 1.90696 4.17332 1.48296 115080 0 1.58158 0.493348 0
+-72.1366 -4.02445 0.578125 2.01808 4.47731 1.55148 103436 0 -1.56086 0.466451 0
+67.6606 3.43156 0.214722 1.98292 4.38643 1.67796 114637 0 1.66301 0.459743 0
+-46.6798 10.0102 0.770508 0.814783 0.8879 1.70938 124108 0 1.60149 0.456531 1
+0.521095 47.8555 0.698242 2.00434 4.42948 1.72279 179479 0 0.0255295 0.455289 0
+24.4176 36.9602 0.112305 2.06998 4.76608 1.65517 163642 0 3.13356 0.450813 0
+53.791 10.997 0.150146 0.830853 0.963571 1.77228 125826 0 1.50552 0.437643 1
+-65.6378 -9.56047 0.810547 0.795131 0.823783 1.76278 95500 0 -1.46785 0.432782 1
+25.712 35.8899 0.380615 0.856808 0.787212 1.78792 162242 0 -2.04465 0.428831 1
+19.3844 11.8758 0.176025 0.825192 0.933925 1.65517 127122 0 1.50447 0.420064 1
+54.9844 9.79156 0.118896 1.01795 1.50558 1.81874 123957 0 -1.52472 0.418222 2
+-59.179 -12.3887 0.864258 0.799609 0.80588 1.81165 91309 0 1.57823 0.407274 1
+-50.6206 -8.84109 0.701172 0.773117 0.768226 1.65033 96483 0 -1.57529 0.401453 1
+-65.4267 5.91109 0.604492 1.95122 4.42083 1.52407 117965 0 1.57831 0.393735 0
+-52.1975 10.9238 0.911133 0.843113 0.907404 1.73715 125494 0 1.50609 0.393618 1
+37.5556 9.21265 0.834961 0.777661 0.833087 1.65922 122967 0 1.53133 0.393036 1
+67.9706 -5.60984 -0.115967 0.801368 1.91629 1.43278 101534 0 -3.11363 0.382776 0
+19.5514 11.6864 0.158081 0.781467 0.899903 1.63909 126655 0 1.55363 0.381796 1
+59.4075 3.44579 0.151367 1.61683 3.48353 1.43104 114611 0 1.60138 0.374905 0
+-71.8044 5.32094 0.67334 2.10258 4.71976 1.61171 117009 0 1.53533 0.368178 0
+-46.674 9.84937 0.756836 0.833494 0.9114 1.72447 123640 0 1.59732 0.363082 1
+-45.4912 9.35368 0.774414 0.840852 0.911845 1.7542 123175 0 1.56029 0.352648 1
+24.2506 36.3334 0.134033 2.04487 4.59245 1.62316 162705 0 -0.0379765 0.350421 0
+62.6214 14.928 0.631836 0.799609 1.28465 1.76278 131469 0 1.56041 0.338514 2
+38.8856 35.9541 0.121094 0.792031 0.825394 1.62039 162283 0 1.47468 0.337422 1
+-18.8992 11.0486 0.826172 0.974037 0.96216 1.73122 125598 0 -1.6182 0.334916 1
+27.3106 -10.6272 -0.206787 0.820371 0.929376 1.67878 93919 0 1.70716 0.334807 1
+-47.0762 10.5362 0.77832 0.83492 0.89727 1.67305 124574 0 1.53279 0.332202 1
+-71.5234 -7.56609 0.541504 1.83749 4.1006 1.4884 98290 0 -1.58719 0.329284 0
+52.4322 37.9522 1.15039 0.793192 0.816575 1.70771 165133 0 1.47098 0.326486 1
+-0.514061 13.2663 0.75 0.899408 1.42233 1.79843 128932 0 -1.66225 0.325735 2
+22.7552 -8.26601 -0.457275 0.821975 0.881205 1.70355 97649 0 2.88505 0.324235 1
+27.1478 -10.4399 -0.1604 0.814186 0.924172 1.64953 94386 0 -1.43103 0.323808 1
+38.8944 35.7716 0.0961914 0.790679 0.816575 1.62951 161815 0 1.49251 0.322099 1
+60.2543 3.67687 0.174927 2.15139 4.92698 1.56098 115082 0 1.58557 0.32178 0
+66.4116 3.66117 0.193481 2.024 4.57455 1.72279 115101 0 1.66305 0.321673 0
+62.7756 14.92 0.611328 0.821775 1.80283 1.79931 131470 0 1.60183 0.320928 2
+-62.3328 10.9656 0.801758 0.825192 0.907404 1.70438 125463 0 1.61091 0.315103 1
+-64.4864 -7.21938 0.600586 1.87739 4.24318 1.49059 98780 0 -1.57664 0.314051 0
+-45.7781 10.0002 0.789062 0.870301 0.953275 1.77574 124110 0 1.53156 0.30819 1
+-59.3091 -12.9759 0.900391 0.768976 0.791065 1.82319 90372 0 1.55405 0.307774 1
+25.7133 47.6406 1.0625 0.819771 0.769727 1.85009 179090 0 -2.86555 0.30715 1
+39.3372 11.5648 0.202515 0.814385 0.834716 1.71691 126716 0 1.49106 0.306527 1
+-59.1727 -12.5539 0.866211 0.825797 0.81538 1.81519 90841 0 1.59313 0.303422 1
+53.8502 12.5191 0.269043 0.864583 0.953275 1.73038 128166 0 1.49211 0.299925 1
+53.925 11.4731 0.202148 0.837369 0.97018 1.82675 126294 0 1.43715 0.296858 1
+-71.2655 5.33344 0.663086 1.69443 3.71545 1.48188 117011 0 1.55687 0.29523 0
+22.3059 64.1386 0.574219 0.829839 1.6447 1.81077 203415 0 2.97247 0.294824 2
+-16.8503 66.3991 1.46387 1.99457 4.37787 1.63669 206569 0 -0.0596527 0.294215 0
+41.3387 36.1239 0.336914 0.931249 0.944934 1.73545 162291 0 0.133608 0.293202 1
+0.766411 43.3593 0.65332 2.12321 4.65113 1.84828 172928 0 -3.11947 0.291384 0
+-59.3844 -12.0052 0.835938 0.770668 0.771986 1.80371 91776 0 1.47321 0.290779 1
+1.12766 58.4014 0.794922 2.15139 4.81755 1.87831 194925 0 -0.109187 0.289171 0
+54.0988 11.6559 0.205444 0.864372 0.956072 1.77835 126763 0 1.55894 0.288168 1
+0.738205 48.4368 0.688477 2.06493 4.63752 1.80283 180416 0 0.0205434 0.287968 0
+0.572029 32.7585 0.633789 2.06392 4.63299 1.66328 157483 0 -0.146969 0.286968 0
+56.8134 -17.1629 -0.127441 1.99165 4.49045 1.5667 84651 0 -1.48953 0.281801 0
+53.8801 12.0283 0.226807 0.875416 0.950486 1.81165 127230 0 1.54625 0.281504 1
+0.472107 58.384 0.799805 2.17675 4.85534 1.85371 194923 0 -0.0272259 0.279238 0
+-71.8119 5.84203 0.674805 2.16932 4.8982 1.65356 117945 0 1.56131 0.278061 0
+53.6494 10.868 0.197998 0.908567 1.00269 1.81697 125357 0 1.49799 0.276007 1
+19.4006 11.483 0.190186 0.796491 0.872642 1.59644 126186 0 1.47225 0.272799 1
+-47.6163 -3.62421 0.714355 1.80195 3.70458 1.44332 103981 0 -1.57798 0.268558 0
+36.0158 38.4721 0.210205 0.885356 1.79054 1.8528 166018 0 -0.390016 0.267408 2
+-14.8353 -14.8378 0.172485 2.06191 4.66933 1.50742 87703 0 1.44027 0.266835 0
+53.6678 12.5461 0.278564 0.873068 0.937122 1.76623 128165 0 1.53808 0.266644 1
+-71.8772 -3.77289 0.533203 1.89304 4.1368 1.50705 103905 0 -1.52754 0.266453 0
+0.16375 58.0922 0.827148 2.13882 4.78473 1.83659 194454 0 -0.0553094 0.264358 0
+0.811562 58.1091 0.775391 2.13778 4.76143 1.85824 194456 0 -0.0322548 0.260201 0
+-65.0798 -7.19609 0.611328 2.0852 4.80346 1.58944 98778 0 -1.60141 0.257018 0
+-64.7725 -6.94203 0.572266 2.03391 4.61944 1.58633 99247 0 -1.53689 0.255529 0
+-59.4819 -11.4777 0.860352 0.806667 0.795714 1.78095 92712 0 -1.56695 0.250179 1
+55.2039 9.56969 0.150879 0.955081 1.11096 1.82408 123490 0 -1.55147 0.250179 1
+36.1918 38.2002 0.20105 0.936265 1.17714 1.8348 165551 0 -0.230931 0.249447 1
+65.7822 3.6961 0.184204 1.87647 4.2349 1.63589 115099 0 1.65911 0.249082 0
+-50.2909 -7.76203 0.935547 0.888171 1.33975 1.78967 97888 0 -1.45683 0.246171 1
+-71.5959 -4.01977 0.551758 1.727 3.63648 1.47322 103438 0 -1.55972 0.24599 0
+-62.4872 10.8308 0.841797 0.873707 1.05957 1.71524 124994 0 1.67484 0.245085 1
+-71.2435 5.83395 0.666016 1.82764 3.95894 1.51628 117947 0 1.63172 0.243283 0
+-31.2743 10.2217 0.688477 0.861212 0.945857 1.69277 124156 0 1.52492 0.243283 1
+67.8284 -5.62437 -0.0710449 0.812597 1.97133 1.42824 101533 0 -3.11887 0.242923 0
+-47.0719 9.81891 0.769531 0.841468 0.950486 1.71607 123638 0 1.59023 0.242206 1
+54.9072 10.0158 0.081543 0.955664 1.27093 1.72616 124425 0 -1.44056 0.240775 1
+-50.2712 -7.62492 0.930664 0.930113 1.19495 1.78182 98356 0 -1.45073 0.240418 1
+-47.0828 10.0177 0.771484 0.811012 0.895082 1.68042 124106 0 1.56889 0.240061 1
+1.01398 32.7603 0.625977 2.08214 4.66933 1.69277 157485 0 -0.0176107 0.239527 0
+22.2353 65.7964 0.0883789 0.815977 0.79727 1.61604 205755 0 -0.0522228 0.238994 1
+1.02609 31.56 0.597656 2.07707 4.57008 1.72447 155613 0 -0.0867171 0.238462 0
+22.2864 12.2181 0.39502 0.799218 0.953275 1.76882 127599 0 1.23166 0.237576 1
+-20.8872 12.7153 0.99707 0.748965 0.695391 1.70771 127932 0 -0.0459686 0.237222 1
+19.1891 11.6972 0.152466 0.811012 0.912513 1.64029 126653 0 1.53153 0.236869 1
+25.5713 35.8897 0.320068 0.887304 0.835939 1.78443 162241 0 -1.70293 0.234757 1
+-64.1836 -11.4684 0.743652 0.776902 0.798828 1.71189 92697 0 -1.56083 0.234582 1
+-58.5831 -11.4908 0.78125 0.807061 0.81538 1.76623 92714 0 -1.60496 0.233007 1
+1.1175 59.3432 0.864258 2.23709 5.05368 1.92943 196329 0 0.0384238 0.231788 0
+23.8691 -43.3795 1.61328 0.850348 1.65114 1.57284 46172 0 2.98324 0.231614 2
+51.9002 -9.93719 0.0307617 0.840852 0.892246 1.64953 94932 0 1.57122 0.23144 1
+-35.2903 12.3128 1.25293 0.843319 1.84018 1.75763 127419 0 2.59909 0.230227 2
+-0.493279 13.9277 0.760742 0.883629 2.13986 1.5763 129868 0 -0.379085 0.229535 0
+24.2506 37.5555 0.134888 2.08927 4.75213 1.69195 164577 0 3.11984 0.229535 0
+62.6095 15.096 0.654297 0.829839 1.61762 1.77574 131937 0 1.57807 0.22919 2
+-59.8928 -11.4247 0.756836 0.699307 0.667446 1.55907 92710 0 1.60576 0.227985 1
+27.1397 -10.6317 -0.199219 0.82721 1.00539 1.68124 93918 0 2.05705 0.227469 1
+-45.429 10.0122 0.830078 0.854928 0.950951 1.77748 124112 0 1.8614 0.226784 1
+54.6319 10.0392 -0.0390625 0.811804 0.899903 1.50558 124424 0 -1.40556 0.223038 1
+-50.6297 -9.00063 0.726074 0.792999 0.820973 1.65437 96015 0 -1.56425 0.222362 1
+13.0475 12.1746 0.514648 0.855554 0.994643 1.69608 127570 0 1.60963 0.220007 1
+-45.7947 9.85672 0.78418 0.864794 0.952345 1.75334 123642 0 1.527 0.21917 1
+0.785782 33.4021 0.646973 2.07504 4.57455 1.69195 158420 0 -0.105845 0.219003 0
+0.841721 59.0356 0.848633 2.10875 4.67389 1.85733 195860 0 -0.0418063 0.218336 0
+55.1697 10.0208 0.108154 0.96811 1.18898 1.78967 124426 0 -1.51577 0.217005 1
+36.0572 38.0544 0.25415 0.941652 1.11436 1.83032 165082 0 -2.89544 0.216674 1
+17.7305 9.8736 0.337402 0.859742 0.946319 1.67142 123841 0 1.30218 0.215351 1
+-46.5804 6.19531 1.50195 3.12262 12.2541 3.28849 118492 0 1.57052 0.213705 0
+39.3877 12.113 0.26416 0.883844 0.915861 1.82854 127185 0 1.51955 0.213541 1
+23.8197 -43.6669 1.49121 0.883521 1.16229 1.57938 45704 0 -3.07901 0.212395 2
+-72.1328 -7.78719 0.645508 1.95027 4.39501 1.58285 97820 0 -1.57903 0.212395 0
+-64.7567 -7.45969 0.589355 1.93415 4.45986 1.50228 98311 0 -1.58909 0.211743 0
+1.405 57.7823 0.773438 2.13986 4.80815 1.8719 193990 0 -0.120109 0.210929 0
+19.8682 12.4461 0.210205 0.890668 1.00932 1.74395 127592 0 1.23131 0.210442 1
+-71.2241 -7.78922 0.583008 1.92097 4.33533 1.53004 97823 0 -1.57886 0.209955 0
+49.4091 16.8798 0.260742 1.79142 3.76292 1.73291 134236 0 -1.56601 0.20947 0
+54.1509 12.3297 0.218018 0.803915 0.914074 1.69525 127699 0 1.52034 0.208985 1
+19.5954 11.3909 0.196167 0.826806 0.913182 1.62276 126187 0 1.50534 0.208501 1
+-0.121559 58.3959 0.830078 2.10772 4.72437 1.81963 194921 0 -0.107428 0.206094 0
+17.4577 10.6402 0.313232 0.786828 0.875202 1.53678 125244 0 1.44974 0.205296 1
+-71.5387 5.05766 0.663086 1.92943 4.27646 1.5511 116542 0 1.54203 0.204978 0
+-62.8472 5.93875 0.713867 1.67142 3.665 1.51258 117973 0 1.43611 0.202601 0
+1.73985 -4.51781 1.65234 2.99129 11.9702 3.34681 102731 0 -1.6842 0.202286 0
+-45.1017 -6.88492 0.648438 1.78356 3.665 1.49606 99309 0 -1.63886 0.201813 0
+-18.8089 10.82 0.801758 0.813987 0.857855 1.61368 125131 0 -1.63136 0.201813 1
+25.0832 35.0381 0.191162 0.771797 0.76076 1.52221 160836 0 -1.85011 0.201656 1
+-59.8787 -11.6178 0.798828 0.761131 0.73233 1.69774 92242 0 1.51189 0.200245 1
+52.1045 3.60094 0.145996 1.82586 3.73728 1.46104 115056 0 1.61162 0.200245 0
+53.6497 11.2984 0.230713 0.826806 0.924849 1.80371 126293 0 1.42271 0.199308 1
+-59.148 -12.9613 0.90332 0.78204 0.811804 1.8223 90373 0 1.53127 0.198064 1
+68.2483 3.63883 0.284424 2.25684 5.20901 1.83659 115107 0 1.63456 0.197909 0
+27.3245 -10.2008 -0.12915 0.866062 0.954673 1.70272 94855 0 -1.90096 0.197754 1
+51.9216 -9.57875 0.0419922 0.870301 0.958409 1.66003 95868 0 1.56927 0.197754 1
+19.5019 10.8337 0.20874 0.847861 0.960283 1.59488 125250 0 1.5221 0.196672 1
+66.0812 3.44781 0.126221 1.75249 4.05678 1.61723 114632 0 1.63887 0.196209 0
+10.0747 -11.425 0.488037 0.889473 0.99343 1.79317 92929 0 0.543148 0.19498 1
+-0.736252 13.9081 0.762695 0.843319 1.81431 1.57437 129867 0 -0.86284 0.193909 0
+38.6403 35.9676 0.130371 0.833087 0.874988 1.66328 162282 0 1.44974 0.193757 1
+-46.6219 10.3734 0.77832 0.812201 0.900342 1.69112 124576 0 1.6173 0.193604 1
+-0.509842 -16.7973 0.985352 2.06897 4.59694 1.81077 84940 0 2.8676 0.192084 0
+-48.7252 -9.88445 0.663086 0.801955 0.823381 1.687 95085 0 -1.61918 0.191933 1
+-4.63984 12.2916 0.782227 0.797464 0.843319 1.6435 127515 0 -1.67775 0.190724 1
+-20.6919 12.672 0.950195 0.752815 0.694373 1.65194 127933 0 0.28714 0.190423 1
+-46.8717 9.54875 0.743164 0.862054 0.926205 1.73376 123171 0 1.6438 0.190122 1
+0.786644 33.7377 0.656738 2.06897 4.53452 1.71022 158888 0 -2.88596 0.189972 0
+-1.14258 66.0454 0.736328 0.893172 0.906961 1.77055 206150 0 -0.622521 0.189821 1
+37.4002 9.20156 0.831055 0.805683 0.885735 1.71859 122966 0 1.49623 0.189221 1
+-14.5402 39.8583 0.892578 2.06695 4.56562 1.69774 167732 0 2.79197 0.189221 0
+-35.1515 12.3202 1.22852 0.82479 1.62673 1.75591 127420 0 2.002 0.18743 2
+49.6667 26.9383 0.309082 2.22185 5.255 1.78095 149213 0 -1.6221 0.187281 0
+55.3937 9.71593 0.14856 1.02437 1.17743 1.85552 123959 0 -1.52814 0.187133 1
+1.03477 47.8613 0.652344 2.04387 4.50363 1.73461 179481 0 0.110872 0.185504 0
+-1.37906 51.4422 2.26562 0.831665 0.884654 1.81254 184621 0 0.952393 0.185062 1
+24.4053 35.7452 0.189697 2.08927 4.47294 1.66328 161770 0 0.0116628 0.183447 0
+18.5808 49.7264 0.111328 2.12632 4.63752 1.69691 182344 0 3.06434 0.183447 0
+-64.3403 -11.9097 0.779297 0.780323 0.799609 1.68288 91760 0 -1.55941 0.183301 1
+0.453751 57.4534 0.775391 2.07099 4.58797 1.83122 193519 0 -0.0416426 0.183155 0
+-8.80524 -13.6079 0.39209 2.01415 4.49484 1.57476 89594 0 1.42921 0.182863 0
+-48.5865 -10.3306 0.699219 0.786636 0.806667 1.70771 94150 0 -1.63245 0.182426 1
+-72.4561 -7.56429 0.645508 1.96653 4.33533 1.60582 98287 0 -1.53453 0.182134 0
+-59.3297 -13.1925 0.933594 0.832274 0.880775 1.86551 89904 0 1.5012 0.181844 1
+1.13172 57.4586 0.748047 2.10053 4.73823 1.84018 193521 0 -0.0528918 0.181844 0
+34.9768 30.3028 -0.029541 0.643445 0.567839 1.15212 153847 0 -0.118351 0.180974 1
+-48.6986 -10.2698 0.660156 0.776143 0.788751 1.68782 94149 0 -1.62836 0.180684 1
+39.1107 35.957 0.135742 0.793967 0.862475 1.58324 162284 0 1.53532 0.180107 1
+22.02 57.4227 0.723633 0.854928 0.835123 1.81077 193586 0 0.269062 0.180107 1
+-31.2654 10.5934 0.704102 0.917148 1.00049 1.70771 125092 0 1.48356 0.179818 1
+-52.063 10.5013 0.826172 0.884168 0.976835 1.75334 124559 0 1.53422 0.179531 1
+24.5792 -10.3758 -0.108154 0.827816 0.977789 1.74395 94378 0 -1.6462 0.178669 1
+46.5506 45.6123 1.24805 2.24804 4.88864 1.93604 176347 0 -1.66456 0.177954 0
+-23.5847 30.8498 -0.157959 0.811606 1.2203 1.738 154600 0 -1.19819 0.177811 1
+-3.69547 74.7656 0.301758 0.881851 0.991491 1.75591 218778 0 2.88351 0.177525 1
+-35.3114 12.5791 1.18359 0.845793 1.9579 1.77055 127887 0 3.02393 0.177241 2
+39.1004 35.76 0.145752 0.786443 0.89727 1.59099 161816 0 1.59163 0.177241 1
+-0.500153 14.1881 0.75 0.88142 2.19489 1.59995 130336 0 -2.6895 0.176814 0
+27.0928 -10.1641 -0.121338 0.841879 0.937122 1.66003 94854 0 -1.46932 0.176672 1
+48.5656 26.6855 0.312988 1.71189 3.74825 1.59644 148741 0 -1.63195 0.17653 0
+-69.0545 2.18234 0.799805 1.90789 3.96668 1.58169 112338 0 1.5395 0.176104 0
+-48.298 -10.9509 0.735352 0.809034 0.859112 1.70771 93215 0 -1.58226 0.174692 1
+-23.7742 30.828 -0.176758 0.789714 1.00932 1.73461 154599 0 3.05961 0.174692 1
+18.0577 72.7087 -0.0231934 2.3228 5.50182 1.95408 216038 0 3.10204 0.174692 0
+28.5549 -9.93813 0.020752 0.90337 0.954673 1.78095 94859 0 -1.58835 0.173849 1
+-18.5501 11.1375 0.822266 0.953101 0.954673 1.7542 125600 0 -1.66738 0.173288 1
+28.5485 -9.57969 0.036499 0.935579 1.03099 1.77401 95795 0 -1.66634 0.170784 1
+10.0693 -11.5714 0.481934 0.905246 1.01154 1.79843 92461 0 0.486781 0.169956 1
+-0.484688 13.5994 0.759766 0.913126 1.9868 1.66003 129400 0 -1.23572 0.169819 0
+29.2864 6.84141 0.0345459 0.859952 1.72532 1.76192 119665 0 -0.262656 0.169681 2
+-70.9626 5.61562 0.689453 1.62039 3.50058 1.45073 117480 0 1.6211 0.169543 0
+1.10492 27.3564 0.5625 1.94077 4.35655 1.56441 149529 0 -0.0562513 0.168994 0
+-70.594 -7.5518 0.602539 1.89859 4.21839 1.56594 98293 0 -1.55145 0.1679 0
+69.6691 -5.60773 -0.135742 1.80195 3.86723 1.5667 101539 0 1.57405 0.167627 0
+0.271873 48.1367 0.662109 2.00042 4.50803 1.738 179946 0 -0.0444643 0.167627 0
+-35.3094 12.0942 1.26367 0.849933 1.80988 1.79667 126951 0 0.308085 0.167491 2
+-0.751091 -17.0787 1.00195 2.12944 4.80815 1.83928 84471 0 2.90194 0.166811 0
+34.3653 32.1175 0.495605 0.738435 0.747504 1.76623 156653 0 -2.9573 0.166134 1
+-71.8198 -7.25859 0.536133 1.85915 4.07664 1.52407 98757 0 -1.54453 0.165728 0
+19.6021 11.9551 0.160645 0.835735 0.973027 1.68042 127123 0 1.49746 0.165323 1
+0.737816 47.2631 0.551758 2.1284 4.66477 1.77748 178544 0 0.0785915 0.16492 0
+52.8788 3.89422 0.136353 2.28233 5.16343 1.5763 115527 0 1.571 0.164785 0
+49.1597 16.4847 0.291504 0.935465 1.10151 1.69443 133767 0 -1.6279 0.164785 1
+39.4298 16.7035 0.141479 0.790486 0.813789 1.62475 134205 0 2.71243 0.164785 1
+0.160156 59.0177 0.84082 2.10978 4.68761 1.83749 195858 0 0.00714978 0.164651 0
+24.8925 35.034 0.198486 0.808442 0.820171 1.56862 160835 0 -0.348066 0.164516 1
+53.4219 11.038 0.190552 0.837983 0.938955 1.77401 125824 0 1.46267 0.163713 1
+53.7431 11.6417 0.222412 0.809627 0.891375 1.7958 126761 0 1.46844 0.163713 1
+27.35 -9.10766 -0.154053 0.827411 0.861212 1.75934 96259 0 -1.50026 0.163445 1
+22.3512 -8.25304 -0.251221 0.846206 0.865428 1.66979 97647 0 2.74965 0.163445 1
+26.3281 -8.06406 0.0100098 0.875629 0.971602 1.76106 97660 0 1.70147 0.162646 1
+22.3375 -8.41812 -0.242676 0.808837 0.876699 1.63749 97179 0 2.67333 0.161585 1
+22.4366 64.1074 0.583008 0.836552 1.74736 1.76106 203416 0 3.01822 0.161585 2
+-53.2937 -12.948 0.78125 0.781086 0.787212 1.67223 90391 0 -1.20529 0.16132 1
+-66.089 -7.21273 0.710938 1.94077 4.30159 1.6066 98775 0 -1.58816 0.161188 0
+-63.1834 5.94734 0.700195 1.40062 3.28528 1.48586 117972 0 1.27688 0.160792 0
+67.395 3.14562 0.139282 1.79667 4.07266 1.59722 114168 0 1.66585 0.160003 0
+17.8913 71.6269 0.0900879 1.6796 3.72635 1.62158 214165 0 3.10504 0.159217 0
+30.6584 43.7663 0.312988 1.91723 4.31842 1.72785 173489 0 3.10286 0.159087 0
+68.2398 -5.59953 -0.109619 0.843937 1.97615 1.42685 101535 0 -3.13563 0.158826 0
+-12.2369 -0.0373459 0.813477 2.08011 4.84586 1.56403 109239 0 -1.59588 0.157655 0
+24.6759 37.2727 0.10498 2.0954 4.88387 1.72447 164111 0 3.13785 0.157525 0
+-70.8917 -7.23414 0.530273 1.87464 4.22251 1.52445 98760 0 -1.56207 0.157008 0
+-63.5147 5.945 0.643555 1.12832 2.63465 1.47142 117971 0 0.932715 0.157008 0
+-17.0606 67.0096 1.45996 2.16826 4.74286 1.76365 207504 0 -0.0320654 0.155719 0
+46.6669 -8.28593 0.665527 0.846206 0.809825 1.84288 97723 0 1.51013 0.154823 1
+-14.2752 40.1073 0.933594 2.18847 4.90298 1.77921 168201 0 2.91469 0.15444 0
+67.6958 3.89875 0.295166 2.23055 4.80346 1.83569 115573 0 1.59571 0.154185 0
+-52.2519 10.4984 0.825195 0.879056 1.00489 1.74821 124558 0 1.61047 0.153803 1
+-62.7884 10.9923 0.844727 0.828624 1.70272 1.70938 125461 0 1.67307 0.153295 2
+39.466 11.1552 0.139282 0.857645 0.896613 1.69691 125781 0 1.55266 0.153295 1
+-49.9028 -7.61507 0.915039 0.89623 1.01972 1.78705 98358 0 -1.50783 0.153042 1
+0.851875 30.9287 0.632324 2.09336 4.59694 1.6994 154676 0 -0.0866428 0.152158 0
+62.3494 14.9166 0.633789 0.84229 1.93415 1.8223 131468 0 1.47344 0.151529 2
+-62.4959 -9.11203 0.678711 0.808639 0.793386 1.73291 95978 0 -2.42259 0.151028 1
+26.1439 -8.15531 -0.0275879 0.89339 0.954207 1.78095 97659 0 1.83888 0.150778 1
+-23.7978 30.6445 -0.191162 0.794355 1.22418 1.71943 154131 0 -3.05766 0.150029 1
+-41.4392 -12.6637 0.741211 0.789136 0.776522 1.62197 90896 0 3.0293 0.149531 1
+27.5741 -10.4037 -0.179443 0.839826 0.893772 1.67632 94388 0 1.99171 0.149531 1
+-62.3202 -9.12578 0.647949 0.8047 0.806274 1.71859 95979 0 -2.56791 0.149035 1
+0.586876 31.5725 0.619141 2.03391 4.48169 1.68617 155611 0 -0.113944 0.149035 0
+69.9744 -5.81125 -0.109863 2.02598 4.39501 1.6755 101072 0 1.52398 0.148788 0
+-35.117 12.5853 1.14355 0.831665 1.95982 1.74821 127888 0 2.85056 0.148541 2
+38.6394 35.7545 0.0944824 0.819971 0.865428 1.67878 161814 0 1.44133 0.148541 1
+-48.6341 -10.8021 0.705078 0.802346 0.838801 1.7397 93681 0 -1.57233 0.148047 1
+0.526718 34.0442 0.691406 2.0995 4.66022 1.7431 159355 0 3.12215 0.147924 0
+-64.2027 -12.2469 0.789062 0.73233 0.748234 1.64711 91293 0 -1.57709 0.147801 1
+24.438 -14.2109 -0.868164 0.842907 0.951648 1.70106 88762 0 -2.69088 0.147555 1
+-48.6359 -10.9497 0.699219 0.820572 0.867543 1.73207 93213 0 -1.56188 0.147555 1
+-0.460777 -17.4159 1.00586 2.30698 4.80346 1.94932 84004 0 3.06374 0.14731 0
+-65.7106 -9.9875 0.849609 0.808245 0.836348 1.74225 94564 0 -1.46415 0.14731 1
+27.1092 -9.13515 -0.130127 0.789329 0.802346 1.69029 96258 0 -1.55598 0.14731 1
+1.4314 73.2016 0.521484 0.83065 0.872855 1.70855 216454 0 0.0914667 0.14731 1
+0.789223 57.1383 0.725586 2.10566 4.69218 1.82943 193052 0 -0.0372998 0.14682 0
+-65.4978 -9.9789 0.860352 0.822979 0.856181 1.73715 94565 0 -1.25726 0.146331 1
+-0.755936 13.6303 0.789062 0.869239 1.70189 1.65356 129399 0 -1.35774 0.144873 0
+46.7539 -8.37344 0.638672 0.811804 0.833087 1.79755 97256 0 1.49915 0.14439 1
+0.49469 49.7664 0.638184 2.00336 4.37787 1.70855 182287 0 3.12221 0.144269 0
+-0.414688 58.7217 0.831055 2.13465 4.72437 1.809 195388 0 -0.136933 0.144269 0
+26.3434 -7.9341 0.101074 0.850971 0.907404 1.73885 98128 0 1.79878 0.143427 1
+-46.2684 10.0799 0.75293 0.819171 0.942629 1.73545 124109 0 1.56199 0.143188 1
+0.513435 59.3413 0.849609 2.16826 4.89342 1.88658 196327 0 0.0203978 0.143068 0
+-45.9459 29.9238 0.453369 1.98098 4.4338 1.62514 153126 0 1.57168 0.142709 0
+57.4329 -28.2559 -0.483154 0.876913 1.03452 1.84648 68273 0 -0.680732 0.14247 1
+9.65148 50.8019 0.37793 0.835735 1.22807 1.71105 183720 0 0.737323 0.14247 1
+34.7712 30.2991 0.0751953 0.750613 0.717115 1.43558 153846 0 -0.00159439 0.141994 1
+24.1341 -43.0512 1.5127 0.855345 1.11694 1.60347 46641 0 0.0407521 0.141045 1
+-59.1377 -12.0376 0.831055 0.743318 0.768976 1.78095 91777 0 -1.4862 0.140808 1
+0.180473 57.1527 0.769531 2.0995 4.67846 1.82141 193050 0 -0.0910604 0.140808 0
+-73.425 -7.52477 0.764648 2.09745 4.63299 1.73038 98284 0 1.55859 0.140454 0
+26.1138 35.9591 0.290039 0.803326 0.796102 1.73545 162243 0 1.61888 0.140101 1
+-16.8036 65.8197 1.33887 2.24804 5.18871 1.81874 205633 0 -0.112285 0.140101 0
+-70.5109 6.89375 0.648438 2.0112 4.48169 1.59021 119353 0 3.07943 0.139748 0
+-44.9183 5.97078 1.41699 2.94491 8.48816 3.15018 118029 0 1.58427 0.138928 0
+-72.1519 -4.54531 0.575195 2.10566 4.62395 1.57476 102500 0 -1.6066 0.138462 0
+24.0978 -43.3245 1.56738 0.913572 1.58131 1.58517 46173 0 0.175604 0.137764 2
+-59.5456 -12.3364 0.835938 0.777851 0.789907 1.78443 91307 0 1.55558 0.137532 1
+8.39727 -2.7475 0.637695 0.820772 0.933925 1.72447 105560 0 1.6262 0.136608 1
+-53.3115 -12.723 0.822266 0.818971 0.84538 1.69277 90859 0 -1.64056 0.136378 1
+-0.226875 13.9264 0.741699 0.86395 2.15665 1.58131 129869 0 -0.751845 0.136378 0
+-35.1542 12.1044 1.25781 0.837778 1.47035 1.78967 126952 0 0.155938 0.13569 2
+-48.2986 -10.8135 0.769531 0.808047 0.850348 1.72195 93683 0 -1.62806 0.135461 1
+24.5584 -10.1778 -0.0739746 0.866062 1.0022 1.74821 94846 0 -1.6317 0.135461 1
+1.41164 73.3408 0.537109 0.868815 0.955606 1.72953 216922 0 0.116809 0.134776 1
+46.2906 45.8923 1.35742 2.21643 4.62847 1.85009 176814 0 -1.72908 0.134435 0
+-16.5637 66.1006 1.40137 2.02302 4.56562 1.7144 206102 0 -0.0497556 0.133981 0
+52.4109 38.1168 1.12793 0.862475 0.98594 1.75078 165601 0 1.53979 0.133868 1
+-23.4359 -0.235313 0.990234 2.18954 5.01435 1.70771 109204 0 -1.55849 0.133754 0
+-20.3597 12.5914 0.75293 0.659026 0.634708 1.40679 127934 0 1.13277 0.133415 1
+-18.7144 11.6786 0.73291 0.779942 0.83065 1.56441 126535 0 1.47661 0.13229 1
+-59.6072 -11.935 0.822266 0.798828 0.776522 1.79931 91775 0 1.46439 0.132066 1
+39.2384 12.5373 0.302734 0.844968 0.903204 1.76882 128120 0 1.56226 0.131619 1
+68.2756 12.0513 -0.0778809 0.760203 0.861633 1.71691 127275 0 1.50327 0.131396 1
+27.0784 -9.39343 -0.110352 0.836552 0.861212 1.65517 95790 0 -1.51205 0.131173 1
+18.0587 56.2808 -0.00244141 1.60464 3.38296 1.42964 191702 0 3.12791 0.13084 0
+68.2674 12.2839 -0.197021 0.738976 0.796491 1.67142 127743 0 1.4801 0.130507 1
+-62.3071 5.90375 0.729492 1.87647 4.15705 1.56862 117975 0 1.47771 0.130285 0
+-1.09296 -42.3836 0.134521 2.07605 4.60593 1.72953 47498 0 2.97717 0.129623 0
+-63.7931 5.93703 0.657227 1.08934 2.47019 1.48224 117970 0 0.689323 0.129623 0
+48.8906 17.1422 0.229248 1.85552 3.811 1.54016 134702 0 -1.53331 0.129183 0
+68.0058 12.2714 0.147461 0.804308 0.900562 1.77748 127742 0 1.4659 0.128744 1
+-38.5965 23.8402 0.803711 1.98486 4.48169 1.58131 144257 0 -1.53074 0.128635 0
+34.3701 31.9403 0.308594 0.782995 0.779562 1.738 156185 0 -2.96794 0.128307 1
+-66.3997 -7.45344 0.791016 2.05087 4.48607 1.64229 98306 0 -1.59157 0.127761 0
+17.8553 66.8172 -0.0534668 1.66979 3.6223 1.37991 207145 0 -3.09833 0.127761 0
+53.8088 10.8484 0.145752 0.899189 0.986662 1.78095 125358 0 1.52807 0.126785 1
+53.7297 12.0944 0.22583 0.857436 0.949559 1.81077 127229 0 1.55799 0.126785 1
+26.0155 -8.41656 -0.281982 0.814982 0.893117 1.74736 97191 0 1.78238 0.126569 1
+-45.5903 10.3244 0.822266 0.843525 0.946319 1.7397 124579 0 1.76874 0.126569 1
+-15.9002 65.359 0.992188 0.805487 0.866062 1.6936 205168 0 -0.2812 0.126569 1
+-1.38016 51.5577 2.17578 0.840236 0.901442 1.79404 185089 0 0.92774 0.126353 1
+25.1045 35.2807 0.244507 0.825595 0.854302 1.66003 161304 0 -1.61083 0.126138 1
+-38.208 -9.39781 0.943359 0.830244 0.82822 1.8519 95586 0 1.59402 0.125923 1
+34.9719 30.4552 0.103271 0.715541 0.684276 1.3463 154315 0 -0.403938 0.125923 1
+0.477577 37.2744 0.679688 2.0954 4.55671 1.66165 164035 0 0.0273472 0.125386 0
+42.0542 46.5527 -0.115479 0.842496 0.8879 1.7431 177737 0 -2.03805 0.125279 1
+-73.1448 -7.24125 0.727539 1.9723 4.28482 1.65922 98753 0 -1.40402 0.125172 0
+54.643 9.85594 0.0362549 0.850348 0.989798 1.65517 123956 0 -1.52348 0.125065 1
+39.3163 16.5859 0.0985107 0.767663 0.775007 1.59566 133736 0 2.6877 0.125065 1
+41.7523 36.05 0.296875 0.906131 0.937122 1.7431 162292 0 1.11228 0.124639 1
+1.10031 28.3119 0.557129 1.9868 4.4338 1.56632 150933 0 -0.0373975 0.124532 0
+25.0817 34.8166 0.242188 0.816774 0.833087 1.42859 160368 0 -1.07728 0.124426 1
+-0.724998 14.2015 0.767578 0.92265 2.02006 1.62872 130335 0 -2.26589 0.124213 0
+37.8584 16.8553 0.736328 0.865005 1.33029 1.82943 134200 0 -3.07101 0.124213 2
+-1.33031 66.0491 0.666016 0.843525 0.818171 1.72532 206149 0 -0.998891 0.124213 1
+-0.477654 66.0108 1.21777 1.13398 1.24574 2.03193 206152 0 -0.528932 0.124001 1
+0.58625 42.6703 0.625977 2.18954 4.79408 1.90789 171991 0 0.0220894 0.123683 0
+-50.6356 -8.59328 0.769531 0.812201 0.888984 1.76278 96951 0 -1.51576 0.123577 1
+19.1569 11.9088 0.185425 0.816974 0.908734 1.6451 127121 0 1.56652 0.123577 1
+-0.258125 13.2862 0.71582 0.852219 1.30329 1.75763 128933 0 -1.60455 0.123577 1
+-2.05235 -18.0882 0.989258 2.21427 4.87434 1.89027 83063 0 1.37161 0.123471 0
+-70.5634 -6.91422 0.55957 1.87556 4.21016 1.52668 99229 0 -1.57913 0.12326 0
+-71.8464 -6.91867 0.524414 1.87464 4.13276 1.54129 99225 0 -1.54253 0.123049 0
+-62.286 -9.34438 0.703613 0.819771 0.832274 1.72027 95511 0 2.7595 0.122944 1
+34.7637 34.0131 0.0888672 0.862896 0.896832 1.44332 159462 0 1.70732 0.122733 1
+0.755707 49.4442 0.618652 2.072 4.51684 1.74992 181820 0 -3.11692 0.122628 0
+5.88594 -2.98281 0.825195 0.820772 1.39686 1.78269 105084 0 1.33132 0.122313 2
+34.3489 30.3081 0.0771484 0.706169 0.698112 1.46069 153845 0 -0.134445 0.122313 1
+-73.1205 -7.77875 0.757812 2.01022 4.42948 1.65356 97817 0 -1.52752 0.121373 0
+-15.8995 65.2191 1.00684 0.80039 0.847033 1.67223 204700 0 -0.0592422 0.121269 1
+24.4619 38.1853 0.194824 2.05889 4.65113 1.67714 165514 0 -3.13666 0.120853 0
+62.8316 14.6634 0.621094 0.855137 2.00336 1.81077 131002 0 1.65449 0.120646 2
+-46.641 10.6759 0.789062 0.845174 0.965219 1.72532 125044 0 1.60867 0.120232 1
+-58.2185 -11.6389 0.814453 0.782804 0.829029 1.7414 92248 0 -1.66039 0.120026 1
+27.5737 -10.6475 -0.194824 0.808639 0.887683 1.67468 93920 0 1.80264 0.120026 1
+62.3419 15.1384 0.668945 0.842496 2.00727 1.82052 131936 0 1.63714 0.120026 2
+35.8131 38.2547 0.250732 0.905688 1.21599 1.84919 165549 0 -0.456329 0.120026 1
+-45.3932 9.84047 0.803711 0.818771 0.920793 1.74395 123644 0 1.55524 0.119819 1
+17.7659 62.2438 -0.195312 1.95122 4.28482 1.51665 200593 0 3.08336 0.119717 0
+-5.33297 12.0609 0.72168 0.79552 0.857855 1.67796 127045 0 -0.824748 0.119203 1
+65.1498 3.94938 0.210205 2.0852 4.64659 1.70688 115565 0 1.67614 0.118998 0
+-18.9019 11.2552 0.821289 0.977491 1.01873 1.74992 126066 0 -1.80068 0.118998 1
+9.46047 46.5942 -0.207031 0.945568 1.62237 1.85461 177635 0 1.88842 0.118998 1
+-50.4716 -8.58047 0.787109 0.802346 0.976597 1.75249 96952 0 -1.54342 0.118793 1
+19.7159 59.9245 0.658203 0.872642 0.81538 1.91255 197323 0 -0.276552 0.118793 1
+-0.159843 59.318 0.810547 2.22837 5.12325 1.92849 196325 0 -0.0632099 0.118589 0
+0.88031 27.0577 0.545898 1.89027 4.28482 1.56441 149060 0 -0.0827351 0.117978 0
+54.1467 12.5461 0.250244 0.813987 0.882928 1.65517 128167 0 1.505 0.117775 1
+-37.5491 25.1148 0.681641 2.07403 4.54338 1.57015 146132 0 -0.0680095 0.117775 0
+43.3894 43.5989 0.508789 0.837574 0.995129 1.7431 173529 0 0.925648 0.117775 1
+24.7137 36.3391 0.161987 2.0599 4.15705 1.65679 162707 0 -0.0814411 0.117572 0
+-1.24921 51.4386 2.16211 0.857645 0.970417 1.86096 184622 0 1.1044 0.117572 1
+-3.31469 14.8249 1.03613 0.824991 0.882066 1.8646 131263 0 -1.11876 0.11737 1
+46.8527 45.3009 1.06152 2.2801 4.98993 1.95218 175880 0 -1.59944 0.11737 0
+1.85844 12.3342 0.30542 0.580737 0.658383 0.937809 127535 0 1.6026 0.116563 1
+24.8092 -43.3666 1.62207 2.09131 4.64205 1.59099 46175 0 0.723993 0.116362 0
+9.84813 35.6259 0.743164 0.817972 0.896613 1.7397 161724 0 -0.911676 0.116362 1
+-13.9667 39.8613 0.914062 2.00923 4.3993 1.64269 167734 0 2.9999 0.116362 0
+23.9303 -43.1034 1.60645 0.873921 1.3329 1.60739 46640 0 2.54764 0.115761 2
+-20.9131 12.8483 1.00781 0.733404 0.695391 1.71775 128400 0 0.130548 0.115761 1
+49.3445 16.5003 0.277832 1.00254 1.29916 1.727 133768 0 -1.6129 0.115761 1
+1.05797 48.7518 0.672852 2.10978 4.73361 1.78618 180885 0 0.094744 0.115761 0
+-3.07078 -15.1902 0.563477 2.03093 4.61493 1.61486 87272 0 1.53468 0.115561 0
+-57.7623 6.11742 0.700195 2.26346 4.94627 1.6443 118457 0 1.56585 0.115561 0
+35.7069 43.8025 0.349121 1.69774 3.55224 1.73207 173505 0 -3.10253 0.115561 0
+-72.0841 5.03594 0.675293 2.24804 5.19885 1.71189 116540 0 1.5347 0.115361 0
+-23.5914 30.6544 -0.144043 0.814584 1.53228 1.75163 154132 0 -0.662838 0.115361 1
+23.1602 -8.53484 -0.294678 0.852843 0.980419 1.78618 97182 0 2.92942 0.115162 1
+-64.5052 -6.65719 0.571289 2.05487 4.61493 1.6125 99716 0 -1.41539 0.115162 0
+-0.219604 -16.5232 0.977539 2.10258 4.62847 1.86733 85409 0 2.86503 0.114963 0
+-1.07531 -14.8225 0.814453 0.773873 0.795714 1.79142 87746 0 1.58901 0.114765 1
+-45.077 9.20141 0.799805 0.862475 0.953275 1.76106 122709 0 1.59504 0.114765 1
+58.7641 3.62969 0.185547 1.76106 3.70458 1.65114 115077 0 1.57934 0.114567 0
+57.374 -17.3981 0.0197754 2.38836 5.72098 1.88198 84185 0 -1.53252 0.114369 0
+19.4737 12.3734 0.249512 0.845587 0.947243 1.66816 127590 0 1.44107 0.114369 1
+22.9128 -8.72125 -0.330811 0.874775 1.01006 1.78618 96713 0 2.63855 0.114171 1
+-71.5321 6.16531 0.665039 2.143 4.77073 1.6339 118414 0 1.61476 0.114171 0
+58.3304 11.3916 -0.0371094 0.909455 0.978267 1.66571 126308 0 2.06947 0.113973 1
+-70.2694 6.59203 0.647461 1.81519 3.90518 1.51591 118886 0 -3.09332 0.113776 0
+23.9444 36.6627 0.115234 2.02796 4.68761 1.61723 163172 0 1.39392 0.113776 0
+59.4241 3.87547 0.146973 1.95695 4.02129 1.52967 115547 0 1.54745 0.11358 0
+-59.7041 -11.1288 0.797852 0.758905 0.745317 1.72027 93179 0 1.56467 0.113187 1
+23.9569 36.95 0.100098 2.0791 4.84113 1.63549 163640 0 2.99208 0.113187 0
+58.1203 11.3881 -0.00463867 0.919726 0.988108 1.68042 126307 0 -1.7083 0.112795 1
+48.857 -12.3941 1.28418 0.78204 0.877555 1.67142 91646 0 1.67007 0.1126 1
+1.09922 30.2678 0.616699 2.07808 4.58349 1.6439 153741 0 -0.0894294 0.1126 0
+9.86687 -11.415 0.515625 0.934438 1.05931 1.81785 92928 0 0.0523539 0.112405 1
+17.9484 10.1172 0.328613 0.892627 1.00318 1.68535 124310 0 1.31973 0.112405 1
+27.6695 -9.05922 -0.0246582 0.863107 0.949791 1.78705 96260 0 -1.51991 0.11221 1
+-48.1462 -3.14734 0.724609 2.08825 4.71515 1.54505 104915 0 -1.49555 0.11221 0
+-1.76062 -40.4759 0.0236816 0.810418 0.922594 1.66003 50304 0 2.42705 0.112016 1
+-31.4289 10.6109 0.685547 0.916812 0.977551 1.72447 125091 0 1.58533 0.112016 1
+0.84922 28.0084 0.546875 1.88935 4.19374 1.55186 150464 0 -0.051411 0.112016 0
+-71.468 -9.51218 1.06152 0.812201 0.862054 1.7397 95482 0 -1.79278 0.111628 1
+-62.5059 -9.33813 0.73291 0.820171 0.831461 1.74907 95510 0 -2.39566 0.111628 1
+-18.728 11.9979 0.711426 0.74641 0.75484 1.34992 127003 0 1.51786 0.111628 1

+ 500 - 0
src/detection/CenterPoint-master/results2/seq_0_frame_101.bin.txt

@@ -0,0 +1,500 @@
+-11.4115 6.29516 0.604492 2.0014 4.59245 1.49168 118602 0 1.56374 0.935817 0
+26.9859 13.9752 0.229004 2.11597 4.57008 1.66979 129954 0 0.927003 0.91383 0
+-17.3794 6.35204 0.724609 2.02598 4.59245 1.75505 118583 0 1.54822 0.90813 0
+30.3238 -6.27383 0.156982 2.10463 5.06356 1.8875 100480 0 -1.58365 0.906983 0
+-34.0212 -6.93844 0.556641 2.03491 4.61944 1.48514 99343 0 -1.58803 0.905657 0
+17.6789 33.4414 0.0377197 2.05889 4.67846 1.5185 158473 0 3.07261 0.904819 0
+-26.6819 -6.67703 0.559082 1.9975 4.43814 1.48804 99834 0 -1.62254 0.900705 0
+42.935 26.3624 0.334473 2.23491 5.40067 1.92661 148256 0 1.49191 0.897516 0
+3.27613 16.132 0.728027 2.21103 4.82697 1.80635 133156 0 -0.0260155 0.888759 0
+37.6042 -6.15906 0.0336914 1.97422 4.49045 1.53266 100503 0 -1.55524 0.888372 0
+29.3336 3.91782 0.861328 2.62054 6.05438 2.98837 115453 0 1.54208 0.88642 0
+0.274223 20.6963 0.651367 2.20671 4.87434 1.89767 139698 0 0.0188334 0.880283 0
+32.4153 43.9992 0.123535 1.99847 4.42948 1.5625 173963 0 3.12198 0.879665 0
+17.72 28.1003 0.177734 2.00629 4.289 1.75249 150517 0 3.11723 0.877162 0
+-22.9491 6.30469 0.582031 1.81785 3.98999 1.54355 118566 0 1.54513 0.873754 0
+52.6666 11.0221 0.347412 0.80529 0.912513 1.78269 125822 0 1.48808 0.872565 1
+12.0777 11.9798 0.552246 0.821574 0.967342 1.6576 127099 0 1.54759 0.870157 1
+52.9823 11.6601 0.346191 0.795908 0.894208 1.77228 126759 0 1.56675 0.868158 1
+-37.8694 5.99594 0.518555 1.98292 4.64659 1.64189 118051 0 1.5113 0.866019 0
+48.528 19.3766 0.283203 1.92191 4.26811 1.59215 137977 0 -1.58682 0.860017 0
+6.69109 16.3756 0.667969 2.08927 4.69677 1.72953 133634 0 0.00244618 0.859546 0
+24.1388 17.4137 0.244629 2.07707 4.51684 1.69112 135093 0 0.76613 0.859546 0
+16.8847 3.24297 0.546875 2.09131 4.45116 1.8829 114478 0 1.55681 0.85931 0
+48.4978 17.1522 0.408691 2.03491 4.86483 1.79492 134701 0 -1.5696 0.854278 0
+39.5076 41.3661 0.241943 1.93038 4.0966 1.67223 170241 0 -1.8235 0.845815 0
+17.1872 56.795 -0.146484 1.94837 4.46858 1.55186 192635 0 -3.10178 0.844794 0
+55.5694 -5.82938 0.184937 1.90882 4.21839 1.62832 101027 0 -1.56881 0.844537 0
+-46.307 -6.95195 0.591797 2.0014 4.35655 1.61447 99305 0 -1.57631 0.832917 0
+43.0614 -6.15015 0.192261 2.01415 4.38643 1.78008 100520 0 -1.62366 0.831006 0
+-49.1681 -10.7925 0.669922 0.744226 0.786443 1.6796 93680 0 -1.61347 0.829768 1
+27.6738 -9.78062 0.158203 0.895137 0.985218 1.77314 95324 0 -1.56596 0.826432 1
+-60.3577 -11.5769 0.688477 0.769727 0.77274 1.75677 92241 0 1.30898 0.824462 1
+17.4803 49.4694 0.00415039 1.89767 4.01736 1.59488 181872 0 3.05247 0.822189 0
+-19.4332 10.9743 0.776367 0.895246 0.879056 1.76192 125597 0 -1.52143 0.821044 1
+18.3822 11.7219 0.234375 0.71694 0.801172 1.5875 126651 0 1.56877 0.809577 1
+-49.3939 -10.0802 0.591797 0.77048 0.792225 1.62753 94615 0 -1.61639 0.807461 1
+-70.8828 2.35617 0.635254 2.01611 4.48607 1.54204 112800 0 1.59408 0.807309 0
+38.2887 12.245 0.407715 0.857018 0.879486 1.78967 127649 0 1.45931 0.804406 1
+39.8226 3.57488 0.280762 2.11493 4.73361 1.53565 115018 0 1.57581 0.801781 0
+-40.5708 -7.09969 0.505859 2.02598 4.49484 1.51037 98855 0 -1.63757 0.799129 0
+29.8981 44.0506 0.285645 1.97037 4.23077 1.77488 173955 0 3.12095 0.798344 0
+-60.2444 -12.363 0.666992 0.769164 0.781086 1.75505 91305 0 1.58418 0.797872 1
+48.4065 14.5645 0.418945 2.1036 5.02907 1.81431 130957 0 -1.61636 0.797083 0
+-32.2207 10.3795 0.592773 0.835735 0.94309 1.70605 124621 0 1.52744 0.784979 1
+21.8823 6.03281 0.397461 2.35363 5.47502 2.0599 118238 0 1.54948 0.784814 0
+-34.0198 -3.54922 0.63916 2.09029 4.54782 1.51739 104023 0 -1.57119 0.78316 0
+14.9452 9.64617 0.38623 0.760388 0.933013 1.57591 123832 0 1.54381 0.770465 1
+3.30281 23.2463 0.484619 2.05387 4.45986 1.57745 143452 0 -0.021905 0.770292 0
+50.9237 -9.77594 0.195068 0.813789 0.909178 1.6576 95397 0 1.50577 0.770119 1
+-44.6383 5.85571 1.33887 2.89642 7.20381 3.15943 118030 0 1.58146 0.7696 0
+-46.3041 9.19922 0.689453 0.873068 0.966634 1.77921 122705 0 1.60904 0.768386 1
+38.472 11.4338 0.211182 0.788558 0.820171 1.62832 126246 0 1.4743 0.763484 1
+51.6491 3.74375 0.344482 1.98195 4.4036 1.49825 115055 0 1.5642 0.761184 0
+16.7466 10.1103 0.408203 0.844761 0.942169 1.67796 124306 0 0.990436 0.759939 1
+-59.1589 -11.6498 0.706055 0.83329 0.860792 1.74907 92245 0 -1.67883 0.758868 1
+-48.1922 -3.66078 0.539062 2.03292 4.4079 1.52036 103979 0 -1.58237 0.756897 0
+34.9517 44.3187 0.38916 1.98971 4.44681 1.84738 174439 0 -3.13766 0.745954 0
+48.4305 26.7784 0.380371 2.00629 4.50803 1.68535 148741 0 -1.65016 0.744471 0
+-58.4377 5.82867 0.493164 2.01218 4.39501 1.57437 117987 0 1.56429 0.735831 0
+18.7528 12.5438 0.209106 0.792612 0.899903 1.60739 128056 0 -0.177079 0.732592 1
+-47.8684 9.84063 0.614746 0.814982 0.945857 1.70355 123636 0 1.60928 0.728845 1
+52.8447 12.3969 0.381836 0.852427 0.951415 1.78182 127695 0 1.61533 0.719482 1
+-23.1958 -0.266251 0.900391 2.29238 4.82697 1.70023 109205 0 -1.5708 0.716219 0
+-64.9046 -11.7637 0.648438 0.753183 0.756685 1.67142 92227 0 -1.54596 0.715523 1
+-0.165466 42.1072 0.689453 2.11493 4.68761 1.86096 171053 0 0.047722 0.708414 0
+17.0224 72.1498 -0.0915527 2.03292 4.3993 1.69029 215099 0 3.10993 0.695752 0
+-46.5764 9.96891 0.683105 0.800976 0.933925 1.727 124108 0 1.53276 0.688783 1
+-11.6534 0.0137482 0.780273 2.0791 4.53452 1.5334 109709 0 -1.59847 0.664323 0
+49.068 24.275 0.219971 1.94457 4.34805 1.62039 144999 0 -1.53171 0.661048 0
+16.5447 10.2995 0.387695 0.771232 0.849933 1.58054 124773 0 1.11663 0.656328 1
+40.4172 36.2645 0.380859 0.768038 0.756315 1.65922 162756 0 -3.12319 0.655226 1
+18.6484 10.8491 0.251709 0.772174 0.87179 1.59566 125248 0 1.56999 0.643889 1
+-47.8952 10.6099 0.659668 0.838187 0.918997 1.69608 125040 0 1.66235 0.640973 1
+-51.3768 5.87664 0.785156 2.31262 5.34819 2.11287 118009 0 1.56266 0.635675 0
+16.5806 10.1861 0.409668 0.822176 0.919221 1.66165 124305 0 0.140011 0.634657 1
+16.0417 11.1583 0.368652 0.868603 0.920793 1.63549 125708 0 1.29027 0.633524 1
+35.1416 38.2041 0.178711 0.838392 1.5763 1.77661 165547 0 -0.341605 0.629775 2
+38.9828 31.8592 0.201904 1.97037 4.62395 1.50779 156199 0 -0.0948709 0.624523 0
+69.6177 -5.54688 0.121826 1.96653 4.35229 1.48478 101539 0 -1.61283 0.622058 0
+54.4952 9.80312 0.284912 0.991734 1.13481 1.78792 123956 0 -1.59055 0.620277 1
+17.062 67.3975 -0.0686035 1.90975 4.25563 1.51221 208079 0 -3.12918 0.61757 0
+18.7406 12.4541 0.225098 0.758164 0.875202 1.598 127588 0 0.676629 0.614047 1
+59.026 3.66594 0.302734 2.01907 4.54338 1.54581 115078 0 1.55557 0.607838 0
+-66.3769 -9.54218 0.647461 0.812201 0.811012 1.80812 95498 0 -1.45491 0.60054 1
+-53.0474 10.7494 0.738281 0.808639 0.890722 1.70438 125024 0 1.33168 0.597608 1
+1.13812 -4.05648 1.69141 3.28528 13.0699 3.55224 103665 0 -1.71235 0.575409 0
+23.4541 36.6825 0.14209 1.99457 4.52125 1.54959 163171 0 3.08007 0.569673 0
+-58.3128 -12.9336 0.720703 0.810616 0.854719 1.74565 90375 0 1.57105 0.551453 1
+35.2786 37.9781 0.183228 0.830244 1.71607 1.79667 165080 0 -0.229392 0.539105 2
+-43.3098 -24.1298 1.00781 0.758164 0.781086 1.70938 74042 0 -2.30241 0.531574 1
+-51.1031 -7.63515 0.716797 0.860582 0.977312 1.76365 98354 0 1.76215 0.526586 1
+24.8325 35.8911 0.305664 0.86712 0.815778 1.73207 162239 0 -1.69416 0.522324 1
+-65.4223 -7.19328 0.448242 1.96845 4.43814 1.47142 98777 0 -1.54636 0.508544 0
+-0.202965 48.1595 0.712891 2.11184 4.70136 1.809 179945 0 0.00533976 0.50177 0
+-32.3825 10.3713 0.573242 0.852427 1.04978 1.69608 124620 0 1.58714 0.487795 1
+24.9122 -34.0628 -1.30762 0.808442 0.899463 1.74055 59747 0 -3.13698 0.479747 1
+-60.25 -12.5211 0.659668 0.788944 0.798048 1.74736 90837 0 1.5381 0.475362 1
+-63.2737 10.9389 0.604492 0.786252 0.798048 1.6837 125460 0 1.08356 0.471588 1
+-63.2777 10.815 0.639648 0.801368 0.82822 1.67305 124992 0 1.45363 0.468243 1
+61.8209 14.9386 0.77832 0.803326 1.81077 1.78356 131467 0 1.48946 0.464294 2
+18.3921 11.877 0.244141 0.776902 0.87179 1.59371 127119 0 1.5173 0.455562 1
+26.3754 -10.6325 -0.0690918 0.7901 0.870939 1.48007 93916 0 1.69999 0.439868 1
+40.2944 36.268 0.385254 0.815977 0.807456 1.68288 162755 0 -2.99738 0.435301 1
+-72.4594 -4.27891 0.447266 2.04088 4.53895 1.61762 102967 0 -1.5697 0.430686 0
+-48.0373 9.82578 0.637207 0.832274 0.96876 1.71189 123635 0 1.60213 0.428233 1
+40.4168 36.1206 0.366211 0.812399 0.835531 1.66653 162288 0 -3.06539 0.422624 1
+66.7428 3.73656 0.395996 2.06292 4.52125 1.69112 115102 0 1.56572 0.422445 0
+16.6987 10.2883 0.390869 0.832477 0.900782 1.62475 124774 0 1.00052 0.415314 1
+-58.3231 -12.741 0.731445 0.839416 0.883575 1.78095 90843 0 1.24386 0.4133 1
+-72.487 5.59969 0.525391 2.0171 4.41221 1.67305 117475 0 1.56365 0.412057 0
+-0.162346 73.7711 0.396484 2.14195 4.72437 1.7431 217385 0 -0.0293458 0.406921 0
+-66.3941 5.65282 0.409424 1.93982 4.38643 1.54581 117494 0 1.57177 0.401923 0
+52.7462 12.5121 0.40918 0.890016 0.952345 1.78705 128162 0 1.57679 0.399343 1
+26.378 -10.4856 -0.0380859 0.778041 0.840031 1.4061 94384 0 1.8819 0.39788 1
+-72.1877 -4.04047 0.406738 1.89674 4.15299 1.56977 103436 0 -1.51714 0.392395 0
+52.828 11.0167 0.337402 0.814783 0.93758 1.79054 125823 0 1.51359 0.391057 1
+-58.1694 -12.735 0.724609 0.833697 0.878198 1.77055 90844 0 1.51969 0.38914 1
+58.4262 47.8669 0.299805 0.823985 0.855972 1.74395 179660 0 0.0801302 0.379608 1
+-59.2756 -11.4728 0.666992 0.792418 0.819771 1.76451 92712 0 -1.62316 0.379378 1
+-63.4173 10.7981 0.654297 0.837165 0.861212 1.6994 124991 0 1.25448 0.375592 1
+-60.4992 -11.4562 0.675781 0.74459 0.736634 1.71859 92708 0 -1.27549 0.371251 1
+36.6995 9.34023 0.883789 0.777091 0.81538 1.69277 123432 0 1.50514 0.367497 1
+-63.4003 10.9306 0.645508 0.807061 0.840852 1.69691 125459 0 0.776375 0.363421 1
+-53.0339 10.9202 0.74707 0.830244 0.906297 1.71691 125492 0 0.783586 0.35488 1
+-72.138 -7.55375 0.413818 1.96461 4.49923 1.63549 98288 0 -1.57694 0.35354 0
+-19.5703 10.8158 0.739258 0.785292 0.773873 1.68947 125128 0 -1.55555 0.352982 1
+16.803 9.87422 0.404297 0.802346 0.898805 1.6837 123838 0 0.806275 0.350755 1
+-15.5612 -14.8325 0.153564 1.94932 4.3736 1.4779 87701 0 1.45464 0.342462 0
+12.2589 -20.5475 0.571289 0.851387 0.771232 1.69277 79364 0 0.0531998 0.341802 1
+-48.0359 10.5212 0.651367 0.825595 0.939872 1.70438 124571 0 1.60659 0.341143 1
+-60.3011 -12.0409 0.67627 0.79358 0.794161 1.80195 91773 0 1.51414 0.331228 1
+-43.3237 -23.9505 1.01953 0.799804 0.9114 1.7431 74510 0 -2.65144 0.33112 1
+-46.4866 9.36 0.679688 0.862054 0.97018 1.77141 123172 0 1.56335 0.331012 1
+-60.4872 -11.5883 0.686523 0.783377 0.779562 1.75591 92240 0 1.52637 0.328314 1
+21.675 -8.39453 0.022583 0.823783 0.954673 1.67305 97177 0 3.04545 0.328314 1
+-65.1657 -7.45891 0.430176 1.80107 3.88616 1.41851 98310 0 -1.5777 0.325521 0
+18.5447 10.913 0.283203 0.762619 0.839211 1.58247 125715 0 1.54761 0.324235 1
+18.2222 11.7172 0.247192 0.780133 0.851595 1.64872 126650 0 1.58424 0.324128 1
+52.667 10.8597 0.356934 0.885356 0.972077 1.78792 125354 0 1.53342 0.321673 1
+-43.173 -24.1417 1.00195 0.780323 0.808245 1.72111 74043 0 -2.34976 0.317426 1
+27.6684 -9.94187 0.145874 0.870089 0.950951 1.76106 94856 0 -1.5814 0.315209 1
+61.8378 15.098 0.791016 0.824387 1.92285 1.80547 131935 0 1.40573 0.307774 2
+22.6211 -51.9716 -0.852539 0.763737 1.38532 1.80371 33532 0 0.0770569 0.306631 2
+47.9516 26.7113 0.39502 1.71859 3.72635 1.61604 148739 0 -1.65543 0.30054 0
+-47.6071 9.81547 0.616211 0.837983 0.949095 1.72363 123637 0 1.5827 0.297674 1
+-60.1221 -11.6436 0.696289 0.756869 0.785676 1.76451 92242 0 -1.65172 0.297062 1
+-66.207 -9.51391 0.67627 0.827411 0.862054 1.80547 95499 0 1.73357 0.294215 1
+28.3355 6.56532 0.036377 1.91816 4.35229 1.4884 119194 0 1.47038 0.294012 0
+58.4775 3.45281 0.319336 1.65679 3.62938 1.49679 114608 0 1.59154 0.291989 0
+67.0043 -5.57844 0.131592 0.81359 1.93888 1.38396 101531 0 -2.8817 0.291384 0
+-48.0494 9.98836 0.643555 0.852635 0.991249 1.71859 124103 0 1.5979 0.289271 1
+18.8949 12.5609 0.240601 0.854719 0.9631 1.65679 128057 0 -0.450835 0.286968 1
+-49.3992 -9.88523 0.587891 0.832477 0.863739 1.65598 95083 0 -1.59868 0.283781 1
+-72.7484 5.86047 0.5625 2.21968 4.98506 1.79492 117942 0 1.55885 0.28279 0
+38.3722 11.5745 0.275879 0.81022 0.826604 1.66734 126713 0 1.51811 0.280715 1
+12.0631 11.8252 0.536621 0.852427 0.983776 1.6576 126631 0 1.53447 0.280124 1
+69.273 -16.4433 0.0664062 2.00923 4.52125 1.58633 85626 0 -1.53632 0.278453 0
+53.133 11.6734 0.342285 0.870939 0.952112 1.76451 126760 0 1.63097 0.277375 1
+-72.213 5.86086 0.552246 1.93793 4.18556 1.61683 117944 0 1.63605 0.277277 0
+45.8333 -8.27148 0.790039 0.807258 0.818971 1.81519 97721 0 -1.71322 0.274254 1
+-58.1687 -13.1694 0.740234 0.833494 0.871364 1.80283 89908 0 1.8992 0.270384 1
+21.807 -8.2682 -0.0351562 0.812796 0.881635 1.64269 97646 0 3.11993 0.269903 1
+25.048 -33.8722 -1.30176 0.831868 1.20535 1.7542 60216 0 -3.06529 0.268174 2
+-53.0361 10.513 0.71582 0.873707 0.97018 1.72447 124556 0 1.48556 0.265309 1
+-72.4587 -3.75945 0.419434 2.024 4.51684 1.59917 103903 0 -1.49983 0.265119 0
+-58.3306 -13.1714 0.740234 0.840031 0.881635 1.79142 89907 0 2.31395 0.262464 1
+-51.2584 -7.75578 0.698242 0.899738 1.34696 1.80812 97885 0 1.67926 0.262275 1
+54.3019 9.97219 0.290039 0.959287 1.3655 1.78182 124423 0 -1.5965 0.262086 1
+17.1488 62.2562 -0.210205 2.03391 4.53452 1.59605 200591 0 3.13376 0.260578 0
+26.195 -10.6442 -0.0629883 0.823783 0.950719 1.60112 93915 0 -1.68531 0.259075 1
+25.0495 35.9262 0.307373 0.909899 0.893336 1.77748 162240 0 -1.7033 0.2587 1
+-39.1786 -9.36875 0.838867 0.809232 0.820973 1.76537 95583 0 1.56877 0.256273 1
+-47.5034 5.96359 1.47559 3.08624 12.3985 3.3962 118021 0 1.55914 0.255715 0
+-63.5184 5.87563 0.561523 1.95122 4.18147 1.62753 117971 0 1.53557 0.255158 0
+-65.718 -7.4625 0.460938 2.09131 4.78473 1.50228 98308 0 -1.59463 0.253676 0
+24.8984 -33.8841 -1.33105 0.837574 1.18579 1.75591 60215 0 3.08217 0.250179 1
+27.6787 -9.56445 0.19397 0.955314 1.06138 1.75334 95792 0 -1.55894 0.247987 1
+-0.46328 58.4217 0.506836 1.9579 4.289 1.59332 194920 0 3.08801 0.247987 0
+-32.2218 10.2211 0.597656 0.870726 0.965926 1.7144 124153 0 1.49401 0.247805 1
+-19.3798 11.2704 0.671387 0.777471 0.786828 1.72195 126065 0 -1.51695 0.247805 1
+-0.40937 48.7482 0.69043 2.19704 4.85534 1.83749 180880 0 -0.0727576 0.247623 0
+41.1928 36.2583 0.355225 0.815579 0.796102 1.61328 162758 0 1.93777 0.246171 1
+68.9981 -16.1987 0.026123 1.83301 4.08062 1.52445 86093 0 -1.57496 0.245266 0
+-47.5998 9.99672 0.638672 0.837778 0.961221 1.73122 124105 0 1.61096 0.243283 1
+69.916 -5.80484 0.155029 2.23818 4.8982 1.54845 101072 0 -1.59072 0.240953 0
+-21.6747 12.6913 0.90918 0.752264 0.746774 1.74565 127930 0 1.45783 0.239705 1
+8.71617 50.8206 0.441406 0.814584 1.07731 1.64149 183717 0 -1.46756 0.239349 1
+26.1859 -10.4802 -0.0219727 0.785292 0.884006 1.47358 94383 0 -1.483 0.238994 1
+18.5823 11.7027 0.27002 0.742049 0.867755 1.62713 126652 0 1.6601 0.238994 1
+50.8286 -9.94797 0.209106 0.818171 0.893554 1.64029 94928 0 1.47436 0.238284 1
+25.0099 35.7909 0.275879 0.870301 0.849933 1.71105 161772 0 -1.6865 0.236516 1
+-21.8309 12.7031 0.955078 0.783569 0.76411 1.78967 127929 0 1.34975 0.235812 1
+-24.5585 30.8057 -0.218018 0.806864 1.62792 1.7542 154597 0 -2.27956 0.234757 2
+-66.215 -9.66484 0.702637 0.825797 0.856181 1.80019 95031 0 1.59169 0.233357 1
+-72.2138 5.34032 0.540527 1.7958 3.86723 1.55679 117008 0 1.55728 0.232833 0
+-0.178513 58.7125 0.500977 1.99555 4.33956 1.6192 195389 0 -3.13191 0.232136 0
+-53.1653 10.915 0.747559 0.83696 0.91229 1.71859 125491 0 0.66685 0.231962 1
+-64.9837 -11.8781 0.650391 0.75872 0.767101 1.66246 91758 0 -1.56622 0.231093 1
+-0.485313 55.8411 0.533691 2.00923 4.43814 1.60229 191176 0 -0.0110506 0.230746 0
+-43.3216 -24.3769 1.01074 0.83492 1.07731 1.74736 73574 0 -2.41622 0.230227 1
+27.505 -9.7782 0.180176 0.940503 1.01401 1.78356 95323 0 -1.58362 0.22919 1
+22.7803 -51.9605 -0.861328 0.761503 1.52668 1.79229 33533 0 0.0107871 0.228845 2
+18.4067 11.4734 0.274414 0.768226 0.820973 1.5875 126183 0 1.58647 0.228845 1
+-71.5687 -7.78484 0.503906 1.98486 4.46422 1.66816 97822 0 -1.53988 0.227813 0
+17.4278 62.5395 0.0115967 2.05187 4.59694 1.6796 201060 0 3.12375 0.227641 0
+23.4784 35.7623 0.131592 1.97615 4.32687 1.58672 161767 0 -0.0438272 0.226099 0
+24.8841 -34.2952 -1.30957 0.861002 1.43664 1.77141 59279 0 1.92332 0.225758 1
+-0.384689 47.5719 0.712402 2.11493 4.61043 1.84468 179008 0 0.0383958 0.225587 0
+56.142 -17.121 0.198608 2.22619 5.02416 1.79054 84649 0 1.54633 0.225076 0
+-51.4061 -8.86578 0.454102 0.798828 0.814584 1.68864 96481 0 -1.52986 0.224225 1
+0.113281 74.3524 0.266602 2.35708 5.68201 1.85371 218322 0 0.107521 0.223377 0
+67.2979 3.93266 0.433838 2.30923 5.32214 1.80723 115572 0 1.53707 0.220846 0
+-24.0691 -0.225777 0.916016 2.22185 5.06851 1.6837 109202 0 -1.57873 0.218503 0
+-46.7928 9.84406 0.660645 0.866273 1.28293 1.75677 123639 0 1.58723 0.21767 1
+27.8645 -9.77234 0.154175 0.914018 1.10366 1.79755 95325 0 -1.58765 0.217337 1
+-59.1105 -11.9077 0.691895 0.826806 0.839211 1.74565 91777 0 -1.64343 0.21684 1
+-66.0091 -7.19437 0.477539 2.23491 5.19378 1.61171 98775 0 -1.59089 0.21684 0
+-0.168518 57.7716 0.45459 2.0599 4.59694 1.60464 193985 0 -0.494672 0.216177 0
+-1.45609 26.7878 0.541992 0.793192 0.88855 1.66409 148585 0 0.127686 0.215681 1
+52.9705 11.4881 0.330566 0.8262 0.926657 1.78443 126291 0 1.4973 0.215021 1
+-72.1486 -7.25227 0.405762 1.62872 3.19355 1.62039 98756 0 -1.5863 0.214856 0
+25.0462 -34.2991 -1.29395 0.857645 1.43769 1.77921 59280 0 -2.06995 0.214362 1
+-0.44812 57.112 0.435547 2.05487 4.61043 1.59566 193048 0 0.00348952 0.213213 0
+51.4212 3.48344 0.328613 1.79142 4.06869 1.47827 114586 0 1.56614 0.212886 0
+67.0198 3.44828 0.404785 2.0995 4.72899 1.68617 114635 0 1.58671 0.211091 0
+36.8363 9.22516 0.87793 0.850556 0.940561 1.72195 122965 0 1.45871 0.210766 1
+41.2069 36.0627 0.356445 0.839826 0.802346 1.61525 162290 0 2.84835 0.210766 1
+-0.195 57.4517 0.406006 2.02994 4.59694 1.598 193517 0 0.0456517 0.209793 0
+68.7066 -5.57844 0.0638428 1.81519 3.909 1.49022 101536 0 -1.58188 0.209631 0
+58.4134 48.056 0.318359 0.82822 1.2151 1.77748 180128 0 -0.00906915 0.209146 2
+54.2628 -16.4266 -0.0710449 0.802738 1.77574 1.99847 85579 0 -1.83755 0.20834 2
+-71.8618 -7.25671 0.390625 2.00531 4.57455 1.63429 98757 0 -1.57083 0.207857 0
+52.9362 11.9466 0.354736 0.872855 0.94125 1.78618 127227 0 1.5865 0.206894 1
+8.06305 -2.76289 0.774414 0.821975 1.06009 1.71022 105559 0 1.48919 0.206574 2
+-0.417496 74.3559 0.319336 2.33645 5.63229 1.83928 218320 0 -0.0350254 0.205456 0
+52.4575 11.0172 0.323486 0.853676 0.996101 1.79755 125821 0 1.51213 0.204978 1
+-0.16758 42.982 0.675293 2.25464 4.98019 1.92379 172457 0 -0.0395807 0.201971 0
+17.0681 66.8122 -0.101807 1.6576 3.65071 1.48695 207143 0 -3.08978 0.201971 0
+2.14031 -28.0001 0.30127 0.779181 0.94102 1.68288 68568 0 1.10206 0.200871 1
+2.34109 -27.9997 0.341064 0.793774 0.942169 1.70272 68569 0 0.122728 0.200871 1
+65.7963 3.73 0.379639 2.04686 4.48607 1.687 115099 0 1.57031 0.200871 0
+50.9539 -9.5775 0.193115 0.862264 0.959346 1.66897 95865 0 1.49592 0.200714 1
+18.8114 59.9043 0.370605 0.902488 0.816974 1.8528 197320 0 2.78826 0.200714 1
+22.5959 -52.2717 -0.69043 0.749514 1.67305 1.80459 33064 0 0.121418 0.200245 2
+-24.7175 30.7916 -0.217041 0.821574 1.6196 1.77921 154596 0 -2.05739 0.199308 2
+-70.5475 -3.71523 0.356934 0.757979 1.26443 1.42059 103909 0 -0.905015 0.198375 2
+-24.5677 30.6578 -0.254639 0.823985 1.59527 1.7363 154129 0 -2.92183 0.198375 2
+-49.3947 -10.2961 0.603516 0.774818 0.793386 1.64792 94147 0 -1.64962 0.196826 1
+-51.0512 6.10656 0.849609 2.68399 6.41972 2.30023 118478 0 1.57671 0.196826 0
+15.5541 -6.87297 0.378418 0.848689 0.994643 1.71691 99498 0 -1.76496 0.19544 1
+28.3197 6.82265 0.0404053 1.63749 3.55224 1.49277 119662 0 1.53117 0.19544 0
+28.0161 6.84836 0.0561523 1.9579 4.51243 1.50154 119661 0 1.52022 0.19498 0
+-46.7825 9.97774 0.648926 0.843113 1.20815 1.7363 124107 0 1.56064 0.194368 1
+18.5144 12.3734 0.250977 0.778041 0.887466 1.60896 127587 0 1.06433 0.194062 1
+-66.4063 6.14797 0.463623 2.12529 4.82697 1.61999 118430 0 1.58666 0.193757 0
+-71.9229 5.61594 0.556641 1.69691 3.64359 1.51665 117477 0 1.61548 0.192691 0
+19.7153 -7.76484 0.261719 0.932159 1.0373 1.84558 98107 0 -3.07771 0.191026 1
+-73.1155 -7.55766 0.646973 1.98002 4.39072 1.66328 98285 0 -1.53454 0.190423 0
+-59.1123 -14.2928 0.597656 0.812399 0.818571 1.65356 88501 0 3.08349 0.190122 1
+18.9362 59.9038 0.40332 0.877984 0.809825 1.83928 197321 0 3.05845 0.189821 1
+-70.3542 2.33422 0.568848 1.80635 3.85969 1.4664 112802 0 1.59639 0.188026 0
+-32.1907 10.6045 0.572266 0.939356 1.02722 1.72785 125089 0 1.53497 0.18743 1
+26.5959 -10.6486 -0.0251465 0.808837 0.886167 1.45819 93917 0 1.81261 0.186836 1
+38.423 12.093 0.359619 0.876699 0.883575 1.78269 127182 0 1.51942 0.186539 1
+-72.4816 6.16008 0.557617 2.18954 4.83641 1.7448 118411 0 1.63526 0.186391 0
+-64.8708 -11.4438 0.638184 0.744953 0.752264 1.687 92695 0 -1.58802 0.185062 1
+58.6086 47.8613 0.302734 0.843731 1.20564 1.76537 179661 0 -0.22588 0.185062 1
+-66.5797 -9.51524 0.633301 0.842907 0.88487 1.82586 95497 0 -1.4782 0.184767 1
+-0.208435 55.5297 0.517578 1.98874 4.31 1.60621 190709 0 -0.00438167 0.18462 0
+41.3399 36.0381 0.394043 0.808047 0.792999 1.52892 162291 0 2.67756 0.183887 1
+18.9093 12.4247 0.270996 0.817173 0.916308 1.6451 127589 0 0.106054 0.182134 1
+25.4819 34.7947 -0.0253906 0.669731 0.583295 1.08205 160369 0 -0.601173 0.181553 1
+48.2381 14.7728 0.429932 1.92285 3.7666 1.77921 131424 0 -1.59516 0.181263 0
+-1.47719 26.9419 0.532227 0.791451 0.898585 1.66571 149053 0 0.15853 0.181263 1
+23.6022 -10.4112 0.0428467 0.917036 1.01006 1.809 94375 0 1.60373 0.178956 1
+19.6852 -7.60461 0.25708 0.931704 1.03351 1.83211 98575 0 -2.99682 0.178383 1
+-65.8184 5.86195 0.463135 1.89212 4.11665 1.55413 117964 0 1.55049 0.178383 0
+21.3175 -8.4832 0.0688477 0.863739 1.00539 1.7397 97176 0 2.79268 0.178097 1
+15.0488 9.5786 0.365967 0.798828 0.972314 1.62078 123365 0 1.50217 0.177525 1
+-72.2006 -4.54562 0.426025 1.98486 4.32264 1.59488 102500 0 -1.56793 0.175962 0
+40.4341 36.5179 0.376221 0.861002 0.938725 1.71022 163224 0 2.57367 0.175821 1
+34.787 43.7964 0.389648 1.63031 3.46318 1.72111 173502 0 -3.09188 0.175679 0
+68.6984 -16.4334 0.0561523 1.64591 3.5871 1.42268 85624 0 1.57046 0.174973 0
+-60.0837 -11.4123 0.720215 0.795714 0.811408 1.76796 92710 0 -1.63395 0.174129 1
+-0.746246 56.1405 0.522461 2.01218 4.4036 1.60699 191643 0 0.0570589 0.173428 0
+16.983 10.0686 0.39917 0.810616 0.92327 1.65841 124307 0 0.802573 0.173288 1
+34.0518 30.3028 0.192139 0.685613 0.607716 1.59371 153844 0 0.0177601 0.17245 1
+40.7353 36.2977 0.396729 0.83696 0.863739 1.67796 162757 0 -2.91663 0.17245 1
+-0.764061 58.1156 0.535156 1.90789 4.15299 1.57092 194451 0 2.92941 0.172311 0
+9.74375 -10.0377 0.603516 0.779562 0.81538 1.80195 94800 0 -3.04976 0.172172 1
+19.6592 60.6803 0.358887 0.848068 0.855345 1.89581 198259 0 3.12983 0.171894 1
+-0.749687 55.5567 0.578125 2.08927 4.59694 1.65841 190707 0 -0.0962658 0.171755 0
+-2.71391 -4.3632 1.47656 2.7236 6.28327 2.53867 103185 0 -1.64678 0.1712 0
+25.3622 -33.8122 -1.19043 0.847033 1.79667 1.79492 60217 0 -2.52058 0.171061 2
+15.7584 -6.85859 0.368896 0.846826 1.00489 1.72616 99499 0 -1.71354 0.170508 1
+-15.8471 14.8241 0.795898 0.837369 1.85733 1.79229 131224 0 1.49214 0.169956 2
+19.6386 13.0062 0.365723 0.841263 0.858693 1.66897 128527 0 -0.950121 0.169406 1
+-70.5921 5.52914 0.627441 0.8262 1.71607 1.58363 117481 0 0.110825 0.169131 2
+-1.66563 26.958 0.602539 0.792805 0.877341 1.69195 149052 0 0.302246 0.169131 1
+-21.303 12.6361 0.72168 0.665981 0.662252 1.54091 127931 0 1.6125 0.168857 1
+59.6091 3.91692 0.346191 2.35133 5.73217 1.73715 115548 0 1.52944 0.168173 0
+38.5355 11.1562 0.237549 0.831258 0.861423 1.62673 125778 0 1.57426 0.167219 1
+-58.725 6.11687 0.554688 2.33417 5.16848 1.70355 118454 0 1.58583 0.167083 0
+-38.8781 23.8673 0.451904 1.98971 4.52567 1.66165 144256 0 1.59285 0.167083 0
+-66.5916 -9.68625 0.654297 0.824991 0.867755 1.80107 95029 0 -1.4177 0.166947 1
+15.545 -6.65227 0.355469 0.857855 0.979223 1.67878 99966 0 -1.86451 0.166947 1
+-43.5722 -24.1417 0.989258 0.864794 1.12337 1.76537 74041 0 -2.8786 0.166134 1
+-43.1337 -24.3866 1.01758 0.850763 1.07678 1.77401 73575 0 -2.55699 0.165863 1
+26.4089 -10.9648 0.0726318 0.848896 1.14455 1.67305 93448 0 2.50163 0.165863 1
+-46.2731 8.9225 0.679199 0.915693 0.990282 1.79492 122237 0 1.64151 0.165593 1
+52.7919 11.6781 0.34668 0.800195 0.885302 1.79142 126758 0 1.50614 0.165593 1
+20.5944 -13.3262 -0.501953 0.842701 0.921693 1.63749 90154 0 -1.9881 0.165323 1
+28.6102 6.3261 0.0507812 2.00042 4.4338 1.55679 118727 0 1.43181 0.165323 0
+-65.4234 -6.67969 0.435059 2.11493 4.72437 1.56785 99713 0 -1.43674 0.164651 0
+48.235 16.9291 0.424316 1.79492 4.25979 1.69691 134232 0 -1.55171 0.164651 0
+-47.611 10.4377 0.655273 0.859532 1.02446 1.75078 124573 0 1.64973 0.164516 1
+-46.3797 9.99742 0.714355 0.834308 0.974929 1.75249 124109 0 1.48559 0.16398 1
+-70.7966 5.30688 0.615234 0.828422 1.73715 1.56556 117012 0 -0.126279 0.163445 2
+-47.8397 9.55485 0.588867 0.865428 0.939872 1.65598 123168 0 1.61329 0.163445 1
+68.9894 -15.898 -0.00170898 1.89767 4.20605 1.56022 86561 0 -2.18085 0.162646 0
+-0.220467 56.4839 0.448486 2.01316 4.38643 1.58324 192113 0 -0.0388531 0.16238 0
+69.2653 -15.9097 0.012085 2.05889 4.61043 1.63111 86562 0 -1.57617 0.162115 0
+58.6398 48.087 0.308105 0.830244 1.75677 1.78182 180129 0 -0.696398 0.161849 2
+9.37125 -11.635 0.558594 0.787981 0.773117 1.79667 92459 0 0.0138639 0.16132 1
+-39.1953 -9.23804 0.84082 0.796297 0.817373 1.74565 96051 0 1.60804 0.160792 1
+-41.142 5.82383 0.467041 0.759646 0.673996 1.64872 118041 0 1.81651 0.160792 1
+17.0944 73.028 -0.1875 2.35478 5.85665 2.02994 216503 0 3.10551 0.160661 0
+-3.01422 -22.8219 1.06738 0.825797 0.952577 1.72869 76040 0 3.12676 0.159741 1
+-70.5687 5.26805 0.608398 0.659348 1.3496 1.4272 117013 0 -0.258708 0.159741 2
+-70.8542 5.54938 0.631836 0.927053 1.99068 1.65114 117480 0 0.0518267 0.159741 2
+-60.0979 -12.0318 0.6875 0.771609 0.798048 1.80459 91774 0 1.33192 0.159479 1
+35.019 17.1225 0.0977783 0.852843 1.85733 1.48659 134659 0 1.54338 0.159348 0
+-15.8108 -15.0889 0.22937 2.12944 4.91257 1.59644 87232 0 1.56892 0.159087 0
+33.8058 34.03 0.0793457 0.848482 0.895519 1.41782 159459 0 1.88759 0.158435 1
+8.57781 50.9198 0.414551 0.83696 1.43664 1.66979 184184 0 -1.55328 0.158174 2
+-41.187 5.66531 0.523926 0.752999 0.666469 1.68617 117573 0 1.52113 0.157914 1
+-0.163986 74.7281 0.246948 2.34559 5.54497 1.90882 218789 0 -0.211503 0.157785 0
+-10.4331 -7.5614 0.500977 1.95599 4.35229 1.46354 98481 0 -1.77992 0.15662 0
+-43.1669 -23.9545 1.01465 0.799609 0.896613 1.7542 74511 0 -2.46492 0.156362 1
+25.4563 -8.07172 0.134521 0.938382 1.00588 1.75334 97657 0 -1.17321 0.156362 1
+-71.2257 -7.54031 0.477539 1.97519 4.3608 1.66003 98291 0 -1.55552 0.156362 0
+67.9587 -5.61992 0.0703125 1.02559 2.07605 1.48586 101534 0 -2.68843 0.156362 0
+-0.20047 46.9305 0.612305 2.17038 4.63299 1.85099 178073 0 0.021237 0.155976 0
+41.3252 36.245 0.39502 0.812597 0.794161 1.6019 162759 0 2.46801 0.155848 1
+-49.6087 -10.0945 0.601562 0.814982 0.844143 1.67142 94614 0 -1.60248 0.155078 1
+-54.2532 -12.974 0.94043 0.798243 0.77048 1.84468 90388 0 -2.8424 0.153549 1
+28.9564 6.82789 0.0435791 1.99555 4.54338 1.52333 119664 0 1.48584 0.151906 0
+35.2451 38.4248 0.179565 0.879056 1.90231 1.79054 166016 0 -0.252041 0.151781 2
+-64.1014 5.89797 0.508789 1.79404 3.95894 1.59138 117969 0 1.3818 0.151028 0
+-1.65094 26.7977 0.59082 0.818971 0.914074 1.69195 148584 0 0.184012 0.150778 1
+16.8587 61.9584 -0.30835 1.90975 4.21839 1.51517 200122 0 -3.13203 0.150653 0
+25.4934 -7.92242 0.161133 0.848482 0.893336 1.72195 98125 0 -0.948314 0.150528 1
+-73.0559 5.33734 0.600586 2.29798 5.27042 1.89952 117005 0 1.52369 0.149904 0
+29.2474 6.59422 0.0834961 2.06695 4.60593 1.56022 119197 0 1.47846 0.149904 0
+-49.2052 -10.4151 0.643555 0.801172 0.817772 1.68617 94148 0 -1.65289 0.14978 1
+23.548 -14.283 -0.669922 0.858903 0.91452 1.69112 88759 0 0.0703111 0.149531 1
+-51.4014 -9.0125 0.510742 0.813391 0.848275 1.6796 96013 0 -1.54619 0.148788 1
+36.3334 28.297 0.226929 0.8262 0.958175 1.63549 151043 0 1.60578 0.148788 1
+-63.4597 10.5053 0.709961 0.868391 0.927109 1.6936 124523 0 1.63053 0.148047 1
+0.123909 59.0116 0.491699 2.0609 4.55227 1.6649 195858 0 -3.09292 0.147924 0
+-66.6747 5.35062 0.416992 2.03789 4.71055 1.58479 117025 0 1.57628 0.147678 0
+20.9659 -10.67 0.296387 0.892736 0.96687 1.76709 93899 0 0.269098 0.147555 1
+35.0071 17.4169 0.101562 0.914018 2.14091 1.42859 135127 0 1.15269 0.147187 0
+-16.0786 14.8555 0.818359 0.875202 1.86005 1.80723 131223 0 1.26202 0.147065 2
+66.8641 -5.61367 0.161743 0.814982 1.97615 1.40062 101530 0 -3.02503 0.146942 0
+-51.565 -8.86625 0.507324 0.80039 0.80588 1.69029 96480 0 -1.52551 0.146575 1
+26.6095 -10.4652 0.00683594 0.848689 0.936665 1.5148 94385 0 2.09775 0.146331 1
+24.7736 -52.009 -0.483154 2.06998 4.71515 1.70522 33539 0 -1.61315 0.145722 0
+18.2012 11.8848 0.255859 0.766727 0.843731 1.61604 127118 0 1.60653 0.145601 1
+20.9532 -8.73937 0.0605469 0.921075 1.02898 1.809 96707 0 2.16691 0.145115 1
+20.0167 12.5865 0.305664 0.797464 0.84538 1.6196 128060 0 0.809725 0.145115 1
+-66.9606 5.90531 0.547852 2.23818 5.19378 1.72363 117960 0 1.58859 0.144994 0
+21.2544 -8.72734 0.0776367 0.891429 1.02722 1.76192 96708 0 2.24067 0.144873 1
+15.7878 -6.64394 0.333008 0.894481 1.05931 1.71691 99967 0 -0.67637 0.144873 1
+67.2664 -5.5939 0.123779 0.873921 1.95599 1.42407 101532 0 -2.80627 0.144873 0
+12.1341 -20.5641 0.552246 0.909233 0.856599 1.71189 79363 0 0.137384 0.144631 1
+-59.2666 -11.9058 0.679688 0.812399 0.863107 1.75677 91776 0 -1.62592 0.14439 1
+12.0492 12.1951 0.536133 0.828827 0.95958 1.65517 127567 0 1.61216 0.144149 1
+3.01703 -4.00281 1.60742 3.34681 13.1467 3.49375 103671 0 -1.6795 0.143788 0
+0.399452 74.0858 0.259277 2.08214 4.79408 1.77488 217855 0 0.129989 0.143788 0
+-65.4103 -7.73406 0.458984 1.91162 4.18965 1.44015 97841 0 -1.60976 0.143668 0
+-66.3783 -9.20453 0.577148 0.827008 0.94125 1.7888 95966 0 -2.80156 0.143427 1
+66.1266 3.43391 0.349121 1.92191 4.18556 1.63111 114632 0 1.56822 0.143308 0
+24.5175 -52.2969 -0.575195 1.9723 4.43814 1.65275 33070 0 -1.61036 0.143188 0
+25.359 -34.0647 -1.2832 0.882281 1.81254 1.80195 59749 0 -2.4123 0.143188 2
+33.4143 34.0306 0.0759277 0.847654 1.14931 1.43699 159458 0 2.77238 0.143188 1
+19.9057 -7.78 0.204102 0.890994 0.988108 1.78008 98108 0 -3.11668 0.142948 1
+0.103905 58.0848 0.448486 2.04986 4.52125 1.60229 194454 0 0.0646262 0.142351 0
+-59.2562 -14.2836 0.591797 0.850348 0.861212 1.67632 88500 0 3.1179 0.142232 1
+18.5397 12.5689 0.235229 0.832071 0.924849 1.65194 128055 0 -0.0944692 0.142232 1
+9.205 -11.6353 0.589844 0.836348 0.863107 1.7888 92458 0 -0.0536951 0.141756 1
+49.7766 31.1471 0.626953 0.72221 0.646279 1.65033 155297 0 3.03061 0.141756 1
+-60.3494 -11.1381 0.689453 0.753735 0.754471 1.727 93177 0 -1.35596 0.141519 1
+-65.038 -11.4295 0.625 0.739517 0.736634 1.69112 92694 0 -1.52689 0.140808 1
+-1.73421 14.5858 0.710938 0.774062 0.783377 1.67632 130800 0 -0.53026 0.140572 1
+-58.6194 -12.9362 0.750977 0.824387 0.882066 1.77055 90374 0 -1.53161 0.140336 1
+18.6177 11.9753 0.263672 0.773117 0.905854 1.60308 127120 0 1.58033 0.140336 1
+20.9425 -8.51578 0.112671 0.861002 0.966398 1.80019 97175 0 2.64848 0.139396 1
+20.2686 -1.16828 0.207642 0.722386 0.802346 1.60896 107937 0 1.47113 0.139396 1
+61.3947 14.9159 0.819336 0.875843 1.94552 1.86825 131465 0 1.42416 0.139396 2
+26.7608 -8.80664 0.156006 0.93 1.07994 1.74736 96725 0 1.63592 0.138928 1
+-46.828 9.48563 0.650391 0.833697 1.02471 1.73715 123171 0 1.49506 0.138462 1
+-46.3673 9.82359 0.693359 0.809429 0.931193 1.72953 123641 0 1.51746 0.137996 1
+67.7655 -5.62102 0.074585 0.904583 1.90231 1.45357 101533 0 -2.84809 0.137648 0
+34.7394 17.4185 0.169189 0.853468 2.02994 1.3789 135126 0 1.26319 0.137648 0
+-63.2638 10.5089 0.694336 0.851387 0.928242 1.68617 124524 0 1.66863 0.137532 1
+0.0916443 57.7834 0.419922 2.024 4.47294 1.59683 193986 0 0.393651 0.137417 0
+58.4142 47.6297 0.304688 0.850556 1.12173 1.7397 179192 0 0.0632311 0.13707 1
+35.2301 44.5681 0.413574 2.1725 5.01925 1.87922 174908 0 -3.11405 0.136954 0
+0.045784 47.5825 0.692383 2.06191 4.47294 1.79755 179010 0 0.0381995 0.136954 0
+-1.44586 13.2683 0.670898 0.879271 0.876485 1.68782 128929 0 -1.66539 0.136839 1
+66.7459 4.22031 0.452637 2.28791 4.97048 1.83749 116038 0 1.50122 0.13569 0
+-59.1024 -14.4558 0.594727 0.824387 0.852843 1.69195 88033 0 3.09626 0.135232 1
+33.1828 34.0491 0.0662842 0.792225 0.842907 1.38093 159457 0 -3.03072 0.135232 1
+25.1753 34.7827 -0.00976562 0.646595 0.582157 1.01006 160368 0 -1.51135 0.135232 1
+34.0323 30.5017 0.228027 0.725037 0.689979 1.67632 154312 0 -0.32483 0.135004 1
+26.321 -11.6877 0.403809 0.848689 0.917204 1.8829 92512 0 -2.31358 0.134776 1
+38.7505 11.4067 0.243164 0.848068 0.916532 1.70605 126247 0 1.46853 0.134776 1
+69.0136 -16.7225 0.0537109 1.87007 4.15705 1.53153 85157 0 -1.55707 0.134208 0
+-3.01375 -22.645 1.01855 0.803915 0.94355 1.738 76508 0 3.03259 0.134094 2
+18.4978 -12.8752 -0.414307 0.867755 0.957006 1.64832 90615 0 -2.58944 0.134094 1
+55.8825 -16.8689 0.133057 2.00434 4.43814 1.68535 85116 0 1.5011 0.133981 0
+68.9898 -5.81469 0.109253 2.13569 4.71515 1.59878 101069 0 -1.58593 0.133981 0
+3.35094 24.0672 0.476318 2.26015 5.51257 1.61841 144856 0 -0.0340555 0.133981 0
+23.5419 -14.4741 -0.678711 0.836348 0.915861 1.70106 88291 0 0.0484007 0.133868 1
+45.7359 -8.34843 0.780273 0.839826 0.857855 1.80723 97252 0 -1.70701 0.133415 1
+-72.4841 5.04969 0.5 1.98874 4.31421 1.65679 116539 0 1.51209 0.133415 0
+-52.2391 5.92313 0.796875 2.41651 5.59938 2.17144 118006 0 1.56878 0.133302 0
+9.07781 16.5966 3.04297 0.778421 0.728408 1.66816 133642 0 0.00966465 0.132964 1
+30.1104 44.5321 0.333496 2.14824 4.90777 1.84828 174892 0 -3.08667 0.132964 0
+67.0179 -5.87656 0.0756836 0.794549 2.00923 1.41921 101063 0 -2.91204 0.132852 0
+17.6377 49.9672 -0.03125 2.1874 4.98506 1.66571 182809 0 3.07723 0.132852 0
+18.6322 -12.8794 -0.459229 0.860792 0.961691 1.63669 90616 0 -2.69511 0.132739 1
+20.0581 12.3731 0.348633 0.795131 0.798828 1.71524 127592 0 0.844519 0.132739 1
+33.8089 33.8464 0.0212402 0.784526 0.840441 1.34894 158991 0 2.54353 0.132739 1
+-36.2556 12.2587 1.13184 0.836348 1.87556 1.77574 127416 0 1.56828 0.132515 2
+24.6275 35.9287 0.289551 0.927053 0.864583 1.76192 162238 0 -1.62799 0.13229 1
+21.6027 -8.7025 0.072876 0.879271 1.04085 1.72953 96709 0 2.71941 0.131619 1
+26.7227 -9.05578 0.131104 0.939069 1.08655 1.68617 96257 0 1.64566 0.131173 1
+-0.732185 57.1124 0.475342 1.98971 4.38215 1.58092 193047 0 0.245518 0.131062 0
+15.5233 -7.14547 0.414062 0.853468 1.02797 1.71943 99030 0 -1.82134 0.130729 1
+-70.5422 -3.91843 0.418457 0.993612 1.77055 1.54166 103441 0 -1.3307 0.130729 2
+17.6925 62.2725 0.00219727 2.06191 4.66477 1.71775 200593 0 3.10766 0.130618 0
+-47.1539 9.79469 0.625977 0.849103 1.01451 1.71859 123638 0 1.54713 0.130507 1
+-32.7309 27.2825 -0.0993652 0.834308 1.35289 1.75505 149423 0 -1.33728 0.130285 2
+33.4216 33.8544 0.0222168 0.792612 0.885951 1.40541 158990 0 -3.08196 0.130285 1
+26.3823 -9.40078 0.0716553 0.902378 0.996588 1.60229 95788 0 -2.1294 0.130064 1
+12.6775 -0.848434 0.542969 0.821975 1.24118 1.71859 108381 0 1.50494 0.130064 2
+34.4259 17.1666 0.224731 0.863107 1.88474 1.33452 134657 0 2.64204 0.129733 0
+0.0571136 48.7458 0.664062 2.16192 4.80346 1.80371 180882 0 -0.00435622 0.129733 0
+65.1172 3.73516 0.374268 1.93509 4.32687 1.63111 115097 0 1.59282 0.129403 0
+-71.9235 -3.75281 0.365967 1.81342 3.82218 1.54204 103905 0 -1.49436 0.128635 0
+33.9851 34.0263 0.111694 0.805683 0.812201 1.33583 159460 0 1.72459 0.128525 1
+0.0942993 73.1847 0.391113 1.90138 4.16111 1.70355 216450 0 0.0574763 0.128307 0
+22.7994 -52.2659 -0.776367 0.788751 1.62078 1.78792 33065 0 -0.0334419 0.127653 2
+20.4116 -13.319 -0.453857 0.846826 0.92983 1.63111 90153 0 -1.80498 0.127002 1
+-47.6155 10.6761 0.682617 0.879271 1.04238 1.74651 125041 0 1.64494 0.127002 1
+20.2836 -1.38641 0.194824 0.758534 0.903866 1.60112 107469 0 1.90868 0.126785 1
+20.5956 -13.4888 -0.457275 0.845793 0.924849 1.68453 89686 0 -1.90461 0.126569 1
+21.9162 -8.71906 0.0407715 0.864161 0.957941 1.738 96710 0 3.09908 0.126569 1
+46.1477 -8.41235 0.673828 0.827008 1.80459 1.79667 97254 0 -1.61722 0.126353 2
+61.8796 14.667 0.782227 0.837983 1.97422 1.81697 130999 0 1.50329 0.126138 2
+-32.7464 27.1356 -0.102539 0.8262 1.34499 1.7414 148955 0 -1.57182 0.126138 1
+-47.6022 -3.58797 0.561523 1.90231 3.5871 1.49168 103981 0 -1.57432 0.125815 0
+58.6522 47.6047 0.343018 0.842907 1.6335 1.77228 179193 0 -0.255272 0.125493 2
+-38.9026 24.0924 0.517578 2.09745 4.66022 1.73038 144724 0 0.270852 0.125172 0
+-2.78468 -22.8262 1.0459 0.853468 0.991734 1.75849 76041 0 3.13735 0.125065 1
+-72.78 -7.24664 0.496582 1.93793 4.32264 1.60582 98754 0 -1.50411 0.124959 0
+-2.07031 -4.57094 1.51855 2.98545 6.7872 2.84874 102719 0 -1.69851 0.124852 0
+58.1828 47.8506 0.274902 0.864372 1.38261 1.75934 179659 0 0.332094 0.124852 1
+38.2608 12.5258 0.387207 0.821373 0.873494 1.72953 128117 0 1.51782 0.124639 1
+36.3394 28.0619 0.214478 0.821173 0.993915 1.63749 150575 0 1.56852 0.124639 1
+68.6824 -15.9021 0.034668 1.70855 3.75558 1.47646 86560 0 1.55858 0.124319 0
+17.5066 48.9131 -0.000732422 1.69112 3.28528 1.56938 180936 0 3.09422 0.124319 0
+19.3838 -10.7377 -0.19751 0.899189 0.998536 1.69195 93894 0 1.44905 0.124213 1
+33.4863 32.1109 0.308105 0.781849 0.725922 1.76106 156650 0 -1.18898 0.124213 1
+-72.1645 -6.93094 0.394531 1.45286 2.53743 1.62713 99224 0 -1.55546 0.124107 0
+16.8917 71.6427 -0.0314941 1.63869 3.6117 1.54997 214162 0 3.10707 0.124107 0
+-62.9763 10.9839 0.660645 0.831868 1.06501 1.71273 125461 0 1.42065 0.123789 1
+25.0999 -34.6481 -1.21191 0.823381 1.85915 1.79755 58812 0 -0.108123 0.123577 2
+52.6903 11.2906 0.362549 0.820572 0.906075 1.81077 126290 0 1.46932 0.123577 1
+25.6555 34.7792 0.0842285 0.699819 0.62609 1.20535 160370 0 0.246247 0.123577 1
+-17.4641 58.0931 1.21777 2.54488 5.93148 2.27343 194399 0 -0.112576 0.123366 0
+-15.8328 14.6716 0.768555 0.844349 1.83211 1.809 130756 0 1.45667 0.123155 2
+48.1338 15.1709 0.425537 1.13897 1.39244 1.75849 131892 0 -1.64027 0.123155 1
+-73.4552 -7.24211 0.679688 1.95599 4.25979 1.65356 98752 0 -1.46854 0.123049 0
+-35.0162 27.3667 0.855469 2.14195 4.80815 1.81697 149416 0 -1.64274 0.123049 0
+19.6726 -10.0988 -0.0437012 0.917708 1.02072 1.73038 94831 0 1.64622 0.122944 1
+33.1759 33.8497 0.0100098 0.730545 0.732688 1.38904 158989 0 -3.0074 0.122523 1
+34.4242 17.4125 0.216187 0.877127 2.12944 1.35885 135125 0 2.22677 0.122418 0
+-43.5981 -23.9006 1.04102 0.840647 1.58556 1.79142 74509 0 3.05616 0.122104 2
+-0.737808 57.4324 0.484863 1.98002 4.36933 1.57707 193515 0 0.541353 0.12179 0
+-64.8307 -12.2562 0.640625 0.708414 0.719921 1.61881 91291 0 -1.5154 0.121686 1
+1.17 14.9622 0.486328 0.751897 0.819371 1.43629 131277 0 0.0925049 0.121477 1
+44.6108 43.567 0.217285 0.934438 0.985218 1.79667 173533 0 -1.55692 0.121477 1
+-71.8627 -6.92078 0.386719 1.95982 4.34805 1.63549 99225 0 -1.54692 0.121373 0
+54.0069 10.0258 0.214478 0.822176 0.963571 1.54204 124422 0 -1.52339 0.121269 1
+19.6194 13.2484 0.411621 1.04092 1.06918 1.84828 128995 0 -1.49427 0.121061 1
+8.2225 50.7516 0.461914 0.869239 1.90509 1.69608 183715 0 -1.36933 0.121061 2
+-21.8922 11.4459 0.505371 0.777281 0.835939 1.32705 126057 0 1.58411 0.120853 1

+ 8 - 0
src/detection/CenterPoint-master/run-new.sh

@@ -0,0 +1,8 @@
+./build-qt/CenterPoint \
+--pfeOnnxPath=models/pfe_baseline32000.onnx \
+--rpnOnnxPath=models/rpn_baseline.onnx \
+--pfeEnginePath=models/pfe_fp.engine \
+--rpnEnginePath=models/rpn_fp.engine \
+--savePath=results \
+--filePath=lidars \
+--fp16
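+# Note: the ONNX models above are parsed into TensorRT engines and the serialized plans are
+# written to pfeEnginePath/rpnEnginePath (see CenterPoint::build in src/centerpoint.cpp);
+# --fp16 requests FP16 kernels at engine-build time.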

+ 8 - 0
src/detection/CenterPoint-master/run.sh

@@ -0,0 +1,8 @@
+./build/centerpoint \
+--pfeOnnxPath=models/pfe_baseline32000.onnx \
+--rpnOnnxPath=models/rpn_baseline.onnx \
+--pfeEnginePath=models/pfe_fp.engine \
+--rpnEnginePath=models/rpn_fp.engine \
+--savePath=results \
+--filePath=lidars \
+--fp16

+ 403 - 0
src/detection/CenterPoint-master/src/centerpoint.cpp

@@ -0,0 +1,403 @@
+#include "centerpoint.h"
+#include "preprocess.h"
+
+
+bool CenterPoint::engineInitlization()
+ {
+        sample::gLogInfo << "Building pfe engine . . .  "<< std::endl;
+        mEngine = mParams.load_engine ? buildFromSerializedEngine(mParams.pfeSerializedEnginePath) : build(mParams.pfeOnnxFilePath,mParams.pfeSerializedEnginePath);
+        sample::gLogInfo << "Building rpn engine . . .  "<< std::endl;
+        mEngineRPN = mParams.load_engine ? buildFromSerializedEngine(mParams.rpnSerializedEnginePath) : build(mParams.rpnOnnxFilePath,mParams.rpnSerializedEnginePath);
+        sample::gLogInfo << "All has Built !  "<< std::endl;
+
+        // Create RAII buffer manager object
+        sample::gLogInfo << "Creating pfe context " << std::endl;
+        //samplesCommon::BufferManager buffers(mEngine);
+        mbuffers = new samplesCommon::BufferManager(mEngine);
+        mContext = SampleUniquePtr<nvinfer1::IExecutionContext>(mEngine->createExecutionContext());
+
+        sample::gLogInfo << "Creating rpn context " << std::endl;
+        //samplesCommon::BufferManager buffersRPN(mEngineRPN);
+        mbuffersRPN = new samplesCommon::BufferManager(mEngineRPN);
+        mContextRPN = SampleUniquePtr<nvinfer1::IExecutionContext>(mEngineRPN->createExecutionContext());
+
+        if (!mContext || !mContextRPN)
+        {
+            sample::gLogError<< "Failed to create context " << std::endl;
+            return false;
+        }
+        return true;
+}
+
+std::shared_ptr<nvinfer1::ICudaEngine> CenterPoint::buildFromSerializedEngine(std::string serializedEngineFile) 
+{
+
+     std::vector<char> trtModelStream_;
+     size_t size{0};
+     std::ifstream file(serializedEngineFile, std::ios::binary);
+     if (file.good()) 
+    {
+         file.seekg(0, file.end);
+         size = file.tellg();
+         file.seekg(0,file.beg);
+         trtModelStream_.resize(size);
+         file.read(trtModelStream_.data(), size);
+         file.close() ;
+     }
+     else 
+     {
+        sample::gLogError<< " Failed to read serialized engine ! " << std::endl;
+        return nullptr;
+     }
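+    // Deserialize the plan bytes read above into an ICudaEngine; the shared_ptr is given
+    // samplesCommon::InferDeleter so the engine is released when the last reference goes away.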
+    SampleUniquePtr<IRuntime> runtime{createInferRuntime(sample::gLogger.getTRTLogger())};
+    if(!runtime) { sample::gLogError << "Failed to create runtime \n"; return nullptr;}
+    sample::gLogInfo<<"Create ICudaEngine  !" << std::endl;
+    std::shared_ptr<nvinfer1::ICudaEngine>  engine = std::shared_ptr<nvinfer1::ICudaEngine>(
+        runtime->deserializeCudaEngine(trtModelStream_.data(), size), 
+        samplesCommon::InferDeleter());
+
+    if (!engine)
+    {
+        sample::gLogError << "Failed to create engine \n";
+        return nullptr;
+    }
+
+    return engine;
+}
+
+std::shared_ptr<nvinfer1::ICudaEngine>  CenterPoint::build(std::string  onnxFilePath,std::string saveEnginePath)
+{
+    // Note: buildEngineWithConfig() is deprecated in TensorRT 8.0 and above, so we build a serialized plan and deserialize it with IRuntime below.
+    
+    auto builder = SampleUniquePtr<nvinfer1::IBuilder>(nvinfer1::createInferBuilder(sample::gLogger.getTRTLogger()));
+    if (!builder)
+    { 
+        sample::gLogError<< "Builder not created !" << std::endl;
+        return nullptr;
+    }
+   
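+    // The ONNX parser only supports explicit-batch networks, hence the kEXPLICIT_BATCH flag below.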
+    const auto explicitBatch = 1U << static_cast<uint32_t>(NetworkDefinitionCreationFlag::kEXPLICIT_BATCH);
+    auto network = SampleUniquePtr<nvinfer1::INetworkDefinition>(builder->createNetworkV2(explicitBatch));
+    if (!network)
+    {
+        sample::gLogError<< "Network not created ! " << std::endl;
+        return nullptr;
+    }
+
+    auto config = SampleUniquePtr<nvinfer1::IBuilderConfig>(builder->createBuilderConfig());
+    if (!config)
+    {
+        sample::gLogError<< "Config not created ! " << std::endl;
+        return nullptr;
+    }
+    auto parser
+        = SampleUniquePtr<nvonnxparser::IParser>(nvonnxparser::createParser(*network, sample::gLogger.getTRTLogger()));
+    if (!parser)
+    {
+        sample::gLogError<< "Parser not created ! " << std::endl;
+        return nullptr;
+    }
+    sample::gLogInfo<<"ConstructNetwork !" << std::endl;
+    
+    cudaEvent_t  start, end;
+    cudaStream_t stream;
+    cudaStreamCreate(&stream);
+    cudaEventCreate(&start);
+    cudaEventCreate(&end);
+    cudaEventRecord(start,stream);    
+
+    auto constructed = constructNetwork(builder, network, config, parser,onnxFilePath);
+    if (!constructed)
+    {
+        return nullptr;
+    }
+
+    ///////////////////////////////////////////////////////////////////BUILD ENGINE FROM FILE//////////////////////////////////////////////////////////////////////////////
+
+    // std::shared_ptr<nvinfer1::ICudaEngine> engine = std::shared_ptr<nvinfer1::ICudaEngine>(
+    //     builder->buildEngineWithConfig(*network, *config), samplesCommon::InferDeleter());
+    
+    // CUDA stream used for profiling during the engine build
+    auto profileStream = samplesCommon::makeCudaStream();
+    if(!profileStream) {
+        sample::gLogError<<"Failed to create a profile stream !\n";
+        return  nullptr;
+    }
+    config->setProfileStream(*profileStream);    
+
+    SampleUniquePtr<IHostMemory> plan{builder->buildSerializedNetwork(*network, *config)};
+    if (!plan) {sample::gLogError << "Failed to create IHostMemory plan \n";return  nullptr;}
+
+    SampleUniquePtr<IRuntime> runtime{createInferRuntime(sample::gLogger.getTRTLogger())};
+    if(!runtime) { sample::gLogError << "Failed to create runtime \n"; return nullptr;}
+    sample::gLogInfo<<"Create ICudaEngine  !" << std::endl;
+    std::shared_ptr<nvinfer1::ICudaEngine> engine = std::shared_ptr<nvinfer1::ICudaEngine>(
+        runtime->deserializeCudaEngine(plan->data(), plan->size()), samplesCommon::InferDeleter());
+/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+    if (!engine)
+    {
+        sample::gLogError << "Failed to create engine \n";
+        return nullptr;
+    }
+    // save engine
+    nvinfer1::IHostMemory& trtModelStream = *(engine->serialize());
+    std::ofstream file;
+    file.open(saveEnginePath, std::ios::binary | std::ios::out);
+    if(!file.is_open())
+    {
+        std::cout << "read create engine file" << saveEnginePath <<" failed" << std::endl;
+        return nullptr;
+    }
+    file.write((const char*)trtModelStream.data(), std::streamsize(trtModelStream.size()));
+    file.close();
+
+    sample::gLogInfo << "getNbInputs: " << network->getNbInputs() << " \n" << std::endl;
+    sample::gLogInfo << "getNbOutputs: " << network->getNbOutputs() << " \n" << std::endl;
+    sample::gLogInfo << "getNbOutputs Name: " << network->getOutput(0)->getName() << " \n" << std::endl;
+
+    mInputDims = network->getInput(0)->getDimensions();
+    mOutputDims = network->getOutput(0)->getDimensions();
+    return engine;
+}
+
+
+bool CenterPoint::constructNetwork(SampleUniquePtr<nvinfer1::IBuilder>& builder,
+    SampleUniquePtr<nvinfer1::INetworkDefinition>& network, SampleUniquePtr<nvinfer1::IBuilderConfig>& config,
+    SampleUniquePtr<nvonnxparser::IParser>& parser,
+    std::string  onnxFilePath)
+{   
+    auto parsed = parser->parseFromFile(
+        // locateFile(mParams.onnxFileName, mParams.dataDirs).c_str(),
+        // params.onnxFileName.c_str(),
+        onnxFilePath.c_str(),
+        static_cast<int>(sample::gLogger.getReportableSeverity()));
+
+        // ILogger::Severity::kWARNING);
+    if (!parsed)
+    {
+        sample::gLogError<< "Onnx model cannot be parsed ! " << std::endl;
+        return false;
+    }
+    builder->setMaxBatchSize(BATCH_SIZE_);
+    config->setMaxWorkspaceSize(5_GiB); //8_GiB);
+    if (mParams.fp16)
+        config->setFlag(BuilderFlag::kFP16);
+    if (mParams.dlaCore >= 0){
+        samplesCommon::enableDLA(builder.get(), config.get(), mParams.dlaCore);
+        sample::gLogInfo << "Deep Learning Accelerator (DLA) enabled. \n";
+    }
+    return true;
+}
+
+
+//!
+//! \brief Runs the TensorRT inference engine for this sample
+//!
+//! \details This function is the main execution function of the sample. It allocates the buffer,
+//!          sets inputs and executes the engine.
+//!
+bool CenterPoint::infer(float* lidarpoints,int pointsnum,std::vector<Box>& predResult)
+{
+    // mParams.inputTensorNames :  [ voxels,  num_voxels, coords ]
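+    // Pipeline for one point cloud: preprocess (points -> pillars) -> PFE -> scatter
+    // (pillar features -> BEV canvas) -> RPN -> postprocess (decode + NMS), each stage timed with CUDA events.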
+    float* devicePillars = static_cast<float*>(mbuffers->getDeviceBuffer(mParams.pfeInputTensorNames[0]));
+    // create event objects, used for timing the pipeline stages
+    cudaEvent_t start, stop;
+    float pre_time = 0;
+    float pfe_time = 0;
+    float scatter_time = 0;
+    float rpn_time  = 0;
+    float post_time = 0;
+    
+    float totalPreprocessDur = 0;
+    float totalPostprocessDur = 0;
+    float totalScatterDur =0 ;
+    float totalPfeDur = 0;
+    float totalRpnDur = 0;
+    cudaStream_t stream;
+    GPU_CHECK(cudaStreamCreate(&stream));
+
+    cudaEventCreate(&start);
+    cudaEventCreate(&stop);
+    cudaEventRecord(start);
+
+    GPU_CHECK(cudaMemcpy(dev_points_, lidarpoints, pointsnum * POINT_DIM * sizeof(float), cudaMemcpyHostToDevice));
+    preprocessGPU(dev_points_, devicePillars,dev_indices_,
+    p_mask_, p_bev_idx_,  p_point_num_assigned_,  bev_voxel_idx_, v_point_sum_,  v_range_,  v_point_num_,
+     pointsnum, POINT_DIM);
+    cudaEventRecord(stop);
+    cudaEventSynchronize(stop);
+    cudaEventElapsedTime(&pre_time, start, stop);
+
+
+    // Doing inference
+    cudaEventRecord(start);
+    bool status = mContext->executeV2(mbuffers->getDeviceBindings().data());
+
+    cudaEventRecord(stop);
+    cudaEventSynchronize(stop);
+    cudaEventElapsedTime(&pfe_time, start, stop);
+    cudaEventRecord(start);
+    if (!status)
+    {
+        sample::gLogError<< "Error with pfe contex execution ! " << std::endl;
+        return false;
+    }
+
+    // copy coordinates from host to device
+    // GPU_CHECK(cudaMemcpyAsync(dev_indices_, hostIndex, MAX_PILLARS * sizeof(int), cudaMemcpyHostToDevice, stream));
+    // GPU_CHECK(cudaMemcpy(dev_indices_, hostIndex, MAX_PILLARS * sizeof(int), cudaMemcpyHostToDevice));
+
+    //  cast value type on the GPU device
+    dev_scattered_feature_ = static_cast<float*>(mbuffersRPN->getDeviceBuffer(mParams.rpnInputTensorNames[0]));
+
+
+    // reset scattered feature to zero .
+    GPU_CHECK(cudaMemset(dev_scattered_feature_, 0 ,  PFE_OUTPUT_DIM * BEV_W * BEV_H * sizeof(float)));
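+    // Scatter the PFE pillar features onto the dense BEV canvas (PFE_OUTPUT_DIM x BEV_H x BEV_W)
+    // using the pillar coordinates held in dev_indices_.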
+
+    scatter_cuda_ptr_->doScatterCuda(MAX_PILLARS, dev_indices_,static_cast<float*>(mbuffers->getDeviceBuffer(mParams.pfeOutputTensorNames[0])),
+                                                            //   static_cast<float*>(buffersRPN.getDeviceBuffer(mParamsRPN.inputTensorNames[0]) )) ;
+                                                            dev_scattered_feature_);
+
+    cudaEventRecord(stop);
+    cudaEventSynchronize(stop);
+    cudaEventElapsedTime(&scatter_time, start, stop);
+    cudaEventRecord(start);
+    // status = contextRPN->enqueue( BATCH_SIZE_,buffersRPN.getDeviceBindings().data(), stream, nullptr);
+    status = mContextRPN->executeV2( mbuffersRPN->getDeviceBindings().data());
+    if (!status)
+    {
+        sample::gLogError<< "Error with rpn contex execution ! " << std::endl;
+        return false;
+    }
+    // Copying outputs from device to host
+    cudaEventRecord(stop);
+    cudaEventSynchronize(stop);
+    cudaEventElapsedTime(&rpn_time, start, stop);
+
+    // Doing postprocess
+    predResult.clear();
+    cudaEventRecord(start);
+
+    // buffersRPN.copyOutputToHostAsync(stream);
+    // buffersRPN.copyOutputToHost();
+
+
+    postprocessGPU(mbuffersRPN, predResult, mParams.rpnOutputTensorNames,
+                                            dev_score_idx_,
+                                            mask_cpu,
+                                            remv_cpu,
+                                            host_score_idx_,
+                                            host_keep_data_,
+                                            host_boxes_,
+                                            host_label_);
+    cudaEventRecord(stop);
+    cudaEventSynchronize(stop);
+    cudaEventElapsedTime(&post_time, start, stop);
+
+    totalPreprocessDur += pre_time;
+    totalScatterDur += scatter_time;
+    totalPfeDur += pfe_time;
+    totalRpnDur += rpn_time;
+    totalPostprocessDur += post_time;
+
+    //saveOutput(predResult, mParams.filePaths[idx], mParams.savePath);
+
+//    for (size_t idx = 0; idx < predResult.size(); idx++){
+//            std::cout << predResult[idx].x << " " << predResult[idx].y << " " << predResult[idx].z << " "<< \
+//            predResult[idx].l << " " << predResult[idx].h << " " << predResult[idx].w << " " << predResult[idx].velX \
+//            << " " << predResult[idx].velY << " " << predResult[idx].theta << " " << predResult[idx].score << \
+//            " "<< predResult[idx].cls << std::endl;
+//    }
+
+
+    //free(lidarpoints);
+    // release the stream and  the buffers
+    cudaStreamDestroy(stream);
+
+    sample::gLogInfo << "Average PreProcess Time: " << totalPreprocessDur  << " ms"<< std::endl;
+    sample::gLogInfo << "Average PfeInfer Time: " << totalPfeDur  << " ms"<< std::endl;
+    sample::gLogInfo << "Average ScatterInfer Time: " << totalScatterDur  << " ms"<< std::endl;
+    sample::gLogInfo << "Average RpnInfer  Time: " << totalRpnDur  << " ms"<< std::endl;
+    sample::gLogInfo << "Average PostProcess Time: " << totalPostprocessDur << " ms"<< std::endl;
+
+    return true;
+}
+
+bool CenterPoint::testinfer()
+{
+    void* inputPointBuf = nullptr;
+    int fileSize = mParams.filePaths.size();
+    // int fileSize = 1;
+
+    if (!fileSize) {
+        sample::gLogError<< "No Bin File Was Found ! " << std::endl;
+        return false;
+    }
+
+    // For Loop Every Pcl Bin
+    // for(auto idx = 0; idx < filePath.size(); idx++){
+     for(auto idx = 0; idx < fileSize; idx++){
+        std::cout << "===========FilePath[" << idx <<"/"<<fileSize<<"]:" << mParams.filePaths[idx] <<"=============="<< std::endl;
+
+        int pointNum = 0;
+        if (!processInput(inputPointBuf, mParams.filePaths[idx], pointNum))
+        {
+            return false;
+        }
+        float* points = static_cast<float*>(inputPointBuf);
+        std::vector<Box> predResult;
+
+        infer(points,pointNum,predResult);
+        saveOutput(predResult, mParams.filePaths[idx], mParams.savePath);
+     }
+
+     return true;
+}
+
+
+/* Known issue:
+ * if the return type is changed from void to bool, the "for (size_t idx = 0; idx < mEngine->getNbBindings(); idx++)" loop does not terminate.
+ */
+
+void CenterPoint::saveOutput(std::vector<Box>& predResult, std::string& inputFileName,  std::string savePath)
+{
+    
+    std::string::size_type pos = inputFileName.find_last_of("/");
+    std::string outputFilePath = savePath + "/" +  inputFileName.substr(pos) + ".txt";
+
+
+    ofstream resultFile;
+
+    resultFile.exceptions ( std::ifstream::failbit | std::ifstream::badbit );
+    try {
+        resultFile.open(outputFilePath);
+        for (size_t idx = 0; idx < predResult.size(); idx++){
+                resultFile << predResult[idx].x << " " << predResult[idx].y << " " << predResult[idx].z << " "<< \
+                predResult[idx].l << " " << predResult[idx].h << " " << predResult[idx].w << " " << predResult[idx].velX \
+                << " " << predResult[idx].velY << " " << predResult[idx].theta << " " << predResult[idx].score << \ 
+                " "<< predResult[idx].cls << std::endl;
+        }
+        resultFile.close();
+    }
+    catch (const std::ifstream::failure& e) {
+        sample::gLogError << "Failed to open file: " << outputFilePath << std::endl;
+    }
+}
+
+
+//!
+//! \brief Reads the input and stores the result in a managed buffer
+//!
+bool CenterPoint::processInput(void*& inputPointBuf, std::string& pointFilePath, int& pointNum)
+{
+
+    bool ret = readBinFile(pointFilePath, inputPointBuf, pointNum,  POINT_DIM);
+    if(!ret){
+        sample::gLogError << "Error reading point file: " << pointFilePath << std::endl;
+        free(inputPointBuf);
+        return ret;
+    }
+    std::cout << "Read point file successfully; point num is: " << pointNum << std::endl;
+    return ret;
+}

+ 734 - 0
src/detection/CenterPoint-master/src/iou3d_nms_kernel.cu

@@ -0,0 +1,734 @@
+/*
+3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
+Written by Shaoshuai Shi
+All Rights Reserved 2019-2020.
+*/
+
+#include <iostream>
+#include <stdio.h>
+#include <vector>
+#include <thrust/sort.h>
+#include <thrust/sequence.h>
+#include <thrust/device_vector.h>
+#include <thrust/host_vector.h>
+#include <thrust/gather.h>
+#include <thrust/transform.h>
+#include <thrust/count.h>
+#include <config.h>
+
+#define THREADS_PER_BLOCK 16
+#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
+// int THREADS_PER_BLOCK_NMS =  sizeof(unsigned long long) * 8
+// #define DEBUG
+const float EPS = 1e-8;
+
+struct Point {
+    float x, y;
+    __device__ Point() {}
+    __device__ Point(double _x, double _y){
+        x = _x, y = _y;
+    }
+
+    __device__ void set(float _x, float _y){
+        x = _x; y = _y;
+    }
+
+    __device__ Point operator +(const Point &b)const{
+        return Point(x + b.x, y + b.y);
+    }
+
+    __device__ Point operator -(const Point &b)const{
+        return Point(x - b.x, y - b.y);
+    }
+};
+
+__device__ inline float cross(const Point &a, const Point &b){
+    return a.x * b.y - a.y * b.x;
+}
+
+__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){
+    return (p1.x - p0.x) * (p2.y - p0.y) - (p2.x - p0.x) * (p1.y - p0.y);
+}
+
+__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){
+    int ret = min(p1.x,p2.x) <= max(q1.x,q2.x)  &&
+              min(q1.x,q2.x) <= max(p1.x,p2.x) &&
+              min(p1.y,p2.y) <= max(q1.y,q2.y) &&
+              min(q1.y,q2.y) <= max(p1.y,p2.y);
+    return ret;
+}
+
+__device__ inline int check_in_box2d(const float *box, const Point &p){
+    //params: (7) [x, y, z, dx, dy, dz, heading]
+    const float MARGIN = 1e-2;
+
+    float center_x = box[0], center_y = box[1];
+    float angle_cos = cos(-box[6]), angle_sin = sin(-box[6]);  // rotate the point in the opposite direction of box
+    float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * (-angle_sin);
+    float rot_y = (p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos;
+
+    return (fabs(rot_x) < box[3] / 2 + MARGIN && fabs(rot_y) < box[4] / 2 + MARGIN);
+}
+
+__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){
+    // fast exclusion
+    if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
+
+    // check cross standing
+    float s1 = cross(q0, p1, p0);
+    float s2 = cross(p1, q1, p0);
+    float s3 = cross(p0, q1, q0);
+    float s4 = cross(q1, p1, q0);
+
+    if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
+
+    // calculate intersection of two lines
+    float s5 = cross(q1, p1, p0);
+    if(fabs(s5 - s1) > EPS){
+        ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
+        ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
+
+    }
+    else{
+        float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
+        float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
+        float D = a0 * b1 - a1 * b0;
+
+        ans.x = (b0 * c1 - b1 * c0) / D;
+        ans.y = (a1 * c0 - a0 * c1) / D;
+    }
+
+    return 1;
+}
+
+__device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){
+    float new_x = (p.x - center.x) * angle_cos + (p.y - center.y) * (-angle_sin) + center.x;
+    float new_y = (p.x - center.x) * angle_sin + (p.y - center.y) * angle_cos + center.y;
+    p.set(new_x, new_y);
+}
+
+__device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){
+    return atan2(a.y - center.y, a.x - center.x) > atan2(b.y - center.y, b.x - center.x);
+}
+
+__device__ inline float box_overlap(const float *box_a, const float *box_b){
+    // params box_a: [x, y, z, dx, dy, dz, heading]
+    // params box_b: [x, y, z, dx, dy, dz, heading]
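+    // Rotated-rectangle overlap: gather the corners of each box lying inside the other box plus
+    // all edge-edge intersection points, sort them around their centroid, then take the polygon
+    // area via the cross-product (shoelace) sum below.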
+
+    float a_angle = box_a[6], b_angle = box_b[6];
+    float a_dx_half = box_a[3] / 2, b_dx_half = box_b[3] / 2, a_dy_half = box_a[4] / 2, b_dy_half = box_b[4] / 2;
+    float a_x1 = box_a[0] - a_dx_half, a_y1 = box_a[1] - a_dy_half;
+    float a_x2 = box_a[0] + a_dx_half, a_y2 = box_a[1] + a_dy_half;
+    float b_x1 = box_b[0] - b_dx_half, b_y1 = box_b[1] - b_dy_half;
+    float b_x2 = box_b[0] + b_dx_half, b_y2 = box_b[1] + b_dy_half;
+
+    Point center_a(box_a[0], box_a[1]);
+    Point center_b(box_b[0], box_b[1]);
+
+#ifdef DEBUG
+    printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle,
+           b_x1, b_y1, b_x2, b_y2, b_angle);
+    printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y);
+#endif
+
+    Point box_a_corners[5];
+    box_a_corners[0].set(a_x1, a_y1);
+    box_a_corners[1].set(a_x2, a_y1);
+    box_a_corners[2].set(a_x2, a_y2);
+    box_a_corners[3].set(a_x1, a_y2);
+
+    Point box_b_corners[5];
+    box_b_corners[0].set(b_x1, b_y1);
+    box_b_corners[1].set(b_x2, b_y1);
+    box_b_corners[2].set(b_x2, b_y2);
+    box_b_corners[3].set(b_x1, b_y2);
+
+    // get oriented corners
+    float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
+    float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
+
+    for (int k = 0; k < 4; k++){
+#ifdef DEBUG
+        printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
+#endif
+        rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
+        rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
+#ifdef DEBUG
+        printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
+#endif
+    }
+
+    box_a_corners[4] = box_a_corners[0];
+    box_b_corners[4] = box_b_corners[0];
+
+    // get intersection of lines
+    Point cross_points[16];
+    Point poly_center;
+    int cnt = 0, flag = 0;
+
+    poly_center.set(0, 0);
+    for (int i = 0; i < 4; i++){
+        for (int j = 0; j < 4; j++){
+            flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]);
+            if (flag){
+                poly_center = poly_center + cross_points[cnt];
+                cnt++;
+#ifdef DEBUG
+                printf("Cross points (%.3f, %.3f): a(%.3f, %.3f)->(%.3f, %.3f), b(%.3f, %.3f)->(%.3f, %.3f) \n",
+                    cross_points[cnt - 1].x, cross_points[cnt - 1].y,
+                    box_a_corners[i].x, box_a_corners[i].y, box_a_corners[i + 1].x, box_a_corners[i + 1].y,
+                    box_b_corners[i].x, box_b_corners[i].y, box_b_corners[i + 1].x, box_b_corners[i + 1].y);
+#endif
+            }
+        }
+    }
+
+    // check corners
+    for (int k = 0; k < 4; k++){
+        if (check_in_box2d(box_a, box_b_corners[k])){
+            poly_center = poly_center + box_b_corners[k];
+            cross_points[cnt] = box_b_corners[k];
+            cnt++;
+#ifdef DEBUG
+                printf("b corners in a: corner_b(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y);
+#endif
+        }
+        if (check_in_box2d(box_b, box_a_corners[k])){
+            poly_center = poly_center + box_a_corners[k];
+            cross_points[cnt] = box_a_corners[k];
+            cnt++;
+#ifdef DEBUG
+                printf("a corners in b: corner_a(%.3f, %.3f)", cross_points[cnt - 1].x, cross_points[cnt - 1].y);
+#endif
+        }
+    }
+
+    poly_center.x /= cnt;
+    poly_center.y /= cnt;
+
+    // sort the points of polygon
+    Point temp;
+    for (int j = 0; j < cnt - 1; j++){
+        for (int i = 0; i < cnt - j - 1; i++){
+            if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){
+                temp = cross_points[i];
+                cross_points[i] = cross_points[i + 1];
+                cross_points[i + 1] = temp;
+            }
+        }
+    }
+
+#ifdef DEBUG
+    printf("cnt=%d\n", cnt);
+    for (int i = 0; i < cnt; i++){
+        printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y);
+    }
+#endif
+
+    // get the overlap areas
+    float area = 0;
+    for (int k = 0; k < cnt - 1; k++){
+        area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]);
+    }
+
+    return fabs(area) / 2.0;
+}
+
+__device__ inline float iou_bev(const float *box_a, const float *box_b){
+    // params box_a: [x, y, z, dx, dy, dz, heading]
+    // params box_b: [x, y, z, dx, dy, dz, heading]
+    float sa = box_a[3] * box_a[4];
+    float sb = box_b[3] * box_b[4];
+    float s_overlap = box_overlap(box_a, box_b);
+    return s_overlap / fmaxf(sa + sb - s_overlap, EPS);
+}
+
+__global__ void boxes_overlap_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
+    // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
+    // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
+    const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
+    const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
+
+    if (a_idx >= num_a || b_idx >= num_b){
+        return;
+    }
+    const float * cur_box_a = boxes_a + a_idx * 7;
+    const float * cur_box_b = boxes_b + b_idx * 7;
+    float s_overlap = box_overlap(cur_box_a, cur_box_b);
+    ans_overlap[a_idx * num_b + b_idx] = s_overlap;
+}
+
+__global__ void boxes_iou_bev_kernel(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
+    // params boxes_a: (N, 7) [x, y, z, dx, dy, dz, heading]
+    // params boxes_b: (M, 7) [x, y, z, dx, dy, dz, heading]
+    const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
+    const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
+
+    if (a_idx >= num_a || b_idx >= num_b){
+        return;
+    }
+
+    const float * cur_box_a = boxes_a + a_idx * 7;
+    const float * cur_box_b = boxes_b + b_idx * 7;
+    float cur_iou_bev = iou_bev(cur_box_a, cur_box_b);
+    ans_iou[a_idx * num_b + b_idx] = cur_iou_bev;
+}
+
+__global__ void nms_kernel(const int boxes_num, const float nms_overlap_thresh,
+                           const float *boxes, unsigned long long *mask){
+    //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
+    //params: mask (N, N/THREADS_PER_BLOCK_NMS)
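+    // Bitmask NMS: each (row, col) block compares a tile of up to THREADS_PER_BLOCK_NMS row-boxes
+    // against a tile of col-boxes; every row thread writes one 64-bit word whose set bits mark the
+    // col-tile boxes whose BEV IoU with its box exceeds nms_overlap_thresh.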
+
+    const int row_start = blockIdx.y;
+    const int col_start = blockIdx.x;
+
+    // if (row_start > col_start) return;
+
+    const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+    const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+
+    __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
+
+    if (threadIdx.x < col_size) {
+        block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
+        block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
+        block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
+        block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
+        block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
+        block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
+        block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
+    }
+    __syncthreads();
+
+    if (threadIdx.x < row_size) {
+        const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
+        const float *cur_box = boxes + cur_box_idx * 7;
+
+        int i = 0;
+        unsigned long long t = 0;
+        int start = 0;
+        if (row_start == col_start) {
+          start = threadIdx.x + 1;
+        }
+        for (i = start; i < col_size; i++) {
+            if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){
+                t |= 1ULL << i;
+            }
+        }
+        const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+        mask[cur_box_idx * col_blocks + col_start] = t;
+    }
+}
+
+__global__ void tmpfunc(const int boxes_num, const float nms_overlap_thresh,
+                            const float *reg,  const float* height,  const float* dim, const float* rot, const int* indexs,  unsigned long long *mask,float* block_boxes) {
+    const int row_start = blockIdx.y;
+    const int col_start = blockIdx.x;
+
+    // if (row_start > col_start) return;
+    const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+    const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+
+
+    if (row_start + col_start == 0 && threadIdx.x < col_size) {
+        const int col_actual_idx = indexs[THREADS_PER_BLOCK_NMS * col_start + threadIdx.x];
+
+        block_boxes[threadIdx.x * 7 + 0] = reg[col_actual_idx ];
+        block_boxes[threadIdx.x * 7 + 1] = reg[OUTPUT_H * OUTPUT_W + col_actual_idx];
+        block_boxes[threadIdx.x * 7 + 2] = height[col_actual_idx];
+        block_boxes[threadIdx.x * 7 + 3] = dim[col_actual_idx];
+        block_boxes[threadIdx.x * 7 + 4] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H];
+        block_boxes[threadIdx.x * 7 + 5] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H * 2];
+        float theta = atan2f(rot[col_actual_idx], rot[col_actual_idx + OUTPUT_W * OUTPUT_H]);
+        block_boxes[threadIdx.x * 7 + 6] = theta;
+    }
+}
+
+__global__ void raw_nms_kernel(const int boxes_num, const float nms_overlap_thresh,
+                            const float *reg,  const float* height,  const float* dim, const float* rot, const int* indexs,  unsigned long long *mask){
+    //params: mask (N, N/THREADS_PER_BLOCK_NMS)
+
+    const int row_start = blockIdx.y;
+    const int col_start = blockIdx.x;
+
+
+    // if (row_start > col_start) return;
+    const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+    const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+
+    __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
+
+    if (threadIdx.x < col_size) {
+        const int col_actual_idx = indexs[THREADS_PER_BLOCK_NMS * col_start + threadIdx.x];
+        const int xIdx = col_actual_idx % OUTPUT_W;
+        const int yIdx = col_actual_idx / OUTPUT_W;
+
+        // encode the box in KITTI-style (N, 7) [x, y, z, dy, dx, dz, heading] layout; x and y are the heatmap cell index plus the regressed offset, mapped back to metric coordinates
+        block_boxes[threadIdx.x * 7 + 0] = (reg[col_actual_idx ]+xIdx)*OUT_SIZE_FACTOR*X_STEP + X_MIN;
+        block_boxes[threadIdx.x * 7 + 1] = (reg[OUTPUT_H * OUTPUT_W + col_actual_idx] + yIdx ) * OUT_SIZE_FACTOR*Y_STEP + Y_MIN;
+        block_boxes[threadIdx.x * 7 + 2] = height[col_actual_idx];
+        block_boxes[threadIdx.x * 7 + 4] = dim[col_actual_idx];
+        block_boxes[threadIdx.x * 7 + 3] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H];
+        block_boxes[threadIdx.x * 7 + 5] = dim[col_actual_idx + OUTPUT_W * OUTPUT_H * 2];
+        float theta = atan2f(rot[col_actual_idx], rot[col_actual_idx + OUTPUT_W * OUTPUT_H]);
+        theta = -theta - 3.1415926/2;
+        block_boxes[threadIdx.x * 7 + 6] = theta;
+    }
+    __syncthreads();
+
+    if (threadIdx.x < row_size) {
+        const int row_actual_idx = indexs[THREADS_PER_BLOCK_NMS * row_start + threadIdx.x];
+        const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
+        const int xIdx = row_actual_idx % OUTPUT_W;
+        const int yIdx = row_actual_idx / OUTPUT_W;
+
+        // encode the box in KITTI-style (N, 7) [x, y, z, dy, dx, dz, heading] layout; x and y are the heatmap cell index plus the regressed offset, mapped back to metric coordinates
+        float cur_box[7];
+        cur_box[0] = (reg[row_actual_idx ]+xIdx)*OUT_SIZE_FACTOR*X_STEP + X_MIN;
+        cur_box[1] = (reg[OUTPUT_H * OUTPUT_W + row_actual_idx] + yIdx ) * OUT_SIZE_FACTOR*Y_STEP + Y_MIN;
+        cur_box[2] = height[row_actual_idx];
+        cur_box[4] = dim[row_actual_idx];
+        cur_box[3] = dim[row_actual_idx + OUTPUT_W * OUTPUT_H];
+        cur_box[5] = dim[row_actual_idx + OUTPUT_W * OUTPUT_H * 2];
+        float theta  = atan2f(rot[row_actual_idx], rot[row_actual_idx + OUTPUT_W * OUTPUT_H]);
+        theta = -theta - 3.1415926/2;
+        cur_box[6] = theta;
+
+        // const float *cur_box = boxes + cur_box_idx * 7;
+
+        int i = 0;
+        unsigned long long t = 0;
+        int start = 0;
+        if (row_start == col_start) {
+          start = threadIdx.x + 1;
+        }
+        for (i = start; i < col_size; i++) {
+            if (iou_bev(cur_box, block_boxes + i * 7) > nms_overlap_thresh){
+                t |= 1ULL << i;
+            }
+        }
+
+        const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+        // e.g. with cur_box_idx = 21, col_start = 0, row_start = 0, threadIdx.x = 21: record which of the up to 64 boxes in this column tile overlap box 21
+        mask[cur_box_idx * col_blocks + col_start] = t; 
+    }
+}
+
+
+
+
+__device__ inline float iou_normal(float const * const a, float const * const b) {
+    //params: a: [x, y, z, dx, dy, dz, heading]
+    //params: b: [x, y, z, dx, dy, dz, heading]
+
+    float left = fmaxf(a[0] - a[3] / 2, b[0] - b[3] / 2), right = fminf(a[0] + a[3] / 2, b[0] + b[3] / 2);
+    float top = fmaxf(a[1] - a[4] / 2, b[1] - b[4] / 2), bottom = fminf(a[1] + a[4] / 2, b[1] + b[4] / 2);
+    float width = fmaxf(right - left, 0.f), height = fmaxf(bottom - top, 0.f);
+    float interS = width * height;
+    float Sa = a[3] * a[4];
+    float Sb = b[3] * b[4];
+    return interS / fmaxf(Sa + Sb - interS, EPS);
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////////BEGIN////////////////////////////////////////////////////////////////////////////////////////////
+
+__global__   void boxAssignKernel(float* reg, float* height , float* dim, float*rot,float* boxes, float*score, int* label,  float* out_score, int*out_label,
+                                                                         int* validIndexs , int output_h , int output_w) {
+        int boxId = blockIdx.x;
+        int channel = threadIdx.x;
+        int idx = validIndexs[boxId];
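+        // One block per kept box and one thread per output channel: threads 0-6 write the decoded
+        // box fields and thread 8 copies the class label (the score channel is commented out below).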
+        if (channel ==0 )
+                boxes[boxId * 7 + 0] = reg[idx ];
+        else if (channel == 1)
+                boxes[boxId * 7 + 1] = reg[idx + output_w * output_h];
+        else if (channel == 2)
+                boxes[boxId * 7 + 2] = height[idx];
+        else if (channel == 3)
+                boxes[boxId * 7 + 3] = dim[idx];
+        else if (channel == 4)
+                boxes[boxId * 7 + 4] = dim[idx + output_h * output_w];
+        else if (channel == 5)
+                boxes[boxId * 7 + 5] = dim[idx + 2 * output_w * output_h];
+        else if (channel == 6){
+                float theta = atan2f(rot[0*output_h*output_w + idx], rot[1*output_h*output_w + idx]);
+                theta = -theta - 3.1415926/2;
+                boxes[boxId * 7 + 6] = theta; }
+        // else if(channel == 7)
+                // out_score[boxId] = score[idx];
+        else if(channel == 8)
+                out_label[boxId] = label[idx];
+}
+void _box_assign_launcher(float* reg, float* height , float* dim, float*rot, float* boxes, float*score, int* label,  float* out_score, int*out_label,
+int* validIndexs ,int boxSize,  int output_h, int output_w) {
+                                                        boxAssignKernel<<< boxSize, 9 >>> (reg, height , dim ,rot, boxes, score, label,  out_score, out_label, validIndexs, output_h, output_w);
+                                                        }
+
+__global__ void indexAssign(int* indexs) {
+    int yIdx = blockIdx.x;
+    int xIdx = threadIdx.x;
+    int idx = yIdx * blockDim.x + xIdx;
+    indexs[idx] = idx;
+}
+
+void _index_assign_launcher(int* indexs, int output_h, int output_w) {
+    indexAssign<<<output_h, output_w>>>(indexs);
+}
+
+// compute how many scores are valid 
+struct is_greater{
+    is_greater(float thre) : _thre(thre) { }
+    __host__ __device__ 
+    bool operator()(const float &x) {
+        return x>= _thre;
+    }
+    float _thre;
+};
+struct is_odd
+{
+    __host__ __device__ 
+    bool operator()(const int &x) 
+    {
+        return true ;
+    }
+};
+
+
+
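+// Counts scores above a hard-coded 0.1 threshold with atomicAdd (the `thre` argument is unused);
+// the host helper _find_valid_score_num below relies on thrust::count_if instead.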
+__global__ void _find_valid_score_numKernel_(float* score, float* thre, float* N) 
+{
+    int yIdx = blockIdx.x;
+    int xIdx = threadIdx.x;
+    int idx = yIdx * blockDim.x + xIdx;
+    if (score[idx] >= 0.1) 
+     atomicAdd(N, 1.0);
+}
+
+int _find_valid_score_num(float* score, float thre, int output_h, int output_w) 
+{
+    // thrust::device_vector<float> score_vec(score,score + output_h * output_w);
+    return thrust::count_if(thrust::device, score, score + output_h * output_w,  is_greater(thre));
+    // return thrust::count_if(thrust::device, score_vec.begin(),score_vec.end(),is_greater(thre));
+}
+
+
+
+
+void _sort_by_key(float* keys, int* values,int size) {
+
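+        // Fill `values` with 0..size-1, then sort the scores in descending order while permuting
+        // the indices alongside them, yielding the score-ranked ordering of heatmap cells.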
+        thrust::sequence(thrust::device, values, values+size);
+        // size = OUTPUT_H * OUTPUT_W;
+        thrust::sort_by_key(thrust::device, keys, keys + size,   values,  thrust::greater<float>());
+
+}
+
+
+void _gather_all(float* host_boxes, int* host_label, 
+                                float* reg, float* height, float* dim, float* rot,  float* sorted_score, int32_t* label,  
+                                int* dev_indexs, long* host_keep_indexs,  int boxSizeBef, int boxSizeAft) 
+{
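+    // Gather the kept detections into channel-major host arrays:
+    // host_boxes = [x | y | z | dim0 | dim1 | dim2 | rot0 | rot1 | score], each block of length boxSizeAft.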
+
+    // copy keep_indexs from host to device
+    // int* tmp_keep_indexs = static_cast<int*>(host_keep_indexs);
+    thrust::device_vector<long> dev_keep_indexs(host_keep_indexs, host_keep_indexs + boxSizeAft);
+    // thrust::host_vector<long> host_keep_indexs_vec(host_keep_indexs,host_keep_indexs+boxSizeAft);
+    // // thrust::copy(host_keep_indexs,host_keep_indexs+boxSizeAft, dev_keep_indexs.begin());
+    // thrust::copy(host_keep_indexs_vec.begin(), host_keep_indexs_vec.end(), dev_keep_indexs.begin());
+    // gather keeped indexs after nms
+    thrust::device_vector<int> dev_indexs_bef(dev_indexs, dev_indexs + boxSizeBef);
+    thrust::device_vector<int> dev_indexs_aft(boxSizeAft);
+    thrust::gather(dev_keep_indexs.begin(), dev_keep_indexs.end(),
+                                  dev_indexs_bef.begin(),
+                                  dev_indexs_aft.begin());
+    // gather boxes, score, label
+    thrust::device_vector<float> tmp_boxes(boxSizeAft * 9);
+    thrust::device_vector<int> tmp_label(boxSizeAft);
+    // gather x, y 
+    thrust::device_vector<float> reg_vec(reg,reg+OUTPUT_H * OUTPUT_W * 2);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), reg_vec.begin(),tmp_boxes.begin());
+    thrust::gather(dev_indexs_aft.begin(), dev_indexs_aft.end(), reg_vec.begin() + OUTPUT_W * OUTPUT_H, tmp_boxes.begin() + boxSizeAft);
+    // gather height 
+    thrust::device_vector<float> height_vec(height, height + OUTPUT_H * OUTPUT_W);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), height_vec.begin(),tmp_boxes.begin() + 2 * boxSizeAft);
+    // gather  dim
+    thrust::device_vector<float> dim_vec(dim, dim + 3 * OUTPUT_H * OUTPUT_W);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), dim_vec.begin() + OUTPUT_W * OUTPUT_H * 0,tmp_boxes.begin() + 3 * boxSizeAft);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), dim_vec.begin() + OUTPUT_W * OUTPUT_H * 1,tmp_boxes.begin() + 4 * boxSizeAft);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), dim_vec.begin() + OUTPUT_W * OUTPUT_H * 2,tmp_boxes.begin() + 5 * boxSizeAft);
+    // gather rotation
+    thrust::device_vector<float> rot_vec(rot, rot + 2 * OUTPUT_H * OUTPUT_W);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), rot_vec.begin() + OUTPUT_W * OUTPUT_H * 0,tmp_boxes.begin() + 6 * boxSizeAft);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), rot_vec.begin() + OUTPUT_W * OUTPUT_H * 1,tmp_boxes.begin() + 7 * boxSizeAft);
+    // gather score
+    thrust::device_vector<float> sorted_score_vec(sorted_score, sorted_score + 1 * OUTPUT_H * OUTPUT_W);
+    thrust::gather(dev_keep_indexs.begin(),dev_keep_indexs.end(), sorted_score_vec.begin() + OUTPUT_W * OUTPUT_H * 0,tmp_boxes.begin() + 8 * boxSizeAft);
+    // gather label
+    thrust::device_vector<int> label_vec(label, label + 1 * OUTPUT_H * OUTPUT_W);
+    thrust::gather(dev_indexs_aft.begin(),dev_indexs_aft.end(), label_vec.begin() + OUTPUT_W * OUTPUT_H * 0, tmp_label.begin());
+
+    // copy values from device => host 
+    // host_boxes = tmp_boxes;
+    // host_label = tmp_label;
+    thrust::copy(tmp_boxes.begin(), tmp_boxes.end(), host_boxes);
+    thrust::copy(tmp_label.begin(),tmp_label.end(), host_label);
+
+
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////////////END//////////////////////////////////////////////////////////////////////////////////////////
+
+
+__global__ void nms_normal_kernel(const int boxes_num, const float nms_overlap_thresh,
+                          const float* boxes, unsigned long long *mask){
+    //params: boxes (N, 7) [x, y, z, dx, dy, dz, heading]
+    //params: mask (N, N/THREADS_PER_BLOCK_NMS)
+
+    const int row_start = blockIdx.y;
+    const int col_start = blockIdx.x;
+
+    // if (row_start > col_start) return;
+
+    const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+    const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
+
+    __shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
+
+    if (threadIdx.x < col_size) {
+        block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
+        block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
+        block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
+        block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
+        block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
+        block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
+        block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
+    }
+    __syncthreads();
+
+    if (threadIdx.x < row_size) {
+        const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
+        const float *cur_box = boxes + cur_box_idx * 7;
+
+        int i = 0;
+        unsigned long long t = 0;
+        int start = 0;
+        if (row_start == col_start) {
+          start = threadIdx.x + 1;
+        }
+        for (i = start; i < col_size; i++) {
+            if (iou_normal(cur_box, block_boxes + i * 7) > nms_overlap_thresh){
+                t |= 1ULL << i;
+            }
+        }
+        const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+        mask[cur_box_idx * col_blocks + col_start] = t;
+    }
+}
+
+
+
+
+
+void boxesoverlapLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_overlap){
+
+    dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
+    dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
+
+    boxes_overlap_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_overlap);
+#ifdef DEBUG
+    cudaDeviceSynchronize();  // for using printf in kernel function
+#endif
+}
+
+void boxesioubevLauncher(const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
+
+    dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
+    dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
+
+    boxes_iou_bev_kernel<<<blocks, threads>>>(num_a, boxes_a, num_b, boxes_b, ans_iou);
+#ifdef DEBUG
+    cudaDeviceSynchronize();  // for using printf in kernel function
+#endif
+}
+
+
+
+
+
+void nmsLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
+    dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
+                DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
+    dim3 threads(THREADS_PER_BLOCK_NMS);
+    
+    nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
+}
+
+
+void nmsNormalLauncher(const float *boxes, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
+    dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
+                DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
+    dim3 threads(THREADS_PER_BLOCK_NMS);
+    nms_normal_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
+}
+
+void rawNmsLauncher(const float *reg, const float* height, const float* dim, const float* rot, const int* indexs, unsigned long long * mask, int boxes_num, float nms_overlap_thresh){
+    dim3 blocks(DIVUP(boxes_num, THREADS_PER_BLOCK_NMS),
+                DIVUP(boxes_num, THREADS_PER_BLOCK_NMS));
+    dim3 threads(THREADS_PER_BLOCK_NMS);
+    raw_nms_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, reg,height,dim,rot, indexs, mask);
+}
+
+
+int _raw_nms_gpu(const float* reg,  const float* height, const float* dim , const float* rot,
+                                     const int* indexs, long* host_keep_data,unsigned long long* mask_cpu, unsigned long long* remv_cpu,
+                                      int boxes_num,  float nms_overlap_thresh){
+    // params boxes: (N, 7) [x, y, z, dx, dy, dz, heading]
+    // params keep: (N)
+
+    // int boxes_num = boxes.size(0);
+    // const float * boxes_data = boxes.data<float>();
+    // long * keep_data = keep.data<long>();
+
+
+
+
+    const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
+
+    unsigned long long *mask_data = NULL;
+    cudaMalloc((void**)&mask_data, boxes_num * col_blocks * sizeof(unsigned long long));
+    rawNmsLauncher(reg, height, dim, rot, indexs, mask_data, boxes_num, nms_overlap_thresh);
+
+    // unsigned long long mask_cpu[boxes_num * col_blocks];
+    // unsigned long long *mask_cpu = new unsigned long long [boxes_num * col_blocks];
+    // std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks);
+
+//    printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks);
+    cudaMemcpy(mask_cpu, mask_data, boxes_num * col_blocks * sizeof(unsigned long long),
+                           cudaMemcpyDeviceToHost);
+
+// TODO : CUT HERE ! ! !
+    cudaFree(mask_data);
+
+    // unsigned long long remv_cpu[col_blocks];
+    // memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long));
+
+    memset(remv_cpu, 0 , col_blocks * sizeof(unsigned long long ));
+    int num_to_keep = 0;
+
+    for (int i = 0; i < boxes_num; i++){
+        int nblock = i / THREADS_PER_BLOCK_NMS;
+        int inblock = i % THREADS_PER_BLOCK_NMS;
+
+        if (!(remv_cpu[nblock] & (1ULL << inblock))){
+            host_keep_data[num_to_keep++] = i;
+            for (int j = nblock; j < col_blocks; j++){
+                remv_cpu[j] |= mask_cpu[ i * col_blocks + j];
+            }
+        }
+    }
+
+    cudaError_t err = cudaGetLastError();
+    if ( cudaSuccess != err ) printf( "CUDA Error: %s\n", cudaGetErrorString(err) );
+    return num_to_keep;
+}
+
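
For reference, the mask filled by raw_nms_kernel / nms_normal_kernel stores, for every box i, one 64-bit word per column block whose bit j flags an overlap above nms_overlap_thresh with box (block * THREADS_PER_BLOCK_NMS + j); the loop at the end of _raw_nms_gpu then walks the boxes in score order and keeps those not yet suppressed. A minimal host-only sketch of that greedy pass, assuming THREADS_PER_BLOCK_NMS is 64 as the 64-bit mask words imply:

    #include <vector>

    // Greedy keep pass over a precomputed overlap bitmask (boxes assumed sorted by score).
    // mask holds boxes_num * col_blocks words, col_blocks = ceil(boxes_num / 64).
    static std::vector<int> GreedyKeep(const std::vector<unsigned long long>& mask, int boxes_num)
    {
        const int col_blocks = (boxes_num + 63) / 64;
        std::vector<unsigned long long> remv(col_blocks, 0ULL);
        std::vector<int> keep;
        for (int i = 0; i < boxes_num; ++i) {
            const int nblock = i / 64, inblock = i % 64;
            if (!(remv[nblock] & (1ULL << inblock))) {     // box i is still alive
                keep.push_back(i);
                for (int j = nblock; j < col_blocks; ++j)  // suppress everything it overlaps
                    remv[j] |= mask[i * col_blocks + j];
            }
        }
        return keep;
    }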

+ 471 - 0
src/detection/CenterPoint-master/src/main.cpp

@@ -0,0 +1,471 @@
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// close lidar_ufk_pda
+#include "argsParser.h"
+#include "buffers.h"
+#include "common.h"
+#include "logger.h"
+#include "parserOnnxConfig.h"
+#include "NvInfer.h"
+#include <cuda_runtime_api.h>
+#include <cstdlib>
+#include <map>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <sys/time.h>
+#include <chrono>
+#include "preprocess.h"
+#include "postprocess.h"
+#include "scatter_cuda.h"
+#include "centerpoint.h"
+#include "utils.h"
+#include <pcl/point_cloud.h>
+#include <pcl/point_types.h>
+#include <pcl/io/io.h>
+#include <pcl/io/pcd_io.h>
+#include "xmlparam.h"
+#include "modulecomm.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include <thread>
+#include "objectarray.pb.h"
+
+const std::string gSampleName = "TensorRT.sample_onnx_centerpoint";
+
+
+CenterPoint * centerpoint = nullptr ;
+
+iv::Ivfault *gfault = nullptr;
+iv::Ivlog *givlog = nullptr;
+
+int gnothavedatatime = 0;
+std::thread * gpthread;
+
+void * gpa;
+void * gpdetect;
+
+
+string lidarname = "lidar_pointpillar";
+//string lidarname = "lidar_pc";
+string detectname = "lidar_track";
+
+void PclXYZITToArray(
+        const pcl::PointCloud<pcl::PointXYZI>::Ptr& in_pcl_pc_ptr,
+        float* out_points_array, const float normalizing_factor) {
+    for (size_t i = 0; i < in_pcl_pc_ptr->size(); ++i) {
+        pcl::PointXYZI point = in_pcl_pc_ptr->at(i);
+        out_points_array[i * 5 + 0] = point.x;
+        out_points_array[i * 5 + 1] = point.y;
+        out_points_array[i * 5 + 2] = point.z;
+        out_points_array[i * 5 + 3] =
+                static_cast<float>(point.intensity / normalizing_factor);
+        out_points_array[i * 5 + 4] = 0;
+
+//        std::cout<<"xyz="<<point.x<<point.y<<point.z<<std::endl;
+
+
+        std::cout<<"the intensity = "<< out_points_array[i * 5 + 3]<< std::endl;
+    }
+}
+
+
+void GetLidarObj(std::vector<Box> &predResult,iv::lidar::objectarray & lidarobjvec)
+{
+    //    givlog->verbose("OBJ","object size is %d",obj_size);
+    for(size_t idx = 0; idx < predResult.size(); idx++)
+    {
+        iv::lidar::lidarobject lidarobj;
+
+        Box result = predResult[idx];
+
+        if (result.score < 0.2) continue;
+
+        std::cout<<" The scores = "<<result.score<<std::endl;
+
+        lidarobj.set_tyaw(result.theta);
+        iv::lidar::PointXYZ centroid;
+        iv::lidar::PointXYZ * _centroid;
+        centroid.set_x(result.x);
+        centroid.set_y(result.y);
+        centroid.set_z(result.z);
+        _centroid = lidarobj.mutable_centroid();
+        _centroid->CopyFrom(centroid);
+
+        iv::lidar::PointXYZ min_point;
+        iv::lidar::PointXYZ * _min_point;
+        min_point.set_x(0);
+        min_point.set_y(0);
+        min_point.set_z(0);
+        _min_point = lidarobj.mutable_min_point();
+        _min_point->CopyFrom(min_point);
+
+        iv::lidar::PointXYZ max_point;
+        iv::lidar::PointXYZ * _max_point;
+        max_point.set_x(0);
+        max_point.set_y(0);
+        max_point.set_z(0);
+        _max_point = lidarobj.mutable_max_point();
+        _max_point->CopyFrom(max_point);
+
+        iv::lidar::PointXYZ position;
+        iv::lidar::PointXYZ * _position;
+        position.set_x(result.x);
+        position.set_y(result.y);
+        position.set_z(result.z);
+        _position = lidarobj.mutable_position();
+        _position->CopyFrom(position);
+
+        lidarobj.set_mntype(result.cls);
+
+        lidarobj.set_score(result.score);
+        lidarobj.add_type_probs(result.score);
+
+        iv::lidar::PointXYZI point_cloud;
+        iv::lidar::PointXYZI * _point_cloud;
+        point_cloud.set_x(result.x);
+        point_cloud.set_y(result.y);
+        point_cloud.set_z(result.z);
+        point_cloud.set_i(0);
+
+        _point_cloud = lidarobj.add_cloud();
+        _point_cloud->CopyFrom(point_cloud);
+
+        iv::lidar::Dimension ld;
+        iv::lidar::Dimension * pld;
+        ld.set_x(result.l);
+        ld.set_y(result.w);
+        ld.set_z(result.h);
+        pld = lidarobj.mutable_dimensions();
+        pld->CopyFrom(ld);
+
+        iv::lidar::lidarobject * po = lidarobjvec.add_obj();
+        po->CopyFrom(lidarobj);
+    }
+
+}
+
+
+void DectectOnePCD(const pcl::PointCloud<pcl::PointXYZI>::Ptr &pc_ptr)
+{
+
+    std::shared_ptr<float> points_array_ptr = std::shared_ptr<float>(new float[pc_ptr->size() * POINT_DIM]);
+    PclXYZITToArray(pc_ptr, points_array_ptr.get(), 255.0);
+    int pointsnum = pc_ptr->width;
+    std::vector<Box> predResult;
+
+    centerpoint->infer(points_array_ptr.get(),pointsnum,predResult);
+
+
+    std::cout<<"obj size is "<<predResult.size()<<std::endl;
+
+    //    std::vector<iv::lidar::lidarobject> lidarobjvec;
+    iv::lidar::objectarray lidarobjvec;
+    GetLidarObj(predResult,lidarobjvec);
+
+    double timex = pc_ptr->header.stamp;
+    timex = timex/1000.0;
+    lidarobjvec.set_timestamp(pc_ptr->header.stamp);
+
+    int ntlen;
+    std::string out = lidarobjvec.SerializeAsString();
+    //   char * strout = lidarobjtostr(lidarobjvec,ntlen);
+    iv::modulecomm::ModuleSendMsg(gpdetect,out.data(),out.length());
+    givlog->verbose("length is %d",out.length());
+
+    //std::cout<<"time is "<<(QDateTime::currentMSecsSinceEpoch() % 1000)<<" "<<xTime.elapsed()<<std::endl;
+    gfault->SetFaultState(0, 0, "ok");
+    std::cout<<"points num is "<<pointsnum<<std::endl;
+}
+
+ // read source lidar_pointcloud
+//void ListenPointCloud(const char *strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+//{
+//    //    std::cout<<" is  ok  ------------  "<<std::endl;
+
+//    std::cout<<"ListenPointCloud is  ok  ------------  "<<std::endl;
+
+//    if(nSize <=16)return;
+//    unsigned int * pHeadSize = (unsigned int *)strdata;
+//    if(*pHeadSize > nSize)
+//    {
+//        givlog->verbose("ListenPointCloud data is small headsize = %d, data size is %d", *pHeadSize, nSize);
+//        std::cout<<"ListenPointCloud data is small headsize ="<<*pHeadSize<<"  data size is"<<nSize<<std::endl;
+//    }
+
+//    gnothavedatatime = 0;
+//    QTime xTime;
+//    xTime.start();
+
+//    pcl::PointCloud<pcl::PointXYZI>::Ptr point_cloud(
+//                new pcl::PointCloud<pcl::PointXYZI>());
+//    int nNameSize;
+//    nNameSize = *pHeadSize - 4-4-8;
+//    char * strName = new char[nNameSize+1];strName[nNameSize] = 0;
+//    std::shared_ptr<char> str_ptr;
+//    str_ptr.reset(strName);
+//    memcpy(strName,(char *)((char *)strdata +4),nNameSize);
+//    point_cloud->header.frame_id = strName;
+//    memcpy(&point_cloud->header.seq,(char *)strdata+4+nNameSize,4);
+//    memcpy(&point_cloud->header.stamp,(char *)strdata+4+nNameSize+4,8);
+//    int nPCount = (nSize - *pHeadSize)/sizeof(pcl::PointXYZI);
+//    int i;
+//    pcl::PointXYZI * p;
+//    p = (pcl::PointXYZI *)((char *)strdata + *pHeadSize);
+//    for(i=0;i<nPCount;i++)
+//    {
+//        pcl::PointXYZI xp;
+//        memcpy(&xp,p,sizeof(pcl::PointXYZI));
+//        xp.z = xp.z;
+//        point_cloud->push_back(xp);
+//        p++;
+//    }
+
+//    DectectOnePCD(point_cloud);
+
+//    std::cout<<"ListenPointCloud is  end  ------------  "<<std::endl;
+//}
+
+// read segmentation_cnn output lidar_pointcloud
+void ListenPointCloud(const char *strdata,const unsigned int nSize,const unsigned int index,const QDateTime * dt,const char * strmemname)
+{
+    iv::lidar::objectarray lidarobjvec;
+    std::string in;
+    in.append(strdata,nSize);
+    lidarobjvec.ParseFromString(in);
+
+    pcl::PointCloud<pcl::PointXYZI>::Ptr point_cloud(
+                new pcl::PointCloud<pcl::PointXYZI>());
+    for(int i=0; i<lidarobjvec.obj_size();i++){
+        iv::lidar::lidarobject lidarobj = lidarobjvec.obj(i);
+        if(lidarobj.type_name()!="car" && lidarobj.type_name()!="pedestrian")
+            continue;
+        for(int j=0;j<lidarobj.cloud_size();j++){
+//            iv::lidar::PointXYZI Point = lidarobj.cloud(j);
+            pcl::PointXYZI xp;
+            xp.x = lidarobj.cloud(j).x();
+            xp.y = lidarobj.cloud(j).y();
+            xp.z = lidarobj.cloud(j).z();
+            xp.intensity = lidarobj.cloud(j).i();
+            point_cloud->push_back(xp);
+        }
+    }
+    DectectOnePCD(point_cloud);
+
+    std::cout<<"ListenPointCloud is  end  ------------  "<<std::endl;
+}
+
+//!
+//! \brief Prints the help information for running this sample
+//!
+void printHelpInfo()
+{
+    std::cout
+        << "Usage: ./centerpoint [-h or --help]"
+        << std::endl;
+    std::cout << "--help          Display help information" << std::endl;
+    std::cout << "--filePath       Specify path to a data directory. "
+              << std::endl;
+    std::cout << "--savePath       Specify path to a directory where you want to save detection results."
+              << std::endl;
+
+    std::cout << "--loadEngine       Build engines from previously serialized engine files instead of onnx files. Provide this argument only when you also provide "
+    "the paths to those engine files; otherwise you will need to provide paths to onnx files. "
+              << std::endl;
+
+    std::cout << "--pfeOnnxPath       Specify path to pfe onnx model. This option can be used when you want to create engine from onnx file. "
+              << std::endl;
+    std::cout << "--rpnOnnxPath       Specify path to rpn onnx model. This option can be used when you want to create engine from onnx file. "
+              << std::endl;      
+    std::cout << "--pfeEnginePath       Specify path to pfe engine model. This option can be used when you want to create engine from serialized engine file you previously generated. "
+              << std::endl;
+    std::cout << "--rpnEnginePath       Specify path to rpn engine model. This option can be used when you want to create engine from serialized engine file you previously generated.  "
+              << std::endl;   
+
+    std::cout << "--fp16       Provide this argument only when you want to run inference in fp16 mode; note that this option is only valid when you create engines from onnx files. "
+              << std::endl;
+
+    std::cout << "--useDLACore=N  Specify a DLA engine for layers that support DLA. Value can range from 0 to n-1, "
+                 "where n is the number of DLA engines on the platform; by default it is set to -1."
+              << std::endl;
+    
+}
+
+
+bool gbstate = true;
+void statethread()
+{
+    int nstate = 0;
+    int nlaststate = 0;
+    while (gbstate)
+    {
+        std::this_thread::sleep_for(std::chrono::milliseconds(10));
+        if(gnothavedatatime < 100000) gnothavedatatime++;
+
+        if (gnothavedatatime  < 100){
+            nstate = 0;
+        }
+        if (gnothavedatatime > 1000)
+        {
+            nstate = 1;
+        }
+        if (gnothavedatatime > 6000)
+        {
+            nstate = 2;
+        }
+        if (nstate != nlaststate) {
+            switch (nstate) {
+            case 0:
+                givlog->info("detection_lidar_pointpillar is ok");
+                gfault->SetFaultState(0,0,"data is ok.");
+                break;
+            case 1:
+                givlog->info(" more than 10 seconds not have lidar pointcloud.");
+                gfault->SetFaultState(1,1,"more than 10 seconds not have lidar pointcloud.");
+                break;
+            case 2:
+                givlog->info(" more than 60 seconds not have lidar pointcloud.");
+                gfault->SetFaultState(2,2, "more than 60 seconds not have lidar pointcloud.");
+                break;
+            default:
+                break;
+            }
+        }
+    }
+}
+
+void exitfunc()
+{
+    gbstate = false;
+    gpthread->join();
+    std::cout<<" state thread closed."<<std::endl;
+    iv::modulecomm::Unregister(gpa);
+    iv::modulecomm::Unregister(gpdetect);
+    std::cout<<"exit func complete"<<std::endl;
+}
+
+int main(int argc, char** argv)
+{
+    gfault = new iv::Ivfault("lidar_centerpoint");
+    givlog = new iv::Ivlog("lidar_centerpoint");
+    gfault->SetFaultState(0,0,"centerpoint initialize. ");
+
+//    samplesCommon::Args args;
+//    bool argsOK = samplesCommon::parseArgs(args, argc, argv);
+//    if (!argsOK)
+//    {
+//        sample::gLogError << "Invalid arguments" << std::endl;
+//        printHelpInfo();
+//        return EXIT_FAILURE;
+//    }
+//    if (args.help)
+//    {
+//        printHelpInfo();
+//        return EXIT_SUCCESS;
+//    }
+
+    auto sampleTest = sample::gLogger.defineTest(gSampleName, argc, argv);
+    sample::gLogger.reportTestStart(sampleTest);
+
+    ///////////////////////////////////////////////////////////////PARAM INITIALIZATION///////////////////////////////////////////////////////////////
+    Params params;
+    // initialize sample parameters 
+//    params.pfeOnnxFilePath =  args.pfeOnnxPath;
+//    params.rpnOnnxFilePath =  args.rpnOnnxPath;
+//    params.pfeSerializedEnginePath = args.pfeEnginePath;
+//    params.rpnSerializedEnginePath = args.rpnEnginePath;
+//    params.savePath = args.savePath;
+//    params.filePaths=glob(args.filePath + "/seq_*.bin");
+//    params.fp16 = args.runInFp16;
+//    params.load_engine = args.loadEngine;
+
+    params.pfeOnnxFilePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/pfe_baseline32000.onnx";
+    params.rpnOnnxFilePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/rpn_baseline.onnx";
+    params.pfeSerializedEnginePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/pfe_fp.engine";
+    params.rpnSerializedEnginePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/rpn_fp.engine";
+    params.savePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/results";
+    string filePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/lidars";
+    params.filePaths=glob(filePath + "/seq_*.bin");
+    params.fp16 = true;
+    params.load_engine = true;
+
+
+    // Input Output Names, according to TASK_NUM
+    params.pfeInputTensorNames.push_back("input.1");
+    params.rpnInputTensorNames.push_back("input.1");
+    params.pfeOutputTensorNames.push_back("47");
+
+    params.rpnOutputTensorNames["regName"]  = {"246"};
+    params.rpnOutputTensorNames["rotName"] = {"258"};
+    params.rpnOutputTensorNames["heightName"]={"250"};
+    params.rpnOutputTensorNames["dimName"] = {"264"};
+    params.rpnOutputTensorNames["scoreName"] = {"265"};
+    params.rpnOutputTensorNames["clsName"] = {"266"};
+
+
+    // Attrs
+    //params.dlaCore = args.useDLACore;
+    params.dlaCore = -1;
+    params.batch_size = 1;
+
+    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+    // std::string savePath = "/home/wanghao/Desktop/projects/notebooks/centerpoint_output_cpp" ;
+    //CenterPoint sample(params);
+
+    centerpoint = new CenterPoint(params);
+
+    sample::gLogInfo << "Building and running a GPU inference engine for CenterPoint" << std::endl;
+    if (!centerpoint->engineInitlization())
+    {
+        sample::gLogError << "sample build error" << std::endl;
+        return sample::gLogger.reportFail(sampleTest);
+    }
+
+
+//    if (!centerpoint->testinfer())
+//    {
+//        return sample::gLogger.reportFail(sampleTest);
+//    }
+
+
+    gpa = iv::modulecomm::RegisterRecv(&lidarname[0],ListenPointCloud);
+    gpdetect = iv::modulecomm::RegisterSend(&detectname[0], 10000000,1);
+
+//    gpthread = new std::thread(statethread);
+//    iv::ivexit::RegIVExitCall(exitfunc);
+
+    sample::gLogger.reportPass(sampleTest);
+
+
+    while(1)
+    {
+        std::this_thread::sleep_for(std::chrono::milliseconds(100));
+    }
+
+    return 1;
+}
+
+
+
+
+
+
+

+ 319 - 0
src/detection/CenterPoint-master/src/postprocess.cpp

@@ -0,0 +1,319 @@
+#include "preprocess.h"
+#include "postprocess.h"
+#include "centerpoint.h"
+#include <string>
+#include <sys/time.h>
+#include <chrono>
+#include <thread>
+#include <vector>
+#include <math.h>
+#include "buffers.h"
+#include "common.h"
+#include <iou3d_nms.h>
+#include <stdio.h>
+#include <thrust/sort.h>
+#include <thrust/sequence.h>
+#include <thrust/execution_policy.h>
+#include <thrust/device_vector.h>
+
+inline void RotateAroundCenter(Box& box, float (&corner)[4][2], float& cosVal, float& sinVal, float (&cornerANew)[4][2]){
+    
+    for(auto idx = 0; idx < 4; idx++){
+        auto x = corner[idx][0];
+        auto y = corner[idx][1];
+
+        cornerANew[idx][0] = (x - box.x) * cosVal + (y - box.y) * (-sinVal) + box.x;
+        cornerANew[idx][1] = (x - box.x) * sinVal + (y - box.y) * cosVal + box.y;
+    }
+}
+inline void FindMaxMin(float (&box)[4][2], float& maxVAl, float& minVAl, int xyIdx){
+    
+    maxVAl = box[0][xyIdx];
+    minVAl = box[0][xyIdx];
+    
+    for(auto idx=0; idx < 4; idx++){
+        if (maxVAl < box[idx][xyIdx])
+            maxVAl = box[idx][xyIdx];
+
+        if (minVAl > box[idx][xyIdx])
+            minVAl = box[idx][xyIdx];
+    }
+}
+
+inline void AlignBox(float (&cornerRot)[4][2], float (&cornerAlign)[2][2]){
+
+    float maxX = 0;
+    float minX = 0;
+    float maxY = 0;
+    float minY = 0;
+
+    FindMaxMin(cornerRot, maxX, minX, 0); // 0 means X
+    FindMaxMin(cornerRot, maxY, minY, 1); // 1 means Y
+
+    cornerAlign[0][0] = minX;
+    cornerAlign[0][1] = minY;
+    cornerAlign[1][0] = maxX;
+    cornerAlign[1][1] = maxY;
+}
+
+inline float IoUBev(Box& boxA, Box& boxB){
+   
+    float ax1 = boxA.x - boxA.l/2;
+    float ax2 = boxA.x + boxA.l/2;
+    float ay1 = boxA.y - boxA.w/2;
+    float ay2 = boxA.y + boxA.w/2;
+
+    float bx1 = boxB.x - boxB.l/2;
+    float bx2 = boxB.x + boxB.l/2;
+    float by1 = boxB.y - boxB.w/2;
+    float by2 = boxB.y + boxB.w/2;
+
+    float cornerA[4][2] = {{ax1, ay1}, {ax1, ay2},
+                         {ax2, ay1}, {ax2, ay2}};
+    float cornerB[4][2] = {{bx1, by1}, {bx1, by2},
+                         {bx2, by1}, {bx2, by2}};
+    
+    float cornerARot[4][2] = {0};
+    float cornerBRot[4][2] = {0};
+
+    float cosA = cos(boxA.theta), sinA = sin(boxA.theta);
+    float cosB = cos(boxB.theta), sinB = sin(boxB.theta);
+
+    RotateAroundCenter(boxA, cornerA, cosA, sinA, cornerARot);
+    RotateAroundCenter(boxB, cornerB, cosB, sinB, cornerBRot);
+
+    float cornerAlignA[2][2] = {0};
+    float cornerAlignB[2][2] = {0};
+
+    AlignBox(cornerARot, cornerAlignA);
+    AlignBox(cornerBRot, cornerAlignB);
+    
+    float sBoxA = (cornerAlignA[1][0] - cornerAlignA[0][0]) * (cornerAlignA[1][1] - cornerAlignA[0][1]);
+    float sBoxB = (cornerAlignB[1][0] - cornerAlignB[0][0]) * (cornerAlignB[1][1] - cornerAlignB[0][1]);
+    
+    float interW = std::min(cornerAlignA[1][0], cornerAlignB[1][0]) - std::max(cornerAlignA[0][0], cornerAlignB[0][0]);
+    float interH = std::min(cornerAlignA[1][1], cornerAlignB[1][1]) - std::max(cornerAlignA[0][1], cornerAlignB[0][1]);
+    
+    float sInter = std::max(interW, 0.0f) * std::max(interH, 0.0f);
+    float sUnion = sBoxA + sBoxB - sInter;
+    
+    return sInter/sUnion;
+}
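
Note that IoUBev above is an axis-aligned approximation: each box's corners are rotated by its own heading and then collapsed back to an axis-aligned hull by AlignBox, so the overlap is computed between hulls rather than the exact rotated rectangles. A small usage sketch (Box fields as used in this file; the values are illustrative only):

    // Illustrative check of the approximation (assumes Box is an aggregate, as its use above suggests).
    static void IoUBevExample()
    {
        Box a{};
        a.x = 1.0f; a.y = 2.0f; a.z = 0.0f;
        a.l = 4.0f; a.w = 2.0f; a.h = 1.5f;
        a.theta = 0.3f;

        Box b = a;
        float iouSame = IoUBev(a, b); // identical boxes -> 1.0
        b.x += 100.0f;
        float iouFar  = IoUBev(a, b); // disjoint boxes  -> 0.0
        (void)iouSame; (void)iouFar;
    }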
+
+void AlignedNMSBev(std::vector<Box>& predBoxs){
+    
+    if(predBoxs.size() == 0)
+        return;
+
+    std::sort(predBoxs.begin(),predBoxs.end(),[ ](Box& box1, Box& box2){return box1.score > box2.score;});
+
+    auto boxSize = predBoxs.size() > INPUT_NMS_MAX_SIZE? INPUT_NMS_MAX_SIZE : predBoxs.size();
+    int numBoxValid = 0;
+    for(auto boxIdx1 =0; boxIdx1 < boxSize; boxIdx1++){
+        
+        if (numBoxValid >= OUTPUT_NMS_MAX_SIZE) {
+            for(auto boxIdx2 = boxIdx1+1; boxIdx2 < boxSize; boxIdx2++) 
+                predBoxs[boxIdx2].isDrop = true;
+            break;
+        }
+        if (predBoxs[boxIdx1].isDrop) continue;
+
+
+        if (predBoxs[boxIdx1].x >X_CENTER_MAX || predBoxs[boxIdx1].x < X_CENTER_MIN ) {
+            predBoxs[boxIdx1].isDrop = true;
+            continue;
+        } 
+        if (predBoxs[boxIdx1].y >Y_CENTER_MAX || predBoxs[boxIdx1].y < Y_CENTER_MIN ) {
+            predBoxs[boxIdx1].isDrop = true;
+            continue;
+        } 
+
+        if (predBoxs[boxIdx1].z >Z_CENTER_MAX || predBoxs[boxIdx1].z < Z_CENTER_MIN ) {
+            predBoxs[boxIdx1].isDrop = true;
+            continue;
+        } 
+
+        for(auto boxIdx2 = boxIdx1+1; boxIdx2 < boxSize; boxIdx2++){
+            if(predBoxs[boxIdx2].isDrop == true)
+                continue;
+            if(IoUBev(predBoxs[boxIdx1], predBoxs[boxIdx2]) > NMS_THREAHOLD)
+                predBoxs[boxIdx2].isDrop = true;
+        } 
+        if (!predBoxs[boxIdx1].isDrop) numBoxValid ++;
+    }
+}
+
+void postprocess(samplesCommon::BufferManager * buffers, std::vector<Box>& predResult){
+
+
+// #define REG_CHANNEL 2
+// #define HEIGHT_CHANNEL 1
+// #define ROT_CHANNEL 2
+// // #define VEL_CHANNEL 2 //don't defined in waymo
+// #define DIM_CHANNEL 3
+
+    std::vector<std::string> regName{   "246"};
+    std::vector<std::string> rotName{   "258"};
+
+    std::vector<std::string> heightName{"250"};
+    std::vector<std::string> dimName{   "264"};
+
+    std::vector<std::string> scoreName{ "265"};
+    std::vector<std::string> clsName{   "266"};
+    for (int taskIdx=0;taskIdx < TASK_NUM;taskIdx++){
+        std::vector<Box> predBoxs;
+        float* reg = static_cast<float*>(buffers->getHostBuffer(regName[taskIdx]));
+        float* height = static_cast<float*>(buffers->getHostBuffer(heightName[taskIdx]));
+        float* rot = static_cast<float*>(buffers->getHostBuffer(rotName[taskIdx]));
+        // float* vel = static_cast<float*>(buffers.getHostBuffer(velName[taskIdx]));
+        float* dim = static_cast<float*>(buffers->getHostBuffer(dimName[taskIdx]));
+        float* score = static_cast<float*>(buffers->getHostBuffer(scoreName[taskIdx]));
+        int32_t* cls = static_cast<int32_t*>(buffers->getHostBuffer(clsName[taskIdx]));
+
+        int cnt = 0;
+        for(size_t yIdx=0; yIdx < OUTPUT_H; yIdx++){
+            for(size_t xIdx=0; xIdx < OUTPUT_W; xIdx++){
+                auto idx = yIdx* OUTPUT_W + xIdx;
+                if(score[idx] < SCORE_THREAHOLD)
+                    continue;
+                
+                float x = (xIdx + reg[0*OUTPUT_H*OUTPUT_W + idx])*OUT_SIZE_FACTOR*X_STEP + X_MIN;
+                float y = (yIdx + reg[1*OUTPUT_H*OUTPUT_W + idx])*OUT_SIZE_FACTOR*Y_STEP + Y_MIN;
+                float z = height[idx];
+
+                if(x < X_MIN || x > X_MAX || y < Y_MIN || y > Y_MAX || z < Z_MIN || z > Z_MAX)
+                    continue;
+                cnt ++;
+                Box box;
+                box.x = x;
+                box.y = y;
+                box.z = z;
+                box.l = dim[0*OUTPUT_H*OUTPUT_W + idx];
+                box.h = dim[1*OUTPUT_H*OUTPUT_W + idx];
+                box.w = dim[2*OUTPUT_H*OUTPUT_W + idx];
+                box.theta = atan2(rot[0*OUTPUT_H*OUTPUT_W + idx], rot[1*OUTPUT_H*OUTPUT_W + idx]);
+
+                // box.velX = vel[0*OUTPUT_H*OUTPUT_W+idx];
+                // box.velY = vel[1*OUTPUT_H*OUTPUT_W+idx];
+                // box.theta = box.theta - PI /2;
+
+                box.score = score[idx];
+                box.cls = cls[idx] ; 
+                box.isDrop = false;
+                predBoxs.push_back(box);
+            }
+        }
+        std::cout << " Num boxes before nms " << cnt << "\n";
+
+        AlignedNMSBev(predBoxs);
+        for(auto idx =0; idx < predBoxs.size(); idx++){
+            if(!predBoxs[idx].isDrop)
+                predResult.push_back(predBoxs[idx]);
+        }
+        std::cout << " Num boxes after nms " << predResult.size() << "\n";
+    }
+}
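
The inner loop above decodes each heat-map cell back to metric coordinates: the cell index plus its regression offset is scaled by OUT_SIZE_FACTOR and the voxel step, then shifted by the range minimum, and the heading is recovered from the predicted sine/cosine pair. A compact sketch of the same formulas, using the constants from config.h referenced in the loop:

    #include <cmath>

    struct DecodedCenter { float x, y, theta; };

    // Same decode as the loop above, for a single heat-map cell (xIdx, yIdx).
    static DecodedCenter DecodeCell(int xIdx, int yIdx,
                                    float regX, float regY,      // reg channel 0 / 1 at this cell
                                    float rotSin, float rotCos)  // rot channel 0 / 1 at this cell
    {
        DecodedCenter c;
        c.x = (xIdx + regX) * OUT_SIZE_FACTOR * X_STEP + X_MIN;
        c.y = (yIdx + regY) * OUT_SIZE_FACTOR * Y_STEP + Y_MIN;
        c.theta = atan2f(rotSin, rotCos);
        return c;
    }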
+
+
+
+
+void postprocessGPU(samplesCommon::BufferManager * buffers,
+                                                 std::vector<Box>& predResult ,
+                                                 std::map<std::string, std::vector<string>>rpnOutputTensorNames,
+                                                 int* dev_score_indexs,
+                                                 unsigned long long* mask_cpu,
+                                                 unsigned long long* remv_cpu, 
+                                        
+                                                 int* host_score_indexs,
+                                                 long* host_keep_data,
+                                                 float* host_boxes,
+                                                 int* host_label
+                                                 )
+{
+
+    
+    for (size_t taskIdx = 0; taskIdx < TASK_NUM; taskIdx++){
+        std::vector<Box> predBoxs;
+        float* reg = static_cast<float*>(buffers->getDeviceBuffer(rpnOutputTensorNames["regName"][taskIdx]));
+        float* height = static_cast<float*>(buffers->getDeviceBuffer(rpnOutputTensorNames["heightName"][taskIdx]));
+        float* rot = static_cast<float*>(buffers->getDeviceBuffer(rpnOutputTensorNames["rotName"][taskIdx]));
+        float* dim = static_cast<float*>(buffers->getDeviceBuffer(rpnOutputTensorNames["dimName"][taskIdx]));
+        float* score = static_cast<float*>(buffers->getDeviceBuffer(rpnOutputTensorNames["scoreName"][taskIdx]));
+        int32_t* cls = static_cast<int32_t*>(buffers->getDeviceBuffer(rpnOutputTensorNames["clsName"][taskIdx]));
+        
+
+        // cudaStream_t stream;
+        // GPU_CHECK(cudaStreamCreate(&stream));
+        int boxSize = _find_valid_score_num( score, SCORE_THREAHOLD, OUTPUT_H , OUTPUT_W);
+        std::cout << " Num boxes before " << boxSize <<"\n";
+
+        _sort_by_key(score, dev_score_indexs, OUTPUT_W * OUTPUT_H);
+
+        boxSize = boxSize > INPUT_NMS_MAX_SIZE ? INPUT_NMS_MAX_SIZE : boxSize;
+        // int boxSizeAft = raw_nms_gpu(reg,  height, dim , rot, dev_score_indexs, 
+        //                                                 host_keep_data, boxSize,  NMS_THREAHOLD);
+        int boxSizeAft = _raw_nms_gpu(reg,  height, dim , rot, dev_score_indexs, 
+                                                     host_keep_data, mask_cpu, remv_cpu,  boxSize,  NMS_THREAHOLD);
+
+
+
+
+        boxSizeAft = boxSizeAft > OUTPUT_NMS_MAX_SIZE ? OUTPUT_NMS_MAX_SIZE : boxSizeAft;
+        std::cout << " Num boxes after " <<boxSizeAft << "\n";
+
+
+        // GPU_CHECK(cudaMemcpy(host_keep_data, dev_keep_data, boxSizeAft * sizeof(long), cudaMemcpyDeviceToHost));
+
+        _gather_all(host_boxes, host_label, 
+                               reg, height, dim,rot, score, cls, dev_score_indexs, host_keep_data,
+                                boxSize,  boxSizeAft );
+                                
+
+        GPU_CHECK(cudaMemcpy(host_score_indexs, dev_score_indexs, boxSize * sizeof(int), cudaMemcpyDeviceToHost));
+        for(auto i =0; i < boxSizeAft; i++){
+            int ii = host_keep_data[i];
+            // std::cout <<i<< ", "<<ii<<", \n";
+            int idx = host_score_indexs[ii];
+            int xIdx = idx % OUTPUT_W;
+            int yIdx = idx / OUTPUT_W;
+            Box box;
+
+            box.x = (host_boxes[i  + 0 * boxSizeAft] + xIdx) *OUT_SIZE_FACTOR*X_STEP + X_MIN;
+            box.y = (host_boxes[i  + 1 * boxSizeAft] + yIdx) * OUT_SIZE_FACTOR*Y_STEP + Y_MIN;
+            box.z = host_boxes[i +  2 * boxSizeAft];
+            box.l = host_boxes[i +  3 * boxSizeAft];
+            box.h = host_boxes[i + 4 * boxSizeAft];
+            box.w = host_boxes[i + 5 * boxSizeAft];
+            float theta_s =host_boxes[i + 6 * boxSizeAft];
+            float theta_c =host_boxes[i + 7 * boxSizeAft];
+            box.theta = atan2(theta_s, theta_c);
+            box.score  = host_boxes[i + 8 * boxSizeAft];
+            box.cls = host_label[i];
+            box.velX = idx;
+            box.velY = 0;
+            predResult.push_back(box);
+        }
+
+    }
+
+}
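
As a reading aid, the host_boxes buffer consumed above is laid out as nine contiguous blocks of boxSizeAft floats each (inferred from the indexing in the loop and the gather into tmp_boxes earlier in iou3d_nms.cu):

    // host_boxes layout, one block of boxSizeAft values per quantity:
    //   block 0: reg x offset   block 1: reg y offset   block 2: z (height)
    //   block 3: dim l          block 4: dim h          block 5: dim w
    //   block 6: sin(theta)     block 7: cos(theta)     block 8: score
    inline float HostBoxValue(const float* host_boxes, int boxSizeAft, int block, int i)
    {
        return host_boxes[block * boxSizeAft + i];
    }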
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+

+ 211 - 0
src/detection/CenterPoint-master/src/preprocess.cpp

@@ -0,0 +1,211 @@
+#include"preprocess.h"
+#include <string>
+#include <sys/time.h>
+#include <chrono>
+#include <thread>
+#include <vector>
+#include "logger.h"
+#include "iostream"
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include "common.h"
+
+ #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
+
+
+// feature : voxels,  indices : coords 
+void PreprocessWorker(float* points, float* feature, int* indices, int pointNum, int threadIdx, int pillarsPerThread, int pointDim ){
+    // 0 ~ MAX_PIONT_IN_PILLARS
+
+    unsigned short pointCount[MAX_PILLARS] = {0};
+    // 0 ~ MAX_PILLARS
+    int pillarsIndices[BEV_W*BEV_H] = {0};
+    for(size_t idx = 0; idx < BEV_W*BEV_H; idx++){
+        pillarsIndices[idx] = -1;}
+
+    int pillarCount = threadIdx*pillarsPerThread;
+    for(int idx = 0; idx < pointNum; idx++){
+        float x = points[idx*pointDim];
+        float y = points[idx*pointDim+1];
+        float z = points[idx*pointDim+2];
+
+ 
+        if(pillarCount> MAX_PILLARS - 1)
+           continue;
+
+        if(x < X_MIN || x > X_MAX || y < Y_MIN || y > Y_MAX || 
+           z < Z_MIN || z > Z_MAX)
+           continue;
+
+        int xIdx = int((x-X_MIN)/X_STEP);
+        int yIdx = int((y-Y_MIN)/Y_STEP);
+        
+        if(xIdx % THREAD_NUM != threadIdx)
+            continue;
+
+        // get Real Index of voxels
+        int pillarIdx = yIdx*BEV_W+xIdx;
+        // pillarCountIdx default is -1 
+        auto pillarCountIdx = pillarsIndices[pillarIdx];
+
+        // pillarCountIdx is the pillar slot actually used, assigned in the order in which pillars first receive points
+        if(pillarCountIdx == -1){
+            pillarCountIdx = pillarCount;
+            // indices[pillarCount*2] = pillarIdx;
+            pillarsIndices[pillarIdx] = pillarCount;
+            indices[pillarCount] = pillarIdx;
+            ++pillarCount;
+        }
+
+
+        // pointNumInPillar default is 0
+        auto pointNumInPillar = pointCount[pillarCountIdx];
+
+       if(pointNumInPillar > MAX_PIONT_IN_PILLARS - 1)
+           continue;
+
+
+        feature[     pillarCountIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointNumInPillar* FEATURE_NUM] = x;
+        feature[1 +  pillarCountIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointNumInPillar* FEATURE_NUM] = y;
+        feature[2 +  pillarCountIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointNumInPillar* FEATURE_NUM] = z; // z
+        feature[3 +  pillarCountIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointNumInPillar* FEATURE_NUM] = points[idx*pointDim+3]; // intensity
+        feature[4 +  pillarCountIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointNumInPillar* FEATURE_NUM] = points[idx*pointDim+4]; // time_lag
+        feature[8 +  pillarCountIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointNumInPillar* FEATURE_NUM] = x - (xIdx*X_STEP + X_MIN + X_STEP/2); //  x residual to geometric center
+        feature[9 +  pillarCountIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointNumInPillar* FEATURE_NUM] = y - (yIdx*Y_STEP + Y_MIN + Y_STEP/2); //  y residual to geometric center
+
+        ++pointNumInPillar;
+        pointCount[pillarCountIdx] = pointNumInPillar;
+
+    }
+
+    for(int pillarIdx = threadIdx*pillarsPerThread; pillarIdx < (threadIdx+1)*pillarsPerThread; pillarIdx++)
+    {
+        float xCenter = 0;
+        float yCenter = 0;
+        float zCenter = 0;
+        auto pointNum = pointCount[pillarIdx];
+        for(int pointIdx=0; pointIdx < pointNum; pointIdx++)
+        {
+            
+            auto x = feature[       pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM];
+            auto y = feature[1 + pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM];
+            auto z = feature[2 + pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM];
+            xCenter += x;
+            yCenter += y;
+            zCenter += z;
+        }
+
+        if (pointNum > 0) {
+        xCenter = xCenter / pointNum;
+        yCenter = yCenter / pointNum;
+        zCenter = zCenter / pointNum;
+        }
+
+        
+        for(int pointIdx=0; pointIdx < pointNum; pointIdx++)
+        {    
+
+            auto x = feature[       pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM];
+            auto y = feature[1 + pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM];
+            auto z = feature[2 + pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM];
+
+
+            feature[5 + pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM] = x - xCenter; // x offest from cluster center 
+            feature[6 + pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM] = y - yCenter; // y offset ...
+            feature[7 + pillarIdx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ pointIdx* FEATURE_NUM] = z - zCenter; // z offset ...
+        }
+    }
+}
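
PreprocessWorker fills FEATURE_NUM values per point; the layout implied by the offsets above (assuming FEATURE_NUM is 10, consistent with the highest offset used) is sketched below:

    // Per-point feature layout written by PreprocessWorker (and mirrored by PointAssignKernel).
    // Offsets are taken from the indexing above; FEATURE_NUM is assumed to be 10.
    enum PointFeature : int {
        F_X          = 0,  // raw x
        F_Y          = 1,  // raw y
        F_Z          = 2,  // raw z
        F_INTENSITY  = 3,  // normalized intensity
        F_TIME_LAG   = 4,  // time lag (0 for a single sweep)
        F_DX_CLUSTER = 5,  // x offset from the pillar's point mean
        F_DY_CLUSTER = 6,  // y offset from the pillar's point mean
        F_DZ_CLUSTER = 7,  // z offset from the pillar's point mean
        F_DX_CENTER  = 8,  // x residual to the pillar's geometric center
        F_DY_CENTER  = 9   // y residual to the pillar's geometric center
    };
    // Feature k of point p in pillar slot s lives at:
    //   feature[s * MAX_PIONT_IN_PILLARS * FEATURE_NUM + p * FEATURE_NUM + k]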
+
+
+void preprocess(float* points, float* feature, int* indices, int pointNum, int pointDim =5)
+{
+
+    if (MAX_PILLARS%THREAD_NUM) {
+        sample::gLogError<< "THREAD_NUM SHOULD EXACTLY DIVIDE MAX_PILLARS ! " << std::endl;
+        return ; 
+    }
+    int size_ =  BEV_W * BEV_H;
+    for(int idx=0; idx< static_cast<int>(MAX_PILLARS); idx++){
+        indices[idx] = -1;
+    }
+
+
+    for(int idx=0; idx<static_cast<int>( MAX_PILLARS)*static_cast<int>(FEATURE_NUM)*static_cast<int>(MAX_PIONT_IN_PILLARS); idx++){
+        feature[idx] = 0;
+    }
+
+
+
+
+    std::vector<std::thread> threadPool;
+    for(int idx=0; idx <static_cast<int>(  THREAD_NUM); idx++){
+        std::thread worker(PreprocessWorker,
+                                             points,
+                                             feature,
+                                             indices,
+                                             pointNum,
+                                             idx,
+                                             MAX_PILLARS/THREAD_NUM,
+                                             pointDim
+                                             );
+        
+        threadPool.push_back(std::move(worker));
+    }
+
+    for(auto idx=0; idx < THREAD_NUM; idx++){
+        threadPool[idx].join();
+    }
+
+
+
+}
+
+
+void preprocessGPU(float* dev_points, float* feature,int* indices,
+ bool* p_mask, int* p_bev_idx, int* p_point_num_assigned, int* bev_voxel_idx, float* v_point_sum, int* v_range, int* v_point_num,
+int pointNum, int pointDim = 5)
+{
+    pointNum = pointNum > MAX_POINTS ? MAX_POINTS : pointNum;
+    
+    GPU_CHECK(cudaMemset(feature, 0, MAX_PILLARS * MAX_PIONT_IN_PILLARS * FEATURE_NUM * sizeof(float)));
+
+    _preprocess_gpu( dev_points, feature, indices, 
+    p_mask, p_bev_idx,  p_point_num_assigned,  bev_voxel_idx, v_point_sum,  v_range,  v_point_num,
+     pointNum);
+
+}
+
+
+bool readBinFile(std::string& filename, void*& bufPtr, int& pointNum, int pointDim)
+{
+    // open the file:
+    std::streampos fileSize;
+    std::ifstream file(filename, std::ios::binary);
+    
+    if (!file) {
+        sample::gLogError << "[Error] Open file " << filename << " failed" << std::endl;
+        return false;
+    }
+    // get its size:
+    file.seekg(0, std::ios::end);
+    fileSize = file.tellg();
+    file.seekg(0, std::ios::beg);
+    
+    bufPtr = malloc(fileSize);
+    if(bufPtr == nullptr){
+        sample::gLogError << "[Error] Malloc Memory Failed! Size: " << fileSize << std::endl;
+        return false;
+    }
+    // read the data:
+    file.read((char*) bufPtr, fileSize);
+    file.close();
+    
+    pointNum = fileSize /sizeof(float) / pointDim;
+    if( fileSize /sizeof(float) % pointDim != 0){
+         sample::gLogError << "[Error] File Size Error! " << fileSize << std::endl;
+    }
+    sample::gLogInfo << "[INFO] pointNum : " << pointNum << std::endl;
+    return true;
+}
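
A minimal usage sketch tying readBinFile and the CPU preprocess together (the input file name is hypothetical; buffer sizes follow the constants from config.h used above):

    #include <cstdlib>
    #include <string>
    #include <vector>

    void PreprocessOneFile()
    {
        void* buf = nullptr;
        int pointNum = 0;
        std::string file = "seq_0.bin";  // hypothetical 5-float-per-point dump
        if (!readBinFile(file, buf, pointNum, 5)) return;

        std::vector<float> feature(MAX_PILLARS * MAX_PIONT_IN_PILLARS * FEATURE_NUM);
        std::vector<int>   indices(MAX_PILLARS);
        preprocess(static_cast<float*>(buf), feature.data(), indices.data(), pointNum, 5);
        free(buf);                       // readBinFile allocates the buffer with malloc
    }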

+ 262 - 0
src/detection/CenterPoint-master/src/preprocess.cu

@@ -0,0 +1,262 @@
+#include <iostream>
+#include <stdio.h>
+#include <vector>
+#include <thrust/sort.h>
+#include <thrust/sequence.h>
+#include <thrust/device_vector.h>
+#include <thrust/host_vector.h>
+#include <thrust/gather.h>
+#include <thrust/transform.h>
+#include <thrust/count.h>
+#include <config.h>
+#include <preprocess.h>
+#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
+
+
+
+// FIRST OF ALL , DEFINE  LOCK-RELATED STRUCTURE
+struct Lock
+{
+    int *mutex;
+    Lock()
+    {
+        int state = 0;
+        cudaMalloc((void**)&mutex, sizeof(int));
+        cudaMemcpy(mutex, &state, sizeof(int),cudaMemcpyHostToDevice);
+    }
+    ~Lock()
+    {
+        cudaFree(mutex);
+    }
+    __device__ void lock()
+    {
+        while(atomicCAS(mutex,0,1) !=0);
+    }
+    __device__ void unlock()
+    {
+        atomicExch(mutex,0);
+    }
+};
+
+
+
+
+
+__global__ void IndiceResetKernel(int* indices) {
+    int idx = threadIdx.x + blockIdx.x * blockDim.x ;
+    if(idx < MAX_PILLARS) 
+        indices[idx] = -1;
+}
+
+__global__ void Point2BEVIdxKernel (float* points, int* _PBEVIdxs,bool* _PMask, int pointNum )
+{
+    int point_idx =  threadIdx.x + blockIdx.x * blockDim.x ;
+
+    if (point_idx < pointNum)
+    {
+    float x = points[point_idx * POINT_DIM + 0];    
+    float y = points[point_idx * POINT_DIM + 1];    
+    float z = points[point_idx * POINT_DIM + 2];    
+
+    if(x >= X_MIN && x <= X_MAX && y >= Y_MIN && y <= Y_MAX && z >= Z_MIN && z <= Z_MAX) 
+    {
+        int xIdx = int((x-X_MIN)/X_STEP);
+        int yIdx = int((y-Y_MIN)/Y_STEP);
+        // get BEVIndex of voxels
+        int bevIdx = yIdx*BEV_W+xIdx;
+        _PMask[point_idx] = true;
+        _PBEVIdxs[point_idx] =  bevIdx;
+    }
+    }
+}
+
+
+__global__ void BEV2VIdxKernel (int* _VBEVIdxs, int* _VRange,int*  _BEVVoxelIdx)
+{
+    int idx =  threadIdx.x + blockIdx.x * blockDim.x ;
+    if (idx < MAX_PILLARS)
+    {
+        int bev_idx = _VBEVIdxs[idx] ;
+        if (bev_idx >= 0) 
+        {
+            int voxel_idx = _VRange[idx];
+            _BEVVoxelIdx[bev_idx] = voxel_idx+1; // Note: _BEVVoxelIdx stores valid voxel indices starting from 1 (0 means an empty cell)
+        }
+    }
+}
+
+// CAS-based equivalent of atomicAdd(address, val); returns the previous value at address
+__device__ int ReadAndAdd(int* address, int val)
+{
+    int old = *address;
+    int assumed;
+    do {
+        assumed = old;
+        old = atomicCAS(address, assumed,
+                                    val + assumed);
+    } while (assumed != old);
+    return old;
+}
+
+// Note: the commented-out function below is not valid
+// __device__ int ReadAndAdd(int* address, int val)
+// {
+//     int old = *address;
+//     int assumed = old;
+//     while (assumed == old && assumed < MAX_PIONT_IN_PILLARS);
+//     {
+//         atomicCAS(address, assumed,
+//                                     val + assumed);
+//         assumed = *address;
+//     } 
+//     return old;
+// }
+
+__global__ void CountAndSumKernel (float* points, int* _BEVVoxelIdx,  bool* _PMask, int* _PBEVIdxs, int* _PPointNumAssigned, float* _VPointSum, int* _VPointNum, int pointNum)
+{
+    
+    int point_idx =  threadIdx.x + blockIdx.x * blockDim.x ;
+    if (point_idx < pointNum && _PMask[point_idx])
+    {
+        // from xyz to bev idx
+        float x = points[point_idx * POINT_DIM + 0];    
+        float y = points[point_idx * POINT_DIM + 1];    
+        float z = points[point_idx * POINT_DIM + 2];    
+        int xIdx = int((x-X_MIN)/X_STEP);
+        int yIdx = int((y-Y_MIN)/Y_STEP);
+        // get BEVIndex of voxels
+        int bev_idx = yIdx*BEV_W+xIdx;
+        int voxel_idx = _BEVVoxelIdx[bev_idx]-1; // decode voxel_idx
+        
+        _PBEVIdxs[point_idx] = bev_idx;
+        // use threadfence() to make it sequential between blocks
+        int voxel_point_idx = ReadAndAdd(_VPointNum+voxel_idx, 1);
+        __threadfence();
+
+        if (voxel_point_idx < MAX_PIONT_IN_PILLARS) {
+            _PPointNumAssigned[point_idx] = voxel_point_idx;
+
+            atomicAdd(_VPointSum+voxel_idx*3 + 0, x);
+            __threadfence();
+            atomicAdd(_VPointSum+voxel_idx*3 + 1, y);
+            __threadfence();
+            atomicAdd(_VPointSum+voxel_idx*3 + 2, z);
+            __threadfence();        
+        }
+
+        else
+            {
+                _VPointNum[voxel_idx] = MAX_PIONT_IN_PILLARS;
+                _PMask[point_idx] = false;
+
+            }
+    }
+}
+
+__global__ void PointAssignKernel(float* points, float* feature,int* _BEVVoxelIdx, bool* _PMask,int* _PBEVIdxs, int*  _PPointNumAssigned, float* _VPointSum, int* _VPointNum,int pointNum)
+{
+    int point_idx =  threadIdx.x + blockIdx.x * blockDim.x ;
+    if (point_idx < pointNum && _PMask[point_idx])
+    {
+        // from xyz to bev idx
+        float x = points[point_idx * POINT_DIM + 0];    
+        float y = points[point_idx * POINT_DIM + 1];    
+        float z = points[point_idx * POINT_DIM + 2];    
+        int bev_idx = _PBEVIdxs[point_idx];
+        int voxel_idx = _BEVVoxelIdx[bev_idx] -1;
+        int voxel_point_idx = _PPointNumAssigned[point_idx];
+        
+        int voxel_point_num = _VPointNum[voxel_idx] ;
+        voxel_point_num = voxel_point_num > MAX_PIONT_IN_PILLARS ? MAX_PIONT_IN_PILLARS : voxel_point_num;
+        // TODO ::: 
+        if (voxel_idx>=0) 
+        {
+    
+            feature[        voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = x;
+            feature[ 1+  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = y;
+            feature[ 2+  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = z;
+            feature[ 3+  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = points[point_idx * POINT_DIM + 3];
+            feature[ 4+  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = points[point_idx * POINT_DIM + 4];
+
+            feature[ 5+  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = x - _VPointSum[voxel_idx * 3 + 0]/voxel_point_num;
+            feature[ 6+  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = y - _VPointSum[voxel_idx * 3 + 1]/voxel_point_num;
+            feature[ 7+  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = z - _VPointSum[voxel_idx * 3 + 2]/voxel_point_num;
+
+            int x_idx = bev_idx % BEV_W;
+            int y_idx = bev_idx / BEV_W;
+            feature[8 +  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = x - (x_idx*X_STEP + X_MIN + X_STEP/2); //  x residual to geometric center
+            feature[9 +  voxel_idx*MAX_PIONT_IN_PILLARS * FEATURE_NUM+ voxel_point_idx* FEATURE_NUM] = y - (y_idx*Y_STEP + Y_MIN + Y_STEP/2); //  y residual to geometric center
+        }
+    }
+}
+
+
+
+
+// void _preprocess_gpu(float* points, float* feature, int* _VBEVIdxs, int pointNum)
+void _preprocess_gpu(float* points, float* feature, int* _VBEVIdxs,
+ bool* _PMask, int* _PBEVIdxs, int* _PPointNumAssigned, int* _BEVVoxelIdx, float* _VPointSum, int* _VRange, int* _VPointNum,
+int pointNum)
+{
+
+
+    cudaMemset(_PBEVIdxs, 0, pointNum * sizeof(int));
+    cudaMemset(_PPointNumAssigned, 0, pointNum * sizeof(int));
+    cudaMemset(_PMask, 0, pointNum * sizeof(bool));
+    cudaMemset(_BEVVoxelIdx, 0, BEV_H * BEV_W * sizeof(int));
+
+    // cudaMalloc((void**)&_VPointSum, MAX_PILLARS * 3 *sizeof(float));
+    cudaMemset(_VPointSum, 0, MAX_PILLARS * 3 * sizeof(float));
+    cudaMemset(_VPointNum, 0, MAX_PILLARS *  sizeof(int));
+
+    // cudaMalloc((void**)&_VRange, MAX_PILLARS * sizeof(int));
+    // cudaMalloc((void**)&_VPointNum, MAX_PILLARS * sizeof(int));
+
+    // compute the time 
+
+
+    int threadNum= 1024;
+    int blockNum = DIVUP(pointNum,threadNum);
+    // init _VBEVIdxs
+    // IndiceResetKernel<<<DIVUP(MAX_PILLARS, threadNum), threadNum>>>(_VBEVIdxs);
+    cudaMemset(_VBEVIdxs, -1 , MAX_PILLARS * sizeof(int));
+
+    // get _PBEVIDxs, _PMask
+    Point2BEVIdxKernel<<<blockNum, threadNum>>>(points,_PBEVIdxs,_PMask, pointNum );
+
+    thrust::sort(thrust::device, _PBEVIdxs, _PBEVIdxs + pointNum, thrust::greater<int>());
+
+    thrust::unique_copy(thrust::device, _PBEVIdxs, _PBEVIdxs + pointNum , _VBEVIdxs);
+
+    thrust::sequence(thrust::device, _VRange, _VRange + MAX_PILLARS);
+
+    // map bev idx to voxel idx 
+    BEV2VIdxKernel<<<DIVUP(MAX_PILLARS, threadNum), threadNum>>>(_VBEVIdxs, _VRange, _BEVVoxelIdx);
+
+    // The Key Step 
+    CountAndSumKernel<<<blockNum, threadNum>>>(points, _BEVVoxelIdx, _PMask, _PBEVIdxs,_PPointNumAssigned,  _VPointSum, _VPointNum, pointNum);
+    PointAssignKernel<<<blockNum, threadNum>>>(points, feature, _BEVVoxelIdx, _PMask,_PBEVIdxs, _PPointNumAssigned,  _VPointSum, _VPointNum, pointNum);
+
+
+}
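
The thrust calls above implement a compaction: the point BEV indices are sorted in descending order, deduplicated into _VBEVIdxs so that every occupied BEV cell gets exactly one voxel slot, and BEV2VIdxKernel then writes voxel index + 1 into the BEV-sized lookup table (0 meaning empty). A host-side sketch of the same idea with the standard library, assuming only in-range points are passed in:

    #include <algorithm>
    #include <functional>
    #include <vector>

    // pointBevIdx: one BEV cell index per in-range point (duplicates allowed).
    // Returns a BEV-sized table mapping bev index -> voxel index + 1, with 0 meaning empty.
    static std::vector<int> BuildBevToVoxel(std::vector<int> pointBevIdx, int bevCells, int maxPillars)
    {
        std::sort(pointBevIdx.begin(), pointBevIdx.end(), std::greater<int>());
        pointBevIdx.erase(std::unique(pointBevIdx.begin(), pointBevIdx.end()), pointBevIdx.end());

        std::vector<int> bevToVoxel(bevCells, 0);
        const int voxelCount = std::min<int>((int)pointBevIdx.size(), maxPillars);
        for (int v = 0; v < voxelCount; ++v)
            bevToVoxel[pointBevIdx[v]] = v + 1;   // stored as voxel index + 1
        return bevToVoxel;
    }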
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+

+ 191 - 0
src/detection/CenterPoint-master/src/samplecenterpoint.cpp

@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "argsParser.h"
+#include "buffers.h"
+#include "common.h"
+#include "logger.h"
+#include "parserOnnxConfig.h"
+#include "NvInfer.h"
+#include <cuda_runtime_api.h>
+#include <cstdlib>
+#include <map>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <sys/time.h>
+#include <chrono>
+#include "preprocess.h"
+#include "postprocess.h"
+#include "scatter_cuda.h"
+#include "centerpoint.h"
+#include "utils.h"
+#include "xmlparam.h"
+#include "modulecomm.h"
+#include "ivfault.h"
+#include "ivlog.h"
+#include "ivexit.h"
+#include "ivversion.h"
+#include <thread>
+#include "objectarray.pb.h"
+
+const std::string gSampleName = "TensorRT.sample_onnx_centerpoint";
+
+void PclXYZITToArray(
+        const pcl::PointCloud<pcl::PointXYZI>::Ptr& in_pcl_pc_ptr,
+        float* out_points_array, const float normalizing_factor) {
+    for (size_t i = 0; i < in_pcl_pc_ptr->size(); ++i) {
+        pcl::PointXYZI point = in_pcl_pc_ptr->at(i);
+        out_points_array[i * 5 + 0] = point.x;
+        out_points_array[i * 5 + 1] = point.y;
+        out_points_array[i * 5 + 2] = point.z;
+        out_points_array[i * 5 + 3] =
+                static_cast<float>(point.intensity / normalizing_factor);
+        out_points_array[i * 5 + 4] = 0;
+    }
+}
+
+
+
+
+
+
+
+//!
+//! \brief Prints the help information for running this sample
+//!
+void printHelpInfo()
+{
+    std::cout
+        << "Usage: ./centerpoint [-h or --help]"
+        << std::endl;
+    std::cout << "--help          Display help information" << std::endl;
+    std::cout << "--filePath       Specify path to a data directory. "
+              << std::endl;
+    std::cout << "--savePath       Specify path to a directory where you want to save detection results."
+              << std::endl;
+
+    std::cout << "--loadEngine       Build engines from previously serialized engine files instead of onnx files. Provide this argument only when you also provide "
+    "the paths to those engine files; otherwise you will need to provide paths to onnx files. "
+              << std::endl;
+
+    std::cout << "--pfeOnnxPath       Specify path to pfe onnx model. This option can be used when you want to create engine from onnx file. "
+              << std::endl;
+    std::cout << "--rpnOnnxPath       Specify path to rpn onnx model. This option can be used when you want to create engine from onnx file. "
+              << std::endl;      
+    std::cout << "--pfeEnginePath       Specify path to pfe engine model. This option can be used when you want to create engine from serialized engine file you previously generated. "
+              << std::endl;
+    std::cout << "--rpnEnginePath       Specify path to rpn engine model. This option can be used when you want to create engine from serialized engine file you previously generated.  "
+              << std::endl;   
+
+    std::cout << "--fp16       Provide this argument only when you want to run inference in fp16 mode; note that this option is only valid when you create engines from onnx files. "
+              << std::endl;
+
+    std::cout << "--useDLACore=N  Specify a DLA engine for layers that support DLA. Value can range from 0 to n-1, "
+                 "where n is the number of DLA engines on the platform; by default it is set to -1."
+              << std::endl;
+    
+}
+
+int main(int argc, char** argv)
+{
+//    samplesCommon::Args args;
+//    bool argsOK = samplesCommon::parseArgs(args, argc, argv);
+//    if (!argsOK)
+//    {
+//        sample::gLogError << "Invalid arguments" << std::endl;
+//        printHelpInfo();
+//        return EXIT_FAILURE;
+//    }
+//    if (args.help)
+//    {
+//        printHelpInfo();
+//        return EXIT_SUCCESS;
+//    }
+
+    auto sampleTest = sample::gLogger.defineTest(gSampleName, argc, argv);
+    sample::gLogger.reportTestStart(sampleTest);
+
+
+
+    ///////////////////////////////////////////////////////////////PARAM INITIALIZATION///////////////////////////////////////////////////////////////
+    Params params;
+    // initialize sample parameters 
+//    params.pfeOnnxFilePath =  args.pfeOnnxPath;
+//    params.rpnOnnxFilePath =  args.rpnOnnxPath;
+//    params.pfeSerializedEnginePath = args.pfeEnginePath;
+//    params.rpnSerializedEnginePath = args.rpnEnginePath;
+//    params.savePath = args.savePath;
+//    params.filePaths=glob(args.filePath + "/seq_*.bin");
+//    params.fp16 = args.runInFp16;
+//    params.load_engine = args.loadEngine;
+
+    params.pfeOnnxFilePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/pfe_baseline32000.onnx";
+    params.rpnOnnxFilePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/rpn_baseline.onnx";
+    params.pfeSerializedEnginePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/pfe_fp.engine";
+    params.rpnSerializedEnginePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/models/rpn_fp.engine";
+    params.savePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/results";
+    string filePath = "/home/nvidia/modularization/src/detection/CenterPoint-master/lidars";
+    params.filePaths=glob(filePath + "/seq_*.bin");
+    params.fp16 = true;
+    params.load_engine = false;
+
+
+    // Input Output Names, according to TASK_NUM
+    params.pfeInputTensorNames.push_back("input.1");
+    params.rpnInputTensorNames.push_back("input.1");
+    params.pfeOutputTensorNames.push_back("47");
+
+    params.rpnOutputTensorNames["regName"]  = {"246"};
+    params.rpnOutputTensorNames["rotName"] = {"258"};
+    params.rpnOutputTensorNames["heightName"]={"250"};
+    params.rpnOutputTensorNames["dimName"] = {"264"};
+    params.rpnOutputTensorNames["scoreName"] = {"265"};
+    params.rpnOutputTensorNames["clsName"] = {"266"};
+
+
+    // Attrs
+    //params.dlaCore = args.useDLACore;
+    params.dlaCore = -1;
+    params.batch_size = 1;
+
+    ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+    // std::string savePath = "/home/wanghao/Desktop/projects/notebooks/centerpoint_output_cpp" ;
+    CenterPoint sample(params);
+    sample::gLogInfo << "Building and running a GPU inference engine for CenterPoint" << std::endl;
+    if (!sample.engineInitlization())
+    {
+        sample::gLogError << "sample build error" << std::endl;
+        return sample::gLogger.reportFail(sampleTest);
+    }
+    
+    if (!sample.infer())
+    {
+        return sample::gLogger.reportFail(sampleTest);
+    }
+
+    sample::gLogger.reportPass(sampleTest);
+    return 1;
+}
+
+
+
+
+
+
+

+ 56 - 0
src/detection/CenterPoint-master/src/scatter_cuda.cu

@@ -0,0 +1,56 @@
+/*
+ * Copyright 2018-2019 Autoware Foundation. All rights reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//headers in local files
+#include "scatter_cuda.h"
+#include <stdio.h>
+// Scatters the (num_pillars, FEATURE_NUM) PFE output into a dense
+// (FEATURE_NUM, BEV_H, BEV_W) bird's-eye-view feature map.
+// One block per pillar, one thread per feature channel.
+__global__ void scatter_kernel(int *coors, float *pfe_output, float *scattered_feature,
+                               const int FEATURE_NUM, const int BEV_H, const int BEV_W)
+{
+    int i_pillar = blockIdx.x;      // pillar index
+    int i_feature = threadIdx.x;    // feature-channel index
+    int bev_ind = coors[i_pillar];  // flattened BEV cell index, negative for empty pillar slots
+
+    if (bev_ind >= 0)
+    {
+        // pfe_output is (num_pillars, FEATURE_NUM), row-major
+        float feature = pfe_output[i_pillar * FEATURE_NUM + i_feature];
+        scattered_feature[i_feature * BEV_H * BEV_W + bev_ind] = feature;
+    }
+}
+
+
+ScatterCuda::ScatterCuda(const int NUM_THREADS, const int FEATURE_NUM, const int GRID_X_SIZE, const int GRID_Y_SIZE):
+NUM_THREADS_(NUM_THREADS),
+FEATURE_NUM_(FEATURE_NUM),
+GRID_X_SIZE_(GRID_X_SIZE),
+GRID_Y_SIZE_(GRID_Y_SIZE)
+{
+}
+
+// Launches scatter_kernel with one block per pillar and NUM_THREADS_ threads per block.
+// NUM_THREADS_ must match the number of PFE output channels (64 by default), e.g.
+// doScatterCuda(MAX_PILLARS, dev_coors_, static_cast<float*>(buffers.getHostBuffer("47")), dev_scattered_feature_).
+void ScatterCuda::doScatterCuda(const int pillar_count, int *coors, float *pfe_output, float *scattered_feature)
+{
+  scatter_kernel<<<pillar_count, NUM_THREADS_>>>(coors, pfe_output, scattered_feature,
+                                                 FEATURE_NUM_, GRID_X_SIZE_, GRID_Y_SIZE_);
+}
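The index math above is easiest to verify on the host. Below is a minimal CPU sketch of the same scatter (not part of this commit), assuming coors[i] holds the flattened BEV index row * bev_w + col of pillar i, with a negative value marking padded pillar slots:

    #include <vector>

    // Host-side reference of scatter_kernel, handy for unit-testing the CUDA path.
    void scatter_cpu(const std::vector<int>& coors,
                     const std::vector<float>& pfe_output, // num_pillars x feature_num, row-major
                     std::vector<float>& scattered,        // feature_num x bev_h x bev_w, zero-initialized
                     int feature_num, int bev_h, int bev_w)
    {
        const int num_pillars = static_cast<int>(coors.size());
        for (int p = 0; p < num_pillars; ++p)
        {
            const int bev_ind = coors[p];
            if (bev_ind < 0)
                continue; // skip padded / empty pillars
            for (int f = 0; f < feature_num; ++f)
                scattered[f * bev_h * bev_w + bev_ind] = pfe_output[p * feature_num + f];
        }
    }

Comparing scattered against the device buffer after doScatterCuda is a quick way to catch layout mistakes in FEATURE_NUM_, GRID_X_SIZE_ and GRID_Y_SIZE_.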

+ 5 - 0
src/detection/CenterPoint-master/tools/catkin_ws/catkin_make.sh

@@ -0,0 +1,5 @@
+catkin_make --cmake-args \
+            -DCMAKE_BUILD_TYPE=Release \
+            -DPYTHON_EXECUTABLE=/usr/bin/python3 \
+            -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m \
+            -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython3.6m.so

+ 166 - 0
src/detection/CenterPoint-master/tools/catkin_ws/default.rviz

@@ -0,0 +1,166 @@
+Panels:
+  - Class: rviz/Displays
+    Help Height: 78
+    Name: Displays
+    Property Tree Widget:
+      Expanded:
+        - /Global Options1
+        - /Status1
+      Splitter Ratio: 0.5
+    Tree Height: 549
+  - Class: rviz/Selection
+    Name: Selection
+  - Class: rviz/Tool Properties
+    Expanded:
+      - /2D Pose Estimate1
+      - /2D Nav Goal1
+      - /Publish Point1
+    Name: Tool Properties
+    Splitter Ratio: 0.5886790156364441
+  - Class: rviz/Views
+    Expanded:
+      - /Current View1
+    Name: Views
+    Splitter Ratio: 0.5
+  - Class: rviz/Time
+    Experimental: false
+    Name: Time
+    SyncMode: 0
+    SyncSource: PointCloud2
+Preferences:
+  PromptSaveOnExit: true
+Toolbars:
+  toolButtonStyle: 2
+Visualization Manager:
+  Class: ""
+  Displays:
+    - Alpha: 0.5
+      Cell Size: 1
+      Class: rviz/Grid
+      Color: 160; 160; 164
+      Enabled: true
+      Line Style:
+        Line Width: 0.029999999329447746
+        Value: Lines
+      Name: Grid
+      Normal Cell Count: 0
+      Offset:
+        X: 0
+        Y: 0
+        Z: 0
+      Plane: XY
+      Plane Cell Count: 10
+      Reference Frame: <Fixed Frame>
+      Value: true
+    - Class: rviz/MarkerArray
+      Enabled: true
+      Marker Topic: /waymo_3dbox
+      Name: MarkerArray
+      Namespaces:
+        "": true
+      Queue Size: 100
+      Value: true
+    - Alpha: 1
+      Autocompute Intensity Bounds: true
+      Autocompute Value Bounds:
+        Max Value: 10
+        Min Value: -10
+        Value: true
+      Axis: Z
+      Channel Name: intensity
+      Class: rviz/PointCloud2
+      Color: 255; 255; 255
+      Color Transformer: Intensity
+      Decay Time: 0
+      Enabled: true
+      Invert Rainbow: false
+      Max Color: 255; 255; 255
+      Max Intensity: 74752
+      Min Color: 0; 0; 0
+      Min Intensity: 0.0001888275146484375
+      Name: PointCloud2
+      Position Transformer: XYZ
+      Queue Size: 10
+      Selectable: true
+      Size (Pixels): 3
+      Size (m): 0.05000000074505806
+      Style: Flat Squares
+      Topic: /waymo_point_cloud
+      Unreliable: false
+      Use Fixed Frame: true
+      Use rainbow: true
+      Value: true
+    - Class: rviz/Marker
+      Enabled: true
+      Marker Topic: /waymo_ego_car
+      Name: Marker
+      Namespaces:
+        "": true
+      Queue Size: 100
+      Value: true
+  Enabled: true
+  Global Options:
+    Background Color: 48; 48; 48
+    Default Light: true
+    Fixed Frame: map
+    Frame Rate: 30
+  Name: root
+  Tools:
+    - Class: rviz/Interact
+      Hide Inactive Objects: true
+    - Class: rviz/MoveCamera
+    - Class: rviz/Select
+    - Class: rviz/FocusCamera
+    - Class: rviz/Measure
+    - Class: rviz/SetInitialPose
+      Theta std deviation: 0.2617993950843811
+      Topic: /initialpose
+      X std deviation: 0.5
+      Y std deviation: 0.5
+    - Class: rviz/SetGoal
+      Topic: /move_base_simple/goal
+    - Class: rviz/PublishPoint
+      Single click: true
+      Topic: /clicked_point
+  Value: true
+  Views:
+    Current:
+      Class: rviz/Orbit
+      Distance: 93.70480346679688
+      Enable Stereo Rendering:
+        Stereo Eye Separation: 0.05999999865889549
+        Stereo Focal Distance: 1
+        Swap Stereo Eyes: false
+        Value: false
+      Focal Point:
+        X: 0
+        Y: 0
+        Z: 0
+      Focal Shape Fixed Size: true
+      Focal Shape Size: 0.05000000074505806
+      Invert Z Axis: false
+      Name: Current View
+      Near Clip Distance: 0.009999999776482582
+      Pitch: 0.7703980207443237
+      Target Frame: <Fixed Frame>
+      Value: Orbit (rviz)
+      Yaw: 2.8003976345062256
+    Saved: ~
+Window Geometry:
+  Displays:
+    collapsed: false
+  Height: 846
+  Hide Left Dock: false
+  Hide Right Dock: false
+  QMainWindow State: 000000ff00000000fd000000040000000000000156000002b0fc0200000008fb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d000002b0000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261000000010000010f000002b0fc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d000002b0000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004f30000003efc0100000002fb0000000800540069006d00650100000000000004f3000002eb00fffffffb0000000800540069006d0065010000000000000450000000000000000000000282000002b000000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
+  Selection:
+    collapsed: false
+  Time:
+    collapsed: false
+  Tool Properties:
+    collapsed: false
+  Views:
+    collapsed: false
+  Width: 1267
+  X: 67
+  Y: 30

+ 1 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/.built_by

@@ -0,0 +1 @@
+catkin_make

+ 1 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/.catkin

@@ -0,0 +1 @@
+/home/wanghao/Desktop/projects/CP_TRT/github/CenterPointTensorRT/tools/catkin_ws/src

+ 2 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/.rosinstall

@@ -0,0 +1,2 @@
+- setup-file:
+    local-name: /home/wanghao/Desktop/projects/CP_TRT/github/CenterPointTensorRT/tools/catkin_ws/devel/setup.sh

+ 304 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/_setup_util.py

@@ -0,0 +1,304 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2012, Willow Garage, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#  * Neither the name of Willow Garage, Inc. nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
+
+from __future__ import print_function
+
+import argparse
+import copy
+import errno
+import os
+import platform
+import sys
+
+CATKIN_MARKER_FILE = '.catkin'
+
+system = platform.system()
+IS_DARWIN = (system == 'Darwin')
+IS_WINDOWS = (system == 'Windows')
+
+PATH_TO_ADD_SUFFIX = ['bin']
+if IS_WINDOWS:
+    # while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
+    # since Windows finds dll's via the PATH variable, prepend it with path to lib
+    PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
+
+# subfolder of workspace prepended to CMAKE_PREFIX_PATH
+ENV_VAR_SUBFOLDERS = {
+    'CMAKE_PREFIX_PATH': '',
+    'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
+    'PATH': PATH_TO_ADD_SUFFIX,
+    'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
+    'PYTHONPATH': 'lib/python3/dist-packages',
+}
+
+
+def rollback_env_variables(environ, env_var_subfolders):
+    """
+    Generate shell code to reset environment variables.
+
+    by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
+    This does not cover modifications performed by environment hooks.
+    """
+    lines = []
+    unmodified_environ = copy.copy(environ)
+    for key in sorted(env_var_subfolders.keys()):
+        subfolders = env_var_subfolders[key]
+        if not isinstance(subfolders, list):
+            subfolders = [subfolders]
+        value = _rollback_env_variable(unmodified_environ, key, subfolders)
+        if value is not None:
+            environ[key] = value
+            lines.append(assignment(key, value))
+    if lines:
+        lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
+    return lines
+
+
+def _rollback_env_variable(environ, name, subfolders):
+    """
+    For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
+
+    :param subfolders: list of str '' or subfoldername that may start with '/'
+    :returns: the updated value of the environment variable.
+    """
+    value = environ[name] if name in environ else ''
+    env_paths = [path for path in value.split(os.pathsep) if path]
+    value_modified = False
+    for subfolder in subfolders:
+        if subfolder:
+            if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
+                subfolder = subfolder[1:]
+            if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
+                subfolder = subfolder[:-1]
+        for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
+            path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
+            path_to_remove = None
+            for env_path in env_paths:
+                env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
+                if env_path_clean == path_to_find:
+                    path_to_remove = env_path
+                    break
+            if path_to_remove:
+                env_paths.remove(path_to_remove)
+                value_modified = True
+    new_value = os.pathsep.join(env_paths)
+    return new_value if value_modified else None
+
+
+def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
+    """
+    Based on CMAKE_PREFIX_PATH return all catkin workspaces.
+
+    :param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
+    """
+    # get all cmake prefix paths
+    env_name = 'CMAKE_PREFIX_PATH'
+    value = environ[env_name] if env_name in environ else ''
+    paths = [path for path in value.split(os.pathsep) if path]
+    # remove non-workspace paths
+    workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
+    return workspaces
+
+
+def prepend_env_variables(environ, env_var_subfolders, workspaces):
+    """Generate shell code to prepend environment variables for the all workspaces."""
+    lines = []
+    lines.append(comment('prepend folders of workspaces to environment variables'))
+
+    paths = [path for path in workspaces.split(os.pathsep) if path]
+
+    prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
+    lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
+
+    for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
+        subfolder = env_var_subfolders[key]
+        prefix = _prefix_env_variable(environ, key, paths, subfolder)
+        lines.append(prepend(environ, key, prefix))
+    return lines
+
+
+def _prefix_env_variable(environ, name, paths, subfolders):
+    """
+    Return the prefix to prepend to the environment variable NAME.
+
+    Adding any path in NEW_PATHS_STR without creating duplicate or empty items.
+    """
+    value = environ[name] if name in environ else ''
+    environ_paths = [path for path in value.split(os.pathsep) if path]
+    checked_paths = []
+    for path in paths:
+        if not isinstance(subfolders, list):
+            subfolders = [subfolders]
+        for subfolder in subfolders:
+            path_tmp = path
+            if subfolder:
+                path_tmp = os.path.join(path_tmp, subfolder)
+            # skip nonexistent paths
+            if not os.path.exists(path_tmp):
+                continue
+            # exclude any path already in env and any path we already added
+            if path_tmp not in environ_paths and path_tmp not in checked_paths:
+                checked_paths.append(path_tmp)
+    prefix_str = os.pathsep.join(checked_paths)
+    if prefix_str != '' and environ_paths:
+        prefix_str += os.pathsep
+    return prefix_str
+
+
+def assignment(key, value):
+    if not IS_WINDOWS:
+        return 'export %s="%s"' % (key, value)
+    else:
+        return 'set %s=%s' % (key, value)
+
+
+def comment(msg):
+    if not IS_WINDOWS:
+        return '# %s' % msg
+    else:
+        return 'REM %s' % msg
+
+
+def prepend(environ, key, prefix):
+    if key not in environ or not environ[key]:
+        return assignment(key, prefix)
+    if not IS_WINDOWS:
+        return 'export %s="%s$%s"' % (key, prefix, key)
+    else:
+        return 'set %s=%s%%%s%%' % (key, prefix, key)
+
+
+def find_env_hooks(environ, cmake_prefix_path):
+    """Generate shell code with found environment hooks for the all workspaces."""
+    lines = []
+    lines.append(comment('found environment hooks in workspaces'))
+
+    generic_env_hooks = []
+    generic_env_hooks_workspace = []
+    specific_env_hooks = []
+    specific_env_hooks_workspace = []
+    generic_env_hooks_by_filename = {}
+    specific_env_hooks_by_filename = {}
+    generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
+    specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
+    # remove non-workspace paths
+    workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
+    for workspace in reversed(workspaces):
+        env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
+        if os.path.isdir(env_hook_dir):
+            for filename in sorted(os.listdir(env_hook_dir)):
+                if filename.endswith('.%s' % generic_env_hook_ext):
+                    # remove previous env hook with same name if present
+                    if filename in generic_env_hooks_by_filename:
+                        i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
+                        generic_env_hooks.pop(i)
+                        generic_env_hooks_workspace.pop(i)
+                    # append env hook
+                    generic_env_hooks.append(os.path.join(env_hook_dir, filename))
+                    generic_env_hooks_workspace.append(workspace)
+                    generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
+                elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
+                    # remove previous env hook with same name if present
+                    if filename in specific_env_hooks_by_filename:
+                        i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
+                        specific_env_hooks.pop(i)
+                        specific_env_hooks_workspace.pop(i)
+                    # append env hook
+                    specific_env_hooks.append(os.path.join(env_hook_dir, filename))
+                    specific_env_hooks_workspace.append(workspace)
+                    specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
+    env_hooks = generic_env_hooks + specific_env_hooks
+    env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
+    count = len(env_hooks)
+    lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
+    for i in range(count):
+        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
+        lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
+    return lines
+
+
+def _parse_arguments(args=None):
+    parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
+    parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
+    parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
+    return parser.parse_known_args(args=args)[0]
+
+
+if __name__ == '__main__':
+    try:
+        try:
+            args = _parse_arguments()
+        except Exception as e:
+            print(e, file=sys.stderr)
+            sys.exit(1)
+
+        if not args.local:
+            # environment at generation time
+            CMAKE_PREFIX_PATH = r'/home/wanghao/Desktop/projects/CP_TRT/github/CenterPointTensorRT/tools/catkin_ws/devel;/opt/ros/melodic'.split(';')
+        else:
+            # don't consider any other prefix path than this one
+            CMAKE_PREFIX_PATH = []
+        # prepend current workspace if not already part of CPP
+        base_path = os.path.dirname(__file__)
+        # CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
+        # base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
+        if os.path.sep != '/':
+            base_path = base_path.replace(os.path.sep, '/')
+
+        if base_path not in CMAKE_PREFIX_PATH:
+            CMAKE_PREFIX_PATH.insert(0, base_path)
+        CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
+
+        environ = dict(os.environ)
+        lines = []
+        if not args.extend:
+            lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
+        lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
+        lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
+        print('\n'.join(lines))
+
+        # need to explicitly flush the output
+        sys.stdout.flush()
+    except IOError as e:
+        # and catch potential "broken pipe" if stdout is not writable
+        # which can happen when piping the output to a file but the disk is full
+        if e.errno == errno.EPIPE:
+            print(e, file=sys.stderr)
+            sys.exit(2)
+        raise
+
+    sys.exit(0)

+ 0 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/cmake.lock


+ 16 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/env.sh

@@ -0,0 +1,16 @@
+#!/usr/bin/env sh
+# generated from catkin/cmake/templates/env.sh.in
+
+if [ $# -eq 0 ] ; then
+  /bin/echo "Usage: env.sh COMMANDS"
+  /bin/echo "Calling env.sh without arguments is not supported anymore. Instead spawn a subshell and source a setup file manually."
+  exit 1
+fi
+
+# ensure to not use different shell type which was set before
+CATKIN_SHELL=sh
+
+# source setup.sh from same directory as this file
+_CATKIN_SETUP_DIR=$(cd "`dirname "$0"`" > /dev/null && pwd)
+. "$_CATKIN_SETUP_DIR/setup.sh"
+exec "$@"

+ 123 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/FrameGraph.h

@@ -0,0 +1,123 @@
+// Generated by gencpp from file tf/FrameGraph.msg
+// DO NOT EDIT!
+
+
+#ifndef TF_MESSAGE_FRAMEGRAPH_H
+#define TF_MESSAGE_FRAMEGRAPH_H
+
+#include <ros/service_traits.h>
+
+
+#include <tf/FrameGraphRequest.h>
+#include <tf/FrameGraphResponse.h>
+
+
+namespace tf
+{
+
+struct FrameGraph
+{
+
+typedef FrameGraphRequest Request;
+typedef FrameGraphResponse Response;
+Request request;
+Response response;
+
+typedef Request RequestType;
+typedef Response ResponseType;
+
+}; // struct FrameGraph
+} // namespace tf
+
+
+namespace ros
+{
+namespace service_traits
+{
+
+
+template<>
+struct MD5Sum< ::tf::FrameGraph > {
+  static const char* value()
+  {
+    return "c4af9ac907e58e906eb0b6e3c58478c0";
+  }
+
+  static const char* value(const ::tf::FrameGraph&) { return value(); }
+};
+
+template<>
+struct DataType< ::tf::FrameGraph > {
+  static const char* value()
+  {
+    return "tf/FrameGraph";
+  }
+
+  static const char* value(const ::tf::FrameGraph&) { return value(); }
+};
+
+
+// service_traits::MD5Sum< ::tf::FrameGraphRequest> should match
+// service_traits::MD5Sum< ::tf::FrameGraph >
+template<>
+struct MD5Sum< ::tf::FrameGraphRequest>
+{
+  static const char* value()
+  {
+    return MD5Sum< ::tf::FrameGraph >::value();
+  }
+  static const char* value(const ::tf::FrameGraphRequest&)
+  {
+    return value();
+  }
+};
+
+// service_traits::DataType< ::tf::FrameGraphRequest> should match
+// service_traits::DataType< ::tf::FrameGraph >
+template<>
+struct DataType< ::tf::FrameGraphRequest>
+{
+  static const char* value()
+  {
+    return DataType< ::tf::FrameGraph >::value();
+  }
+  static const char* value(const ::tf::FrameGraphRequest&)
+  {
+    return value();
+  }
+};
+
+// service_traits::MD5Sum< ::tf::FrameGraphResponse> should match
+// service_traits::MD5Sum< ::tf::FrameGraph >
+template<>
+struct MD5Sum< ::tf::FrameGraphResponse>
+{
+  static const char* value()
+  {
+    return MD5Sum< ::tf::FrameGraph >::value();
+  }
+  static const char* value(const ::tf::FrameGraphResponse&)
+  {
+    return value();
+  }
+};
+
+// service_traits::DataType< ::tf::FrameGraphResponse> should match
+// service_traits::DataType< ::tf::FrameGraph >
+template<>
+struct DataType< ::tf::FrameGraphResponse>
+{
+  static const char* value()
+  {
+    return DataType< ::tf::FrameGraph >::value();
+  }
+  static const char* value(const ::tf::FrameGraphResponse&)
+  {
+    return value();
+  }
+};
+
+} // namespace service_traits
+} // namespace ros
+
+#endif // TF_MESSAGE_FRAMEGRAPH_H

+ 174 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/FrameGraphRequest.h

@@ -0,0 +1,174 @@
+// Generated by gencpp from file tf/FrameGraphRequest.msg
+// DO NOT EDIT!
+
+
+#ifndef TF_MESSAGE_FRAMEGRAPHREQUEST_H
+#define TF_MESSAGE_FRAMEGRAPHREQUEST_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+
+namespace tf
+{
+template <class ContainerAllocator>
+struct FrameGraphRequest_
+{
+  typedef FrameGraphRequest_<ContainerAllocator> Type;
+
+  FrameGraphRequest_()
+    {
+    }
+  FrameGraphRequest_(const ContainerAllocator& _alloc)
+    {
+  (void)_alloc;
+    }
+
+
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf::FrameGraphRequest_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf::FrameGraphRequest_<ContainerAllocator> const> ConstPtr;
+
+}; // struct FrameGraphRequest_
+
+typedef ::tf::FrameGraphRequest_<std::allocator<void> > FrameGraphRequest;
+
+typedef boost::shared_ptr< ::tf::FrameGraphRequest > FrameGraphRequestPtr;
+typedef boost::shared_ptr< ::tf::FrameGraphRequest const> FrameGraphRequestConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf::FrameGraphRequest_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf::FrameGraphRequest_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+} // namespace tf
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf::FrameGraphRequest_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf::FrameGraphRequest_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf::FrameGraphRequest_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf::FrameGraphRequest_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf::FrameGraphRequest_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf::FrameGraphRequest_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf::FrameGraphRequest_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "d41d8cd98f00b204e9800998ecf8427e";
+  }
+
+  static const char* value(const ::tf::FrameGraphRequest_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xd41d8cd98f00b204ULL;
+  static const uint64_t static_value2 = 0xe9800998ecf8427eULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf::FrameGraphRequest_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf/FrameGraphRequest";
+  }
+
+  static const char* value(const ::tf::FrameGraphRequest_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf::FrameGraphRequest_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "\n"
+;
+  }
+
+  static const char* value(const ::tf::FrameGraphRequest_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf::FrameGraphRequest_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream&, T)
+    {}
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct FrameGraphRequest_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf::FrameGraphRequest_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream&, const std::string&, const ::tf::FrameGraphRequest_<ContainerAllocator>&)
+  {}
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF_MESSAGE_FRAMEGRAPHREQUEST_H

+ 196 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/FrameGraphResponse.h

@@ -0,0 +1,196 @@
+// Generated by gencpp from file tf/FrameGraphResponse.msg
+// DO NOT EDIT!
+
+
+#ifndef TF_MESSAGE_FRAMEGRAPHRESPONSE_H
+#define TF_MESSAGE_FRAMEGRAPHRESPONSE_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+
+namespace tf
+{
+template <class ContainerAllocator>
+struct FrameGraphResponse_
+{
+  typedef FrameGraphResponse_<ContainerAllocator> Type;
+
+  FrameGraphResponse_()
+    : dot_graph()  {
+    }
+  FrameGraphResponse_(const ContainerAllocator& _alloc)
+    : dot_graph(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other >  _dot_graph_type;
+  _dot_graph_type dot_graph;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf::FrameGraphResponse_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf::FrameGraphResponse_<ContainerAllocator> const> ConstPtr;
+
+}; // struct FrameGraphResponse_
+
+typedef ::tf::FrameGraphResponse_<std::allocator<void> > FrameGraphResponse;
+
+typedef boost::shared_ptr< ::tf::FrameGraphResponse > FrameGraphResponsePtr;
+typedef boost::shared_ptr< ::tf::FrameGraphResponse const> FrameGraphResponseConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf::FrameGraphResponse_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf::FrameGraphResponse_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf::FrameGraphResponse_<ContainerAllocator1> & lhs, const ::tf::FrameGraphResponse_<ContainerAllocator2> & rhs)
+{
+  return lhs.dot_graph == rhs.dot_graph;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf::FrameGraphResponse_<ContainerAllocator1> & lhs, const ::tf::FrameGraphResponse_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf::FrameGraphResponse_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf::FrameGraphResponse_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf::FrameGraphResponse_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf::FrameGraphResponse_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf::FrameGraphResponse_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf::FrameGraphResponse_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf::FrameGraphResponse_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "c4af9ac907e58e906eb0b6e3c58478c0";
+  }
+
+  static const char* value(const ::tf::FrameGraphResponse_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xc4af9ac907e58e90ULL;
+  static const uint64_t static_value2 = 0x6eb0b6e3c58478c0ULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf::FrameGraphResponse_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf/FrameGraphResponse";
+  }
+
+  static const char* value(const ::tf::FrameGraphResponse_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf::FrameGraphResponse_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "string dot_graph\n"
+"\n"
+;
+  }
+
+  static const char* value(const ::tf::FrameGraphResponse_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf::FrameGraphResponse_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.dot_graph);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct FrameGraphResponse_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf::FrameGraphResponse_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf::FrameGraphResponse_<ContainerAllocator>& v)
+  {
+    s << indent << "dot_graph: ";
+    Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + "  ", v.dot_graph);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF_MESSAGE_FRAMEGRAPHRESPONSE_H

+ 259 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf/tfMessage.h

@@ -0,0 +1,259 @@
+// Generated by gencpp from file tf/tfMessage.msg
+// DO NOT EDIT!
+
+
+#ifndef TF_MESSAGE_TFMESSAGE_H
+#define TF_MESSAGE_TFMESSAGE_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+#include <geometry_msgs/TransformStamped.h>
+
+namespace tf
+{
+template <class ContainerAllocator>
+struct tfMessage_
+{
+  typedef tfMessage_<ContainerAllocator> Type;
+
+  tfMessage_()
+    : transforms()  {
+    }
+  tfMessage_(const ContainerAllocator& _alloc)
+    : transforms(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef std::vector< ::geometry_msgs::TransformStamped_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::geometry_msgs::TransformStamped_<ContainerAllocator> >::other >  _transforms_type;
+  _transforms_type transforms;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf::tfMessage_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf::tfMessage_<ContainerAllocator> const> ConstPtr;
+
+}; // struct tfMessage_
+
+typedef ::tf::tfMessage_<std::allocator<void> > tfMessage;
+
+typedef boost::shared_ptr< ::tf::tfMessage > tfMessagePtr;
+typedef boost::shared_ptr< ::tf::tfMessage const> tfMessageConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf::tfMessage_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf::tfMessage_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf::tfMessage_<ContainerAllocator1> & lhs, const ::tf::tfMessage_<ContainerAllocator2> & rhs)
+{
+  return lhs.transforms == rhs.transforms;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf::tfMessage_<ContainerAllocator1> & lhs, const ::tf::tfMessage_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf::tfMessage_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf::tfMessage_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf::tfMessage_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf::tfMessage_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf::tfMessage_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf::tfMessage_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf::tfMessage_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "94810edda583a504dfda3829e70d7eec";
+  }
+
+  static const char* value(const ::tf::tfMessage_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0x94810edda583a504ULL;
+  static const uint64_t static_value2 = 0xdfda3829e70d7eecULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf::tfMessage_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf/tfMessage";
+  }
+
+  static const char* value(const ::tf::tfMessage_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf::tfMessage_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "geometry_msgs/TransformStamped[] transforms\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/TransformStamped\n"
+"# This expresses a transform from coordinate frame header.frame_id\n"
+"# to the coordinate frame child_frame_id\n"
+"#\n"
+"# This message is mostly used by the \n"
+"# <a href=\"http://wiki.ros.org/tf\">tf</a> package. \n"
+"# See its documentation for more information.\n"
+"\n"
+"Header header\n"
+"string child_frame_id # the frame id of the child frame\n"
+"Transform transform\n"
+"\n"
+"================================================================================\n"
+"MSG: std_msgs/Header\n"
+"# Standard metadata for higher-level stamped data types.\n"
+"# This is generally used to communicate timestamped data \n"
+"# in a particular coordinate frame.\n"
+"# \n"
+"# sequence ID: consecutively increasing ID \n"
+"uint32 seq\n"
+"#Two-integer timestamp that is expressed as:\n"
+"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
+"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
+"# time-handling sugar is provided by the client library\n"
+"time stamp\n"
+"#Frame this data is associated with\n"
+"string frame_id\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Transform\n"
+"# This represents the transform between two coordinate frames in free space.\n"
+"\n"
+"Vector3 translation\n"
+"Quaternion rotation\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Vector3\n"
+"# This represents a vector in free space. \n"
+"# It is only meant to represent a direction. Therefore, it does not\n"
+"# make sense to apply a translation to it (e.g., when applying a \n"
+"# generic rigid transformation to a Vector3, tf2 will only apply the\n"
+"# rotation). If you want your data to be translatable too, use the\n"
+"# geometry_msgs/Point message instead.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Quaternion\n"
+"# This represents an orientation in free space in quaternion form.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"float64 w\n"
+;
+  }
+
+  static const char* value(const ::tf::tfMessage_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf::tfMessage_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.transforms);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct tfMessage_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf::tfMessage_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf::tfMessage_<ContainerAllocator>& v)
+  {
+    s << indent << "transforms[]" << std::endl;
+    for (size_t i = 0; i < v.transforms.size(); ++i)
+    {
+      s << indent << "  transforms[" << i << "]: ";
+      s << std::endl;
+      s << indent;
+      Printer< ::geometry_msgs::TransformStamped_<ContainerAllocator> >::stream(s, indent + "    ", v.transforms[i]);
+    }
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF_MESSAGE_TFMESSAGE_H
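The tf headers above are catkin-generated files checked in under devel/. Purely as an illustration (not part of this commit), the generated tf::tfMessage type is typically filled like this before publishing on /tf with roscpp:

    #include <string>
    #include <tf/tfMessage.h>
    #include <geometry_msgs/TransformStamped.h>

    // Build a tfMessage carrying a single identity transform parent -> child.
    tf::tfMessage makeIdentityTf(const std::string& parent, const std::string& child)
    {
        geometry_msgs::TransformStamped t;
        t.header.frame_id = parent;   // e.g. "map"
        t.child_frame_id  = child;    // e.g. "velodyne"
        t.transform.rotation.w = 1.0; // identity quaternion
        tf::tfMessage msg;
        msg.transforms.push_back(t);
        return msg;
    }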

+ 123 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/FrameGraph.h

@@ -0,0 +1,123 @@
+// Generated by gencpp from file tf2_msgs/FrameGraph.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_FRAMEGRAPH_H
+#define TF2_MSGS_MESSAGE_FRAMEGRAPH_H
+
+#include <ros/service_traits.h>
+
+
+#include <tf2_msgs/FrameGraphRequest.h>
+#include <tf2_msgs/FrameGraphResponse.h>
+
+
+namespace tf2_msgs
+{
+
+struct FrameGraph
+{
+
+typedef FrameGraphRequest Request;
+typedef FrameGraphResponse Response;
+Request request;
+Response response;
+
+typedef Request RequestType;
+typedef Response ResponseType;
+
+}; // struct FrameGraph
+} // namespace tf2_msgs
+
+
+namespace ros
+{
+namespace service_traits
+{
+
+
+template<>
+struct MD5Sum< ::tf2_msgs::FrameGraph > {
+  static const char* value()
+  {
+    return "437ea58e9463815a0d511c7326b686b0";
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraph&) { return value(); }
+};
+
+template<>
+struct DataType< ::tf2_msgs::FrameGraph > {
+  static const char* value()
+  {
+    return "tf2_msgs/FrameGraph";
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraph&) { return value(); }
+};
+
+
+// service_traits::MD5Sum< ::tf2_msgs::FrameGraphRequest> should match
+// service_traits::MD5Sum< ::tf2_msgs::FrameGraph >
+template<>
+struct MD5Sum< ::tf2_msgs::FrameGraphRequest>
+{
+  static const char* value()
+  {
+    return MD5Sum< ::tf2_msgs::FrameGraph >::value();
+  }
+  static const char* value(const ::tf2_msgs::FrameGraphRequest&)
+  {
+    return value();
+  }
+};
+
+// service_traits::DataType< ::tf2_msgs::FrameGraphRequest> should match
+// service_traits::DataType< ::tf2_msgs::FrameGraph >
+template<>
+struct DataType< ::tf2_msgs::FrameGraphRequest>
+{
+  static const char* value()
+  {
+    return DataType< ::tf2_msgs::FrameGraph >::value();
+  }
+  static const char* value(const ::tf2_msgs::FrameGraphRequest&)
+  {
+    return value();
+  }
+};
+
+// service_traits::MD5Sum< ::tf2_msgs::FrameGraphResponse> should match
+// service_traits::MD5Sum< ::tf2_msgs::FrameGraph >
+template<>
+struct MD5Sum< ::tf2_msgs::FrameGraphResponse>
+{
+  static const char* value()
+  {
+    return MD5Sum< ::tf2_msgs::FrameGraph >::value();
+  }
+  static const char* value(const ::tf2_msgs::FrameGraphResponse&)
+  {
+    return value();
+  }
+};
+
+// service_traits::DataType< ::tf2_msgs::FrameGraphResponse> should match
+// service_traits::DataType< ::tf2_msgs::FrameGraph >
+template<>
+struct DataType< ::tf2_msgs::FrameGraphResponse>
+{
+  static const char* value()
+  {
+    return DataType< ::tf2_msgs::FrameGraph >::value();
+  }
+  static const char* value(const ::tf2_msgs::FrameGraphResponse&)
+  {
+    return value();
+  }
+};
+
+} // namespace service_traits
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_FRAMEGRAPH_H

+ 174 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/FrameGraphRequest.h

@@ -0,0 +1,174 @@
+// Generated by gencpp from file tf2_msgs/FrameGraphRequest.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_FRAMEGRAPHREQUEST_H
+#define TF2_MSGS_MESSAGE_FRAMEGRAPHREQUEST_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct FrameGraphRequest_
+{
+  typedef FrameGraphRequest_<ContainerAllocator> Type;
+
+  FrameGraphRequest_()
+    {
+    }
+  FrameGraphRequest_(const ContainerAllocator& _alloc)
+    {
+  (void)_alloc;
+    }
+
+
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> const> ConstPtr;
+
+}; // struct FrameGraphRequest_
+
+typedef ::tf2_msgs::FrameGraphRequest_<std::allocator<void> > FrameGraphRequest;
+
+typedef boost::shared_ptr< ::tf2_msgs::FrameGraphRequest > FrameGraphRequestPtr;
+typedef boost::shared_ptr< ::tf2_msgs::FrameGraphRequest const> FrameGraphRequestConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "d41d8cd98f00b204e9800998ecf8427e";
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraphRequest_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xd41d8cd98f00b204ULL;
+  static const uint64_t static_value2 = 0xe9800998ecf8427eULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/FrameGraphRequest";
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraphRequest_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraphRequest_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream&, T)
+    {}
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct FrameGraphRequest_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::FrameGraphRequest_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream&, const std::string&, const ::tf2_msgs::FrameGraphRequest_<ContainerAllocator>&)
+  {}
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_FRAMEGRAPHREQUEST_H

+ 196 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/FrameGraphResponse.h

@@ -0,0 +1,196 @@
+// Generated by gencpp from file tf2_msgs/FrameGraphResponse.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_FRAMEGRAPHRESPONSE_H
+#define TF2_MSGS_MESSAGE_FRAMEGRAPHRESPONSE_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct FrameGraphResponse_
+{
+  typedef FrameGraphResponse_<ContainerAllocator> Type;
+
+  FrameGraphResponse_()
+    : frame_yaml()  {
+    }
+  FrameGraphResponse_(const ContainerAllocator& _alloc)
+    : frame_yaml(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other >  _frame_yaml_type;
+  _frame_yaml_type frame_yaml;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> const> ConstPtr;
+
+}; // struct FrameGraphResponse_
+
+typedef ::tf2_msgs::FrameGraphResponse_<std::allocator<void> > FrameGraphResponse;
+
+typedef boost::shared_ptr< ::tf2_msgs::FrameGraphResponse > FrameGraphResponsePtr;
+typedef boost::shared_ptr< ::tf2_msgs::FrameGraphResponse const> FrameGraphResponseConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator1> & lhs, const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator2> & rhs)
+{
+  return lhs.frame_yaml == rhs.frame_yaml;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator1> & lhs, const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "437ea58e9463815a0d511c7326b686b0";
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0x437ea58e9463815aULL;
+  static const uint64_t static_value2 = 0x0d511c7326b686b0ULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/FrameGraphResponse";
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "string frame_yaml\n"
+"\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.frame_yaml);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct FrameGraphResponse_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::FrameGraphResponse_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::FrameGraphResponse_<ContainerAllocator>& v)
+  {
+    s << indent << "frame_yaml: ";
+    Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + "  ", v.frame_yaml);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_FRAMEGRAPHRESPONSE_H

+ 384 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformAction.h

@@ -0,0 +1,384 @@
+// Generated by gencpp from file tf2_msgs/LookupTransformAction.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTION_H
+#define TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTION_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+#include <tf2_msgs/LookupTransformActionGoal.h>
+#include <tf2_msgs/LookupTransformActionResult.h>
+#include <tf2_msgs/LookupTransformActionFeedback.h>
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct LookupTransformAction_
+{
+  typedef LookupTransformAction_<ContainerAllocator> Type;
+
+  LookupTransformAction_()
+    : action_goal()
+    , action_result()
+    , action_feedback()  {
+    }
+  LookupTransformAction_(const ContainerAllocator& _alloc)
+    : action_goal(_alloc)
+    , action_result(_alloc)
+    , action_feedback(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef  ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator>  _action_goal_type;
+  _action_goal_type action_goal;
+
+   typedef  ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator>  _action_result_type;
+  _action_result_type action_result;
+
+   typedef  ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator>  _action_feedback_type;
+  _action_feedback_type action_feedback;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> const> ConstPtr;
+
+}; // struct LookupTransformAction_
+
+typedef ::tf2_msgs::LookupTransformAction_<std::allocator<void> > LookupTransformAction;
+
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformAction > LookupTransformActionPtr;
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformAction const> LookupTransformActionConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::LookupTransformAction_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::LookupTransformAction_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformAction_<ContainerAllocator2> & rhs)
+{
+  return lhs.action_goal == rhs.action_goal &&
+    lhs.action_result == rhs.action_result &&
+    lhs.action_feedback == rhs.action_feedback;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::LookupTransformAction_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformAction_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "7ee01ba91a56c2245c610992dbaa3c37";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformAction_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0x7ee01ba91a56c224ULL;
+  static const uint64_t static_value2 = 0x5c610992dbaa3c37ULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/LookupTransformAction";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformAction_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+"LookupTransformActionGoal action_goal\n"
+"LookupTransformActionResult action_result\n"
+"LookupTransformActionFeedback action_feedback\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformActionGoal\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+"Header header\n"
+"actionlib_msgs/GoalID goal_id\n"
+"LookupTransformGoal goal\n"
+"\n"
+"================================================================================\n"
+"MSG: std_msgs/Header\n"
+"# Standard metadata for higher-level stamped data types.\n"
+"# This is generally used to communicate timestamped data \n"
+"# in a particular coordinate frame.\n"
+"# \n"
+"# sequence ID: consecutively increasing ID \n"
+"uint32 seq\n"
+"#Two-integer timestamp that is expressed as:\n"
+"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
+"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
+"# time-handling sugar is provided by the client library\n"
+"time stamp\n"
+"#Frame this data is associated with\n"
+"string frame_id\n"
+"\n"
+"================================================================================\n"
+"MSG: actionlib_msgs/GoalID\n"
+"# The stamp should store the time at which this goal was requested.\n"
+"# It is used by an action server when it tries to preempt all\n"
+"# goals that were requested before a certain time\n"
+"time stamp\n"
+"\n"
+"# The id provides a way to associate feedback and\n"
+"# result message with specific goal requests. The id\n"
+"# specified must be unique.\n"
+"string id\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformGoal\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"#Simple API\n"
+"string target_frame\n"
+"string source_frame\n"
+"time source_time\n"
+"duration timeout\n"
+"\n"
+"#Advanced API\n"
+"time target_time\n"
+"string fixed_frame\n"
+"\n"
+"#Whether or not to use the advanced API\n"
+"bool advanced\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformActionResult\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+"Header header\n"
+"actionlib_msgs/GoalStatus status\n"
+"LookupTransformResult result\n"
+"\n"
+"================================================================================\n"
+"MSG: actionlib_msgs/GoalStatus\n"
+"GoalID goal_id\n"
+"uint8 status\n"
+"uint8 PENDING         = 0   # The goal has yet to be processed by the action server\n"
+"uint8 ACTIVE          = 1   # The goal is currently being processed by the action server\n"
+"uint8 PREEMPTED       = 2   # The goal received a cancel request after it started executing\n"
+"                            #   and has since completed its execution (Terminal State)\n"
+"uint8 SUCCEEDED       = 3   # The goal was achieved successfully by the action server (Terminal State)\n"
+"uint8 ABORTED         = 4   # The goal was aborted during execution by the action server due\n"
+"                            #    to some failure (Terminal State)\n"
+"uint8 REJECTED        = 5   # The goal was rejected by the action server without being processed,\n"
+"                            #    because the goal was unattainable or invalid (Terminal State)\n"
+"uint8 PREEMPTING      = 6   # The goal received a cancel request after it started executing\n"
+"                            #    and has not yet completed execution\n"
+"uint8 RECALLING       = 7   # The goal received a cancel request before it started executing,\n"
+"                            #    but the action server has not yet confirmed that the goal is canceled\n"
+"uint8 RECALLED        = 8   # The goal received a cancel request before it started executing\n"
+"                            #    and was successfully cancelled (Terminal State)\n"
+"uint8 LOST            = 9   # An action client can determine that a goal is LOST. This should not be\n"
+"                            #    sent over the wire by an action server\n"
+"\n"
+"#Allow for the user to associate a string with GoalStatus for debugging\n"
+"string text\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformResult\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"geometry_msgs/TransformStamped transform\n"
+"tf2_msgs/TF2Error error\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/TransformStamped\n"
+"# This expresses a transform from coordinate frame header.frame_id\n"
+"# to the coordinate frame child_frame_id\n"
+"#\n"
+"# This message is mostly used by the \n"
+"# <a href=\"http://wiki.ros.org/tf\">tf</a> package. \n"
+"# See its documentation for more information.\n"
+"\n"
+"Header header\n"
+"string child_frame_id # the frame id of the child frame\n"
+"Transform transform\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Transform\n"
+"# This represents the transform between two coordinate frames in free space.\n"
+"\n"
+"Vector3 translation\n"
+"Quaternion rotation\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Vector3\n"
+"# This represents a vector in free space. \n"
+"# It is only meant to represent a direction. Therefore, it does not\n"
+"# make sense to apply a translation to it (e.g., when applying a \n"
+"# generic rigid transformation to a Vector3, tf2 will only apply the\n"
+"# rotation). If you want your data to be translatable too, use the\n"
+"# geometry_msgs/Point message instead.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Quaternion\n"
+"# This represents an orientation in free space in quaternion form.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"float64 w\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/TF2Error\n"
+"uint8 NO_ERROR = 0\n"
+"uint8 LOOKUP_ERROR = 1\n"
+"uint8 CONNECTIVITY_ERROR = 2\n"
+"uint8 EXTRAPOLATION_ERROR = 3\n"
+"uint8 INVALID_ARGUMENT_ERROR = 4\n"
+"uint8 TIMEOUT_ERROR = 5\n"
+"uint8 TRANSFORM_ERROR = 6\n"
+"\n"
+"uint8 error\n"
+"string error_string\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformActionFeedback\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+"Header header\n"
+"actionlib_msgs/GoalStatus status\n"
+"LookupTransformFeedback feedback\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformFeedback\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformAction_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.action_goal);
+      stream.next(m.action_result);
+      stream.next(m.action_feedback);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct LookupTransformAction_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::LookupTransformAction_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::LookupTransformAction_<ContainerAllocator>& v)
+  {
+    s << indent << "action_goal: ";
+    s << std::endl;
+    Printer< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >::stream(s, indent + "  ", v.action_goal);
+    s << indent << "action_result: ";
+    s << std::endl;
+    Printer< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >::stream(s, indent + "  ", v.action_result);
+    s << indent << "action_feedback: ";
+    s << std::endl;
+    Printer< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >::stream(s, indent + "  ", v.action_feedback);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTION_H
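
Editorial note (not part of the commit): `LookupTransformAction.h` above only bundles the goal/result/feedback wrappers into one container message whose `allInOne()` streams the three sub-messages in declaration order. The sketch below is illustrative only — frame names and the goal id are invented — and fills the nested goal, then sizes a wire buffer with the standard roscpp helper, roughly what actionlib does internally.

```cpp
// Editorial sketch only -- shows how the aggregate action message above is
// typically filled; this code is not part of the autogenerated header.
#include <iostream>
#include <ros/serialization.h>
#include <tf2_msgs/LookupTransformAction.h>

int main()
{
  tf2_msgs::LookupTransformAction action;

  // The goal wrapper carries a Header, a GoalID and the user-level goal.
  action.action_goal.header.frame_id   = "map";          // hypothetical frame
  action.action_goal.goal_id.id        = "lookup-0001";  // hypothetical goal id
  action.action_goal.goal.target_frame = "base_link";
  action.action_goal.goal.source_frame = "velodyne";
  action.action_goal.goal.timeout      = ros::Duration(0.1);
  action.action_goal.goal.advanced     = false;

  // serializationLength() walks the same allInOne() path as the Serializer
  // specialisation above, so it reports the exact on-wire size.
  std::cout << "wire size: "
            << ros::serialization::serializationLength(action) << " bytes\n";
  return 0;
}
```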

+ 283 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformActionFeedback.h

@@ -0,0 +1,283 @@
+// Generated by gencpp from file tf2_msgs/LookupTransformActionFeedback.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONFEEDBACK_H
+#define TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONFEEDBACK_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+#include <std_msgs/Header.h>
+#include <actionlib_msgs/GoalStatus.h>
+#include <tf2_msgs/LookupTransformFeedback.h>
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct LookupTransformActionFeedback_
+{
+  typedef LookupTransformActionFeedback_<ContainerAllocator> Type;
+
+  LookupTransformActionFeedback_()
+    : header()
+    , status()
+    , feedback()  {
+    }
+  LookupTransformActionFeedback_(const ContainerAllocator& _alloc)
+    : header(_alloc)
+    , status(_alloc)
+    , feedback(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef  ::std_msgs::Header_<ContainerAllocator>  _header_type;
+  _header_type header;
+
+   typedef  ::actionlib_msgs::GoalStatus_<ContainerAllocator>  _status_type;
+  _status_type status;
+
+   typedef  ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator>  _feedback_type;
+  _feedback_type feedback;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> const> ConstPtr;
+
+}; // struct LookupTransformActionFeedback_
+
+typedef ::tf2_msgs::LookupTransformActionFeedback_<std::allocator<void> > LookupTransformActionFeedback;
+
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionFeedback > LookupTransformActionFeedbackPtr;
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionFeedback const> LookupTransformActionFeedbackConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator2> & rhs)
+{
+  return lhs.header == rhs.header &&
+    lhs.status == rhs.status &&
+    lhs.feedback == rhs.feedback;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "aae20e09065c3809e8a8e87c4c8953fd";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xaae20e09065c3809ULL;
+  static const uint64_t static_value2 = 0xe8a8e87c4c8953fdULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/LookupTransformActionFeedback";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+"Header header\n"
+"actionlib_msgs/GoalStatus status\n"
+"LookupTransformFeedback feedback\n"
+"\n"
+"================================================================================\n"
+"MSG: std_msgs/Header\n"
+"# Standard metadata for higher-level stamped data types.\n"
+"# This is generally used to communicate timestamped data \n"
+"# in a particular coordinate frame.\n"
+"# \n"
+"# sequence ID: consecutively increasing ID \n"
+"uint32 seq\n"
+"#Two-integer timestamp that is expressed as:\n"
+"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
+"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
+"# time-handling sugar is provided by the client library\n"
+"time stamp\n"
+"#Frame this data is associated with\n"
+"string frame_id\n"
+"\n"
+"================================================================================\n"
+"MSG: actionlib_msgs/GoalStatus\n"
+"GoalID goal_id\n"
+"uint8 status\n"
+"uint8 PENDING         = 0   # The goal has yet to be processed by the action server\n"
+"uint8 ACTIVE          = 1   # The goal is currently being processed by the action server\n"
+"uint8 PREEMPTED       = 2   # The goal received a cancel request after it started executing\n"
+"                            #   and has since completed its execution (Terminal State)\n"
+"uint8 SUCCEEDED       = 3   # The goal was achieved successfully by the action server (Terminal State)\n"
+"uint8 ABORTED         = 4   # The goal was aborted during execution by the action server due\n"
+"                            #    to some failure (Terminal State)\n"
+"uint8 REJECTED        = 5   # The goal was rejected by the action server without being processed,\n"
+"                            #    because the goal was unattainable or invalid (Terminal State)\n"
+"uint8 PREEMPTING      = 6   # The goal received a cancel request after it started executing\n"
+"                            #    and has not yet completed execution\n"
+"uint8 RECALLING       = 7   # The goal received a cancel request before it started executing,\n"
+"                            #    but the action server has not yet confirmed that the goal is canceled\n"
+"uint8 RECALLED        = 8   # The goal received a cancel request before it started executing\n"
+"                            #    and was successfully cancelled (Terminal State)\n"
+"uint8 LOST            = 9   # An action client can determine that a goal is LOST. This should not be\n"
+"                            #    sent over the wire by an action server\n"
+"\n"
+"#Allow for the user to associate a string with GoalStatus for debugging\n"
+"string text\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: actionlib_msgs/GoalID\n"
+"# The stamp should store the time at which this goal was requested.\n"
+"# It is used by an action server when it tries to preempt all\n"
+"# goals that were requested before a certain time\n"
+"time stamp\n"
+"\n"
+"# The id provides a way to associate feedback and\n"
+"# result message with specific goal requests. The id\n"
+"# specified must be unique.\n"
+"string id\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformFeedback\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.header);
+      stream.next(m.status);
+      stream.next(m.feedback);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct LookupTransformActionFeedback_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::LookupTransformActionFeedback_<ContainerAllocator>& v)
+  {
+    s << indent << "header: ";
+    s << std::endl;
+    Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + "  ", v.header);
+    s << indent << "status: ";
+    s << std::endl;
+    Printer< ::actionlib_msgs::GoalStatus_<ContainerAllocator> >::stream(s, indent + "  ", v.status);
+    s << indent << "feedback: ";
+    s << std::endl;
+    Printer< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >::stream(s, indent + "  ", v.feedback);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONFEEDBACK_H

+ 269 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformActionGoal.h

@@ -0,0 +1,269 @@
+// Generated by gencpp from file tf2_msgs/LookupTransformActionGoal.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONGOAL_H
+#define TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONGOAL_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+#include <std_msgs/Header.h>
+#include <actionlib_msgs/GoalID.h>
+#include <tf2_msgs/LookupTransformGoal.h>
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct LookupTransformActionGoal_
+{
+  typedef LookupTransformActionGoal_<ContainerAllocator> Type;
+
+  LookupTransformActionGoal_()
+    : header()
+    , goal_id()
+    , goal()  {
+    }
+  LookupTransformActionGoal_(const ContainerAllocator& _alloc)
+    : header(_alloc)
+    , goal_id(_alloc)
+    , goal(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef  ::std_msgs::Header_<ContainerAllocator>  _header_type;
+  _header_type header;
+
+   typedef  ::actionlib_msgs::GoalID_<ContainerAllocator>  _goal_id_type;
+  _goal_id_type goal_id;
+
+   typedef  ::tf2_msgs::LookupTransformGoal_<ContainerAllocator>  _goal_type;
+  _goal_type goal;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> const> ConstPtr;
+
+}; // struct LookupTransformActionGoal_
+
+typedef ::tf2_msgs::LookupTransformActionGoal_<std::allocator<void> > LookupTransformActionGoal;
+
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionGoal > LookupTransformActionGoalPtr;
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionGoal const> LookupTransformActionGoalConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator2> & rhs)
+{
+  return lhs.header == rhs.header &&
+    lhs.goal_id == rhs.goal_id &&
+    lhs.goal == rhs.goal;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "f2e7bcdb75c847978d0351a13e699da5";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xf2e7bcdb75c84797ULL;
+  static const uint64_t static_value2 = 0x8d0351a13e699da5ULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/LookupTransformActionGoal";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+"Header header\n"
+"actionlib_msgs/GoalID goal_id\n"
+"LookupTransformGoal goal\n"
+"\n"
+"================================================================================\n"
+"MSG: std_msgs/Header\n"
+"# Standard metadata for higher-level stamped data types.\n"
+"# This is generally used to communicate timestamped data \n"
+"# in a particular coordinate frame.\n"
+"# \n"
+"# sequence ID: consecutively increasing ID \n"
+"uint32 seq\n"
+"#Two-integer timestamp that is expressed as:\n"
+"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
+"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
+"# time-handling sugar is provided by the client library\n"
+"time stamp\n"
+"#Frame this data is associated with\n"
+"string frame_id\n"
+"\n"
+"================================================================================\n"
+"MSG: actionlib_msgs/GoalID\n"
+"# The stamp should store the time at which this goal was requested.\n"
+"# It is used by an action server when it tries to preempt all\n"
+"# goals that were requested before a certain time\n"
+"time stamp\n"
+"\n"
+"# The id provides a way to associate feedback and\n"
+"# result message with specific goal requests. The id\n"
+"# specified must be unique.\n"
+"string id\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformGoal\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"#Simple API\n"
+"string target_frame\n"
+"string source_frame\n"
+"time source_time\n"
+"duration timeout\n"
+"\n"
+"#Advanced API\n"
+"time target_time\n"
+"string fixed_frame\n"
+"\n"
+"#Whether or not to use the advanced API\n"
+"bool advanced\n"
+"\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.header);
+      stream.next(m.goal_id);
+      stream.next(m.goal);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct LookupTransformActionGoal_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::LookupTransformActionGoal_<ContainerAllocator>& v)
+  {
+    s << indent << "header: ";
+    s << std::endl;
+    Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + "  ", v.header);
+    s << indent << "goal_id: ";
+    s << std::endl;
+    Printer< ::actionlib_msgs::GoalID_<ContainerAllocator> >::stream(s, indent + "  ", v.goal_id);
+    s << indent << "goal: ";
+    s << std::endl;
+    Printer< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >::stream(s, indent + "  ", v.goal);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONGOAL_H

+ 338 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformActionResult.h

@@ -0,0 +1,338 @@
+// Generated by gencpp from file tf2_msgs/LookupTransformActionResult.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONRESULT_H
+#define TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONRESULT_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+#include <std_msgs/Header.h>
+#include <actionlib_msgs/GoalStatus.h>
+#include <tf2_msgs/LookupTransformResult.h>
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct LookupTransformActionResult_
+{
+  typedef LookupTransformActionResult_<ContainerAllocator> Type;
+
+  LookupTransformActionResult_()
+    : header()
+    , status()
+    , result()  {
+    }
+  LookupTransformActionResult_(const ContainerAllocator& _alloc)
+    : header(_alloc)
+    , status(_alloc)
+    , result(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef  ::std_msgs::Header_<ContainerAllocator>  _header_type;
+  _header_type header;
+
+   typedef  ::actionlib_msgs::GoalStatus_<ContainerAllocator>  _status_type;
+  _status_type status;
+
+   typedef  ::tf2_msgs::LookupTransformResult_<ContainerAllocator>  _result_type;
+  _result_type result;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> const> ConstPtr;
+
+}; // struct LookupTransformActionResult_
+
+typedef ::tf2_msgs::LookupTransformActionResult_<std::allocator<void> > LookupTransformActionResult;
+
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionResult > LookupTransformActionResultPtr;
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformActionResult const> LookupTransformActionResultConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator2> & rhs)
+{
+  return lhs.header == rhs.header &&
+    lhs.status == rhs.status &&
+    lhs.result == rhs.result;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "ac26ce75a41384fa8bb4dc10f491ab90";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xac26ce75a41384faULL;
+  static const uint64_t static_value2 = 0x8bb4dc10f491ab90ULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/LookupTransformActionResult";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+"Header header\n"
+"actionlib_msgs/GoalStatus status\n"
+"LookupTransformResult result\n"
+"\n"
+"================================================================================\n"
+"MSG: std_msgs/Header\n"
+"# Standard metadata for higher-level stamped data types.\n"
+"# This is generally used to communicate timestamped data \n"
+"# in a particular coordinate frame.\n"
+"# \n"
+"# sequence ID: consecutively increasing ID \n"
+"uint32 seq\n"
+"#Two-integer timestamp that is expressed as:\n"
+"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
+"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
+"# time-handling sugar is provided by the client library\n"
+"time stamp\n"
+"#Frame this data is associated with\n"
+"string frame_id\n"
+"\n"
+"================================================================================\n"
+"MSG: actionlib_msgs/GoalStatus\n"
+"GoalID goal_id\n"
+"uint8 status\n"
+"uint8 PENDING         = 0   # The goal has yet to be processed by the action server\n"
+"uint8 ACTIVE          = 1   # The goal is currently being processed by the action server\n"
+"uint8 PREEMPTED       = 2   # The goal received a cancel request after it started executing\n"
+"                            #   and has since completed its execution (Terminal State)\n"
+"uint8 SUCCEEDED       = 3   # The goal was achieved successfully by the action server (Terminal State)\n"
+"uint8 ABORTED         = 4   # The goal was aborted during execution by the action server due\n"
+"                            #    to some failure (Terminal State)\n"
+"uint8 REJECTED        = 5   # The goal was rejected by the action server without being processed,\n"
+"                            #    because the goal was unattainable or invalid (Terminal State)\n"
+"uint8 PREEMPTING      = 6   # The goal received a cancel request after it started executing\n"
+"                            #    and has not yet completed execution\n"
+"uint8 RECALLING       = 7   # The goal received a cancel request before it started executing,\n"
+"                            #    but the action server has not yet confirmed that the goal is canceled\n"
+"uint8 RECALLED        = 8   # The goal received a cancel request before it started executing\n"
+"                            #    and was successfully cancelled (Terminal State)\n"
+"uint8 LOST            = 9   # An action client can determine that a goal is LOST. This should not be\n"
+"                            #    sent over the wire by an action server\n"
+"\n"
+"#Allow for the user to associate a string with GoalStatus for debugging\n"
+"string text\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: actionlib_msgs/GoalID\n"
+"# The stamp should store the time at which this goal was requested.\n"
+"# It is used by an action server when it tries to preempt all\n"
+"# goals that were requested before a certain time\n"
+"time stamp\n"
+"\n"
+"# The id provides a way to associate feedback and\n"
+"# result message with specific goal requests. The id\n"
+"# specified must be unique.\n"
+"string id\n"
+"\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/LookupTransformResult\n"
+"# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"geometry_msgs/TransformStamped transform\n"
+"tf2_msgs/TF2Error error\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/TransformStamped\n"
+"# This expresses a transform from coordinate frame header.frame_id\n"
+"# to the coordinate frame child_frame_id\n"
+"#\n"
+"# This message is mostly used by the \n"
+"# <a href=\"http://wiki.ros.org/tf\">tf</a> package. \n"
+"# See its documentation for more information.\n"
+"\n"
+"Header header\n"
+"string child_frame_id # the frame id of the child frame\n"
+"Transform transform\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Transform\n"
+"# This represents the transform between two coordinate frames in free space.\n"
+"\n"
+"Vector3 translation\n"
+"Quaternion rotation\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Vector3\n"
+"# This represents a vector in free space. \n"
+"# It is only meant to represent a direction. Therefore, it does not\n"
+"# make sense to apply a translation to it (e.g., when applying a \n"
+"# generic rigid transformation to a Vector3, tf2 will only apply the\n"
+"# rotation). If you want your data to be translatable too, use the\n"
+"# geometry_msgs/Point message instead.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Quaternion\n"
+"# This represents an orientation in free space in quaternion form.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"float64 w\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/TF2Error\n"
+"uint8 NO_ERROR = 0\n"
+"uint8 LOOKUP_ERROR = 1\n"
+"uint8 CONNECTIVITY_ERROR = 2\n"
+"uint8 EXTRAPOLATION_ERROR = 3\n"
+"uint8 INVALID_ARGUMENT_ERROR = 4\n"
+"uint8 TIMEOUT_ERROR = 5\n"
+"uint8 TRANSFORM_ERROR = 6\n"
+"\n"
+"uint8 error\n"
+"string error_string\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.header);
+      stream.next(m.status);
+      stream.next(m.result);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct LookupTransformActionResult_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::LookupTransformActionResult_<ContainerAllocator>& v)
+  {
+    s << indent << "header: ";
+    s << std::endl;
+    Printer< ::std_msgs::Header_<ContainerAllocator> >::stream(s, indent + "  ", v.header);
+    s << indent << "status: ";
+    s << std::endl;
+    Printer< ::actionlib_msgs::GoalStatus_<ContainerAllocator> >::stream(s, indent + "  ", v.status);
+    s << indent << "result: ";
+    s << std::endl;
+    Printer< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >::stream(s, indent + "  ", v.result);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_LOOKUPTRANSFORMACTIONRESULT_H

+ 175 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformFeedback.h

@@ -0,0 +1,175 @@
+// Generated by gencpp from file tf2_msgs/LookupTransformFeedback.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_LOOKUPTRANSFORMFEEDBACK_H
+#define TF2_MSGS_MESSAGE_LOOKUPTRANSFORMFEEDBACK_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct LookupTransformFeedback_
+{
+  typedef LookupTransformFeedback_<ContainerAllocator> Type;
+
+  LookupTransformFeedback_()
+    {
+    }
+  LookupTransformFeedback_(const ContainerAllocator& _alloc)
+    {
+  (void)_alloc;
+    }
+
+
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> const> ConstPtr;
+
+}; // struct LookupTransformFeedback_
+
+typedef ::tf2_msgs::LookupTransformFeedback_<std::allocator<void> > LookupTransformFeedback;
+
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformFeedback > LookupTransformFeedbackPtr;
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformFeedback const> LookupTransformFeedbackConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "d41d8cd98f00b204e9800998ecf8427e";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xd41d8cd98f00b204ULL;
+  static const uint64_t static_value2 = 0xe9800998ecf8427eULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/LookupTransformFeedback";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream&, T)
+    {}
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct LookupTransformFeedback_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream&, const std::string&, const ::tf2_msgs::LookupTransformFeedback_<ContainerAllocator>&)
+  {}
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_LOOKUPTRANSFORMFEEDBACK_H

+ 262 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformGoal.h

@@ -0,0 +1,262 @@
+// Generated by gencpp from file tf2_msgs/LookupTransformGoal.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_LOOKUPTRANSFORMGOAL_H
+#define TF2_MSGS_MESSAGE_LOOKUPTRANSFORMGOAL_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct LookupTransformGoal_
+{
+  typedef LookupTransformGoal_<ContainerAllocator> Type;
+
+  LookupTransformGoal_()
+    : target_frame()
+    , source_frame()
+    , source_time()
+    , timeout()
+    , target_time()
+    , fixed_frame()
+    , advanced(false)  {
+    }
+  LookupTransformGoal_(const ContainerAllocator& _alloc)
+    : target_frame(_alloc)
+    , source_frame(_alloc)
+    , source_time()
+    , timeout()
+    , target_time()
+    , fixed_frame(_alloc)
+    , advanced(false)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other >  _target_frame_type;
+  _target_frame_type target_frame;
+
+   typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other >  _source_frame_type;
+  _source_frame_type source_frame;
+
+   typedef ros::Time _source_time_type;
+  _source_time_type source_time;
+
+   typedef ros::Duration _timeout_type;
+  _timeout_type timeout;
+
+   typedef ros::Time _target_time_type;
+  _target_time_type target_time;
+
+   typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other >  _fixed_frame_type;
+  _fixed_frame_type fixed_frame;
+
+   typedef uint8_t _advanced_type;
+  _advanced_type advanced;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> const> ConstPtr;
+
+}; // struct LookupTransformGoal_
+
+typedef ::tf2_msgs::LookupTransformGoal_<std::allocator<void> > LookupTransformGoal;
+
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformGoal > LookupTransformGoalPtr;
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformGoal const> LookupTransformGoalConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator2> & rhs)
+{
+  return lhs.target_frame == rhs.target_frame &&
+    lhs.source_frame == rhs.source_frame &&
+    lhs.source_time == rhs.source_time &&
+    lhs.timeout == rhs.timeout &&
+    lhs.target_time == rhs.target_time &&
+    lhs.fixed_frame == rhs.fixed_frame &&
+    lhs.advanced == rhs.advanced;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "35e3720468131d675a18bb6f3e5f22f8";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0x35e3720468131d67ULL;
+  static const uint64_t static_value2 = 0x5a18bb6f3e5f22f8ULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/LookupTransformGoal";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"#Simple API\n"
+"string target_frame\n"
+"string source_frame\n"
+"time source_time\n"
+"duration timeout\n"
+"\n"
+"#Advanced API\n"
+"time target_time\n"
+"string fixed_frame\n"
+"\n"
+"#Whether or not to use the advanced API\n"
+"bool advanced\n"
+"\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.target_frame);
+      stream.next(m.source_frame);
+      stream.next(m.source_time);
+      stream.next(m.timeout);
+      stream.next(m.target_time);
+      stream.next(m.fixed_frame);
+      stream.next(m.advanced);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct LookupTransformGoal_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::LookupTransformGoal_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::LookupTransformGoal_<ContainerAllocator>& v)
+  {
+    s << indent << "target_frame: ";
+    Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + "  ", v.target_frame);
+    s << indent << "source_frame: ";
+    Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + "  ", v.source_frame);
+    s << indent << "source_time: ";
+    Printer<ros::Time>::stream(s, indent + "  ", v.source_time);
+    s << indent << "timeout: ";
+    Printer<ros::Duration>::stream(s, indent + "  ", v.timeout);
+    s << indent << "target_time: ";
+    Printer<ros::Time>::stream(s, indent + "  ", v.target_time);
+    s << indent << "fixed_frame: ";
+    Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + "  ", v.fixed_frame);
+    s << indent << "advanced: ";
+    Printer<uint8_t>::stream(s, indent + "  ", v.advanced);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_LOOKUPTRANSFORMGOAL_H
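
Editorial note (not part of the commit): `LookupTransformGoal.h` is the user-facing goal message. Assuming nothing beyond the generated struct above and the stock `ros::serialization` API, the sketch below round-trips a goal through the `Serializer<>` specialisation defined in this header; the field values are placeholders.

```cpp
// Editorial sketch only -- a minimal serialize/deserialize round trip through
// the generated Serializer; not part of the committed header.
#include <cassert>
#include <vector>
#include <ros/serialization.h>
#include <tf2_msgs/LookupTransformGoal.h>

int main()
{
  namespace ser = ros::serialization;

  tf2_msgs::LookupTransformGoal goal;
  goal.target_frame = "map";        // placeholder frames
  goal.source_frame = "base_link";
  goal.timeout      = ros::Duration(0.2);
  goal.advanced     = false;

  // Size the buffer, then let allInOne() stream each field in order.
  std::vector<uint8_t> buffer(ser::serializationLength(goal));
  ser::OStream out(buffer.data(), buffer.size());
  ser::serialize(out, goal);

  // Deserialise into a fresh message and check one field survived the trip.
  tf2_msgs::LookupTransformGoal copy;
  ser::IStream in(buffer.data(), buffer.size());
  ser::deserialize(in, copy);
  assert(copy.target_frame == goal.target_frame);
  return 0;
}
```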

+ 280 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/LookupTransformResult.h

@@ -0,0 +1,280 @@
+// Generated by gencpp from file tf2_msgs/LookupTransformResult.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_LOOKUPTRANSFORMRESULT_H
+#define TF2_MSGS_MESSAGE_LOOKUPTRANSFORMRESULT_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+#include <geometry_msgs/TransformStamped.h>
+#include <tf2_msgs/TF2Error.h>
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct LookupTransformResult_
+{
+  typedef LookupTransformResult_<ContainerAllocator> Type;
+
+  LookupTransformResult_()
+    : transform()
+    , error()  {
+    }
+  LookupTransformResult_(const ContainerAllocator& _alloc)
+    : transform(_alloc)
+    , error(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef  ::geometry_msgs::TransformStamped_<ContainerAllocator>  _transform_type;
+  _transform_type transform;
+
+   typedef  ::tf2_msgs::TF2Error_<ContainerAllocator>  _error_type;
+  _error_type error;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> const> ConstPtr;
+
+}; // struct LookupTransformResult_
+
+typedef ::tf2_msgs::LookupTransformResult_<std::allocator<void> > LookupTransformResult;
+
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformResult > LookupTransformResultPtr;
+typedef boost::shared_ptr< ::tf2_msgs::LookupTransformResult const> LookupTransformResultConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::LookupTransformResult_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::LookupTransformResult_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformResult_<ContainerAllocator2> & rhs)
+{
+  return lhs.transform == rhs.transform &&
+    lhs.error == rhs.error;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::LookupTransformResult_<ContainerAllocator1> & lhs, const ::tf2_msgs::LookupTransformResult_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "3fe5db6a19ca9cfb675418c5ad875c36";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformResult_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0x3fe5db6a19ca9cfbULL;
+  static const uint64_t static_value2 = 0x675418c5ad875c36ULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/LookupTransformResult";
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformResult_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======\n"
+"geometry_msgs/TransformStamped transform\n"
+"tf2_msgs/TF2Error error\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/TransformStamped\n"
+"# This expresses a transform from coordinate frame header.frame_id\n"
+"# to the coordinate frame child_frame_id\n"
+"#\n"
+"# This message is mostly used by the \n"
+"# <a href=\"http://wiki.ros.org/tf\">tf</a> package. \n"
+"# See its documentation for more information.\n"
+"\n"
+"Header header\n"
+"string child_frame_id # the frame id of the child frame\n"
+"Transform transform\n"
+"\n"
+"================================================================================\n"
+"MSG: std_msgs/Header\n"
+"# Standard metadata for higher-level stamped data types.\n"
+"# This is generally used to communicate timestamped data \n"
+"# in a particular coordinate frame.\n"
+"# \n"
+"# sequence ID: consecutively increasing ID \n"
+"uint32 seq\n"
+"#Two-integer timestamp that is expressed as:\n"
+"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
+"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
+"# time-handling sugar is provided by the client library\n"
+"time stamp\n"
+"#Frame this data is associated with\n"
+"string frame_id\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Transform\n"
+"# This represents the transform between two coordinate frames in free space.\n"
+"\n"
+"Vector3 translation\n"
+"Quaternion rotation\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Vector3\n"
+"# This represents a vector in free space. \n"
+"# It is only meant to represent a direction. Therefore, it does not\n"
+"# make sense to apply a translation to it (e.g., when applying a \n"
+"# generic rigid transformation to a Vector3, tf2 will only apply the\n"
+"# rotation). If you want your data to be translatable too, use the\n"
+"# geometry_msgs/Point message instead.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Quaternion\n"
+"# This represents an orientation in free space in quaternion form.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"float64 w\n"
+"\n"
+"================================================================================\n"
+"MSG: tf2_msgs/TF2Error\n"
+"uint8 NO_ERROR = 0\n"
+"uint8 LOOKUP_ERROR = 1\n"
+"uint8 CONNECTIVITY_ERROR = 2\n"
+"uint8 EXTRAPOLATION_ERROR = 3\n"
+"uint8 INVALID_ARGUMENT_ERROR = 4\n"
+"uint8 TIMEOUT_ERROR = 5\n"
+"uint8 TRANSFORM_ERROR = 6\n"
+"\n"
+"uint8 error\n"
+"string error_string\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::LookupTransformResult_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.transform);
+      stream.next(m.error);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct LookupTransformResult_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::LookupTransformResult_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::LookupTransformResult_<ContainerAllocator>& v)
+  {
+    s << indent << "transform: ";
+    s << std::endl;
+    Printer< ::geometry_msgs::TransformStamped_<ContainerAllocator> >::stream(s, indent + "  ", v.transform);
+    s << indent << "error: ";
+    s << std::endl;
+    Printer< ::tf2_msgs::TF2Error_<ContainerAllocator> >::stream(s, indent + "  ", v.error);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_LOOKUPTRANSFORMRESULT_H
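
Because the header also declares a Serializer specialization, the result message can be written to a raw byte buffer with the standard ros::serialization helpers (serializationLength, OStream, serialize). The short sketch below assumes only those roscpp facilities and uses placeholder field values.

#include <cstdint>
#include <vector>
#include <ros/serialization.h>
#include <tf2_msgs/LookupTransformResult.h>

int main()
{
  namespace ser = ros::serialization;

  tf2_msgs::LookupTransformResult result;
  result.transform.child_frame_id = "base_link";      // placeholder value
  result.error.error = tf2_msgs::TF2Error::NO_ERROR;

  const uint32_t length = ser::serializationLength(result);
  std::vector<uint8_t> buffer(length);
  ser::OStream stream(buffer.data(), length);
  ser::serialize(stream, result);  // invokes the allInOne serializer declared above
  return 0;
}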

+ 259 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/TF2Error.h

@@ -0,0 +1,259 @@
+// Generated by gencpp from file tf2_msgs/TF2Error.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_TF2ERROR_H
+#define TF2_MSGS_MESSAGE_TF2ERROR_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct TF2Error_
+{
+  typedef TF2Error_<ContainerAllocator> Type;
+
+  TF2Error_()
+    : error(0)
+    , error_string()  {
+    }
+  TF2Error_(const ContainerAllocator& _alloc)
+    : error(0)
+    , error_string(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef uint8_t _error_type;
+  _error_type error;
+
+   typedef std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other >  _error_string_type;
+  _error_string_type error_string;
+
+
+
+// reducing the odds to have name collisions with Windows.h 
+#if defined(_WIN32) && defined(NO_ERROR)
+  #undef NO_ERROR
+#endif
+#if defined(_WIN32) && defined(LOOKUP_ERROR)
+  #undef LOOKUP_ERROR
+#endif
+#if defined(_WIN32) && defined(CONNECTIVITY_ERROR)
+  #undef CONNECTIVITY_ERROR
+#endif
+#if defined(_WIN32) && defined(EXTRAPOLATION_ERROR)
+  #undef EXTRAPOLATION_ERROR
+#endif
+#if defined(_WIN32) && defined(INVALID_ARGUMENT_ERROR)
+  #undef INVALID_ARGUMENT_ERROR
+#endif
+#if defined(_WIN32) && defined(TIMEOUT_ERROR)
+  #undef TIMEOUT_ERROR
+#endif
+#if defined(_WIN32) && defined(TRANSFORM_ERROR)
+  #undef TRANSFORM_ERROR
+#endif
+
+  enum {
+    NO_ERROR = 0u,
+    LOOKUP_ERROR = 1u,
+    CONNECTIVITY_ERROR = 2u,
+    EXTRAPOLATION_ERROR = 3u,
+    INVALID_ARGUMENT_ERROR = 4u,
+    TIMEOUT_ERROR = 5u,
+    TRANSFORM_ERROR = 6u,
+  };
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::TF2Error_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::TF2Error_<ContainerAllocator> const> ConstPtr;
+
+}; // struct TF2Error_
+
+typedef ::tf2_msgs::TF2Error_<std::allocator<void> > TF2Error;
+
+typedef boost::shared_ptr< ::tf2_msgs::TF2Error > TF2ErrorPtr;
+typedef boost::shared_ptr< ::tf2_msgs::TF2Error const> TF2ErrorConstPtr;
+
+// constants requiring out of line definition
+
+   
+
+   
+
+   
+
+   
+
+   
+
+   
+
+   
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::TF2Error_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::TF2Error_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::TF2Error_<ContainerAllocator1> & lhs, const ::tf2_msgs::TF2Error_<ContainerAllocator2> & rhs)
+{
+  return lhs.error == rhs.error &&
+    lhs.error_string == rhs.error_string;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::TF2Error_<ContainerAllocator1> & lhs, const ::tf2_msgs::TF2Error_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::TF2Error_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::TF2Error_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::TF2Error_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "bc6848fd6fd750c92e38575618a4917d";
+  }
+
+  static const char* value(const ::tf2_msgs::TF2Error_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0xbc6848fd6fd750c9ULL;
+  static const uint64_t static_value2 = 0x2e38575618a4917dULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/TF2Error";
+  }
+
+  static const char* value(const ::tf2_msgs::TF2Error_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "uint8 NO_ERROR = 0\n"
+"uint8 LOOKUP_ERROR = 1\n"
+"uint8 CONNECTIVITY_ERROR = 2\n"
+"uint8 EXTRAPOLATION_ERROR = 3\n"
+"uint8 INVALID_ARGUMENT_ERROR = 4\n"
+"uint8 TIMEOUT_ERROR = 5\n"
+"uint8 TRANSFORM_ERROR = 6\n"
+"\n"
+"uint8 error\n"
+"string error_string\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::TF2Error_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.error);
+      stream.next(m.error_string);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct TF2Error_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::TF2Error_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::TF2Error_<ContainerAllocator>& v)
+  {
+    s << indent << "error: ";
+    Printer<uint8_t>::stream(s, indent + "  ", v.error);
+    s << indent << "error_string: ";
+    Printer<std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > >::stream(s, indent + "  ", v.error_string);
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_TF2ERROR_H
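
The enum constants above mirror the tf2 buffer error codes, so downstream code typically switches on the numeric error field. The mapping below is an illustrative sketch; the description strings are informal wording, not part of the message definition.

#include <cstdint>
#include <string>
#include <tf2_msgs/TF2Error.h>

// Translate a TF2Error status code into a short, human-readable description.
std::string describeTF2Error(uint8_t code)
{
  switch (code)
  {
    case tf2_msgs::TF2Error::NO_ERROR:               return "lookup succeeded";
    case tf2_msgs::TF2Error::LOOKUP_ERROR:           return "unknown frame id";
    case tf2_msgs::TF2Error::CONNECTIVITY_ERROR:     return "frames are not connected";
    case tf2_msgs::TF2Error::EXTRAPOLATION_ERROR:    return "requested time is outside the buffer";
    case tf2_msgs::TF2Error::INVALID_ARGUMENT_ERROR: return "invalid argument";
    case tf2_msgs::TF2Error::TIMEOUT_ERROR:          return "lookup timed out";
    case tf2_msgs::TF2Error::TRANSFORM_ERROR:        return "transform could not be computed";
    default:                                         return "unrecognized error code";
  }
}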

+ 259 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/include/tf2_msgs/TFMessage.h

@@ -0,0 +1,259 @@
+// Generated by gencpp from file tf2_msgs/TFMessage.msg
+// DO NOT EDIT!
+
+
+#ifndef TF2_MSGS_MESSAGE_TFMESSAGE_H
+#define TF2_MSGS_MESSAGE_TFMESSAGE_H
+
+
+#include <string>
+#include <vector>
+#include <map>
+
+#include <ros/types.h>
+#include <ros/serialization.h>
+#include <ros/builtin_message_traits.h>
+#include <ros/message_operations.h>
+
+#include <geometry_msgs/TransformStamped.h>
+
+namespace tf2_msgs
+{
+template <class ContainerAllocator>
+struct TFMessage_
+{
+  typedef TFMessage_<ContainerAllocator> Type;
+
+  TFMessage_()
+    : transforms()  {
+    }
+  TFMessage_(const ContainerAllocator& _alloc)
+    : transforms(_alloc)  {
+  (void)_alloc;
+    }
+
+
+
+   typedef std::vector< ::geometry_msgs::TransformStamped_<ContainerAllocator> , typename ContainerAllocator::template rebind< ::geometry_msgs::TransformStamped_<ContainerAllocator> >::other >  _transforms_type;
+  _transforms_type transforms;
+
+
+
+
+
+  typedef boost::shared_ptr< ::tf2_msgs::TFMessage_<ContainerAllocator> > Ptr;
+  typedef boost::shared_ptr< ::tf2_msgs::TFMessage_<ContainerAllocator> const> ConstPtr;
+
+}; // struct TFMessage_
+
+typedef ::tf2_msgs::TFMessage_<std::allocator<void> > TFMessage;
+
+typedef boost::shared_ptr< ::tf2_msgs::TFMessage > TFMessagePtr;
+typedef boost::shared_ptr< ::tf2_msgs::TFMessage const> TFMessageConstPtr;
+
+// constants requiring out of line definition
+
+
+
+template<typename ContainerAllocator>
+std::ostream& operator<<(std::ostream& s, const ::tf2_msgs::TFMessage_<ContainerAllocator> & v)
+{
+ros::message_operations::Printer< ::tf2_msgs::TFMessage_<ContainerAllocator> >::stream(s, "", v);
+return s;
+}
+
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator==(const ::tf2_msgs::TFMessage_<ContainerAllocator1> & lhs, const ::tf2_msgs::TFMessage_<ContainerAllocator2> & rhs)
+{
+  return lhs.transforms == rhs.transforms;
+}
+
+template<typename ContainerAllocator1, typename ContainerAllocator2>
+bool operator!=(const ::tf2_msgs::TFMessage_<ContainerAllocator1> & lhs, const ::tf2_msgs::TFMessage_<ContainerAllocator2> & rhs)
+{
+  return !(lhs == rhs);
+}
+
+
+} // namespace tf2_msgs
+
+namespace ros
+{
+namespace message_traits
+{
+
+
+
+
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsMessage< ::tf2_msgs::TFMessage_<ContainerAllocator> const>
+  : TrueType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct IsFixedSize< ::tf2_msgs::TFMessage_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+  : FalseType
+  { };
+
+template <class ContainerAllocator>
+struct HasHeader< ::tf2_msgs::TFMessage_<ContainerAllocator> const>
+  : FalseType
+  { };
+
+
+template<class ContainerAllocator>
+struct MD5Sum< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "94810edda583a504dfda3829e70d7eec";
+  }
+
+  static const char* value(const ::tf2_msgs::TFMessage_<ContainerAllocator>&) { return value(); }
+  static const uint64_t static_value1 = 0x94810edda583a504ULL;
+  static const uint64_t static_value2 = 0xdfda3829e70d7eecULL;
+};
+
+template<class ContainerAllocator>
+struct DataType< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "tf2_msgs/TFMessage";
+  }
+
+  static const char* value(const ::tf2_msgs::TFMessage_<ContainerAllocator>&) { return value(); }
+};
+
+template<class ContainerAllocator>
+struct Definition< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+{
+  static const char* value()
+  {
+    return "geometry_msgs/TransformStamped[] transforms\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/TransformStamped\n"
+"# This expresses a transform from coordinate frame header.frame_id\n"
+"# to the coordinate frame child_frame_id\n"
+"#\n"
+"# This message is mostly used by the \n"
+"# <a href=\"http://wiki.ros.org/tf\">tf</a> package. \n"
+"# See its documentation for more information.\n"
+"\n"
+"Header header\n"
+"string child_frame_id # the frame id of the child frame\n"
+"Transform transform\n"
+"\n"
+"================================================================================\n"
+"MSG: std_msgs/Header\n"
+"# Standard metadata for higher-level stamped data types.\n"
+"# This is generally used to communicate timestamped data \n"
+"# in a particular coordinate frame.\n"
+"# \n"
+"# sequence ID: consecutively increasing ID \n"
+"uint32 seq\n"
+"#Two-integer timestamp that is expressed as:\n"
+"# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')\n"
+"# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')\n"
+"# time-handling sugar is provided by the client library\n"
+"time stamp\n"
+"#Frame this data is associated with\n"
+"string frame_id\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Transform\n"
+"# This represents the transform between two coordinate frames in free space.\n"
+"\n"
+"Vector3 translation\n"
+"Quaternion rotation\n"
+"\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Vector3\n"
+"# This represents a vector in free space. \n"
+"# It is only meant to represent a direction. Therefore, it does not\n"
+"# make sense to apply a translation to it (e.g., when applying a \n"
+"# generic rigid transformation to a Vector3, tf2 will only apply the\n"
+"# rotation). If you want your data to be translatable too, use the\n"
+"# geometry_msgs/Point message instead.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"================================================================================\n"
+"MSG: geometry_msgs/Quaternion\n"
+"# This represents an orientation in free space in quaternion form.\n"
+"\n"
+"float64 x\n"
+"float64 y\n"
+"float64 z\n"
+"float64 w\n"
+;
+  }
+
+  static const char* value(const ::tf2_msgs::TFMessage_<ContainerAllocator>&) { return value(); }
+};
+
+} // namespace message_traits
+} // namespace ros
+
+namespace ros
+{
+namespace serialization
+{
+
+  template<class ContainerAllocator> struct Serializer< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+  {
+    template<typename Stream, typename T> inline static void allInOne(Stream& stream, T m)
+    {
+      stream.next(m.transforms);
+    }
+
+    ROS_DECLARE_ALLINONE_SERIALIZER
+  }; // struct TFMessage_
+
+} // namespace serialization
+} // namespace ros
+
+namespace ros
+{
+namespace message_operations
+{
+
+template<class ContainerAllocator>
+struct Printer< ::tf2_msgs::TFMessage_<ContainerAllocator> >
+{
+  template<typename Stream> static void stream(Stream& s, const std::string& indent, const ::tf2_msgs::TFMessage_<ContainerAllocator>& v)
+  {
+    s << indent << "transforms[]" << std::endl;
+    for (size_t i = 0; i < v.transforms.size(); ++i)
+    {
+      s << indent << "  transforms[" << i << "]: ";
+      s << std::endl;
+      s << indent;
+      Printer< ::geometry_msgs::TransformStamped_<ContainerAllocator> >::stream(s, indent + "    ", v.transforms[i]);
+    }
+  }
+};
+
+} // namespace message_operations
+} // namespace ros
+
+#endif // TF2_MSGS_MESSAGE_TFMESSAGE_H
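
TFMessage is simply a vector of geometry_msgs/TransformStamped, the payload broadcast on the /tf topic. The sketch below builds one entry with hypothetical frame names and iterates over the container, without assuming a running ROS node.

#include <iostream>
#include <geometry_msgs/TransformStamped.h>
#include <tf2_msgs/TFMessage.h>

int main()
{
  geometry_msgs::TransformStamped tf;
  tf.header.frame_id = "map";        // hypothetical parent frame
  tf.child_frame_id = "base_link";   // hypothetical child frame
  tf.transform.rotation.w = 1.0;     // identity rotation

  tf2_msgs::TFMessage msg;
  msg.transforms.push_back(tf);

  for (const auto& t : msg.transforms)
    std::cout << t.header.frame_id << " -> " << t.child_frame_id << std::endl;
  return 0;
}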

+ 8 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/local_setup.bash

@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# generated from catkin/cmake/templates/local_setup.bash.in
+
+CATKIN_SHELL=bash
+
+# source setup.sh from same directory as this file
+_CATKIN_SETUP_DIR=$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" > /dev/null && pwd)
+. "$_CATKIN_SETUP_DIR/setup.sh" --extend --local

+ 9 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/local_setup.sh

@@ -0,0 +1,9 @@
+#!/usr/bin/env sh
+# generated from catkin/cmake/template/local_setup.sh.in
+
+# since this file is sourced either use the provided _CATKIN_SETUP_DIR
+# or fall back to the destination set at configure time
+: ${_CATKIN_SETUP_DIR:=/home/wanghao/Desktop/projects/CP_TRT/github/CenterPointTensorRT/tools/catkin_ws/devel}
+CATKIN_SETUP_UTIL_ARGS="--extend --local"
+. "$_CATKIN_SETUP_DIR/setup.sh"
+unset CATKIN_SETUP_UTIL_ARGS

+ 8 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/local_setup.zsh

@@ -0,0 +1,8 @@
+#!/usr/bin/env zsh
+# generated from catkin/cmake/templates/local_setup.zsh.in
+
+CATKIN_SHELL=zsh
+
+# source setup.sh from same directory as this file
+_CATKIN_SETUP_DIR=$(builtin cd -q "`dirname "$0"`" > /dev/null && pwd)
+emulate -R zsh -c 'source "$_CATKIN_SETUP_DIR/setup.sh" --extend --local'

+ 8 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/setup.bash

@@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+# generated from catkin/cmake/templates/setup.bash.in
+
+CATKIN_SHELL=bash
+
+# source setup.sh from same directory as this file
+_CATKIN_SETUP_DIR=$(builtin cd "`dirname "${BASH_SOURCE[0]}"`" > /dev/null && pwd)
+. "$_CATKIN_SETUP_DIR/setup.sh"

+ 96 - 0
src/detection/CenterPoint-master/tools/catkin_ws/devel/setup.sh

@@ -0,0 +1,96 @@
+#!/usr/bin/env sh
+# generated from catkin/cmake/template/setup.sh.in
+
+# Sets various environment variables and sources additional environment hooks.
+# It tries its best to first undo changes from a previously sourced setup file.
+# Supported command line options:
+# --extend: skips the undoing of changes from a previously sourced setup file
+# --local: only considers this workspace but not the chained ones
+# In a plain sh shell, which doesn't support arguments for sourced scripts, you can
+# set the environment variable `CATKIN_SETUP_UTIL_ARGS=--extend/--local` instead.
+
+# since this file is sourced either use the provided _CATKIN_SETUP_DIR
+# or fall back to the destination set at configure time
+: ${_CATKIN_SETUP_DIR:=/home/wanghao/Desktop/projects/CP_TRT/github/CenterPointTensorRT/tools/catkin_ws/devel}
+_SETUP_UTIL="$_CATKIN_SETUP_DIR/_setup_util.py"
+unset _CATKIN_SETUP_DIR
+
+if [ ! -f "$_SETUP_UTIL" ]; then
+  echo "Missing Python script: $_SETUP_UTIL"
+  return 22
+fi
+
+# detect if running on Darwin platform
+_UNAME=`uname -s`
+_IS_DARWIN=0
+if [ "$_UNAME" = "Darwin" ]; then
+  _IS_DARWIN=1
+fi
+unset _UNAME
+
+# make sure to export all environment variables
+export CMAKE_PREFIX_PATH
+if [ $_IS_DARWIN -eq 0 ]; then
+  export LD_LIBRARY_PATH
+else
+  export DYLD_LIBRARY_PATH
+fi
+unset _IS_DARWIN
+export PATH
+export PKG_CONFIG_PATH
+export PYTHONPATH
+
+# remember type of shell if not already set
+if [ -z "$CATKIN_SHELL" ]; then
+  CATKIN_SHELL=sh
+fi
+
+# invoke Python script to generate necessary exports of environment variables
+# use TMPDIR if it exists, otherwise fall back to /tmp
+if [ -d "${TMPDIR:-}" ]; then
+  _TMPDIR="${TMPDIR}"
+else
+  _TMPDIR=/tmp
+fi
+_SETUP_TMP=`mktemp "${_TMPDIR}/setup.sh.XXXXXXXXXX"`
+unset _TMPDIR
+if [ $? -ne 0 -o ! -f "$_SETUP_TMP" ]; then
+  echo "Could not create temporary file: $_SETUP_TMP"
+  return 1
+fi
+CATKIN_SHELL=$CATKIN_SHELL "$_SETUP_UTIL" $@ ${CATKIN_SETUP_UTIL_ARGS:-} >> "$_SETUP_TMP"
+_RC=$?
+if [ $_RC -ne 0 ]; then
+  if [ $_RC -eq 2 ]; then
+    echo "Could not write the output of '$_SETUP_UTIL' to the temporary file '$_SETUP_TMP': maybe the disk is full?"
+  else
+    echo "Failed to run '\"$_SETUP_UTIL\" $@': return code $_RC"
+  fi
+  unset _RC
+  unset _SETUP_UTIL
+  rm -f "$_SETUP_TMP"
+  unset _SETUP_TMP
+  return 1
+fi
+unset _RC
+unset _SETUP_UTIL
+. "$_SETUP_TMP"
+rm -f "$_SETUP_TMP"
+unset _SETUP_TMP
+
+# source all environment hooks
+_i=0
+while [ $_i -lt $_CATKIN_ENVIRONMENT_HOOKS_COUNT ]; do
+  eval _envfile=\$_CATKIN_ENVIRONMENT_HOOKS_$_i
+  unset _CATKIN_ENVIRONMENT_HOOKS_$_i
+  eval _envfile_workspace=\$_CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
+  unset _CATKIN_ENVIRONMENT_HOOKS_${_i}_WORKSPACE
+  # set workspace for environment hook
+  CATKIN_ENV_HOOK_WORKSPACE=$_envfile_workspace
+  . "$_envfile"
+  unset CATKIN_ENV_HOOK_WORKSPACE
+  _i=$((_i + 1))
+done
+unset _i
+
+unset _CATKIN_ENVIRONMENT_HOOKS_COUNT

Some files were not shown because too many files changed in this diff