yolov8_seg.cpp

#include <iostream>
#include <fstream>
#include <opencv2/opencv.hpp>
#include "model.h"
#include "utils.h"
#include "preprocess.h"
#include "postprocess.h"
#include "cuda_utils.h"
#include "logging.h"

Logger gLogger;
using namespace nvinfer1;

// Detection output: one count value plus kMaxNumOutputBbox Detection structs, flattened to floats.
const int kOutputSize = kMaxNumOutputBbox * sizeof(Detection) / sizeof(float) + 1;
// Segmentation prototype output: 32 channels at a quarter of the network input resolution.
const static int kOutputSegSize = 32 * (kInputH / 4) * (kInputW / 4);
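
// Map a bbox from network-input coordinates to the downscaled mask-prototype grid:
// clamp to the input bounds, then divide by `scale`.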
static cv::Rect get_downscale_rect(float bbox[4], float scale) {
    float left = bbox[0];
    float top = bbox[1];
    float right = bbox[0] + bbox[2];
    float bottom = bbox[1] + bbox[3];
    left = left < 0 ? 0 : left;
    top = top < 0 ? 0 : top;
    right = right > kInputW ? kInputW : right;
    bottom = bottom > kInputH ? kInputH : bottom;
    left /= scale;
    top /= scale;
    right /= scale;
    bottom /= scale;
    return cv::Rect(int(left), int(top), int(right - left), int(bottom - top));
}
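
// For each detection, combine its 32 mask coefficients with the prototype tensor
// (sigmoid of the dot product) inside the downscaled bbox, then resize the mask
// to the network input size.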
std::vector<cv::Mat> process_mask(const float* proto, int proto_size, std::vector<Detection>& dets) {
    std::vector<cv::Mat> masks;
    for (size_t i = 0; i < dets.size(); i++) {
        cv::Mat mask_mat = cv::Mat::zeros(kInputH / 4, kInputW / 4, CV_32FC1);
        auto r = get_downscale_rect(dets[i].bbox, 4);
        for (int x = r.x; x < r.x + r.width; x++) {
            for (int y = r.y; y < r.y + r.height; y++) {
                float e = 0.0f;
                for (int j = 0; j < 32; j++) {
                    e += dets[i].mask[j] * proto[j * proto_size / 32 + y * mask_mat.cols + x];
                }
                e = 1.0f / (1.0f + expf(-e));
                mask_mat.at<float>(y, x) = e;
            }
        }
        cv::resize(mask_mat, mask_mat, cv::Size(kInputW, kInputH));
        masks.push_back(mask_mat);
    }
    return masks;
}
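
// Build a TensorRT engine from the .wts weights and write the serialized plan to disk.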
void serialize_engine(std::string &wts_name, std::string &engine_name, std::string &sub_type, float &gd, float &gw, int &max_channels) {
    IBuilder *builder = createInferBuilder(gLogger);
    IBuilderConfig *config = builder->createBuilderConfig();
    IHostMemory *serialized_engine = nullptr;
    serialized_engine = buildEngineYolov8Seg(builder, config, DataType::kFLOAT, wts_name, gd, gw, max_channels);
    assert(serialized_engine);

    std::ofstream p(engine_name, std::ios::binary);
    if (!p) {
        std::cout << "could not open plan output file" << std::endl;
        assert(false);
    }
    p.write(reinterpret_cast<const char *>(serialized_engine->data()), serialized_engine->size());

    delete builder;
    delete config;
    delete serialized_engine;
}
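
// Load a serialized engine plan from disk and create the runtime, engine and execution context.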
void deserialize_engine(std::string &engine_name, IRuntime **runtime, ICudaEngine **engine, IExecutionContext **context) {
    std::ifstream file(engine_name, std::ios::binary);
    if (!file.good()) {
        std::cerr << "read " << engine_name << " error!" << std::endl;
        assert(false);
    }
    size_t size = 0;
    file.seekg(0, file.end);
    size = file.tellg();
    file.seekg(0, file.beg);
    char *serialized_engine = new char[size];
    assert(serialized_engine);
    file.read(serialized_engine, size);
    file.close();

    *runtime = createInferRuntime(gLogger);
    assert(*runtime);
    *engine = (*runtime)->deserializeCudaEngine(serialized_engine, size);
    assert(*engine);
    *context = (*engine)->createExecutionContext();
    assert(*context);
    delete[] serialized_engine;
}
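
// Allocate device buffers for the input, detection output and segmentation proto output,
// plus the host buffers required by the selected post-processing mode ("c" = CPU, "g" = GPU).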
void prepare_buffer(ICudaEngine *engine, float **input_buffer_device, float **output_buffer_device, float **output_seg_buffer_device,
                    float **output_buffer_host, float **output_seg_buffer_host, float **decode_ptr_host, float **decode_ptr_device, std::string cuda_post_process) {
    assert(engine->getNbBindings() == 3);
    // In order to bind the buffers, we need to know the names of the input and output tensors.
    // Note that indices are guaranteed to be less than IEngine::getNbBindings()
    const int inputIndex = engine->getBindingIndex(kInputTensorName);
    const int outputIndex = engine->getBindingIndex(kOutputTensorName);
    const int outputIndex_seg = engine->getBindingIndex("proto");
    assert(inputIndex == 0);
    assert(outputIndex == 1);
    assert(outputIndex_seg == 2);
    // Create GPU buffers on device
    CUDA_CHECK(cudaMalloc((void **)input_buffer_device, kBatchSize * 3 * kInputH * kInputW * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void **)output_buffer_device, kBatchSize * kOutputSize * sizeof(float)));
    CUDA_CHECK(cudaMalloc((void **)output_seg_buffer_device, kBatchSize * kOutputSegSize * sizeof(float)));
    if (cuda_post_process == "c") {
        *output_buffer_host = new float[kBatchSize * kOutputSize];
        *output_seg_buffer_host = new float[kBatchSize * kOutputSegSize];
    } else if (cuda_post_process == "g") {
        if (kBatchSize > 1) {
            std::cerr << "Do not yet support GPU post processing for multiple batches" << std::endl;
            exit(0);
        }
        // Allocate memory for decode_ptr_host and copy to device
        *decode_ptr_host = new float[1 + kMaxNumOutputBbox * bbox_element];
        CUDA_CHECK(cudaMalloc((void **)decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element)));
    }
}
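
// Run one asynchronous inference pass on the batch. In "c" mode the raw detection and proto
// tensors are copied back to the host; in "g" mode decoding and NMS run on the GPU and only
// the decoded results are copied back.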
void infer(IExecutionContext &context, cudaStream_t &stream, void **buffers, float *output, float *output_seg, int batchsize, float *decode_ptr_host, float *decode_ptr_device, int model_bboxes, std::string cuda_post_process) {
    // infer on the batch asynchronously, and DMA output back to host
    auto start = std::chrono::system_clock::now();
    context.enqueue(batchsize, buffers, stream, nullptr);
    if (cuda_post_process == "c") {
        std::cout << "kOutputSize:" << kOutputSize << std::endl;
        CUDA_CHECK(cudaMemcpyAsync(output, buffers[1], batchsize * kOutputSize * sizeof(float), cudaMemcpyDeviceToHost, stream));
        std::cout << "kOutputSegSize:" << kOutputSegSize << std::endl;
        CUDA_CHECK(cudaMemcpyAsync(output_seg, buffers[2], batchsize * kOutputSegSize * sizeof(float), cudaMemcpyDeviceToHost, stream));
        auto end = std::chrono::system_clock::now();
        std::cout << "inference time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
    } else if (cuda_post_process == "g") {
        CUDA_CHECK(cudaMemsetAsync(decode_ptr_device, 0, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), stream));
        cuda_decode((float *)buffers[1], model_bboxes, kConfThresh, decode_ptr_device, kMaxNumOutputBbox, stream);
        cuda_nms(decode_ptr_device, kNmsThresh, kMaxNumOutputBbox, stream);  // cuda nms
        CUDA_CHECK(cudaMemcpyAsync(decode_ptr_host, decode_ptr_device, sizeof(float) * (1 + kMaxNumOutputBbox * bbox_element), cudaMemcpyDeviceToHost, stream));
        auto end = std::chrono::system_clock::now();
        std::cout << "inference and gpu postprocess time: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms" << std::endl;
    }
    CUDA_CHECK(cudaStreamSynchronize(stream));
}
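
// Parse command line arguments:
//   -s [.wts] [.engine] [n/s/m/l/x]               build an engine from weights
//   -d [.engine] [image_dir] [c/g] [labels_file]  run inference on a directory of images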
bool parse_args(int argc, char **argv, std::string &wts, std::string &engine, std::string &img_dir, std::string &sub_type,
                std::string &cuda_post_process, std::string &labels_filename, float &gd, float &gw, int &max_channels) {
    if (argc < 4)
        return false;
    if (std::string(argv[1]) == "-s" && argc == 5) {
        wts = std::string(argv[2]);
        engine = std::string(argv[3]);
        sub_type = std::string(argv[4]);
        if (sub_type == "n") {
            gd = 0.33;
            gw = 0.25;
            max_channels = 1024;
        } else if (sub_type == "s") {
            gd = 0.33;
            gw = 0.50;
            max_channels = 1024;
        } else if (sub_type == "m") {
            gd = 0.67;
            gw = 0.75;
            max_channels = 576;
        } else if (sub_type == "l") {
            gd = 1.0;
            gw = 1.0;
            max_channels = 512;
        } else if (sub_type == "x") {
            gd = 1.0;
            gw = 1.25;
            max_channels = 640;
        } else {
            return false;
        }
    } else if (std::string(argv[1]) == "-d" && argc == 6) {
        engine = std::string(argv[2]);
        img_dir = std::string(argv[3]);
        cuda_post_process = std::string(argv[4]);
        labels_filename = std::string(argv[5]);
    } else {
        return false;
    }
    return true;
}
int main(int argc, char **argv) {
    cudaSetDevice(kGpuId);
    std::string wts_name = "";
    std::string engine_name = "";
    std::string img_dir;
    std::string sub_type = "";
    std::string cuda_post_process = "";
    std::string labels_filename = "../coco.txt";
    int model_bboxes;
    float gd = 0.0f, gw = 0.0f;
    int max_channels = 0;

    if (!parse_args(argc, argv, wts_name, engine_name, img_dir, sub_type, cuda_post_process, labels_filename, gd, gw, max_channels)) {
        std::cerr << "Arguments not right!" << std::endl;
        std::cerr << "./yolov8_seg -s [.wts] [.engine] [n/s/m/l/x]  // serialize model to plan file" << std::endl;
        std::cerr << "./yolov8_seg -d [.engine] ../samples [c/g] coco_file  // deserialize plan file and run inference" << std::endl;
        return -1;
    }
    // Create a model using the API directly and serialize it to a file
    if (!wts_name.empty()) {
        serialize_engine(wts_name, engine_name, sub_type, gd, gw, max_channels);
        return 0;
    }

    // Deserialize the engine from file
    IRuntime *runtime = nullptr;
    ICudaEngine *engine = nullptr;
    IExecutionContext *context = nullptr;
    deserialize_engine(engine_name, &runtime, &engine, &context);
    cudaStream_t stream;
    CUDA_CHECK(cudaStreamCreate(&stream));
    cuda_preprocess_init(kMaxInputImageSize);
    auto out_dims = engine->getBindingDimensions(1);
    model_bboxes = out_dims.d[0];

    // Prepare cpu and gpu buffers
    float *device_buffers[3];
    float *output_buffer_host = nullptr;
    float *output_seg_buffer_host = nullptr;
    float *decode_ptr_host = nullptr;
    float *decode_ptr_device = nullptr;

    // Read images from directory
    std::vector<std::string> file_names;
    if (read_files_in_dir(img_dir.c_str(), file_names) < 0) {
        std::cerr << "read_files_in_dir failed." << std::endl;
        return -1;
    }

    std::unordered_map<int, std::string> labels_map;
    read_labels(labels_filename, labels_map);
    assert(kNumClass == labels_map.size());

    prepare_buffer(engine, &device_buffers[0], &device_buffers[1], &device_buffers[2], &output_buffer_host, &output_seg_buffer_host, &decode_ptr_host, &decode_ptr_device, cuda_post_process);
    // Batch predict
    for (size_t i = 0; i < file_names.size(); i += kBatchSize) {
        // Get a batch of images
        std::vector<cv::Mat> img_batch;
        std::vector<std::string> img_name_batch;
        for (size_t j = i; j < i + kBatchSize && j < file_names.size(); j++) {
            cv::Mat img = cv::imread(img_dir + "/" + file_names[j]);
            if (img.empty()) {
                std::cerr << "Failed to read image: " << file_names[j] << std::endl;
                continue;
            }
            img_batch.push_back(img);
            img_name_batch.push_back(file_names[j]);
        }
        // Preprocess
        cuda_batch_preprocess(img_batch, device_buffers[0], kInputW, kInputH, stream);
        // Run inference
        infer(*context, stream, (void **)device_buffers, output_buffer_host, output_seg_buffer_host, kBatchSize, decode_ptr_host, decode_ptr_device, model_bboxes, cuda_post_process);
        std::vector<std::vector<Detection>> res_batch;
        if (cuda_post_process == "c") {
            // NMS
            batch_nms(res_batch, output_buffer_host, img_batch.size(), kOutputSize, kConfThresh, kNmsThresh);
            for (size_t b = 0; b < img_batch.size(); b++) {
                auto &res = res_batch[b];
                cv::Mat img = img_batch[b];
                auto masks = process_mask(&output_seg_buffer_host[b * kOutputSegSize], kOutputSegSize, res);
                draw_mask_bbox(img, res, masks, labels_map);
                cv::imwrite("_" + img_name_batch[b], img);
            }
        } else if (cuda_post_process == "g") {
            // Process gpu decode and nms results
            // batch_process(res_batch, decode_ptr_host, img_batch.size(), bbox_element, img_batch);
            // todo seg in gpu
            std::cerr << "seg_postprocess is not supported on gpu right now" << std::endl;
        }
    }
    // Release stream and buffers
    cudaStreamDestroy(stream);
    CUDA_CHECK(cudaFree(device_buffers[0]));
    CUDA_CHECK(cudaFree(device_buffers[1]));
    CUDA_CHECK(cudaFree(device_buffers[2]));
    CUDA_CHECK(cudaFree(decode_ptr_device));
    delete[] decode_ptr_host;
    delete[] output_buffer_host;
    delete[] output_seg_buffer_host;
    cuda_preprocess_destroy();

    // Destroy the engine
    delete context;
    delete engine;
    delete runtime;
    return 0;
}