program temp

旧城等待, 2023-01-17 13:51 235阅读 0赞

调试暂存

  • 2021.4.30:12:39temp
  • new
  • normal

2021.4.30:12:39temp

  1. /** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */
#include <string.h>
#include <sys/time.h>

#include <chrono>
#include <fstream>
#include <iostream>
#include <ostream>
#include <sstream>
#include <string>

#include "gstdsexample.h"

#include "opencv2/core/core.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
  14. using namespace cv;
  15. using namespace std;
  16. GST_DEBUG_CATEGORY_STATIC (gst_dsexample_debug);
  17. #define GST_CAT_DEFAULT gst_dsexample_debug
  18. #define USE_EGLIMAGE 1
  19. /* enable to write transformed cvmat to files */
  20. /* #define DSEXAMPLE_DEBUG */
  21. static GQuark _dsmeta_quark = 0;
  22. /* Enum to identify properties */
  23. enum
  24. {
  25. PROP_0,
  26. PROP_UNIQUE_ID,
  27. PROP_PROCESSING_WIDTH,
  28. PROP_PROCESSING_HEIGHT,
  29. PROP_PROCESS_FULL_FRAME,
  30. PROP_BLUR_OBJECTS,
  31. PROP_GPU_DEVICE_ID
  32. };
  33. #define CHECK_NVDS_MEMORY_AND_GPUID(object, surface) \ ({ int _errtype=0;\ do { \ if ((surface->memType == NVBUF_MEM_DEFAULT || surface->memType == NVBUF_MEM_CUDA_DEVICE) && \ (surface->gpuId != object->gpu_id)) { \ GST_ELEMENT_ERROR (object, RESOURCE, FAILED, \ ("Input surface gpu-id doesnt match with configured gpu-id for element," \ " please allocate input using unified memory, or use same gpu-ids"),\ ("surface-gpu-id=%d,%s-gpu-id=%d",surface->gpuId,GST_ELEMENT_NAME(object),\ object->gpu_id)); \ _errtype = 1;\ } \ } while(0); \ _errtype; \ })
  34. /* Default values for properties */
  35. #define DEFAULT_UNIQUE_ID 15
  36. #define DEFAULT_PROCESSING_WIDTH 640
  37. #define DEFAULT_PROCESSING_HEIGHT 480
  38. #define DEFAULT_PROCESS_FULL_FRAME TRUE
  39. #define DEFAULT_BLUR_OBJECTS FALSE
  40. #define DEFAULT_GPU_ID 0
  41. #define RGB_BYTES_PER_PIXEL 3
  42. #define RGBA_BYTES_PER_PIXEL 4
  43. #define Y_BYTES_PER_PIXEL 1
  44. #define UV_BYTES_PER_PIXEL 2
  45. #define MIN_INPUT_OBJECT_WIDTH 16
  46. #define MIN_INPUT_OBJECT_HEIGHT 16
  47. /*畸变矫正相关*/
  48. #define SRC_WIDTH 1920
  49. #define SRC_HEIGHT 1080
  50. Mat mapx = Mat(Size(SRC_WIDTH, SRC_HEIGHT), CV_32FC1);
  51. Mat mapy = Mat(Size(SRC_WIDTH, SRC_HEIGHT), CV_32FC1);
  52. Mat newimage ; //校正后输出图片
  53. #define CHECK_NPP_STATUS(npp_status,error_str) do { \ if ((npp_status) != NPP_SUCCESS) { \ g_print ("Error: %s in %s at line %d: NPP Error %d\n", \ error_str, __FILE__, __LINE__, npp_status); \ goto error; \ } \ } while (0)
  54. #define CHECK_CUDA_STATUS(cuda_status,error_str) do { \ if ((cuda_status) != cudaSuccess) { \ g_print ("Error: %s in %s at line %d (%s)\n", \ error_str, __FILE__, __LINE__, cudaGetErrorName(cuda_status)); \ goto error; \ } \ } while (0)
  55. /* By default NVIDIA Hardware allocated memory flows through the pipeline. We * will be processing on this type of memory only. */
  56. #define GST_CAPS_FEATURE_MEMORY_NVMM "memory:NVMM"
  57. static GstStaticPadTemplate gst_dsexample_sink_template =
  58. GST_STATIC_PAD_TEMPLATE ("sink",
  59. GST_PAD_SINK,
  60. GST_PAD_ALWAYS,
  61. GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES
  62. (GST_CAPS_FEATURE_MEMORY_NVMM,
  63. "{ NV12, RGBA, I420 }")));
  64. static GstStaticPadTemplate gst_dsexample_src_template =
  65. GST_STATIC_PAD_TEMPLATE ("src",
  66. GST_PAD_SRC,
  67. GST_PAD_ALWAYS,
  68. GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES
  69. (GST_CAPS_FEATURE_MEMORY_NVMM,
  70. "{ NV12, RGBA, I420 }")));
  71. /* Define our element type. Standard GObject/GStreamer boilerplate stuff */
  72. #define gst_dsexample_parent_class parent_class
  73. G_DEFINE_TYPE (GstDsExample, gst_dsexample, GST_TYPE_BASE_TRANSFORM);
  74. static void gst_dsexample_set_property (GObject * object, guint prop_id,
  75. const GValue * value, GParamSpec * pspec);
  76. static void gst_dsexample_get_property (GObject * object, guint prop_id,
  77. GValue * value, GParamSpec * pspec);
  78. static gboolean gst_dsexample_set_caps (GstBaseTransform * btrans,
  79. GstCaps * incaps, GstCaps * outcaps);
  80. static gboolean gst_dsexample_start (GstBaseTransform * btrans);
  81. static gboolean gst_dsexample_stop (GstBaseTransform * btrans);
  82. static GstFlowReturn gst_dsexample_transform_ip (GstBaseTransform *
  83. btrans, GstBuffer * inbuf);
  84. static void
  85. attach_metadata_full_frame (GstDsExample * dsexample, NvDsFrameMeta *frame_meta,
  86. gdouble scale_ratio, DsExampleOutput * output, guint batch_id);
  87. static void attach_metadata_object (GstDsExample * dsexample,
  88. NvDsObjectMeta * obj_meta, DsExampleOutput * output);
  89. void InitMat(Mat& m, float* num)
  90. {
  91. for (int i = 0; i<m.rows; i++)
  92. {
  93. for (int j = 0; j<m.cols; j++)
  94. {
  95. m.at<float>(i, j) = *(num + i * m.rows + j);
  96. }
  97. }
  98. }
  99. void jiaozheng_init(void)
  100. {
  101. int OK = 0;
  102. Mat R = Mat::eye(3, 3, CV_32F);
  103. //参数矩阵
  104. float neican_data[] = { 9558.649257742036, 0, 959.3165310990756, 0, 9435.752651759443, 532.7507141910969, 0, 0, 1 };
  105. Mat cameraMatrix(3, 3, CV_32FC1);
  106. InitMat(neican_data,cameraMatrix);
  107. cout << "cameraMatrix= " << endl << " " << cameraMatrix << endl << endl;
  108. //测得的畸变系数
  109. float jibian_data[] = { -6.956561513881647, -68.83902522804168, -0.004834538444671919, 0.01471273691928269, -0.4916103704308509 };
  110. Mat distCoeffs(1, 5, CV_32FC1); /* 摄像机的5个畸变系数:k1,k2,p1,p2,k3 */
  111. InitMat( jibian_data, distCoeffs);
  112. cout << "distCoeffs= " << endl << " " << distCoeffs << endl << endl;
  113. /********相机矫正*******************************************************************************/
  114. //cout << "mapx= " << endl << " " << mapx << endl << endl;
  115. //cout << "mapy= " << endl << " " << mapy << endl << endl;
  116. initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, Size(SRC_WIDTH, SRC_HEIGHT), CV_32FC1, mapx, mapy);
  117. //cout << "mapx= " << endl << " " << mapx << endl << endl;
  118. //cout << "mapy= " << endl << " " << mapy << endl << endl;
  119. }
  120. /******************************************************************************* Function: jibianjiaozheng Description: 该函数实现畸变矫正 Input: 原图 Output: 畸变矫正后的图片 Return: 0: Successful ohters: Failed *******************************************************************************/
  121. int jibianjiaozheng(Mat src_picture, Mat & dst_picture)
  122. {
  123. int OK = 0;
  124. static unsigned int INIT_MAT_OK = 1;
  125. if (INIT_MAT_OK)
  126. {
  127. newimage = src_picture.clone(); //校正后输出图片
  128. INIT_MAT_OK = 0;
  129. }
  130. remap(src_picture, newimage, mapx, mapy, INTER_LINEAR);
  131. dst_picture= newimage+1;
  132. return OK;
  133. }
  134. /* Install properties, set sink and src pad capabilities, override the required * functions of the base class, These are common to all instances of the * element. */
  135. static void
  136. gst_dsexample_class_init (GstDsExampleClass * klass)
  137. {
  138. GObjectClass *gobject_class;
  139. GstElementClass *gstelement_class;
  140. GstBaseTransformClass *gstbasetransform_class;
  141. /* Indicates we want to use DS buf api */
  142. g_setenv ("DS_NEW_BUFAPI", "1", TRUE);
  143. gobject_class = (GObjectClass *) klass;
  144. gstelement_class = (GstElementClass *) klass;
  145. gstbasetransform_class = (GstBaseTransformClass *) klass;
  146. /* Overide base class functions */
  147. gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_dsexample_set_property);
  148. gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_dsexample_get_property);
  149. gstbasetransform_class->set_caps = GST_DEBUG_FUNCPTR (gst_dsexample_set_caps);
  150. gstbasetransform_class->start = GST_DEBUG_FUNCPTR (gst_dsexample_start);
  151. gstbasetransform_class->stop = GST_DEBUG_FUNCPTR (gst_dsexample_stop);
  152. gstbasetransform_class->transform_ip =
  153. GST_DEBUG_FUNCPTR (gst_dsexample_transform_ip);
  154. /* Install properties */
  155. g_object_class_install_property (gobject_class, PROP_UNIQUE_ID,
  156. g_param_spec_uint ("unique-id",
  157. "Unique ID",
  158. "Unique ID for the element. Can be used to identify output of the"
  159. " element", 0, G_MAXUINT, DEFAULT_UNIQUE_ID, (GParamFlags)
  160. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  161. g_object_class_install_property (gobject_class, PROP_PROCESSING_WIDTH,
  162. g_param_spec_int ("processing-width",
  163. "Processing Width",
  164. "Width of the input buffer to algorithm",
  165. 1, G_MAXINT, DEFAULT_PROCESSING_WIDTH, (GParamFlags)
  166. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  167. g_object_class_install_property (gobject_class, PROP_PROCESSING_HEIGHT,
  168. g_param_spec_int ("processing-height",
  169. "Processing Height",
  170. "Height of the input buffer to algorithm",
  171. 1, G_MAXINT, DEFAULT_PROCESSING_HEIGHT, (GParamFlags)
  172. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  173. g_object_class_install_property (gobject_class, PROP_PROCESS_FULL_FRAME,
  174. g_param_spec_boolean ("full-frame",
  175. "Full frame",
  176. "Enable to process full frame or disable to process objects detected"
  177. "by primary detector", DEFAULT_PROCESS_FULL_FRAME, (GParamFlags)
  178. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  179. g_object_class_install_property (gobject_class, PROP_BLUR_OBJECTS,
  180. g_param_spec_boolean ("blur-objects",
  181. "Blur Objects",
  182. "Enable to blur the objects detected in full-frame=0 mode"
  183. "by primary detector", DEFAULT_BLUR_OBJECTS, (GParamFlags)
  184. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  185. g_object_class_install_property (gobject_class, PROP_GPU_DEVICE_ID,
  186. g_param_spec_uint ("gpu-id",
  187. "Set GPU Device ID",
  188. "Set GPU Device ID", 0,
  189. G_MAXUINT, 0,
  190. GParamFlags
  191. (G_PARAM_READWRITE |
  192. G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY)));
  193. /* Set sink and src pad capabilities */
  194. gst_element_class_add_pad_template (gstelement_class,
  195. gst_static_pad_template_get (&gst_dsexample_src_template));
  196. gst_element_class_add_pad_template (gstelement_class,
  197. gst_static_pad_template_get (&gst_dsexample_sink_template));
  198. /* Set metadata describing the element */
  199. gst_element_class_set_details_simple (gstelement_class,
  200. "DsExample plugin",
  201. "DsExample Plugin",
  202. "Process a 3rdparty example algorithm on objects / full frame",
  203. "NVIDIA Corporation. Post on Deepstream for Tesla forum for any queries "
  204. "@ https://devtalk.nvidia.com/default/board/209/");
  205. }
  206. static void
  207. gst_dsexample_init (GstDsExample * dsexample)
  208. {
  209. GstBaseTransform *btrans = GST_BASE_TRANSFORM (dsexample);
  210. /* We will not be generating a new buffer. Just adding / updating * metadata. */
  211. gst_base_transform_set_in_place (GST_BASE_TRANSFORM (btrans), TRUE);
  212. /* We do not want to change the input caps. Set to passthrough. transform_ip * is still called. */
  213. gst_base_transform_set_passthrough (GST_BASE_TRANSFORM (btrans), TRUE);
  214. /* Initialize all property variables to default values */
  215. dsexample->unique_id = DEFAULT_UNIQUE_ID;
  216. dsexample->processing_width = DEFAULT_PROCESSING_WIDTH;
  217. dsexample->processing_height = DEFAULT_PROCESSING_HEIGHT;
  218. dsexample->process_full_frame = DEFAULT_PROCESS_FULL_FRAME;
  219. dsexample->blur_objects = DEFAULT_BLUR_OBJECTS;
  220. dsexample->gpu_id = DEFAULT_GPU_ID;
  221. /* This quark is required to identify NvDsMeta when iterating through * the buffer metadatas */
  222. if (!_dsmeta_quark)
  223. _dsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);
  224. }
  225. /* Function called when a property of the element is set. Standard boilerplate. */
  226. static void
  227. gst_dsexample_set_property (GObject * object, guint prop_id,
  228. const GValue * value, GParamSpec * pspec)
  229. {
  230. GstDsExample *dsexample = GST_DSEXAMPLE (object);
  231. switch (prop_id) {
  232. case PROP_UNIQUE_ID:
  233. dsexample->unique_id = g_value_get_uint (value);
  234. break;
  235. case PROP_PROCESSING_WIDTH:
  236. dsexample->processing_width = g_value_get_int (value);
  237. break;
  238. case PROP_PROCESSING_HEIGHT:
  239. dsexample->processing_height = g_value_get_int (value);
  240. break;
  241. case PROP_PROCESS_FULL_FRAME:
  242. dsexample->process_full_frame = g_value_get_boolean (value);
  243. break;
  244. case PROP_BLUR_OBJECTS:
  245. dsexample->blur_objects = g_value_get_boolean (value);
  246. break;
  247. case PROP_GPU_DEVICE_ID:
  248. dsexample->gpu_id = g_value_get_uint (value);
  249. break;
  250. default:
  251. G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
  252. break;
  253. }
  254. }
  255. /* Function called when a property of the element is requested. Standard * boilerplate. */
  256. static void
  257. gst_dsexample_get_property (GObject * object, guint prop_id,
  258. GValue * value, GParamSpec * pspec)
  259. {
  260. GstDsExample *dsexample = GST_DSEXAMPLE (object);
  261. switch (prop_id) {
  262. case PROP_UNIQUE_ID:
  263. g_value_set_uint (value, dsexample->unique_id);
  264. break;
  265. case PROP_PROCESSING_WIDTH:
  266. g_value_set_int (value, dsexample->processing_width);
  267. break;
  268. case PROP_PROCESSING_HEIGHT:
  269. g_value_set_int (value, dsexample->processing_height);
  270. break;
  271. case PROP_PROCESS_FULL_FRAME:
  272. g_value_set_boolean (value, dsexample->process_full_frame);
  273. break;
  274. case PROP_BLUR_OBJECTS:
  275. g_value_set_boolean (value, dsexample->blur_objects);
  276. break;
  277. case PROP_GPU_DEVICE_ID:
  278. g_value_set_uint (value, dsexample->gpu_id);
  279. break;
  280. default:
  281. G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
  282. break;
  283. }
  284. }
  285. /** * Initialize all resources and start the output thread */
  286. static gboolean
  287. gst_dsexample_start (GstBaseTransform * btrans)
  288. {
  289. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  290. NvBufSurfaceCreateParams create_params;
  291. DsExampleInitParams init_params =
  292. { dsexample->processing_width, dsexample->processing_height,
  293. dsexample->process_full_frame
  294. };
  295. GstQuery *queryparams = NULL;
  296. guint batch_size = 1;
  297. /* Algorithm specific initializations and resource allocation. */
  298. dsexample->dsexamplelib_ctx = DsExampleCtxInit (&init_params);
  299. GST_DEBUG_OBJECT (dsexample, "ctx lib %p \n", dsexample->dsexamplelib_ctx);
  300. CHECK_CUDA_STATUS (cudaSetDevice (dsexample->gpu_id),
  301. "Unable to set cuda device");
  302. dsexample->batch_size = 1;
  303. queryparams = gst_nvquery_batch_size_new ();
  304. if (gst_pad_peer_query (GST_BASE_TRANSFORM_SINK_PAD (btrans), queryparams)
  305. || gst_pad_peer_query (GST_BASE_TRANSFORM_SRC_PAD (btrans), queryparams)) {
  306. if (gst_nvquery_batch_size_parse (queryparams, &batch_size)) {
  307. dsexample->batch_size = batch_size;
  308. }
  309. }
  310. GST_DEBUG_OBJECT (dsexample, "Setting batch-size %d \n",
  311. dsexample->batch_size);
  312. gst_query_unref (queryparams);
  313. if (dsexample->process_full_frame && dsexample->blur_objects) {
  314. GST_ERROR ("Error: does not support blurring while processing full frame");
  315. goto error;
  316. }
  317. CHECK_CUDA_STATUS (cudaStreamCreate (&dsexample->cuda_stream),
  318. "Could not create cuda stream");
  319. if (dsexample->inter_buf)
  320. NvBufSurfaceDestroy (dsexample->inter_buf);
  321. dsexample->inter_buf = NULL;
  322. /* An intermediate buffer for NV12/RGBA to BGR conversion will be * required. Can be skipped if custom algorithm can work directly on NV12/RGBA. */
  323. create_params.gpuId = dsexample->gpu_id;
  324. create_params.width = dsexample->processing_width;
  325. create_params.height = dsexample->processing_height;
  326. create_params.size = 0;
  327. create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
  328. create_params.layout = NVBUF_LAYOUT_PITCH;
  329. #ifdef __aarch64__
  330. create_params.memType = NVBUF_MEM_DEFAULT;
  331. #else
  332. create_params.memType = NVBUF_MEM_CUDA_UNIFIED;
  333. #endif
  334. if (NvBufSurfaceCreate (&dsexample->inter_buf, 1,
  335. &create_params) != 0) {
  336. GST_ERROR ("Error: Could not allocate internal buffer for dsexample");
  337. goto error;
  338. }
  339. /* Create host memory for storing converted/scaled interleaved RGB data */
  340. CHECK_CUDA_STATUS (cudaMallocHost (&dsexample->host_rgb_buf,
  341. dsexample->processing_width * dsexample->processing_height *
  342. RGB_BYTES_PER_PIXEL), "Could not allocate cuda host buffer");
  343. GST_DEBUG_OBJECT (dsexample, "allocated cuda buffer %p \n",
  344. dsexample->host_rgb_buf);
  345. /* CV Mat containing interleaved RGB data. This call does not allocate memory. * It uses host_rgb_buf as data. */
  346. dsexample->cvmat =
  347. new cv::Mat (dsexample->processing_height, dsexample->processing_width,
  348. CV_8UC3, dsexample->host_rgb_buf,
  349. dsexample->processing_width * RGB_BYTES_PER_PIXEL);
  350. if (!dsexample->cvmat)
  351. goto error;
  352. GST_DEBUG_OBJECT (dsexample, "created CV Mat\n");
  353. return TRUE;
  354. error:
  355. if (dsexample->host_rgb_buf) {
  356. cudaFreeHost (dsexample->host_rgb_buf);
  357. dsexample->host_rgb_buf = NULL;
  358. }
  359. if (dsexample->cuda_stream) {
  360. cudaStreamDestroy (dsexample->cuda_stream);
  361. dsexample->cuda_stream = NULL;
  362. }
  363. if (dsexample->dsexamplelib_ctx)
  364. DsExampleCtxDeinit (dsexample->dsexamplelib_ctx);
  365. return FALSE;
  366. }
  367. /** * Stop the output thread and free up all the resources */
  368. static gboolean
  369. gst_dsexample_stop (GstBaseTransform * btrans)
  370. {
  371. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  372. if (dsexample->inter_buf)
  373. NvBufSurfaceDestroy(dsexample->inter_buf);
  374. dsexample->inter_buf = NULL;
  375. if (dsexample->cuda_stream)
  376. cudaStreamDestroy (dsexample->cuda_stream);
  377. dsexample->cuda_stream = NULL;
  378. delete dsexample->cvmat;
  379. dsexample->cvmat = NULL;
  380. if (dsexample->host_rgb_buf) {
  381. cudaFreeHost (dsexample->host_rgb_buf);
  382. dsexample->host_rgb_buf = NULL;
  383. }
  384. GST_DEBUG_OBJECT (dsexample, "deleted CV Mat \n");
  385. /* Deinit the algorithm library */
  386. DsExampleCtxDeinit (dsexample->dsexamplelib_ctx);
  387. dsexample->dsexamplelib_ctx = NULL;
  388. GST_DEBUG_OBJECT (dsexample, "ctx lib released \n");
  389. return TRUE;
  390. }
  391. /** * Called when source / sink pad capabilities have been negotiated. */
  392. static gboolean
  393. gst_dsexample_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
  394. GstCaps * outcaps)
  395. {
  396. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  397. /* Save the input video information, since this will be required later. */
  398. gst_video_info_from_caps (&dsexample->video_info, incaps);
  399. if (dsexample->blur_objects && !dsexample->process_full_frame) {
  400. /* requires RGBA format for blurring the objects in opencv */
  401. if (dsexample->video_info.finfo->format != GST_VIDEO_FORMAT_RGBA) {
  402. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  403. ("input format should be RGBA when using blur-objects property"), (NULL));
  404. goto error;
  405. }
  406. }
  407. return TRUE;
  408. error:
  409. return FALSE;
  410. }
  411. /** * Scale the entire frame to the processing resolution maintaining aspect ratio. * Or crop and scale objects to the processing resolution maintaining the aspect * ratio. Remove the padding required by hardware and convert from RGBA to RGB * using openCV. These steps can be skipped if the algorithm can work with * padded data and/or can work with RGBA. */
  412. static GstFlowReturn
  413. get_converted_mat (GstDsExample * dsexample, NvBufSurface *input_buf, gint idx,
  414. NvOSD_RectParams * crop_rect_params, gdouble & ratio, gint input_width,
  415. gint input_height)
  416. {
  417. NvBufSurfTransform_Error err;
  418. NvBufSurfTransformConfigParams transform_config_params;
  419. NvBufSurfTransformParams transform_params;
  420. NvBufSurfTransformRect src_rect;
  421. NvBufSurfTransformRect dst_rect;
  422. NvBufSurface ip_surf;
  423. cv::Mat in_mat;
  424. ip_surf = *input_buf;
  425. ip_surf.numFilled = ip_surf.batchSize = 1;
  426. ip_surf.surfaceList = &(input_buf->surfaceList[idx]);
  427. gint src_left = GST_ROUND_UP_2((unsigned int)crop_rect_params->left);
  428. gint src_top = GST_ROUND_UP_2((unsigned int)crop_rect_params->top);
  429. gint src_width = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->width);
  430. gint src_height = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->height);
  431. /* Maintain aspect ratio */
  432. double hdest = dsexample->processing_width * src_height / (double) src_width;
  433. double wdest = dsexample->processing_height * src_width / (double) src_height;
  434. guint dest_width, dest_height;
  435. if (hdest <= dsexample->processing_height) {
  436. dest_width = dsexample->processing_width;
  437. dest_height = hdest;
  438. } else {
  439. dest_width = wdest;
  440. dest_height = dsexample->processing_height;
  441. }
  442. /* Configure transform session parameters for the transformation */
  443. transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
  444. transform_config_params.gpu_id = dsexample->gpu_id;
  445. transform_config_params.cuda_stream = dsexample->cuda_stream;
  446. /* Set the transform session parameters for the conversions executed in this * thread. */
  447. err = NvBufSurfTransformSetSessionParams (&transform_config_params);
  448. if (err != NvBufSurfTransformError_Success) {
  449. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  450. ("NvBufSurfTransformSetSessionParams failed with error %d", err), (NULL));
  451. goto error;
  452. }
  453. /* Calculate scaling ratio while maintaining aspect ratio */
  454. ratio = MIN (1.0 * dest_width/ src_width, 1.0 * dest_height / src_height);
  455. if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
  456. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  457. ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
  458. goto error;
  459. }
  460. #ifdef __aarch64__
  461. if (ratio <= 1.0 / 16 || ratio >= 16.0) {
  462. /* Currently cannot scale by ratio > 16 or < 1/16 for Jetson */
  463. goto error;
  464. }
  465. #endif
  466. /* Set the transform ROIs for source and destination */
  467. src_rect = { (guint)src_top, (guint)src_left, (guint)src_width, (guint)src_height};
  468. dst_rect = { 0, 0, (guint)dest_width, (guint)dest_height};
  469. /* Set the transform parameters */
  470. transform_params.src_rect = &src_rect;
  471. transform_params.dst_rect = &dst_rect;
  472. transform_params.transform_flag =
  473. NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
  474. NVBUFSURF_TRANSFORM_CROP_DST;
  475. transform_params.transform_filter = NvBufSurfTransformInter_Default;
  476. /* Memset the memory */
  477. NvBufSurfaceMemSet (dsexample->inter_buf, 0, 0, 0);
  478. GST_DEBUG_OBJECT (dsexample, "Scaling and converting input buffer\n");
  479. /* Transformation scaling+format conversion if any. */
  480. err = NvBufSurfTransform (&ip_surf, dsexample->inter_buf, &transform_params);
  481. if (err != NvBufSurfTransformError_Success) {
  482. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  483. ("NvBufSurfTransform failed with error %d while converting buffer", err),
  484. (NULL));
  485. goto error;
  486. }
  487. /* Map the buffer so that it can be accessed by CPU */
  488. if (NvBufSurfaceMap (dsexample->inter_buf, 0, 0, NVBUF_MAP_READ) != 0){
  489. goto error;
  490. }
  491. /* Cache the mapped data for CPU access */
  492. NvBufSurfaceSyncForCpu (dsexample->inter_buf, 0, 0);
  493. /* Use openCV to remove padding and convert RGBA to BGR. Can be skipped if * algorithm can handle padded RGBA data. */
  494. in_mat =
  495. cv::Mat (dsexample->processing_height, dsexample->processing_width,
  496. CV_8UC4, dsexample->inter_buf->surfaceList[0].mappedAddr.addr[0],
  497. dsexample->inter_buf->surfaceList[0].pitch);
  498. #if (CV_MAJOR_VERSION >= 4)
  499. cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
  500. #else
  501. cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
  502. #endif
  503. if (NvBufSurfaceUnMap (dsexample->inter_buf, 0, 0)){
  504. goto error;
  505. }
  506. #ifdef __aarch64__
  507. /* To use the converted buffer in CUDA, create an EGLImage and then use * CUDA-EGL interop APIs */
  508. if (USE_EGLIMAGE) {
  509. if (NvBufSurfaceMapEglImage (dsexample->inter_buf, 0) !=0 ) {
  510. goto error;
  511. }
  512. /* dsexample->inter_buf->surfaceList[0].mappedAddr.eglImage * Use interop APIs cuGraphicsEGLRegisterImage and * cuGraphicsResourceGetMappedEglFrame to access the buffer in CUDA */
  513. /* Destroy the EGLImage */
  514. NvBufSurfaceUnMapEglImage (dsexample->inter_buf, 0);
  515. }
  516. #endif
  517. /* We will first convert only the Region of Interest (the entire frame or the * object bounding box) to RGB and then scale the converted RGB frame to * processing resolution. */
  518. return GST_FLOW_OK;
  519. error:
  520. return GST_FLOW_ERROR;
  521. }
  522. #if 0
  523. /* * Blur the detected objects when processing in object mode (full-frame=0) */
  524. static GstFlowReturn
  525. blur_objects (GstDsExample * dsexample, gint idx,
  526. NvOSD_RectParams * crop_rect_params, cv::Mat in_mat)
  527. {
  528. cv::Rect crop_rect;
  529. if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
  530. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  531. ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
  532. return GST_FLOW_ERROR;
  533. }
  534. /* rectangle for cropped objects */
  535. crop_rect = cv::Rect (crop_rect_params->left, crop_rect_params->top,
  536. crop_rect_params->width, crop_rect_params->height);
  537. /* apply gaussian blur to the detected objects */
  538. GaussianBlur(in_mat(crop_rect), in_mat(crop_rect), cv::Size(15,15), 4);
  539. return GST_FLOW_OK;
  540. }
  541. #endif
  542. /* * Blur the detected objects when processing in object mode (full-frame=0) */
  543. static GstFlowRetur blur_objects (GstDsExample * dsexample, gint idx,
  544. NvOSD_RectParams * crop_rect_params, cv::Mat in_mat)
  545. {
  546. int hrst=1;
  547. cv::Rect crop_rect;
  548. static unsigned int jiaozheng_init_flag = 0;
  549. if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0))
  550. {
  551. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  552. ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
  553. return GST_FLOW_ERROR;
  554. }
  555. steady_clock::time_point t1 = steady_clock::now();
  556. if (!jiaozheng_init_flag)
  557. {
  558. printf("初始化\n");
  559. jiaozheng_init();
  560. jiaozheng_init_flag = 1;
  561. }
  562. hrst = jibianjiaozheng(in_mat, in_mat);
  563. if (hrst != 0)
  564. {
  565. printf("畸变矫正失败\n");
  566. }
  567. else
  568. {
  569. //printf("畸变矫正成功\n");
  570. }
  571. /*time cost calc code */
  572. steady_clock::time_point t2 = steady_clock::now();
  573. duration<double> time_used = duration_cast<duration<double>>(t2 - t1);
  574. cout<" time cost= "<< (time_used.count() * 1000) <<" ms."<<endl;
  575. return GST_FLOW_OK;
  576. }
  577. /** * Called when element recieves an input buffer from upstream element. */
  578. static GstFlowReturn
  579. gst_dsexample_transform_ip (GstBaseTransform * btrans, GstBuffer * inbuf)
  580. {
  581. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  582. GstMapInfo in_map_info;
  583. GstFlowReturn flow_ret = GST_FLOW_ERROR;
  584. gdouble scale_ratio = 1.0;
  585. DsExampleOutput *output;
  586. NvBufSurface *surface = NULL;
  587. NvDsBatchMeta *batch_meta = NULL;
  588. NvDsFrameMeta *frame_meta = NULL;
  589. NvDsMetaList * l_frame = NULL;
  590. guint i = 0;
  591. dsexample->frame_num++;
  592. CHECK_CUDA_STATUS (cudaSetDevice (dsexample->gpu_id),
  593. "Unable to set cuda device");
  594. memset (&in_map_info, 0, sizeof (in_map_info));
  595. if (!gst_buffer_map (inbuf, &in_map_info, GST_MAP_READ)) {
  596. g_print ("Error: Failed to map gst buffer\n");
  597. goto error;
  598. }
  599. nvds_set_input_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  600. surface = (NvBufSurface *) in_map_info.data;
  601. GST_DEBUG_OBJECT (dsexample,
  602. "Processing Frame %" G_GUINT64_FORMAT " Surface %p\n",
  603. dsexample->frame_num, surface);
  604. if (CHECK_NVDS_MEMORY_AND_GPUID (dsexample, surface))
  605. goto error;
  606. batch_meta = gst_buffer_get_nvds_batch_meta (inbuf);
  607. if (batch_meta == nullptr) {
  608. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  609. ("NvDsBatchMeta not found for input buffer."), (NULL));
  610. return GST_FLOW_ERROR;
  611. }
  612. if (dsexample->process_full_frame) {
  613. for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
  614. l_frame = l_frame->next)
  615. {
  616. frame_meta = (NvDsFrameMeta *) (l_frame->data);
  617. NvOSD_RectParams rect_params;
  618. /* Scale the entire frame to processing resolution */
  619. rect_params.left = 0;
  620. rect_params.top = 0;
  621. rect_params.width = dsexample->video_info.width;
  622. rect_params.height = dsexample->video_info.height;
  623. /* Scale and convert the frame */
  624. if (get_converted_mat (dsexample, surface, i, &rect_params,
  625. scale_ratio, dsexample->video_info.width,
  626. dsexample->video_info.height) != GST_FLOW_OK) {
  627. goto error;
  628. }
  629. /* Process to get the output */
  630. output =
  631. DsExampleProcess (dsexample->dsexamplelib_ctx,
  632. dsexample->cvmat->data);
  633. /* Attach the metadata for the full frame */
  634. attach_metadata_full_frame (dsexample, frame_meta, scale_ratio, output, i);
  635. i++;
  636. free (output);
  637. }
  638. } else {
  639. /* Using object crops as input to the algorithm. The objects are detected by * the primary detector */
  640. NvDsMetaList * l_obj = NULL;
  641. NvDsObjectMeta *obj_meta = NULL;
  642. #ifndef __aarch64__
  643. if (dsexample->blur_objects) {
  644. if (surface->memType != NVBUF_MEM_CUDA_UNIFIED){
  645. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  646. ("%s:need NVBUF_MEM_CUDA_UNIFIED memory for opencv blurring",__func__), (NULL));
  647. return GST_FLOW_ERROR;
  648. }
  649. }
  650. #endif
  651. for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
  652. l_frame = l_frame->next)
  653. {
  654. frame_meta = (NvDsFrameMeta *) (l_frame->data);
  655. cv::Mat in_mat;
  656. if (dsexample->blur_objects) {
  657. /* Map the buffer so that it can be accessed by CPU */
  658. if (surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0] == NULL){
  659. if (NvBufSurfaceMap (surface, frame_meta->batch_id, 0, NVBUF_MAP_READ_WRITE) != 0){
  660. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  661. ("%s:buffer map to be accessed by CPU failed", __func__), (NULL));
  662. return GST_FLOW_ERROR;
  663. }
  664. }
  665. /* Cache the mapped data for CPU access */
  666. NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);
  667. in_mat =
  668. cv::Mat (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
  669. surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
  670. surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
  671. surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
  672. }
  673. for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
  674. l_obj = l_obj->next)
  675. {
  676. obj_meta = (NvDsObjectMeta *) (l_obj->data);
  677. if (dsexample->blur_objects) {
  678. /* gaussian blur the detected objects using opencv */
  679. if (blur_objects (dsexample, frame_meta->batch_id,
  680. &obj_meta->rect_params, in_mat) != GST_FLOW_OK) {
  681. /* Error in blurring, skip processing on object. */
  682. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  683. ("blurring the object failed"), (NULL));
  684. if (NvBufSurfaceUnMap (surface, frame_meta->batch_id, 0)){
  685. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  686. ("%s:buffer unmap to be accessed by CPU failed", __func__), (NULL));
  687. }
  688. return GST_FLOW_ERROR;
  689. }
  690. continue;
  691. }
  692. /* Should not process on objects smaller than MIN_INPUT_OBJECT_WIDTH x MIN_INPUT_OBJECT_HEIGHT * since it will cause hardware scaling issues. */
  693. if (obj_meta->rect_params.width < MIN_INPUT_OBJECT_WIDTH ||
  694. obj_meta->rect_params.height < MIN_INPUT_OBJECT_HEIGHT)
  695. continue;
  696. /* Crop and scale the object */
  697. if (get_converted_mat (dsexample,
  698. surface, frame_meta->batch_id, &obj_meta->rect_params,
  699. scale_ratio, dsexample->video_info.width,
  700. dsexample->video_info.height) != GST_FLOW_OK) {
  701. /* Error in conversion, skip processing on object. */
  702. continue;
  703. }
  704. /* Process the object crop to obtain label */
  705. output = DsExampleProcess (dsexample->dsexamplelib_ctx,
  706. dsexample->cvmat->data);
  707. /* Attach labels for the object */
  708. attach_metadata_object (dsexample, obj_meta, output);
  709. free (output);
  710. }
  711. if (dsexample->blur_objects) {
  712. /* Cache the mapped data for device access */
  713. NvBufSurfaceSyncForDevice (surface, frame_meta->batch_id, 0);
  714. #ifdef DSEXAMPLE_DEBUG
  715. /* Use openCV to remove padding and convert RGBA to BGR. Can be skipped if * algorithm can handle padded RGBA data. */
  716. #if (CV_MAJOR_VERSION >= 4)
  717. cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
  718. #else
  719. cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
  720. #endif
  721. /* used to dump the converted mat to files for debug */
  722. static guint cnt = 0;
  723. cv::imwrite("out_" + std::to_string (cnt) + ".jpeg", *dsexample->cvmat);
  724. cnt++;
  725. #endif
  726. }
  727. }
  728. }
  729. flow_ret = GST_FLOW_OK;
  730. error:
  731. nvds_set_output_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  732. gst_buffer_unmap (inbuf, &in_map_info);
  733. return flow_ret;
  734. }
/**
 * Attach metadata for the full frame. We will be adding a new metadata.
 *
 * For every object reported by the library, acquires an object meta from the
 * batch pool, fills in bounding box and display-text parameters (rescaled
 * back to the original frame resolution), and adds it to the frame meta.
 */
static void
attach_metadata_full_frame (GstDsExample * dsexample, NvDsFrameMeta *frame_meta,
    gdouble scale_ratio, DsExampleOutput * output, guint batch_id)
{
  NvDsBatchMeta *batch_meta = frame_meta->base_meta.batch_meta;
  NvDsObjectMeta *object_meta = NULL;
  /* static: the OSD keeps only a pointer to this name (assigned to
   * font_params.font_name below), so it must outlive this call */
  static gchar font_name[] = "Serif";
  GST_DEBUG_OBJECT (dsexample, "Attaching metadata %d\n", output->numObjects);
  for (gint i = 0; i < output->numObjects; i++) {
    DsExampleObject *obj = &output->object[i];
    object_meta = nvds_acquire_obj_meta_from_pool(batch_meta);
    NvOSD_RectParams & rect_params = object_meta->rect_params;
    NvOSD_TextParams & text_params = object_meta->text_params;
    /* Assign bounding box coordinates */
    rect_params.left = obj->left;
    rect_params.top = obj->top;
    rect_params.width = obj->width;
    rect_params.height = obj->height;
    /* Background fill disabled (would be semi-transparent yellow if
     * has_bg_color were set to 1) */
    rect_params.has_bg_color = 0;
    rect_params.bg_color = (NvOSD_ColorParams) {1, 1, 0, 0.4};
    /* Red border of width 3 */
    rect_params.border_width = 3;
    rect_params.border_color = (NvOSD_ColorParams) {1, 0, 0, 1};
    /* Scale the bounding boxes proportionally based on how the object/frame
     * was scaled during input */
    rect_params.left /= scale_ratio;
    rect_params.top /= scale_ratio;
    rect_params.width /= scale_ratio;
    rect_params.height /= scale_ratio;
    GST_DEBUG_OBJECT (dsexample, "Attaching rect%d of batch%u"
        " left->%f top->%f width->%f"
        " height->%f label->%s\n", i, batch_id, rect_params.left,
        rect_params.top, rect_params.width, rect_params.height, obj->label);
    object_meta->object_id = UNTRACKED_OBJECT_ID;
    g_strlcpy (object_meta->obj_label, obj->label, MAX_LABEL_SIZE);
    /* display_text requires heap allocated memory (freed by the meta pool) */
    text_params.display_text = g_strdup (obj->label);
    /* Display text above the left top corner of the object */
    text_params.x_offset = rect_params.left;
    text_params.y_offset = rect_params.top - 10;
    /* Set black background for the text */
    text_params.set_bg_clr = 1;
    text_params.text_bg_clr = (NvOSD_ColorParams) {0, 0, 0, 1};
    /* Font face, size and color */
    text_params.font_params.font_name = font_name;
    text_params.font_params.font_size = 11;
    text_params.font_params.font_color = (NvOSD_ColorParams) {1, 1, 1, 1};
    nvds_add_obj_meta_to_frame(frame_meta, object_meta, NULL);
    /* Mark that an inference component has processed this frame */
    frame_meta->bInferDone = TRUE;
  }
}
  791. /** * Only update string label in an existing object metadata. No bounding boxes. * We assume only one label per object is generated */
  792. static void
  793. attach_metadata_object (GstDsExample * dsexample, NvDsObjectMeta * obj_meta,
  794. DsExampleOutput * output)
  795. {
  796. if (output->numObjects == 0)
  797. return;
  798. NvDsBatchMeta *batch_meta = obj_meta->base_meta.batch_meta;
  799. NvDsClassifierMeta *classifier_meta =
  800. nvds_acquire_classifier_meta_from_pool (batch_meta);
  801. classifier_meta->unique_component_id = dsexample->unique_id;
  802. NvDsLabelInfo *label_info =
  803. nvds_acquire_label_info_meta_from_pool (batch_meta);
  804. g_strlcpy (label_info->result_label, output->object[0].label, MAX_LABEL_SIZE);
  805. nvds_add_label_info_meta_to_classifier(classifier_meta, label_info);
  806. nvds_add_classifier_meta_to_object (obj_meta, classifier_meta);
  807. nvds_acquire_meta_lock (batch_meta);
  808. NvOSD_TextParams & text_params = obj_meta->text_params;
  809. NvOSD_RectParams & rect_params = obj_meta->rect_params;
  810. /* Below code to display the result */
  811. /* Set black background for the text * display_text required heap allocated memory */
  812. if (text_params.display_text) {
  813. gchar *conc_string = g_strconcat (text_params.display_text, " ",
  814. output->object[0].label, NULL);
  815. g_free (text_params.display_text);
  816. text_params.display_text = conc_string;
  817. } else {
  818. /* Display text above the left top corner of the object */
  819. text_params.x_offset = rect_params.left;
  820. text_params.y_offset = rect_params.top - 10;
  821. text_params.display_text = g_strdup (output->object[0].label);
  822. /* Font face, size and color */
  823. text_params.font_params.font_name = (char *)"Serif";
  824. text_params.font_params.font_size = 11;
  825. text_params.font_params.font_color = (NvOSD_ColorParams) {
  826. 1, 1, 1, 1};
  827. /* Set black background for the text */
  828. text_params.set_bg_clr = 1;
  829. text_params.text_bg_clr = (NvOSD_ColorParams) {
  830. 0, 0, 0, 1};
  831. }
  832. nvds_release_meta_lock (batch_meta);
  833. }
  834. /** * Boiler plate for registering a plugin and an element. */
  835. static gboolean
  836. dsexample_plugin_init (GstPlugin * plugin)
  837. {
  838. GST_DEBUG_CATEGORY_INIT (gst_dsexample_debug, "dsexample", 0,
  839. "dsexample plugin");
  840. return gst_element_register (plugin, "dsexample", GST_RANK_PRIMARY,
  841. GST_TYPE_DSEXAMPLE);
  842. }
/* Register the plugin with GStreamer. DESCRIPTION, DS_VERSION, LICENSE,
 * BINARY_PACKAGE and URL are presumably supplied by the build system —
 * confirm against the Makefile. */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    nvdsgst_dsexample,
    DESCRIPTION, dsexample_plugin_init, DS_VERSION, LICENSE, BINARY_PACKAGE, URL)

new

  1. /** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */
  2. #include <string.h>
  3. #include <string>
  4. #include <sstream>
  5. #include <iostream>
  6. #include <ostream>
  7. #include <fstream>
  8. #include "gstdsexample.h"
  9. #include <sys/time.h>
  10. #include "opencv2/core/core.hpp"
  11. #include "opencv2/imgproc/imgproc.hpp"
  12. #include "opencv2/calib3d/calib3d.hpp"
  13. #include "opencv2/highgui/highgui.hpp"
  14. using namespace cv;
  15. using namespace std;
/* Per-element GStreamer debug category */
GST_DEBUG_CATEGORY_STATIC (gst_dsexample_debug);
#define GST_CAT_DEFAULT gst_dsexample_debug
#define USE_EGLIMAGE 1
/* enable to write transformed cvmat to files */
/* #define DSEXAMPLE_DEBUG */
/* Quark used to identify NvDsMeta on buffers; initialized lazily in
 * gst_dsexample_init() */
static GQuark _dsmeta_quark = 0;
/* Enum to identify properties (ids for set_property / get_property) */
enum
{
  PROP_0,                   /* reserved dummy id required by GObject */
  PROP_UNIQUE_ID,           /* "unique-id" */
  PROP_PROCESSING_WIDTH,    /* "processing-width" */
  PROP_PROCESSING_HEIGHT,   /* "processing-height" */
  PROP_PROCESS_FULL_FRAME,  /* "full-frame" */
  PROP_BLUR_OBJECTS,        /* "blur-objects" */
  PROP_GPU_DEVICE_ID        /* "gpu-id" */
};
  33. #define CHECK_NVDS_MEMORY_AND_GPUID(object, surface) \ ({ int _errtype=0;\ do { \ if ((surface->memType == NVBUF_MEM_DEFAULT || surface->memType == NVBUF_MEM_CUDA_DEVICE) && \ (surface->gpuId != object->gpu_id)) { \ GST_ELEMENT_ERROR (object, RESOURCE, FAILED, \ ("Input surface gpu-id doesnt match with configured gpu-id for element," \ " please allocate input using unified memory, or use same gpu-ids"),\ ("surface-gpu-id=%d,%s-gpu-id=%d",surface->gpuId,GST_ELEMENT_NAME(object),\ object->gpu_id)); \ _errtype = 1;\ } \ } while(0); \ _errtype; \ })
  34. /* Default values for properties */
  35. #define DEFAULT_UNIQUE_ID 15
  36. #define DEFAULT_PROCESSING_WIDTH 640
  37. #define DEFAULT_PROCESSING_HEIGHT 480
  38. #define DEFAULT_PROCESS_FULL_FRAME TRUE
  39. #define DEFAULT_BLUR_OBJECTS FALSE
  40. #define DEFAULT_GPU_ID 0
  41. #define RGB_BYTES_PER_PIXEL 3
  42. #define RGBA_BYTES_PER_PIXEL 4
  43. #define Y_BYTES_PER_PIXEL 1
  44. #define UV_BYTES_PER_PIXEL 2
  45. #define MIN_INPUT_OBJECT_WIDTH 16
  46. #define MIN_INPUT_OBJECT_HEIGHT 16
  47. #define CHECK_NPP_STATUS(npp_status,error_str) do { \ if ((npp_status) != NPP_SUCCESS) { \ g_print ("Error: %s in %s at line %d: NPP Error %d\n", \ error_str, __FILE__, __LINE__, npp_status); \ goto error; \ } \ } while (0)
  48. #define CHECK_CUDA_STATUS(cuda_status,error_str) do { \ if ((cuda_status) != cudaSuccess) { \ g_print ("Error: %s in %s at line %d (%s)\n", \ error_str, __FILE__, __LINE__, cudaGetErrorName(cuda_status)); \ goto error; \ } \ } while (0)
  49. /* By default NVIDIA Hardware allocated memory flows through the pipeline. We * will be processing on this type of memory only. */
  50. #define GST_CAPS_FEATURE_MEMORY_NVMM "memory:NVMM"
  51. static GstStaticPadTemplate gst_dsexample_sink_template =
  52. GST_STATIC_PAD_TEMPLATE ("sink",
  53. GST_PAD_SINK,
  54. GST_PAD_ALWAYS,
  55. GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES
  56. (GST_CAPS_FEATURE_MEMORY_NVMM,
  57. "{ NV12, RGBA, I420 }")));
  58. static GstStaticPadTemplate gst_dsexample_src_template =
  59. GST_STATIC_PAD_TEMPLATE ("src",
  60. GST_PAD_SRC,
  61. GST_PAD_ALWAYS,
  62. GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES
  63. (GST_CAPS_FEATURE_MEMORY_NVMM,
  64. "{ NV12, RGBA, I420 }")));
  65. /* Define our element type. Standard GObject/GStreamer boilerplate stuff */
  66. #define gst_dsexample_parent_class parent_class
  67. G_DEFINE_TYPE (GstDsExample, gst_dsexample, GST_TYPE_BASE_TRANSFORM);
  68. static void gst_dsexample_set_property (GObject * object, guint prop_id,
  69. const GValue * value, GParamSpec * pspec);
  70. static void gst_dsexample_get_property (GObject * object, guint prop_id,
  71. GValue * value, GParamSpec * pspec);
  72. static gboolean gst_dsexample_set_caps (GstBaseTransform * btrans,
  73. GstCaps * incaps, GstCaps * outcaps);
  74. static gboolean gst_dsexample_start (GstBaseTransform * btrans);
  75. static gboolean gst_dsexample_stop (GstBaseTransform * btrans);
  76. static GstFlowReturn gst_dsexample_transform_ip (GstBaseTransform *
  77. btrans, GstBuffer * inbuf);
  78. static void
  79. attach_metadata_full_frame (GstDsExample * dsexample, NvDsFrameMeta *frame_meta,
  80. gdouble scale_ratio, DsExampleOutput * output, guint batch_id);
  81. static void attach_metadata_object (GstDsExample * dsexample,
  82. NvDsObjectMeta * obj_meta, DsExampleOutput * output);
  83. void InitMat(Mat& m, float* num)
  84. {
  85. for (int i = 0; i<m.rows; i++)
  86. for (int j = 0; j<m.cols; j++)
  87. m.at<float>(i, j) = *(num + i * m.rows + j);
  88. }
  89. /******************************************************************************* Function: jibianjiaozheng Description: 该函数实现畸变矫正 Input: 原图 Output: 畸变矫正后的图片 Return: 0: Successful ohters: Failed *******************************************************************************/
  90. int jibianjiaozheng(Mat src_picture, Mat & dst_picture)
  91. {
  92. int OK = 0;
  93. int i = 1000;
  94. int n = 1;
  95. Mat edges;
  96. Mat R = Mat::eye(3, 3, CV_32F);
  97. if (src_picture.data == NULL)
  98. {
  99. printf("param error,input Image is empty\n");
  100. return -1;
  101. }
  102. Size image_size; /* 图像的尺寸 */
  103. //获取图像大小
  104. image_size.width = 1920;
  105. image_size.height = 1080;
  106. //cameraMatrix为 "相机内参数矩阵:" << endl;
  107. Mat mapx = Mat(image_size, CV_32FC1);
  108. Mat mapy = Mat(image_size, CV_32FC1);
  109. //参数矩阵
  110. float neican_data[] = { 9558.649257742036, 0, 959.3165310990756, 0, 9435.752651759443, 532.7507141910969, 0, 0, 1 };
  111. Mat cameraMatrix(3, 3, CV_32FC1);
  112. InitMat(cameraMatrix, neican_data);
  113. //cout << "cameraMatrix= " << endl << " " << cameraMatrix << endl << endl;
  114. //测得的畸变系数
  115. float jibian_data[] = { -6.956561513881647, -68.83902522804168, -0.004834538444671919, 0.01471273691928269, -0.4916103704308509 };
  116. Mat distCoeffs(1, 5, CV_32FC1); /* 摄像机的5个畸变系数:k1,k2,p1,p2,k3 */
  117. InitMat(distCoeffs, jibian_data);
  118. //cout << "distCoeffs= " << endl << " " << distCoeffs << endl << endl;
  119. i = 0;
  120. //namedWindow("【原始图】", 0);//参数为零,则可以自由拖动
  121. //imshow("【原始图】", src_picture);
  122. /********相机矫正*******************************************************************************/
  123. initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, image_size, CV_32FC1, mapx, mapy);
  124. Mat imageSource = src_picture; //读取畸变图片
  125. Mat newimage = imageSource.clone(); //校正后输出图片
  126. remap(imageSource, newimage, mapx, mapy, INTER_LINEAR);
  127. //namedWindow("畸变校正后的图片", 0);//参数为零,则可以自由拖动
  128. //imshow("畸变校正后的图片", newimage);
  129. dst_picture= newimage+1;
  130. return OK;
  131. }
  132. /* Install properties, set sink and src pad capabilities, override the required * functions of the base class, These are common to all instances of the * element. */
  133. static void
  134. gst_dsexample_class_init (GstDsExampleClass * klass)
  135. {
  136. GObjectClass *gobject_class;
  137. GstElementClass *gstelement_class;
  138. GstBaseTransformClass *gstbasetransform_class;
  139. /* Indicates we want to use DS buf api */
  140. g_setenv ("DS_NEW_BUFAPI", "1", TRUE);
  141. gobject_class = (GObjectClass *) klass;
  142. gstelement_class = (GstElementClass *) klass;
  143. gstbasetransform_class = (GstBaseTransformClass *) klass;
  144. /* Overide base class functions */
  145. gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_dsexample_set_property);
  146. gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_dsexample_get_property);
  147. gstbasetransform_class->set_caps = GST_DEBUG_FUNCPTR (gst_dsexample_set_caps);
  148. gstbasetransform_class->start = GST_DEBUG_FUNCPTR (gst_dsexample_start);
  149. gstbasetransform_class->stop = GST_DEBUG_FUNCPTR (gst_dsexample_stop);
  150. gstbasetransform_class->transform_ip =
  151. GST_DEBUG_FUNCPTR (gst_dsexample_transform_ip);
  152. /* Install properties */
  153. g_object_class_install_property (gobject_class, PROP_UNIQUE_ID,
  154. g_param_spec_uint ("unique-id",
  155. "Unique ID",
  156. "Unique ID for the element. Can be used to identify output of the"
  157. " element", 0, G_MAXUINT, DEFAULT_UNIQUE_ID, (GParamFlags)
  158. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  159. g_object_class_install_property (gobject_class, PROP_PROCESSING_WIDTH,
  160. g_param_spec_int ("processing-width",
  161. "Processing Width",
  162. "Width of the input buffer to algorithm",
  163. 1, G_MAXINT, DEFAULT_PROCESSING_WIDTH, (GParamFlags)
  164. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  165. g_object_class_install_property (gobject_class, PROP_PROCESSING_HEIGHT,
  166. g_param_spec_int ("processing-height",
  167. "Processing Height",
  168. "Height of the input buffer to algorithm",
  169. 1, G_MAXINT, DEFAULT_PROCESSING_HEIGHT, (GParamFlags)
  170. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  171. g_object_class_install_property (gobject_class, PROP_PROCESS_FULL_FRAME,
  172. g_param_spec_boolean ("full-frame",
  173. "Full frame",
  174. "Enable to process full frame or disable to process objects detected"
  175. "by primary detector", DEFAULT_PROCESS_FULL_FRAME, (GParamFlags)
  176. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  177. g_object_class_install_property (gobject_class, PROP_BLUR_OBJECTS,
  178. g_param_spec_boolean ("blur-objects",
  179. "Blur Objects",
  180. "Enable to blur the objects detected in full-frame=0 mode"
  181. "by primary detector", DEFAULT_BLUR_OBJECTS, (GParamFlags)
  182. (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  183. g_object_class_install_property (gobject_class, PROP_GPU_DEVICE_ID,
  184. g_param_spec_uint ("gpu-id",
  185. "Set GPU Device ID",
  186. "Set GPU Device ID", 0,
  187. G_MAXUINT, 0,
  188. GParamFlags
  189. (G_PARAM_READWRITE |
  190. G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY)));
  191. /* Set sink and src pad capabilities */
  192. gst_element_class_add_pad_template (gstelement_class,
  193. gst_static_pad_template_get (&gst_dsexample_src_template));
  194. gst_element_class_add_pad_template (gstelement_class,
  195. gst_static_pad_template_get (&gst_dsexample_sink_template));
  196. /* Set metadata describing the element */
  197. gst_element_class_set_details_simple (gstelement_class,
  198. "DsExample plugin",
  199. "DsExample Plugin",
  200. "Process a 3rdparty example algorithm on objects / full frame",
  201. "NVIDIA Corporation. Post on Deepstream for Tesla forum for any queries "
  202. "@ https://devtalk.nvidia.com/default/board/209/");
  203. }
/* Per-instance initialization: configure the base transform for in-place
 * passthrough operation and reset all properties to their defaults. */
static void
gst_dsexample_init (GstDsExample * dsexample)
{
  GstBaseTransform *btrans = GST_BASE_TRANSFORM (dsexample);
  /* We will not be generating a new buffer. Just adding / updating
   * metadata. */
  gst_base_transform_set_in_place (GST_BASE_TRANSFORM (btrans), TRUE);
  /* We do not want to change the input caps. Set to passthrough.
   * transform_ip is still called. */
  gst_base_transform_set_passthrough (GST_BASE_TRANSFORM (btrans), TRUE);
  /* Initialize all property variables to default values */
  dsexample->unique_id = DEFAULT_UNIQUE_ID;
  dsexample->processing_width = DEFAULT_PROCESSING_WIDTH;
  dsexample->processing_height = DEFAULT_PROCESSING_HEIGHT;
  dsexample->process_full_frame = DEFAULT_PROCESS_FULL_FRAME;
  dsexample->blur_objects = DEFAULT_BLUR_OBJECTS;
  dsexample->gpu_id = DEFAULT_GPU_ID;
  /* This quark is required to identify NvDsMeta when iterating through
   * the buffer metadatas */
  if (!_dsmeta_quark)
    _dsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);
}
/* Function called when a property of the element is set. Standard
 * boilerplate: maps each property id to its field on the instance. */
static void
gst_dsexample_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec)
{
  GstDsExample *dsexample = GST_DSEXAMPLE (object);
  switch (prop_id) {
    case PROP_UNIQUE_ID:
      dsexample->unique_id = g_value_get_uint (value);
      break;
    case PROP_PROCESSING_WIDTH:
      dsexample->processing_width = g_value_get_int (value);
      break;
    case PROP_PROCESSING_HEIGHT:
      dsexample->processing_height = g_value_get_int (value);
      break;
    case PROP_PROCESS_FULL_FRAME:
      dsexample->process_full_frame = g_value_get_boolean (value);
      break;
    case PROP_BLUR_OBJECTS:
      dsexample->blur_objects = g_value_get_boolean (value);
      break;
    case PROP_GPU_DEVICE_ID:
      dsexample->gpu_id = g_value_get_uint (value);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
/* Function called when a property of the element is requested. Standard
 * boilerplate: mirrors set_property, reading each field back into a GValue. */
static void
gst_dsexample_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec)
{
  GstDsExample *dsexample = GST_DSEXAMPLE (object);
  switch (prop_id) {
    case PROP_UNIQUE_ID:
      g_value_set_uint (value, dsexample->unique_id);
      break;
    case PROP_PROCESSING_WIDTH:
      g_value_set_int (value, dsexample->processing_width);
      break;
    case PROP_PROCESSING_HEIGHT:
      g_value_set_int (value, dsexample->processing_height);
      break;
    case PROP_PROCESS_FULL_FRAME:
      g_value_set_boolean (value, dsexample->process_full_frame);
      break;
    case PROP_BLUR_OBJECTS:
      g_value_set_boolean (value, dsexample->blur_objects);
      break;
    case PROP_GPU_DEVICE_ID:
      g_value_set_uint (value, dsexample->gpu_id);
      break;
    default:
      G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
      break;
  }
}
/**
 * Initialize all resources and start the output thread.
 *
 * Allocates, in order: the algorithm library context, a CUDA stream, an
 * intermediate NvBufSurface for scaling/color conversion, a pinned host
 * RGB buffer, and a cv::Mat wrapping that host buffer. On any failure,
 * jumps to `error` which releases what was acquired so far.
 */
static gboolean
gst_dsexample_start (GstBaseTransform * btrans)
{
  GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  NvBufSurfaceCreateParams create_params;
  DsExampleInitParams init_params =
      { dsexample->processing_width, dsexample->processing_height,
    dsexample->process_full_frame
  };
  GstQuery *queryparams = NULL;
  guint batch_size = 1;
  /* Algorithm specific initializations and resource allocation. */
  dsexample->dsexamplelib_ctx = DsExampleCtxInit (&init_params);
  GST_DEBUG_OBJECT (dsexample, "ctx lib %p \n", dsexample->dsexamplelib_ctx);
  CHECK_CUDA_STATUS (cudaSetDevice (dsexample->gpu_id),
      "Unable to set cuda device");
  /* Query peers for the negotiated batch size; keep the default of 1 if
   * no peer answers. */
  dsexample->batch_size = 1;
  queryparams = gst_nvquery_batch_size_new ();
  if (gst_pad_peer_query (GST_BASE_TRANSFORM_SINK_PAD (btrans), queryparams)
      || gst_pad_peer_query (GST_BASE_TRANSFORM_SRC_PAD (btrans), queryparams)) {
    if (gst_nvquery_batch_size_parse (queryparams, &batch_size)) {
      dsexample->batch_size = batch_size;
    }
  }
  GST_DEBUG_OBJECT (dsexample, "Setting batch-size %d \n",
      dsexample->batch_size);
  gst_query_unref (queryparams);
  /* Blurring is only supported on object crops, not the full frame. */
  if (dsexample->process_full_frame && dsexample->blur_objects) {
    GST_ERROR ("Error: does not support blurring while processing full frame");
    goto error;
  }
  CHECK_CUDA_STATUS (cudaStreamCreate (&dsexample->cuda_stream),
      "Could not create cuda stream");
  if (dsexample->inter_buf)
    NvBufSurfaceDestroy (dsexample->inter_buf);
  dsexample->inter_buf = NULL;
  /* An intermediate buffer for NV12/RGBA to BGR conversion will be
   * required. Can be skipped if custom algorithm can work directly on
   * NV12/RGBA. */
  create_params.gpuId = dsexample->gpu_id;
  create_params.width = dsexample->processing_width;
  create_params.height = dsexample->processing_height;
  create_params.size = 0;
  create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
  create_params.layout = NVBUF_LAYOUT_PITCH;
#ifdef __aarch64__
  create_params.memType = NVBUF_MEM_DEFAULT;
#else
  create_params.memType = NVBUF_MEM_CUDA_UNIFIED;
#endif
  if (NvBufSurfaceCreate (&dsexample->inter_buf, 1,
          &create_params) != 0) {
    GST_ERROR ("Error: Could not allocate internal buffer for dsexample");
    goto error;
  }
  /* Create host memory for storing converted/scaled interleaved RGB data */
  CHECK_CUDA_STATUS (cudaMallocHost (&dsexample->host_rgb_buf,
          dsexample->processing_width * dsexample->processing_height *
          RGB_BYTES_PER_PIXEL), "Could not allocate cuda host buffer");
  GST_DEBUG_OBJECT (dsexample, "allocated cuda buffer %p \n",
      dsexample->host_rgb_buf);
  /* CV Mat containing interleaved RGB data. This call does not allocate
   * memory. It uses host_rgb_buf as data. */
  dsexample->cvmat =
      new cv::Mat (dsexample->processing_height, dsexample->processing_width,
      CV_8UC3, dsexample->host_rgb_buf,
      dsexample->processing_width * RGB_BYTES_PER_PIXEL);
  if (!dsexample->cvmat)
    goto error;
  GST_DEBUG_OBJECT (dsexample, "created CV Mat\n");
  return TRUE;
error:
  /* NOTE(review): inter_buf is not destroyed on this path; it is released
   * by the next start() or by stop() — confirm there is no leak when start
   * fails repeatedly without a stop() in between. */
  if (dsexample->host_rgb_buf) {
    cudaFreeHost (dsexample->host_rgb_buf);
    dsexample->host_rgb_buf = NULL;
  }
  if (dsexample->cuda_stream) {
    cudaStreamDestroy (dsexample->cuda_stream);
    dsexample->cuda_stream = NULL;
  }
  if (dsexample->dsexamplelib_ctx)
    DsExampleCtxDeinit (dsexample->dsexamplelib_ctx);
  return FALSE;
}
/**
 * Stop the output thread and free up all the resources acquired in start():
 * intermediate surface, CUDA stream, cv::Mat wrapper, pinned host buffer,
 * and the algorithm library context.
 */
static gboolean
gst_dsexample_stop (GstBaseTransform * btrans)
{
  GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  if (dsexample->inter_buf)
    NvBufSurfaceDestroy(dsexample->inter_buf);
  dsexample->inter_buf = NULL;
  if (dsexample->cuda_stream)
    cudaStreamDestroy (dsexample->cuda_stream);
  dsexample->cuda_stream = NULL;
  /* delete on NULL is a no-op; cvmat wraps host_rgb_buf without owning it,
   * so the buffer itself is freed separately below */
  delete dsexample->cvmat;
  dsexample->cvmat = NULL;
  if (dsexample->host_rgb_buf) {
    cudaFreeHost (dsexample->host_rgb_buf);
    dsexample->host_rgb_buf = NULL;
  }
  GST_DEBUG_OBJECT (dsexample, "deleted CV Mat \n");
  /* Deinit the algorithm library */
  DsExampleCtxDeinit (dsexample->dsexamplelib_ctx);
  dsexample->dsexamplelib_ctx = NULL;
  GST_DEBUG_OBJECT (dsexample, "ctx lib released \n");
  return TRUE;
}
  389. /** * Called when source / sink pad capabilities have been negotiated. */
  390. static gboolean
  391. gst_dsexample_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
  392. GstCaps * outcaps)
  393. {
  394. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  395. /* Save the input video information, since this will be required later. */
  396. gst_video_info_from_caps (&dsexample->video_info, incaps);
  397. if (dsexample->blur_objects && !dsexample->process_full_frame) {
  398. /* requires RGBA format for blurring the objects in opencv */
  399. if (dsexample->video_info.finfo->format != GST_VIDEO_FORMAT_RGBA) {
  400. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  401. ("input format should be RGBA when using blur-objects property"), (NULL));
  402. goto error;
  403. }
  404. }
  405. return TRUE;
  406. error:
  407. return FALSE;
  408. }
  409. /** * Scale the entire frame to the processing resolution maintaining aspect ratio. * Or crop and scale objects to the processing resolution maintaining the aspect * ratio. Remove the padding required by hardware and convert from RGBA to RGB * using openCV. These steps can be skipped if the algorithm can work with * padded data and/or can work with RGBA. */
  410. static GstFlowReturn
  411. get_converted_mat (GstDsExample * dsexample, NvBufSurface *input_buf, gint idx,
  412. NvOSD_RectParams * crop_rect_params, gdouble & ratio, gint input_width,
  413. gint input_height)
  414. {
  415. NvBufSurfTransform_Error err;
  416. NvBufSurfTransformConfigParams transform_config_params;
  417. NvBufSurfTransformParams transform_params;
  418. NvBufSurfTransformRect src_rect;
  419. NvBufSurfTransformRect dst_rect;
  420. NvBufSurface ip_surf;
  421. cv::Mat in_mat;
  422. ip_surf = *input_buf;
  423. ip_surf.numFilled = ip_surf.batchSize = 1;
  424. ip_surf.surfaceList = &(input_buf->surfaceList[idx]);
  425. gint src_left = GST_ROUND_UP_2((unsigned int)crop_rect_params->left);
  426. gint src_top = GST_ROUND_UP_2((unsigned int)crop_rect_params->top);
  427. gint src_width = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->width);
  428. gint src_height = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->height);
  429. /* Maintain aspect ratio */
  430. double hdest = dsexample->processing_width * src_height / (double) src_width;
  431. double wdest = dsexample->processing_height * src_width / (double) src_height;
  432. guint dest_width, dest_height;
  433. if (hdest <= dsexample->processing_height) {
  434. dest_width = dsexample->processing_width;
  435. dest_height = hdest;
  436. } else {
  437. dest_width = wdest;
  438. dest_height = dsexample->processing_height;
  439. }
  440. /* Configure transform session parameters for the transformation */
  441. transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
  442. transform_config_params.gpu_id = dsexample->gpu_id;
  443. transform_config_params.cuda_stream = dsexample->cuda_stream;
  444. /* Set the transform session parameters for the conversions executed in this * thread. */
  445. err = NvBufSurfTransformSetSessionParams (&transform_config_params);
  446. if (err != NvBufSurfTransformError_Success) {
  447. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  448. ("NvBufSurfTransformSetSessionParams failed with error %d", err), (NULL));
  449. goto error;
  450. }
  451. /* Calculate scaling ratio while maintaining aspect ratio */
  452. ratio = MIN (1.0 * dest_width/ src_width, 1.0 * dest_height / src_height);
  453. if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
  454. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  455. ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
  456. goto error;
  457. }
  458. #ifdef __aarch64__
  459. if (ratio <= 1.0 / 16 || ratio >= 16.0) {
  460. /* Currently cannot scale by ratio > 16 or < 1/16 for Jetson */
  461. goto error;
  462. }
  463. #endif
  464. /* Set the transform ROIs for source and destination */
  465. src_rect = { (guint)src_top, (guint)src_left, (guint)src_width, (guint)src_height};
  466. dst_rect = { 0, 0, (guint)dest_width, (guint)dest_height};
  467. /* Set the transform parameters */
  468. transform_params.src_rect = &src_rect;
  469. transform_params.dst_rect = &dst_rect;
  470. transform_params.transform_flag =
  471. NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
  472. NVBUFSURF_TRANSFORM_CROP_DST;
  473. transform_params.transform_filter = NvBufSurfTransformInter_Default;
  474. /* Memset the memory */
  475. NvBufSurfaceMemSet (dsexample->inter_buf, 0, 0, 0);
  476. GST_DEBUG_OBJECT (dsexample, "Scaling and converting input buffer\n");
  477. /* Transformation scaling+format conversion if any. */
  478. err = NvBufSurfTransform (&ip_surf, dsexample->inter_buf, &transform_params);
  479. if (err != NvBufSurfTransformError_Success) {
  480. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  481. ("NvBufSurfTransform failed with error %d while converting buffer", err),
  482. (NULL));
  483. goto error;
  484. }
  485. /* Map the buffer so that it can be accessed by CPU */
  486. if (NvBufSurfaceMap (dsexample->inter_buf, 0, 0, NVBUF_MAP_READ) != 0){
  487. goto error;
  488. }
  489. /* Cache the mapped data for CPU access */
  490. NvBufSurfaceSyncForCpu (dsexample->inter_buf, 0, 0);
  491. /* Use openCV to remove padding and convert RGBA to BGR. Can be skipped if * algorithm can handle padded RGBA data. */
  492. in_mat =
  493. cv::Mat (dsexample->processing_height, dsexample->processing_width,
  494. CV_8UC4, dsexample->inter_buf->surfaceList[0].mappedAddr.addr[0],
  495. dsexample->inter_buf->surfaceList[0].pitch);
  496. #if (CV_MAJOR_VERSION >= 4)
  497. cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
  498. #else
  499. cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
  500. #endif
  501. if (NvBufSurfaceUnMap (dsexample->inter_buf, 0, 0)){
  502. goto error;
  503. }
  504. #ifdef __aarch64__
  505. /* To use the converted buffer in CUDA, create an EGLImage and then use * CUDA-EGL interop APIs */
  506. if (USE_EGLIMAGE) {
  507. if (NvBufSurfaceMapEglImage (dsexample->inter_buf, 0) !=0 ) {
  508. goto error;
  509. }
  510. /* dsexample->inter_buf->surfaceList[0].mappedAddr.eglImage * Use interop APIs cuGraphicsEGLRegisterImage and * cuGraphicsResourceGetMappedEglFrame to access the buffer in CUDA */
  511. /* Destroy the EGLImage */
  512. NvBufSurfaceUnMapEglImage (dsexample->inter_buf, 0);
  513. }
  514. #endif
  515. /* We will first convert only the Region of Interest (the entire frame or the * object bounding box) to RGB and then scale the converted RGB frame to * processing resolution. */
  516. return GST_FLOW_OK;
  517. error:
  518. return GST_FLOW_ERROR;
  519. }
  520. /* * Blur the detected objects when processing in object mode (full-frame=0) */
  521. static GstFlowReturn
  522. blur_objects (GstDsExample * dsexample, gint idx,
  523. NvOSD_RectParams * crop_rect_params, cv::Mat in_mat)
  524. {
  525. int hrst=1;
  526. cv::Rect crop_rect;
  527. if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
  528. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  529. ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
  530. return GST_FLOW_ERROR;
  531. }
  532. /* rectangle for cropped objects */
  533. crop_rect = cv::Rect (crop_rect_params->left, crop_rect_params->top,
  534. crop_rect_params->width, crop_rect_params->height);
  535. /* apply gaussian blur to the detected objects */
  536. //GaussianBlur(in_mat(crop_rect), in_mat(crop_rect), cv::Size(15,15), 4);
  537. hrst = jibianjiaozheng(in_mat, in_mat);
  538. if (hrst != 0)
  539. {
  540. printf("畸变矫正失败\n");
  541. }
  542. else
  543. {
  544. //printf("畸变矫正成功\n");
  545. }
  546. return GST_FLOW_OK;
  547. }
  548. /** * Called when element recieves an input buffer from upstream element. */
  549. static GstFlowReturn
  550. gst_dsexample_transform_ip (GstBaseTransform * btrans, GstBuffer * inbuf)
  551. {
  552. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  553. GstMapInfo in_map_info;
  554. GstFlowReturn flow_ret = GST_FLOW_ERROR;
  555. gdouble scale_ratio = 1.0;
  556. DsExampleOutput *output;
  557. NvBufSurface *surface = NULL;
  558. NvDsBatchMeta *batch_meta = NULL;
  559. NvDsFrameMeta *frame_meta = NULL;
  560. NvDsMetaList * l_frame = NULL;
  561. guint i = 0;
  562. dsexample->frame_num++;
  563. CHECK_CUDA_STATUS (cudaSetDevice (dsexample->gpu_id),
  564. "Unable to set cuda device");
  565. memset (&in_map_info, 0, sizeof (in_map_info));
  566. if (!gst_buffer_map (inbuf, &in_map_info, GST_MAP_READ)) {
  567. g_print ("Error: Failed to map gst buffer\n");
  568. goto error;
  569. }
  570. nvds_set_input_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  571. surface = (NvBufSurface *) in_map_info.data;
  572. GST_DEBUG_OBJECT (dsexample,
  573. "Processing Frame %" G_GUINT64_FORMAT " Surface %p\n",
  574. dsexample->frame_num, surface);
  575. if (CHECK_NVDS_MEMORY_AND_GPUID (dsexample, surface))
  576. goto error;
  577. batch_meta = gst_buffer_get_nvds_batch_meta (inbuf);
  578. if (batch_meta == nullptr) {
  579. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  580. ("NvDsBatchMeta not found for input buffer."), (NULL));
  581. return GST_FLOW_ERROR;
  582. }
  583. if (dsexample->process_full_frame) {
  584. for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
  585. l_frame = l_frame->next)
  586. {
  587. frame_meta = (NvDsFrameMeta *) (l_frame->data);
  588. NvOSD_RectParams rect_params;
  589. /* Scale the entire frame to processing resolution */
  590. rect_params.left = 0;
  591. rect_params.top = 0;
  592. rect_params.width = dsexample->video_info.width;
  593. rect_params.height = dsexample->video_info.height;
  594. /* Scale and convert the frame */
  595. if (get_converted_mat (dsexample, surface, i, &rect_params,
  596. scale_ratio, dsexample->video_info.width,
  597. dsexample->video_info.height) != GST_FLOW_OK) {
  598. goto error;
  599. }
  600. /* Process to get the output */
  601. output =
  602. DsExampleProcess (dsexample->dsexamplelib_ctx,
  603. dsexample->cvmat->data);
  604. /* Attach the metadata for the full frame */
  605. attach_metadata_full_frame (dsexample, frame_meta, scale_ratio, output, i);
  606. i++;
  607. free (output);
  608. }
  609. } else {
  610. /* Using object crops as input to the algorithm. The objects are detected by * the primary detector */
  611. NvDsMetaList * l_obj = NULL;
  612. NvDsObjectMeta *obj_meta = NULL;
  613. #ifndef __aarch64__
  614. if (dsexample->blur_objects) {
  615. if (surface->memType != NVBUF_MEM_CUDA_UNIFIED){
  616. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  617. ("%s:need NVBUF_MEM_CUDA_UNIFIED memory for opencv blurring",__func__), (NULL));
  618. return GST_FLOW_ERROR;
  619. }
  620. }
  621. #endif
  622. for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
  623. l_frame = l_frame->next)
  624. {
  625. frame_meta = (NvDsFrameMeta *) (l_frame->data);
  626. cv::Mat in_mat;
  627. if (dsexample->blur_objects) {
  628. /* Map the buffer so that it can be accessed by CPU */
  629. if (surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0] == NULL){
  630. if (NvBufSurfaceMap (surface, frame_meta->batch_id, 0, NVBUF_MAP_READ_WRITE) != 0){
  631. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  632. ("%s:buffer map to be accessed by CPU failed", __func__), (NULL));
  633. return GST_FLOW_ERROR;
  634. }
  635. }
  636. /* Cache the mapped data for CPU access */
  637. NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);
  638. in_mat =
  639. cv::Mat (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
  640. surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
  641. surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
  642. surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
  643. }
  644. for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
  645. l_obj = l_obj->next)
  646. {
  647. obj_meta = (NvDsObjectMeta *) (l_obj->data);
  648. if (dsexample->blur_objects) {
  649. /* gaussian blur the detected objects using opencv */
  650. if (blur_objects (dsexample, frame_meta->batch_id,
  651. &obj_meta->rect_params, in_mat) != GST_FLOW_OK) {
  652. /* Error in blurring, skip processing on object. */
  653. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  654. ("blurring the object failed"), (NULL));
  655. if (NvBufSurfaceUnMap (surface, frame_meta->batch_id, 0)){
  656. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  657. ("%s:buffer unmap to be accessed by CPU failed", __func__), (NULL));
  658. }
  659. return GST_FLOW_ERROR;
  660. }
  661. continue;
  662. }
  663. /* Should not process on objects smaller than MIN_INPUT_OBJECT_WIDTH x MIN_INPUT_OBJECT_HEIGHT * since it will cause hardware scaling issues. */
  664. if (obj_meta->rect_params.width < MIN_INPUT_OBJECT_WIDTH ||
  665. obj_meta->rect_params.height < MIN_INPUT_OBJECT_HEIGHT)
  666. continue;
  667. /* Crop and scale the object */
  668. if (get_converted_mat (dsexample,
  669. surface, frame_meta->batch_id, &obj_meta->rect_params,
  670. scale_ratio, dsexample->video_info.width,
  671. dsexample->video_info.height) != GST_FLOW_OK) {
  672. /* Error in conversion, skip processing on object. */
  673. continue;
  674. }
  675. /* Process the object crop to obtain label */
  676. output = DsExampleProcess (dsexample->dsexamplelib_ctx,
  677. dsexample->cvmat->data);
  678. /* Attach labels for the object */
  679. attach_metadata_object (dsexample, obj_meta, output);
  680. free (output);
  681. }
  682. if (dsexample->blur_objects) {
  683. /* Cache the mapped data for device access */
  684. NvBufSurfaceSyncForDevice (surface, frame_meta->batch_id, 0);
  685. #ifdef DSEXAMPLE_DEBUG
  686. /* Use openCV to remove padding and convert RGBA to BGR. Can be skipped if * algorithm can handle padded RGBA data. */
  687. #if (CV_MAJOR_VERSION >= 4)
  688. cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
  689. #else
  690. cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
  691. #endif
  692. /* used to dump the converted mat to files for debug */
  693. static guint cnt = 0;
  694. cv::imwrite("out_" + std::to_string (cnt) + ".jpeg", *dsexample->cvmat);
  695. cnt++;
  696. #endif
  697. }
  698. }
  699. }
  700. flow_ret = GST_FLOW_OK;
  701. error:
  702. nvds_set_output_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  703. gst_buffer_unmap (inbuf, &in_map_info);
  704. return flow_ret;
  705. }
/**
 * Attach metadata for the full frame. Acquires one new NvDsObjectMeta from
 * the batch pool per object reported by the example library, fills in box,
 * border and label-text display params, rescales coordinates back to the
 * input frame, and adds the meta to frame_meta.
 */
static void
attach_metadata_full_frame (GstDsExample * dsexample, NvDsFrameMeta *frame_meta,
    gdouble scale_ratio, DsExampleOutput * output, guint batch_id)
{
  NvDsBatchMeta *batch_meta = frame_meta->base_meta.batch_meta;
  NvDsObjectMeta *object_meta = NULL;
  /* static so the pointer stored in font_params stays valid after return */
  static gchar font_name[] = "Serif";
  GST_DEBUG_OBJECT (dsexample, "Attaching metadata %d\n", output->numObjects);
  for (gint i = 0; i < output->numObjects; i++) {
    DsExampleObject *obj = &output->object[i];
    object_meta = nvds_acquire_obj_meta_from_pool (batch_meta);
    NvOSD_RectParams & rect_params = object_meta->rect_params;
    NvOSD_TextParams & text_params = object_meta->text_params;
    /* Assign bounding box coordinates (in processing resolution) */
    rect_params.left = obj->left;
    rect_params.top = obj->top;
    rect_params.width = obj->width;
    rect_params.height = obj->height;
    /* Background fill disabled; the yellow color below is inert while
     * has_bg_color is 0 */
    rect_params.has_bg_color = 0;
    rect_params.bg_color = (NvOSD_ColorParams) {
    1, 1, 0, 0.4};
    /* Red border of width 3 */
    rect_params.border_width = 3;
    rect_params.border_color = (NvOSD_ColorParams) {
    1, 0, 0, 1};
    /* Scale the bounding boxes proportionally based on how the object/frame
     * was scaled during input */
    rect_params.left /= scale_ratio;
    rect_params.top /= scale_ratio;
    rect_params.width /= scale_ratio;
    rect_params.height /= scale_ratio;
    GST_DEBUG_OBJECT (dsexample, "Attaching rect%d of batch%u"
        " left->%f top->%f width->%f"
        " height->%f label->%s\n", i, batch_id, rect_params.left,
        rect_params.top, rect_params.width, rect_params.height, obj->label);
    object_meta->object_id = UNTRACKED_OBJECT_ID;
    g_strlcpy (object_meta->obj_label, obj->label, MAX_LABEL_SIZE);
    /* display_text requires heap allocated memory */
    text_params.display_text = g_strdup (obj->label);
    /* Display text above the left top corner of the object */
    text_params.x_offset = rect_params.left;
    text_params.y_offset = rect_params.top - 10;
    /* Set black background for the text */
    text_params.set_bg_clr = 1;
    text_params.text_bg_clr = (NvOSD_ColorParams) {
    0, 0, 0, 1};
    /* Font face, size and color */
    text_params.font_params.font_name = font_name;
    text_params.font_params.font_size = 11;
    text_params.font_params.font_color = (NvOSD_ColorParams) {
    1, 1, 1, 1};
    nvds_add_obj_meta_to_frame (frame_meta, object_meta, NULL);
    frame_meta->bInferDone = TRUE;
  }
}
/**
 * Only update the string label in an existing object metadata. No bounding
 * boxes are added. We assume only one label per object is generated: a
 * classifier meta carrying output->object[0].label is attached to obj_meta,
 * and the on-screen display text is created or appended to under the batch
 * meta lock.
 */
static void
attach_metadata_object (GstDsExample * dsexample, NvDsObjectMeta * obj_meta,
    DsExampleOutput * output)
{
  /* Nothing to attach when the library produced no result */
  if (output->numObjects == 0)
    return;
  NvDsBatchMeta *batch_meta = obj_meta->base_meta.batch_meta;
  NvDsClassifierMeta *classifier_meta =
      nvds_acquire_classifier_meta_from_pool (batch_meta);
  /* Tag the classifier result with this element's unique id */
  classifier_meta->unique_component_id = dsexample->unique_id;
  NvDsLabelInfo *label_info =
      nvds_acquire_label_info_meta_from_pool (batch_meta);
  g_strlcpy (label_info->result_label, output->object[0].label, MAX_LABEL_SIZE);
  nvds_add_label_info_meta_to_classifier (classifier_meta, label_info);
  nvds_add_classifier_meta_to_object (obj_meta, classifier_meta);
  /* Text params are shared display state; mutate them under the meta lock */
  nvds_acquire_meta_lock (batch_meta);
  NvOSD_TextParams & text_params = obj_meta->text_params;
  NvOSD_RectParams & rect_params = obj_meta->rect_params;
  /* Below code to display the result */
  /* Set black background for the text; display_text requires heap allocated
   * memory */
  if (text_params.display_text) {
    /* A label already exists (e.g. from the detector): append ours */
    gchar *conc_string = g_strconcat (text_params.display_text, " ",
        output->object[0].label, NULL);
    g_free (text_params.display_text);
    text_params.display_text = conc_string;
  } else {
    /* Display text above the left top corner of the object */
    text_params.x_offset = rect_params.left;
    text_params.y_offset = rect_params.top - 10;
    text_params.display_text = g_strdup (output->object[0].label);
    /* Font face, size and color */
    text_params.font_params.font_name = (char *) "Serif";
    text_params.font_params.font_size = 11;
    text_params.font_params.font_color = (NvOSD_ColorParams) {
    1, 1, 1, 1};
    /* Set black background for the text */
    text_params.set_bg_clr = 1;
    text_params.text_bg_clr = (NvOSD_ColorParams) {
    0, 0, 0, 1};
  }
  nvds_release_meta_lock (batch_meta);
}
  805. /** * Boiler plate for registering a plugin and an element. */
  806. static gboolean
  807. dsexample_plugin_init (GstPlugin * plugin)
  808. {
  809. GST_DEBUG_CATEGORY_INIT (gst_dsexample_debug, "dsexample", 0,
  810. "dsexample plugin");
  811. return gst_element_register (plugin, "dsexample", GST_RANK_PRIMARY,
  812. GST_TYPE_DSEXAMPLE);
  813. }
/* Plugin descriptor: exports this shared object as the `nvdsgst_dsexample`
 * GStreamer plugin. DESCRIPTION, DS_VERSION, LICENSE, BINARY_PACKAGE and URL
 * are expected to be defined by the build system / headers. */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    nvdsgst_dsexample,
    DESCRIPTION, dsexample_plugin_init, DS_VERSION, LICENSE, BINARY_PACKAGE,
    URL)

normal

  1. /** * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. */
  2. #include <string.h>
  3. #include <string>
  4. #include <sstream>
  5. #include <iostream>
  6. #include <ostream>
  7. #include <fstream>
  8. #include "gstdsexample.h"
  9. #include <sys/time.h>
  10. GST_DEBUG_CATEGORY_STATIC (gst_dsexample_debug);
  11. #define GST_CAT_DEFAULT gst_dsexample_debug
  12. #define USE_EGLIMAGE 1
  13. /* enable to write transformed cvmat to files */
  14. /* #define DSEXAMPLE_DEBUG */
  15. static GQuark _dsmeta_quark = 0;
/* Enum to identify properties installed in gst_dsexample_class_init() */
enum
{
  PROP_0,                   /* reserved sentinel required by GObject */
  PROP_UNIQUE_ID,           /* "unique-id" */
  PROP_PROCESSING_WIDTH,    /* "processing-width" */
  PROP_PROCESSING_HEIGHT,   /* "processing-height" */
  PROP_PROCESS_FULL_FRAME,  /* "full-frame" */
  PROP_BLUR_OBJECTS,        /* "blur-objects" */
  PROP_GPU_DEVICE_ID        /* "gpu-id" */
};
/* Evaluates to 1 (error) when a device-memory input surface resides on a
 * different GPU than the one this element is configured for; unified memory
 * is exempt. Uses a GCC/Clang statement expression. */
#define CHECK_NVDS_MEMORY_AND_GPUID(object, surface) \
  ({ int _errtype=0;\
  do { \
    if ((surface->memType == NVBUF_MEM_DEFAULT || surface->memType == NVBUF_MEM_CUDA_DEVICE) && \
        (surface->gpuId != object->gpu_id)) { \
      GST_ELEMENT_ERROR (object, RESOURCE, FAILED, \
          ("Input surface gpu-id doesnt match with configured gpu-id for element," \
           " please allocate input using unified memory, or use same gpu-ids"),\
          ("surface-gpu-id=%d,%s-gpu-id=%d",surface->gpuId,GST_ELEMENT_NAME(object),\
           object->gpu_id)); \
      _errtype = 1;\
    } \
    } while(0); \
    _errtype; \
  })
/* Default values for properties */
#define DEFAULT_UNIQUE_ID 15
#define DEFAULT_PROCESSING_WIDTH 640
#define DEFAULT_PROCESSING_HEIGHT 480
#define DEFAULT_PROCESS_FULL_FRAME TRUE
#define DEFAULT_BLUR_OBJECTS FALSE
#define DEFAULT_GPU_ID 0

/* Bytes per pixel for the pixel layouts this element deals with */
#define RGB_BYTES_PER_PIXEL 3
#define RGBA_BYTES_PER_PIXEL 4
#define Y_BYTES_PER_PIXEL 1
#define UV_BYTES_PER_PIXEL 2

/* Objects smaller than this are skipped (hardware scaler limitation) */
#define MIN_INPUT_OBJECT_WIDTH 16
#define MIN_INPUT_OBJECT_HEIGHT 16

/* Log and jump to a local `error` label when an NPP call fails.
 * NOTE: requires an `error:` label in the enclosing function. */
#define CHECK_NPP_STATUS(npp_status,error_str) do { \
  if ((npp_status) != NPP_SUCCESS) { \
    g_print ("Error: %s in %s at line %d: NPP Error %d\n", \
        error_str, __FILE__, __LINE__, npp_status); \
    goto error; \
  } \
} while (0)

/* Log and jump to a local `error` label when a CUDA runtime call fails */
#define CHECK_CUDA_STATUS(cuda_status,error_str) do { \
  if ((cuda_status) != cudaSuccess) { \
    g_print ("Error: %s in %s at line %d (%s)\n", \
        error_str, __FILE__, __LINE__, cudaGetErrorName(cuda_status)); \
    goto error; \
  } \
} while (0)
/* By default NVIDIA Hardware allocated memory flows through the pipeline. We
 * will be processing on this type of memory only. */
#define GST_CAPS_FEATURE_MEMORY_NVMM "memory:NVMM"

/* Sink pad: accepts NV12/RGBA/I420 video carried in NVMM (device) memory */
static GstStaticPadTemplate gst_dsexample_sink_template =
GST_STATIC_PAD_TEMPLATE ("sink",
    GST_PAD_SINK,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES
        (GST_CAPS_FEATURE_MEMORY_NVMM,
            "{ NV12, RGBA, I420 }")));

/* Src pad: identical caps; the element operates in place/passthrough */
static GstStaticPadTemplate gst_dsexample_src_template =
GST_STATIC_PAD_TEMPLATE ("src",
    GST_PAD_SRC,
    GST_PAD_ALWAYS,
    GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE_WITH_FEATURES
        (GST_CAPS_FEATURE_MEMORY_NVMM,
            "{ NV12, RGBA, I420 }")));
/* Define our element type. Standard GObject/GStreamer boilerplate stuff */
#define gst_dsexample_parent_class parent_class
G_DEFINE_TYPE (GstDsExample, gst_dsexample, GST_TYPE_BASE_TRANSFORM);

/* Forward declarations of the vmethod implementations and helpers below */
static void gst_dsexample_set_property (GObject * object, guint prop_id,
    const GValue * value, GParamSpec * pspec);
static void gst_dsexample_get_property (GObject * object, guint prop_id,
    GValue * value, GParamSpec * pspec);
static gboolean gst_dsexample_set_caps (GstBaseTransform * btrans,
    GstCaps * incaps, GstCaps * outcaps);
static gboolean gst_dsexample_start (GstBaseTransform * btrans);
static gboolean gst_dsexample_stop (GstBaseTransform * btrans);
static GstFlowReturn gst_dsexample_transform_ip (GstBaseTransform *
    btrans, GstBuffer * inbuf);
static void
attach_metadata_full_frame (GstDsExample * dsexample, NvDsFrameMeta *frame_meta,
    gdouble scale_ratio, DsExampleOutput * output, guint batch_id);
static void attach_metadata_object (GstDsExample * dsexample,
    NvDsObjectMeta * obj_meta, DsExampleOutput * output);
/* Install properties, set sink and src pad capabilities, override the
 * required functions of the base class. These are common to all instances of
 * the element. */
static void
gst_dsexample_class_init (GstDsExampleClass * klass)
{
  GObjectClass *gobject_class;
  GstElementClass *gstelement_class;
  GstBaseTransformClass *gstbasetransform_class;
  /* Indicates we want to use DS buf api */
  g_setenv ("DS_NEW_BUFAPI", "1", TRUE);
  gobject_class = (GObjectClass *) klass;
  gstelement_class = (GstElementClass *) klass;
  gstbasetransform_class = (GstBaseTransformClass *) klass;
  /* Override base class functions */
  gobject_class->set_property = GST_DEBUG_FUNCPTR (gst_dsexample_set_property);
  gobject_class->get_property = GST_DEBUG_FUNCPTR (gst_dsexample_get_property);
  gstbasetransform_class->set_caps = GST_DEBUG_FUNCPTR (gst_dsexample_set_caps);
  gstbasetransform_class->start = GST_DEBUG_FUNCPTR (gst_dsexample_start);
  gstbasetransform_class->stop = GST_DEBUG_FUNCPTR (gst_dsexample_stop);
  gstbasetransform_class->transform_ip =
      GST_DEBUG_FUNCPTR (gst_dsexample_transform_ip);
  /* Install properties (ids from the property enum at the top of the file) */
  g_object_class_install_property (gobject_class, PROP_UNIQUE_ID,
      g_param_spec_uint ("unique-id",
          "Unique ID",
          "Unique ID for the element. Can be used to identify output of the"
          " element", 0, G_MAXUINT, DEFAULT_UNIQUE_ID, (GParamFlags)
          (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_PROCESSING_WIDTH,
      g_param_spec_int ("processing-width",
          "Processing Width",
          "Width of the input buffer to algorithm",
          1, G_MAXINT, DEFAULT_PROCESSING_WIDTH, (GParamFlags)
          (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_PROCESSING_HEIGHT,
      g_param_spec_int ("processing-height",
          "Processing Height",
          "Height of the input buffer to algorithm",
          1, G_MAXINT, DEFAULT_PROCESSING_HEIGHT, (GParamFlags)
          (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_PROCESS_FULL_FRAME,
      g_param_spec_boolean ("full-frame",
          "Full frame",
          "Enable to process full frame or disable to process objects detected"
          "by primary detector", DEFAULT_PROCESS_FULL_FRAME, (GParamFlags)
          (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_BLUR_OBJECTS,
      g_param_spec_boolean ("blur-objects",
          "Blur Objects",
          "Enable to blur the objects detected in full-frame=0 mode"
          "by primary detector", DEFAULT_BLUR_OBJECTS, (GParamFlags)
          (G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS)));
  g_object_class_install_property (gobject_class, PROP_GPU_DEVICE_ID,
      g_param_spec_uint ("gpu-id",
          "Set GPU Device ID",
          "Set GPU Device ID", 0,
          G_MAXUINT, 0,
          GParamFlags
          (G_PARAM_READWRITE |
              G_PARAM_STATIC_STRINGS | GST_PARAM_MUTABLE_READY)));
  /* Set sink and src pad capabilities */
  gst_element_class_add_pad_template (gstelement_class,
      gst_static_pad_template_get (&gst_dsexample_src_template));
  gst_element_class_add_pad_template (gstelement_class,
      gst_static_pad_template_get (&gst_dsexample_sink_template));
  /* Set metadata describing the element */
  gst_element_class_set_details_simple (gstelement_class,
      "DsExample plugin",
      "DsExample Plugin",
      "Process a 3rdparty example algorithm on objects / full frame",
      "NVIDIA Corporation. Post on Deepstream for Tesla forum for any queries "
      "@ https://devtalk.nvidia.com/default/board/209/");
}
  149. static void
  150. gst_dsexample_init (GstDsExample * dsexample)
  151. {
  152. GstBaseTransform *btrans = GST_BASE_TRANSFORM (dsexample);
  153. /* We will not be generating a new buffer. Just adding / updating * metadata. */
  154. gst_base_transform_set_in_place (GST_BASE_TRANSFORM (btrans), TRUE);
  155. /* We do not want to change the input caps. Set to passthrough. transform_ip * is still called. */
  156. gst_base_transform_set_passthrough (GST_BASE_TRANSFORM (btrans), TRUE);
  157. /* Initialize all property variables to default values */
  158. dsexample->unique_id = DEFAULT_UNIQUE_ID;
  159. dsexample->processing_width = DEFAULT_PROCESSING_WIDTH;
  160. dsexample->processing_height = DEFAULT_PROCESSING_HEIGHT;
  161. dsexample->process_full_frame = DEFAULT_PROCESS_FULL_FRAME;
  162. dsexample->blur_objects = DEFAULT_BLUR_OBJECTS;
  163. dsexample->gpu_id = DEFAULT_GPU_ID;
  164. /* This quark is required to identify NvDsMeta when iterating through * the buffer metadatas */
  165. if (!_dsmeta_quark)
  166. _dsmeta_quark = g_quark_from_static_string (NVDS_META_STRING);
  167. }
  168. /* Function called when a property of the element is set. Standard boilerplate. */
  169. static void
  170. gst_dsexample_set_property (GObject * object, guint prop_id,
  171. const GValue * value, GParamSpec * pspec)
  172. {
  173. GstDsExample *dsexample = GST_DSEXAMPLE (object);
  174. switch (prop_id) {
  175. case PROP_UNIQUE_ID:
  176. dsexample->unique_id = g_value_get_uint (value);
  177. break;
  178. case PROP_PROCESSING_WIDTH:
  179. dsexample->processing_width = g_value_get_int (value);
  180. break;
  181. case PROP_PROCESSING_HEIGHT:
  182. dsexample->processing_height = g_value_get_int (value);
  183. break;
  184. case PROP_PROCESS_FULL_FRAME:
  185. dsexample->process_full_frame = g_value_get_boolean (value);
  186. break;
  187. case PROP_BLUR_OBJECTS:
  188. dsexample->blur_objects = g_value_get_boolean (value);
  189. break;
  190. case PROP_GPU_DEVICE_ID:
  191. dsexample->gpu_id = g_value_get_uint (value);
  192. break;
  193. default:
  194. G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
  195. break;
  196. }
  197. }
  198. /* Function called when a property of the element is requested. Standard * boilerplate. */
  199. static void
  200. gst_dsexample_get_property (GObject * object, guint prop_id,
  201. GValue * value, GParamSpec * pspec)
  202. {
  203. GstDsExample *dsexample = GST_DSEXAMPLE (object);
  204. switch (prop_id) {
  205. case PROP_UNIQUE_ID:
  206. g_value_set_uint (value, dsexample->unique_id);
  207. break;
  208. case PROP_PROCESSING_WIDTH:
  209. g_value_set_int (value, dsexample->processing_width);
  210. break;
  211. case PROP_PROCESSING_HEIGHT:
  212. g_value_set_int (value, dsexample->processing_height);
  213. break;
  214. case PROP_PROCESS_FULL_FRAME:
  215. g_value_set_boolean (value, dsexample->process_full_frame);
  216. break;
  217. case PROP_BLUR_OBJECTS:
  218. g_value_set_boolean (value, dsexample->blur_objects);
  219. break;
  220. case PROP_GPU_DEVICE_ID:
  221. g_value_set_uint (value, dsexample->gpu_id);
  222. break;
  223. default:
  224. G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
  225. break;
  226. }
  227. }
  228. /** * Initialize all resources and start the output thread */
  229. static gboolean
  230. gst_dsexample_start (GstBaseTransform * btrans)
  231. {
  232. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  233. NvBufSurfaceCreateParams create_params;
  234. DsExampleInitParams init_params =
  235. { dsexample->processing_width, dsexample->processing_height,
  236. dsexample->process_full_frame
  237. };
  238. GstQuery *queryparams = NULL;
  239. guint batch_size = 1;
  240. /* Algorithm specific initializations and resource allocation. */
  241. dsexample->dsexamplelib_ctx = DsExampleCtxInit (&init_params);
  242. GST_DEBUG_OBJECT (dsexample, "ctx lib %p \n", dsexample->dsexamplelib_ctx);
  243. CHECK_CUDA_STATUS (cudaSetDevice (dsexample->gpu_id),
  244. "Unable to set cuda device");
  245. dsexample->batch_size = 1;
  246. queryparams = gst_nvquery_batch_size_new ();
  247. if (gst_pad_peer_query (GST_BASE_TRANSFORM_SINK_PAD (btrans), queryparams)
  248. || gst_pad_peer_query (GST_BASE_TRANSFORM_SRC_PAD (btrans), queryparams)) {
  249. if (gst_nvquery_batch_size_parse (queryparams, &batch_size)) {
  250. dsexample->batch_size = batch_size;
  251. }
  252. }
  253. GST_DEBUG_OBJECT (dsexample, "Setting batch-size %d \n",
  254. dsexample->batch_size);
  255. gst_query_unref (queryparams);
  256. if (dsexample->process_full_frame && dsexample->blur_objects) {
  257. GST_ERROR ("Error: does not support blurring while processing full frame");
  258. goto error;
  259. }
  260. CHECK_CUDA_STATUS (cudaStreamCreate (&dsexample->cuda_stream),
  261. "Could not create cuda stream");
  262. if (dsexample->inter_buf)
  263. NvBufSurfaceDestroy (dsexample->inter_buf);
  264. dsexample->inter_buf = NULL;
  265. /* An intermediate buffer for NV12/RGBA to BGR conversion will be * required. Can be skipped if custom algorithm can work directly on NV12/RGBA. */
  266. create_params.gpuId = dsexample->gpu_id;
  267. create_params.width = dsexample->processing_width;
  268. create_params.height = dsexample->processing_height;
  269. create_params.size = 0;
  270. create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
  271. create_params.layout = NVBUF_LAYOUT_PITCH;
  272. #ifdef __aarch64__
  273. create_params.memType = NVBUF_MEM_DEFAULT;
  274. #else
  275. create_params.memType = NVBUF_MEM_CUDA_UNIFIED;
  276. #endif
  277. if (NvBufSurfaceCreate (&dsexample->inter_buf, 1,
  278. &create_params) != 0) {
  279. GST_ERROR ("Error: Could not allocate internal buffer for dsexample");
  280. goto error;
  281. }
  282. /* Create host memory for storing converted/scaled interleaved RGB data */
  283. CHECK_CUDA_STATUS (cudaMallocHost (&dsexample->host_rgb_buf,
  284. dsexample->processing_width * dsexample->processing_height *
  285. RGB_BYTES_PER_PIXEL), "Could not allocate cuda host buffer");
  286. GST_DEBUG_OBJECT (dsexample, "allocated cuda buffer %p \n",
  287. dsexample->host_rgb_buf);
  288. /* CV Mat containing interleaved RGB data. This call does not allocate memory. * It uses host_rgb_buf as data. */
  289. dsexample->cvmat =
  290. new cv::Mat (dsexample->processing_height, dsexample->processing_width,
  291. CV_8UC3, dsexample->host_rgb_buf,
  292. dsexample->processing_width * RGB_BYTES_PER_PIXEL);
  293. if (!dsexample->cvmat)
  294. goto error;
  295. GST_DEBUG_OBJECT (dsexample, "created CV Mat\n");
  296. return TRUE;
  297. error:
  298. if (dsexample->host_rgb_buf) {
  299. cudaFreeHost (dsexample->host_rgb_buf);
  300. dsexample->host_rgb_buf = NULL;
  301. }
  302. if (dsexample->cuda_stream) {
  303. cudaStreamDestroy (dsexample->cuda_stream);
  304. dsexample->cuda_stream = NULL;
  305. }
  306. if (dsexample->dsexamplelib_ctx)
  307. DsExampleCtxDeinit (dsexample->dsexamplelib_ctx);
  308. return FALSE;
  309. }
  310. /** * Stop the output thread and free up all the resources */
  311. static gboolean
  312. gst_dsexample_stop (GstBaseTransform * btrans)
  313. {
  314. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  315. if (dsexample->inter_buf)
  316. NvBufSurfaceDestroy(dsexample->inter_buf);
  317. dsexample->inter_buf = NULL;
  318. if (dsexample->cuda_stream)
  319. cudaStreamDestroy (dsexample->cuda_stream);
  320. dsexample->cuda_stream = NULL;
  321. delete dsexample->cvmat;
  322. dsexample->cvmat = NULL;
  323. if (dsexample->host_rgb_buf) {
  324. cudaFreeHost (dsexample->host_rgb_buf);
  325. dsexample->host_rgb_buf = NULL;
  326. }
  327. GST_DEBUG_OBJECT (dsexample, "deleted CV Mat \n");
  328. /* Deinit the algorithm library */
  329. DsExampleCtxDeinit (dsexample->dsexamplelib_ctx);
  330. dsexample->dsexamplelib_ctx = NULL;
  331. GST_DEBUG_OBJECT (dsexample, "ctx lib released \n");
  332. return TRUE;
  333. }
  334. /** * Called when source / sink pad capabilities have been negotiated. */
  335. static gboolean
  336. gst_dsexample_set_caps (GstBaseTransform * btrans, GstCaps * incaps,
  337. GstCaps * outcaps)
  338. {
  339. GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  340. /* Save the input video information, since this will be required later. */
  341. gst_video_info_from_caps (&dsexample->video_info, incaps);
  342. if (dsexample->blur_objects && !dsexample->process_full_frame) {
  343. /* requires RGBA format for blurring the objects in opencv */
  344. if (dsexample->video_info.finfo->format != GST_VIDEO_FORMAT_RGBA) {
  345. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  346. ("input format should be RGBA when using blur-objects property"), (NULL));
  347. goto error;
  348. }
  349. }
  350. return TRUE;
  351. error:
  352. return FALSE;
  353. }
  354. /** * Scale the entire frame to the processing resolution maintaining aspect ratio. * Or crop and scale objects to the processing resolution maintaining the aspect * ratio. Remove the padding required by hardware and convert from RGBA to RGB * using openCV. These steps can be skipped if the algorithm can work with * padded data and/or can work with RGBA. */
  355. static GstFlowReturn
  356. get_converted_mat (GstDsExample * dsexample, NvBufSurface *input_buf, gint idx,
  357. NvOSD_RectParams * crop_rect_params, gdouble & ratio, gint input_width,
  358. gint input_height)
  359. {
  360. NvBufSurfTransform_Error err;
  361. NvBufSurfTransformConfigParams transform_config_params;
  362. NvBufSurfTransformParams transform_params;
  363. NvBufSurfTransformRect src_rect;
  364. NvBufSurfTransformRect dst_rect;
  365. NvBufSurface ip_surf;
  366. cv::Mat in_mat;
  367. ip_surf = *input_buf;
  368. ip_surf.numFilled = ip_surf.batchSize = 1;
  369. ip_surf.surfaceList = &(input_buf->surfaceList[idx]);
  370. gint src_left = GST_ROUND_UP_2((unsigned int)crop_rect_params->left);
  371. gint src_top = GST_ROUND_UP_2((unsigned int)crop_rect_params->top);
  372. gint src_width = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->width);
  373. gint src_height = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->height);
  374. /* Maintain aspect ratio */
  375. double hdest = dsexample->processing_width * src_height / (double) src_width;
  376. double wdest = dsexample->processing_height * src_width / (double) src_height;
  377. guint dest_width, dest_height;
  378. if (hdest <= dsexample->processing_height) {
  379. dest_width = dsexample->processing_width;
  380. dest_height = hdest;
  381. } else {
  382. dest_width = wdest;
  383. dest_height = dsexample->processing_height;
  384. }
  385. /* Configure transform session parameters for the transformation */
  386. transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
  387. transform_config_params.gpu_id = dsexample->gpu_id;
  388. transform_config_params.cuda_stream = dsexample->cuda_stream;
  389. /* Set the transform session parameters for the conversions executed in this * thread. */
  390. err = NvBufSurfTransformSetSessionParams (&transform_config_params);
  391. if (err != NvBufSurfTransformError_Success) {
  392. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  393. ("NvBufSurfTransformSetSessionParams failed with error %d", err), (NULL));
  394. goto error;
  395. }
  396. /* Calculate scaling ratio while maintaining aspect ratio */
  397. ratio = MIN (1.0 * dest_width/ src_width, 1.0 * dest_height / src_height);
  398. if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
  399. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  400. ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
  401. goto error;
  402. }
  403. #ifdef __aarch64__
  404. if (ratio <= 1.0 / 16 || ratio >= 16.0) {
  405. /* Currently cannot scale by ratio > 16 or < 1/16 for Jetson */
  406. goto error;
  407. }
  408. #endif
  409. /* Set the transform ROIs for source and destination */
  410. src_rect = { (guint)src_top, (guint)src_left, (guint)src_width, (guint)src_height};
  411. dst_rect = { 0, 0, (guint)dest_width, (guint)dest_height};
  412. /* Set the transform parameters */
  413. transform_params.src_rect = &src_rect;
  414. transform_params.dst_rect = &dst_rect;
  415. transform_params.transform_flag =
  416. NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
  417. NVBUFSURF_TRANSFORM_CROP_DST;
  418. transform_params.transform_filter = NvBufSurfTransformInter_Default;
  419. /* Memset the memory */
  420. NvBufSurfaceMemSet (dsexample->inter_buf, 0, 0, 0);
  421. GST_DEBUG_OBJECT (dsexample, "Scaling and converting input buffer\n");
  422. /* Transformation scaling+format conversion if any. */
  423. err = NvBufSurfTransform (&ip_surf, dsexample->inter_buf, &transform_params);
  424. if (err != NvBufSurfTransformError_Success) {
  425. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  426. ("NvBufSurfTransform failed with error %d while converting buffer", err),
  427. (NULL));
  428. goto error;
  429. }
  430. /* Map the buffer so that it can be accessed by CPU */
  431. if (NvBufSurfaceMap (dsexample->inter_buf, 0, 0, NVBUF_MAP_READ) != 0){
  432. goto error;
  433. }
  434. /* Cache the mapped data for CPU access */
  435. NvBufSurfaceSyncForCpu (dsexample->inter_buf, 0, 0);
  436. /* Use openCV to remove padding and convert RGBA to BGR. Can be skipped if * algorithm can handle padded RGBA data. */
  437. in_mat =
  438. cv::Mat (dsexample->processing_height, dsexample->processing_width,
  439. CV_8UC4, dsexample->inter_buf->surfaceList[0].mappedAddr.addr[0],
  440. dsexample->inter_buf->surfaceList[0].pitch);
  441. #if (CV_MAJOR_VERSION >= 4)
  442. cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
  443. #else
  444. cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
  445. #endif
  446. if (NvBufSurfaceUnMap (dsexample->inter_buf, 0, 0)){
  447. goto error;
  448. }
  449. #ifdef __aarch64__
  450. /* To use the converted buffer in CUDA, create an EGLImage and then use * CUDA-EGL interop APIs */
  451. if (USE_EGLIMAGE) {
  452. if (NvBufSurfaceMapEglImage (dsexample->inter_buf, 0) !=0 ) {
  453. goto error;
  454. }
  455. /* dsexample->inter_buf->surfaceList[0].mappedAddr.eglImage * Use interop APIs cuGraphicsEGLRegisterImage and * cuGraphicsResourceGetMappedEglFrame to access the buffer in CUDA */
  456. /* Destroy the EGLImage */
  457. NvBufSurfaceUnMapEglImage (dsexample->inter_buf, 0);
  458. }
  459. #endif
  460. /* We will first convert only the Region of Interest (the entire frame or the * object bounding box) to RGB and then scale the converted RGB frame to * processing resolution. */
  461. return GST_FLOW_OK;
  462. error:
  463. return GST_FLOW_ERROR;
  464. }
  465. /* * Blur the detected objects when processing in object mode (full-frame=0) */
  466. static GstFlowReturn
  467. blur_objects (GstDsExample * dsexample, gint idx,
  468. NvOSD_RectParams * crop_rect_params, cv::Mat in_mat)
  469. {
  470. cv::Rect crop_rect;
  471. if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
  472. GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
  473. ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
  474. return GST_FLOW_ERROR;
  475. }
  476. /* rectangle for cropped objects */
  477. crop_rect = cv::Rect (crop_rect_params->left, crop_rect_params->top,
  478. crop_rect_params->width, crop_rect_params->height);
  479. /* apply gaussian blur to the detected objects */
  480. //GaussianBlur(in_mat(crop_rect), in_mat(crop_rect), cv::Size(15,15), 4);
  481. return GST_FLOW_OK;
  482. }
/**
 * Called when the element receives an input buffer from the upstream element.
 *
 * In-place transform: maps the incoming GstBuffer to an NvBufSurface and
 * either (a) processes every full frame of the batch, or (b) processes /
 * blurs each detected object crop, attaching the library's results as
 * DeepStream metadata. Returns GST_FLOW_OK on success, GST_FLOW_ERROR on
 * any failure (after unmapping the buffer via the shared error path).
 */
static GstFlowReturn
gst_dsexample_transform_ip (GstBaseTransform * btrans, GstBuffer * inbuf)
{
  GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  GstMapInfo in_map_info;
  /* Default to error; only flipped to GST_FLOW_OK at the very end. */
  GstFlowReturn flow_ret = GST_FLOW_ERROR;
  gdouble scale_ratio = 1.0;
  DsExampleOutput *output;
  NvBufSurface *surface = NULL;
  NvDsBatchMeta *batch_meta = NULL;
  NvDsFrameMeta *frame_meta = NULL;
  NvDsMetaList * l_frame = NULL;
  guint i = 0;

  dsexample->frame_num++;
  CHECK_CUDA_STATUS (cudaSetDevice (dsexample->gpu_id),
      "Unable to set cuda device");

  memset (&in_map_info, 0, sizeof (in_map_info));
  if (!gst_buffer_map (inbuf, &in_map_info, GST_MAP_READ)) {
    g_print ("Error: Failed to map gst buffer\n");
    goto error;
  }

  nvds_set_input_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  /* The mapped data of a DeepStream buffer IS an NvBufSurface. */
  surface = (NvBufSurface *) in_map_info.data;
  GST_DEBUG_OBJECT (dsexample,
      "Processing Frame %" G_GUINT64_FORMAT " Surface %p\n",
      dsexample->frame_num, surface);

  if (CHECK_NVDS_MEMORY_AND_GPUID (dsexample, surface))
    goto error;

  batch_meta = gst_buffer_get_nvds_batch_meta (inbuf);
  if (batch_meta == nullptr) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvDsBatchMeta not found for input buffer."), (NULL));
    /* NOTE(review): returns directly without unmapping inbuf — the other
     * failure paths go through 'error:' which unmaps; confirm intended. */
    return GST_FLOW_ERROR;
  }

  if (dsexample->process_full_frame) {
    /* Full-frame mode: scale each whole frame of the batch to the
     * processing resolution and run the library on it. */
    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
        l_frame = l_frame->next)
    {
      frame_meta = (NvDsFrameMeta *) (l_frame->data);
      NvOSD_RectParams rect_params;

      /* Scale the entire frame to processing resolution */
      rect_params.left = 0;
      rect_params.top = 0;
      rect_params.width = dsexample->video_info.width;
      rect_params.height = dsexample->video_info.height;

      /* Scale and convert the frame */
      if (get_converted_mat (dsexample, surface, i, &rect_params,
            scale_ratio, dsexample->video_info.width,
            dsexample->video_info.height) != GST_FLOW_OK) {
        goto error;
      }

      /* Process to get the output */
      output =
          DsExampleProcess (dsexample->dsexamplelib_ctx,
          dsexample->cvmat->data);
      /* Attach the metadata for the full frame */
      attach_metadata_full_frame (dsexample, frame_meta, scale_ratio, output, i);
      i++;
      free (output);
    }
  } else {
    /* Object mode: use object crops as input to the algorithm. The
     * objects are detected by the primary detector. */
    NvDsMetaList * l_obj = NULL;
    NvDsObjectMeta *obj_meta = NULL;

#ifndef __aarch64__
    if (dsexample->blur_objects) {
      /* On dGPU, OpenCV can only touch the frame if it lives in
       * CUDA unified memory. */
      if (surface->memType != NVBUF_MEM_CUDA_UNIFIED){
        GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
            ("%s:need NVBUF_MEM_CUDA_UNIFIED memory for opencv blurring",__func__), (NULL));
        return GST_FLOW_ERROR;
      }
    }
#endif

    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
        l_frame = l_frame->next)
    {
      frame_meta = (NvDsFrameMeta *) (l_frame->data);
      cv::Mat in_mat;

      if (dsexample->blur_objects) {
        /* Map the buffer so that it can be accessed by CPU (only if not
         * already mapped by a previous frame). */
        if (surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0] == NULL){
          if (NvBufSurfaceMap (surface, frame_meta->batch_id, 0, NVBUF_MAP_READ_WRITE) != 0){
            GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
                ("%s:buffer map to be accessed by CPU failed", __func__), (NULL));
            return GST_FLOW_ERROR;
          }
        }
        /* Cache the mapped data for CPU access */
        NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);
        /* Zero-copy Mat wrapping the mapped RGBA frame. */
        in_mat =
            cv::Mat (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
            surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
            surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
            surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
      }

      for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
          l_obj = l_obj->next)
      {
        obj_meta = (NvDsObjectMeta *) (l_obj->data);

        if (dsexample->blur_objects) {
          /* gaussian blur the detected objects using opencv */
          if (blur_objects (dsexample, frame_meta->batch_id,
                &obj_meta->rect_params, in_mat) != GST_FLOW_OK) {
            /* Error in blurring, skip processing on object. */
            GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
                ("blurring the object failed"), (NULL));
            if (NvBufSurfaceUnMap (surface, frame_meta->batch_id, 0)){
              GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
                  ("%s:buffer unmap to be accessed by CPU failed", __func__), (NULL));
            }
            return GST_FLOW_ERROR;
          }
          /* Blur mode only: do not run the inference path below. */
          continue;
        }

        /* Should not process on objects smaller than
         * MIN_INPUT_OBJECT_WIDTH x MIN_INPUT_OBJECT_HEIGHT
         * since it will cause hardware scaling issues. */
        if (obj_meta->rect_params.width < MIN_INPUT_OBJECT_WIDTH ||
            obj_meta->rect_params.height < MIN_INPUT_OBJECT_HEIGHT)
          continue;

        /* Crop and scale the object */
        if (get_converted_mat (dsexample,
              surface, frame_meta->batch_id, &obj_meta->rect_params,
              scale_ratio, dsexample->video_info.width,
              dsexample->video_info.height) != GST_FLOW_OK) {
          /* Error in conversion, skip processing on object. */
          continue;
        }

        /* Process the object crop to obtain label */
        output = DsExampleProcess (dsexample->dsexamplelib_ctx,
            dsexample->cvmat->data);
        /* Attach labels for the object */
        attach_metadata_object (dsexample, obj_meta, output);
        free (output);
      }

      if (dsexample->blur_objects) {
        /* Cache the mapped data for device access */
        NvBufSurfaceSyncForDevice (surface, frame_meta->batch_id, 0);
#ifdef DSEXAMPLE_DEBUG
        /* Use openCV to remove padding and convert RGBA to BGR. Can be
         * skipped if algorithm can handle padded RGBA data. */
#if (CV_MAJOR_VERSION >= 4)
        cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
#else
        cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
#endif
        /* used to dump the converted mat to files for debug */
        static guint cnt = 0;
        cv::imwrite("out_" + std::to_string (cnt) + ".jpeg", *dsexample->cvmat);
        cnt++;
#endif
      }
    }
  }

  flow_ret = GST_FLOW_OK;

error:
  /* Shared exit path: stamp the output timestamp and unmap the buffer
   * regardless of success or failure. */
  nvds_set_output_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  gst_buffer_unmap (inbuf, &in_map_info);
  return flow_ret;
}
/**
 * Attach metadata for the full frame. We will be adding new object metadata
 * (one NvDsObjectMeta per object reported by the library), including the
 * bounding box, border/background colors and a text label.
 */
static void
attach_metadata_full_frame (GstDsExample * dsexample, NvDsFrameMeta *frame_meta,
    gdouble scale_ratio, DsExampleOutput * output, guint batch_id)
{
  NvDsBatchMeta *batch_meta = frame_meta->base_meta.batch_meta;
  NvDsObjectMeta *object_meta = NULL;
  /* static: text_params keeps a pointer to this name, so it must outlive
   * the call. */
  static gchar font_name[] = "Serif";
  GST_DEBUG_OBJECT (dsexample, "Attaching metadata %d\n", output->numObjects);

  for (gint i = 0; i < output->numObjects; i++) {
    DsExampleObject *obj = &output->object[i];
    object_meta = nvds_acquire_obj_meta_from_pool(batch_meta);
    NvOSD_RectParams & rect_params = object_meta->rect_params;
    NvOSD_TextParams & text_params = object_meta->text_params;

    /* Assign bounding box coordinates (library coordinate space). */
    rect_params.left = obj->left;
    rect_params.top = obj->top;
    rect_params.width = obj->width;
    rect_params.height = obj->height;

    /* Yellow, 40%-alpha background color — currently DISABLED since
     * has_bg_color is 0. */
    rect_params.has_bg_color = 0;
    rect_params.bg_color = (NvOSD_ColorParams) {
      1, 1, 0, 0.4};

    /* Red border of width 3 */
    rect_params.border_width = 3;
    rect_params.border_color = (NvOSD_ColorParams) {
      1, 0, 0, 1};

    /* Scale the bounding boxes proportionally based on how the
     * object/frame was scaled during input */
    rect_params.left /= scale_ratio;
    rect_params.top /= scale_ratio;
    rect_params.width /= scale_ratio;
    rect_params.height /= scale_ratio;
    GST_DEBUG_OBJECT (dsexample, "Attaching rect%d of batch%u"
        " left->%f top->%f width->%f"
        " height->%f label->%s\n", i, batch_id, rect_params.left,
        rect_params.top, rect_params.width, rect_params.height, obj->label);

    object_meta->object_id = UNTRACKED_OBJECT_ID;
    g_strlcpy (object_meta->obj_label, obj->label, MAX_LABEL_SIZE);

    /* display_text requires heap allocated memory (freed by DeepStream
     * when the meta is released). */
    text_params.display_text = g_strdup (obj->label);
    /* Display text above the left top corner of the object */
    text_params.x_offset = rect_params.left;
    text_params.y_offset = rect_params.top - 10;
    /* Set black background for the text */
    text_params.set_bg_clr = 1;
    text_params.text_bg_clr = (NvOSD_ColorParams) {
      0, 0, 0, 1};
    /* Font face, size and color */
    text_params.font_params.font_name = font_name;
    text_params.font_params.font_size = 11;
    text_params.font_params.font_color = (NvOSD_ColorParams) {
      1, 1, 1, 1};

    nvds_add_obj_meta_to_frame(frame_meta, object_meta, NULL);
    frame_meta->bInferDone = TRUE;
  }
}
/**
 * Only update string label in an existing object metadata. No bounding boxes.
 * We assume only one label per object is generated: only object[0].label
 * is read from the library output.
 */
static void
attach_metadata_object (GstDsExample * dsexample, NvDsObjectMeta * obj_meta,
    DsExampleOutput * output)
{
  /* Nothing to attach if the library produced no result. */
  if (output->numObjects == 0)
    return;

  NvDsBatchMeta *batch_meta = obj_meta->base_meta.batch_meta;
  NvDsClassifierMeta *classifier_meta =
      nvds_acquire_classifier_meta_from_pool (batch_meta);
  /* Tag the classifier meta with this element's unique-id so downstream
   * components can identify its origin. */
  classifier_meta->unique_component_id = dsexample->unique_id;
  NvDsLabelInfo *label_info =
      nvds_acquire_label_info_meta_from_pool (batch_meta);
  g_strlcpy (label_info->result_label, output->object[0].label, MAX_LABEL_SIZE);
  nvds_add_label_info_meta_to_classifier(classifier_meta, label_info);
  nvds_add_classifier_meta_to_object (obj_meta, classifier_meta);

  /* NOTE(review): the lock covers only the display-text update below; the
   * meta pool acquire/add calls above run outside it — confirm intended. */
  nvds_acquire_meta_lock (batch_meta);
  NvOSD_TextParams & text_params = obj_meta->text_params;
  NvOSD_RectParams & rect_params = obj_meta->rect_params;

  /* Below code to display the result */
  if (text_params.display_text) {
    /* Existing text (e.g. from the detector): append our label.
     * display_text requires heap allocated memory. */
    gchar *conc_string = g_strconcat (text_params.display_text, " ",
        output->object[0].label, NULL);
    g_free (text_params.display_text);
    text_params.display_text = conc_string;
  } else {
    /* Display text above the left top corner of the object */
    text_params.x_offset = rect_params.left;
    text_params.y_offset = rect_params.top - 10;
    text_params.display_text = g_strdup (output->object[0].label);
    /* Font face, size and color */
    text_params.font_params.font_name = (char *)"Serif";
    text_params.font_params.font_size = 11;
    text_params.font_params.font_color = (NvOSD_ColorParams) {
      1, 1, 1, 1};
    /* Set black background for the text */
    text_params.set_bg_clr = 1;
    text_params.text_bg_clr = (NvOSD_ColorParams) {
      0, 0, 0, 1};
  }
  nvds_release_meta_lock (batch_meta);
}
  740. /** * Boiler plate for registering a plugin and an element. */
  741. static gboolean
  742. dsexample_plugin_init (GstPlugin * plugin)
  743. {
  744. GST_DEBUG_CATEGORY_INIT (gst_dsexample_debug, "dsexample", 0,
  745. "dsexample plugin");
  746. return gst_element_register (plugin, "dsexample", GST_RANK_PRIMARY,
  747. GST_TYPE_DSEXAMPLE);
  748. }
/* Plugin entry point: DESCRIPTION, DS_VERSION, LICENSE, BINARY_PACKAGE and
 * URL are expected to be supplied as build-time defines. */
GST_PLUGIN_DEFINE (GST_VERSION_MAJOR,
    GST_VERSION_MINOR,
    nvdsgst_dsexample,
    DESCRIPTION, dsexample_plugin_init, DS_VERSION, LICENSE, BINARY_PACKAGE, URL)

发表评论

表情:
评论列表 (有 0 条评论,235人围观)

还没有评论,来说两句吧...

相关阅读

    相关 temp ssr

    由服务端请求首屏数据,而不是客户端请求首屏数据,这是“快”的一个主要原因。服务端在内网进行请求,数据响应速度快。客户端在不同网络环境进行数据请求,且外网http请求开销大,导致