Line data Source code
1 : /**
2 : * GStreamer
3 : * Copyright (C) 2005 Thomas Vander Stichele <thomas@apestaart.org>
4 : * Copyright (C) 2005 Ronald S. Bultje <rbultje@ronald.bitfreak.net>
5 : * Copyright (C) 2018 MyungJoo Ham <myungjoo.ham@samsung.com>
6 : *
7 : * This library is free software; you can redistribute it and/or
8 : * modify it under the terms of the GNU Library General Public
9 : * License as published by the Free Software Foundation;
10 : * version 2.1 of the License.
11 : *
12 : * This library is distributed in the hope that it will be useful,
13 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 : * Library General Public License for more details.
16 : */
17 :
18 : /**
19 : * @file gsttensor_converter.c
20 : * @date 26 Mar 2018
21 : * @brief GStreamer plugin to convert media types to tensors (as a filter for other general neural network filters)
22 : * @see https://github.com/nnstreamer/nnstreamer
23 : * @author MyungJoo Ham <myungjoo.ham@samsung.com>
24 : * @bug No known bugs except for NYI items
25 : * @todo For flatbuffers, support other/tensors with properties
26 : * @todo Subplugins are not tested yet.
27 : */
28 :
29 : /**
30 : * SECTION:element-tensor_converter
31 : *
32 : * A filter that converts a media stream to a tensor stream for NN frameworks.
33 : * The output is always in the format of other/tensor or other/tensors.
34 : *
35 : * <refsect2>
36 : * <title>Example launch line</title>
37 : * |[
38 : * gst-launch-1.0 videotestsrc ! video/x-raw,format=RGB,width=640,height=480 ! tensor_converter ! tensor_sink
39 : * ]|
40 : * </refsect2>
41 : */
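/*
 * For illustration only (the caps values below are arbitrary examples, not taken
 * from the upstream documentation): a similar launch line for raw audio input.
 *
 *   gst-launch-1.0 audiotestsrc ! audio/x-raw,format=S16LE,channels=1,rate=16000 ! tensor_converter ! tensor_sink
 *
 * With the default frames-per-tensor=1, each output buffer is expected to carry a
 * single other/tensor frame of dimension 1:1 (channels:frames) and type int16.
 */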
42 :
43 : #ifdef HAVE_CONFIG_H
44 : #include <config.h>
45 : #endif
46 :
47 : #include <string.h>
48 : #include "gsttensor_converter.h"
49 : #include "tensor_meta.h"
50 :
51 : #ifdef NO_VIDEO
52 : #include "gsttensor_converter_media_no_video.h"
53 : #else
54 : #include "gsttensor_converter_media_info_video.h"
55 : #endif
56 :
57 : #ifdef NO_AUDIO
58 : #include "gsttensor_converter_media_no_audio.h"
59 : #else
60 : #include "gsttensor_converter_media_info_audio.h"
61 : #endif
62 : #include <nnstreamer_log.h>
63 : #include <nnstreamer_subplugin.h>
64 : #include <nnstreamer_util.h>
65 :
66 : /**
67 : * @brief Caps string for text input
68 : */
69 : #define TEXT_CAPS_STR "text/x-raw, format = (string) utf8"
70 :
71 : #define append_text_caps_template(caps) \
72 : gst_caps_append (caps, gst_caps_from_string (TEXT_CAPS_STR))
73 :
74 : /**
75 : * @brief Caps string for binary stream
76 : */
77 : #define OCTET_CAPS_STR "application/octet-stream"
78 :
79 : #define append_octet_caps_template(caps) \
80 : gst_caps_append (caps, gst_caps_from_string (OCTET_CAPS_STR))
81 :
82 : /**
83 : * @brief Macro to append template caps for flexible tensor
84 : */
85 : #define append_flex_tensor_caps_template(caps) \
86 : gst_caps_append (caps, gst_caps_from_string (GST_TENSORS_FLEX_CAP_DEFAULT))
87 :
88 : /**
89 : * @brief Macro for debug mode.
90 : */
91 : #ifndef DBG
92 : #define DBG (!self->silent)
93 : #endif
94 :
95 : #define silent_debug_timestamp(self, buf) do { \
96 : if (DBG) { \
97 : GST_DEBUG_OBJECT (self, "pts = %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_PTS (buf))); \
98 : GST_DEBUG_OBJECT (self, "dts = %" GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_DTS (buf))); \
99 : GST_DEBUG_OBJECT (self, "duration = %" GST_TIME_FORMAT "\n", GST_TIME_ARGS (GST_BUFFER_DURATION (buf))); \
100 : } \
101 : } while (0)
102 :
103 : GST_DEBUG_CATEGORY_STATIC (gst_tensor_converter_debug);
104 : #define GST_CAT_DEFAULT gst_tensor_converter_debug
105 :
106 : #define STRING_CUSTOM_MODE(self) \
107 : (((self)->mode == _CONVERTER_MODE_CUSTOM_CODE) ? \
108 : "custom_code (function)" : \
109 : (((self)->mode == _CONVERTER_MODE_CUSTOM_SCRIPT) ? \
110 : "custom_script (py)" : \
111 : "unknown custom mode (internal error!)"))
112 :
113 : /**
114 : * @brief tensor_converter properties
115 : * @todo For flatbuffers, support other/tensors.
116 : */
117 : enum
118 : {
119 : PROP_0,
120 : PROP_INPUT_DIMENSION,
121 : PROP_INPUT_TYPE,
122 : PROP_FRAMES_PER_TENSOR,
123 : PROP_SET_TIMESTAMP,
124 : PROP_SUBPLUGINS,
125 : PROP_SILENT,
126 : PROP_MODE
127 : };
128 :
129 : /**
130 : * @brief Flag to set the timestamp when a buffer with an invalid timestamp is received.
131 : */
132 : #define DEFAULT_SET_TIMESTAMP TRUE
133 :
134 : /**
135 : * @brief Flag to minimize log output.
136 : */
137 : #define DEFAULT_SILENT TRUE
138 :
139 : /**
140 : * @brief Frames in output tensor.
141 : */
142 : #define DEFAULT_FRAMES_PER_TENSOR 1
143 :
144 : #define gst_tensor_converter_parent_class parent_class
145 56915 : G_DEFINE_TYPE (GstTensorConverter, gst_tensor_converter, GST_TYPE_ELEMENT);
146 :
147 : static void gst_tensor_converter_finalize (GObject * object);
148 : static void gst_tensor_converter_set_property (GObject * object,
149 : guint prop_id, const GValue * value, GParamSpec * pspec);
150 : static void gst_tensor_converter_get_property (GObject * object,
151 : guint prop_id, GValue * value, GParamSpec * pspec);
152 :
153 : static gboolean gst_tensor_converter_sink_event (GstPad * pad,
154 : GstObject * parent, GstEvent * event);
155 : static gboolean gst_tensor_converter_sink_query (GstPad * pad,
156 : GstObject * parent, GstQuery * query);
157 : static gboolean gst_tensor_converter_src_query (GstPad * pad,
158 : GstObject * parent, GstQuery * query);
159 : static GstFlowReturn gst_tensor_converter_chain (GstPad * pad,
160 : GstObject * parent, GstBuffer * buf);
161 : static GstStateChangeReturn
162 : gst_tensor_converter_change_state (GstElement * element,
163 : GstStateChange transition);
164 :
165 : static void gst_tensor_converter_reset (GstTensorConverter * self);
166 : static GstCaps *gst_tensor_converter_query_caps (GstTensorConverter * self,
167 : GstPad * pad, GstCaps * filter);
168 : static gboolean gst_tensor_converter_parse_caps (GstTensorConverter * self,
169 : const GstCaps * caps);
170 : static void gst_tensor_converter_update_caps (GstTensorConverter * self);
171 : static const NNStreamerExternalConverter *findExternalConverter (const char
172 : *media_type_name);
173 :
174 : /**
175 : * @brief Initialize the tensor_converter's class.
176 : */
177 : static void
178 420 : gst_tensor_converter_class_init (GstTensorConverterClass * klass)
179 : {
180 : GObjectClass *object_class;
181 : GstElementClass *element_class;
182 : GstPadTemplate *pad_template;
183 : GstCaps *pad_caps;
184 : gchar **str_array;
185 : guint total, i;
186 : const NNStreamerExternalConverter *ex;
187 :
188 420 : GST_DEBUG_CATEGORY_INIT (gst_tensor_converter_debug, "tensor_converter", 0,
189 : "Element to convert media stream to tensor stream");
190 :
191 420 : object_class = (GObjectClass *) klass;
192 420 : element_class = (GstElementClass *) klass;
193 :
194 : /* GObjectClass vmethods */
195 420 : object_class->set_property = gst_tensor_converter_set_property;
196 420 : object_class->get_property = gst_tensor_converter_get_property;
197 420 : object_class->finalize = gst_tensor_converter_finalize;
198 :
199 : /**
200 : * GstTensorConverter::input-dim:
201 : *
202 : * Input tensor dimension from the inner array.
203 : * This property is generally used to set the tensor configuration for a byte stream (application/octet-stream).
204 : * When this property is set and the input media type is a video or audio stream, GstTensorConverter compares the media info with this value.
205 : * (If they differ, the conversion fails.)
206 : */
207 420 : g_object_class_install_property (object_class, PROP_INPUT_DIMENSION,
208 : g_param_spec_string ("input-dim", "Input tensor dimension",
209 : "Input tensor dimension from inner array", "",
210 : G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
211 :
212 : /**
213 : * GstTensorConverter::input-type:
214 : *
215 : * Type of each element of the input tensor.
216 : * This property is generally used to set the tensor configuration for a byte stream (application/octet-stream).
217 : * When this property is set and the input media type is a video or audio stream, GstTensorConverter compares the media info with this value.
218 : * (If they differ, the conversion fails.)
219 : */
220 420 : g_object_class_install_property (object_class, PROP_INPUT_TYPE,
221 : g_param_spec_string ("input-type", "Input tensor type",
222 : "Type of each element of the input tensor", "",
223 : G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
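  /*
   * For illustration only (property values are arbitrary examples): with an
   * application/octet-stream input, the tensor layout has to be supplied via
   * input-dim/input-type, e.g.
   *
   *   ... ! application/octet-stream ! tensor_converter input-dim=640:480:3:1 input-type=uint8 ! tensor_sink
   *
   * Each incoming buffer is then expected to be a multiple of one frame,
   * i.e. 640 * 480 * 3 = 921600 bytes of uint8 data.
   */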
224 :
225 : /**
226 : * GstTensorConverter::frames-per-tensor:
227 : *
228 : * The number of frames in an outgoing buffer. (A buffer is a single tensor instance.)
229 : * GstTensorConverter can push a buffer that contains multiple media frames.
230 : */
231 420 : g_object_class_install_property (object_class, PROP_FRAMES_PER_TENSOR,
232 : g_param_spec_uint ("frames-per-tensor", "Frames per tensor",
233 : "The number of frames in output tensor", 1, G_MAXUINT,
234 : DEFAULT_FRAMES_PER_TENSOR,
235 : G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
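  /*
   * For illustration only (rates are arbitrary examples): with 16 kHz mono
   * S16LE audio, frames-per-tensor=1600 aggregates 1600 audio frames
   * (1600 * 2 = 3200 bytes) into one output tensor, i.e. one buffer per
   * 1600 / 16000 s = 100 ms of audio.
   */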
236 :
237 : /**
238 : * GstTensorConverter::set-timestamp:
239 : *
240 : * The flag to set timestamp when received a buffer with invalid timestamp.
241 : */
242 420 : g_object_class_install_property (object_class, PROP_SET_TIMESTAMP,
243 : g_param_spec_boolean ("set-timestamp", "Set timestamp",
244 : "The flag to set timestamp when received a buffer with invalid timestamp",
245 : DEFAULT_SET_TIMESTAMP, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
246 :
247 : /**
248 : * GstTensorConverter::sub-plugins:
249 : *
250 : * The list of registrable sub-plugins of tensor_converter.
251 : */
252 420 : g_object_class_install_property (object_class, PROP_SUBPLUGINS,
253 : g_param_spec_string ("sub-plugins", "Sub-plugins",
254 : "Registrable sub-plugins list", "",
255 : G_PARAM_READABLE | G_PARAM_STATIC_STRINGS));
256 :
257 : /**
258 : * GstTensorConverter::silent:
259 : *
260 : * The flag to enable/disable debugging messages.
261 : */
262 420 : g_object_class_install_property (object_class, PROP_SILENT,
263 : g_param_spec_boolean ("silent", "Silent", "Produce verbose output",
264 : DEFAULT_SILENT, G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
265 :
266 : /**
267 : * GstTensorConverter::mode:
268 : *
269 : * This property sets the custom mode of the tensor converter.
270 : */
271 420 : g_object_class_install_property (object_class, PROP_MODE,
272 : g_param_spec_string ("mode", "Mode",
273 : "Converter mode. e.g., mode=custom-code:<registered callback name>. For detail, refer to https://github.com/nnstreamer/nnstreamer/blob/main/gst/nnstreamer/elements/gsttensor_converter.md#custom-converter",
274 : "", G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS));
275 :
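  /*
   * A minimal sketch of the custom-code mode, assuming the registration API
   * named in the log messages of this file (nnstreamer_converter_custom_register)
   * and the callback shape used by the chain function (input buffer, user data,
   * output tensors config); the exact prototypes should be taken from the
   * nnstreamer headers.
   *
   *   static GstBuffer *
   *   my_custom_converter (GstBuffer * in_buf, void * data, GstTensorsConfig * config)
   *   {
   *     gsize size = gst_buffer_get_size (in_buf);
   *
   *     gst_tensors_config_init (config);
   *     config->info.num_tensors = 1;
   *     config->info.info[0].type = _NNS_UINT8;
   *     config->info.info[0].dimension[0] = size;
   *     config->info.info[0].dimension[1] = 1;
   *     config->info.info[0].dimension[2] = 1;
   *     config->info.info[0].dimension[3] = 1;
   *     config->rate_n = 0;
   *     config->rate_d = 1;
   *
   *     return gst_buffer_copy (in_buf);  (the chain function unrefs the original buffer when a new one is returned)
   *   }
   *
   *   nnstreamer_converter_custom_register ("myconv", my_custom_converter, NULL);
   *
   * The element is then used with mode=custom-code:myconv.
   */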
276 : /* set src pad template */
277 : pad_caps =
278 420 : gst_caps_from_string (GST_TENSOR_CAP_DEFAULT ";"
279 : GST_TENSORS_CAP_MAKE ("{ static, flexible }"));
280 :
281 420 : pad_template = gst_pad_template_new ("src", GST_PAD_SRC, GST_PAD_ALWAYS,
282 : pad_caps);
283 420 : gst_element_class_add_pad_template (element_class, pad_template);
284 :
285 420 : gst_caps_unref (pad_caps);
286 :
287 : /* set sink pad template */
288 420 : pad_caps = gst_caps_new_empty ();
289 :
290 : /* append caps string for all media types */
291 420 : append_video_caps_template (pad_caps);
292 420 : append_audio_caps_template (pad_caps);
293 420 : append_text_caps_template (pad_caps);
294 420 : append_octet_caps_template (pad_caps);
295 420 : append_flex_tensor_caps_template (pad_caps);
296 :
297 : /* append sub-plugin template caps */
298 420 : str_array = get_all_subplugins (NNS_SUBPLUGIN_CONVERTER);
299 420 : if (str_array) {
300 420 : total = g_strv_length (str_array);
301 :
302 2100 : for (i = 0; i < total; i++) {
303 1680 : ex = nnstreamer_converter_find (str_array[i]);
304 1680 : if (ex && ex->query_caps)
305 1680 : gst_caps_append (pad_caps, ex->query_caps (NULL));
306 : }
307 :
308 420 : g_strfreev (str_array);
309 : }
310 :
311 420 : pad_template = gst_pad_template_new ("sink", GST_PAD_SINK, GST_PAD_ALWAYS,
312 : pad_caps);
313 420 : gst_element_class_add_pad_template (element_class, pad_template);
314 :
315 420 : gst_caps_unref (pad_caps);
316 :
317 420 : gst_element_class_set_static_metadata (element_class,
318 : "TensorConverter",
319 : "Converter/Tensor",
320 : "Converts an audio, video, text, or arbitrary stream to a tensor stream of C-Array for neural network framework filters",
321 : "MyungJoo Ham <myungjoo.ham@samsung.com>");
322 :
323 : /* GstElementClass vmethods */
324 420 : element_class->change_state = gst_tensor_converter_change_state;
325 420 : }
326 :
327 : /**
328 : * @brief Initialize tensor_converter element.
329 : */
330 : static void
331 902 : gst_tensor_converter_init (GstTensorConverter * self)
332 : {
333 : /** setup sink pad */
334 902 : self->sinkpad =
335 902 : gst_pad_new_from_template (gst_element_class_get_pad_template
336 902 : (GST_ELEMENT_GET_CLASS (self), "sink"), "sink");
337 902 : gst_pad_set_event_function (self->sinkpad,
338 : GST_DEBUG_FUNCPTR (gst_tensor_converter_sink_event));
339 902 : gst_pad_set_query_function (self->sinkpad,
340 : GST_DEBUG_FUNCPTR (gst_tensor_converter_sink_query));
341 902 : gst_pad_set_chain_function (self->sinkpad,
342 : GST_DEBUG_FUNCPTR (gst_tensor_converter_chain));
343 902 : GST_PAD_SET_PROXY_CAPS (self->sinkpad);
344 902 : gst_element_add_pad (GST_ELEMENT (self), self->sinkpad);
345 :
346 : /** setup src pad */
347 902 : self->srcpad =
348 902 : gst_pad_new_from_template (gst_element_class_get_pad_template
349 902 : (GST_ELEMENT_GET_CLASS (self), "src"), "src");
350 902 : gst_pad_set_query_function (self->srcpad,
351 : GST_DEBUG_FUNCPTR (gst_tensor_converter_src_query));
352 902 : GST_PAD_SET_PROXY_CAPS (self->srcpad);
353 902 : gst_element_add_pad (GST_ELEMENT (self), self->srcpad);
354 :
355 : /** init properties */
356 902 : self->silent = DEFAULT_SILENT;
357 902 : self->set_timestamp = DEFAULT_SET_TIMESTAMP;
358 902 : self->frames_per_tensor = DEFAULT_FRAMES_PER_TENSOR;
359 902 : self->in_media_type = _NNS_MEDIA_INVALID;
360 902 : self->frame_size = 0;
361 902 : self->remove_padding = FALSE;
362 902 : self->externalConverter = NULL;
363 902 : self->priv_data = NULL;
364 902 : self->mode = _CONVERTER_MODE_NONE;
365 902 : self->mode_option = NULL;
366 902 : self->custom.func = NULL;
367 902 : self->custom.data = NULL;
368 902 : self->do_not_append_header = FALSE;
369 902 : gst_tensors_info_init (&self->tensors_info);
370 902 : gst_tensors_config_init (&self->tensors_config);
371 902 : self->tensors_configured = FALSE;
372 :
373 902 : self->adapter_table = gst_tensor_aggregation_init ();
374 902 : gst_tensor_converter_reset (self);
375 902 : }
376 :
377 : /**
378 : * @brief Function to finalize instance.
379 : */
380 : static void
381 845 : gst_tensor_converter_finalize (GObject * object)
382 : {
383 : GstTensorConverter *self;
384 :
385 845 : self = GST_TENSOR_CONVERTER (object);
386 :
387 845 : gst_tensor_converter_reset (self);
388 :
389 845 : gst_tensors_config_free (&self->tensors_config);
390 845 : gst_tensors_info_free (&self->tensors_info);
391 845 : g_hash_table_destroy (self->adapter_table);
392 :
393 845 : g_free (self->mode_option);
394 845 : g_free (self->ext_fw);
395 845 : self->custom.func = NULL;
396 845 : self->custom.data = NULL;
397 845 : if (self->externalConverter && self->externalConverter->close)
398 12 : self->externalConverter->close (&self->priv_data);
399 845 : G_OBJECT_CLASS (parent_class)->finalize (object);
400 845 : }
401 :
402 : /**
403 : * @brief Setter for tensor_converter properties.
404 : */
405 : static void
406 285 : gst_tensor_converter_set_property (GObject * object, guint prop_id,
407 : const GValue * value, GParamSpec * pspec)
408 : {
409 : GstTensorConverter *self;
410 : GstTensorsInfo *info;
411 : GstTensorInfo *_info;
412 : guint i, j, num;
413 : const gchar *value_str;
414 :
415 285 : self = GST_TENSOR_CONVERTER (object);
416 285 : info = &self->tensors_info;
417 :
418 285 : switch (prop_id) {
419 108 : case PROP_INPUT_DIMENSION:
420 108 : value_str = g_value_get_string (value);
421 108 : num = gst_tensors_info_parse_dimensions_string (info, value_str);
422 :
423 108 : if (num == 0) {
424 0 : GST_WARNING ("%s is invalid dimension string.", value_str);
425 108 : } else if (info->num_tensors > 0 && info->num_tensors != num) {
426 1 : GST_WARNING ("%s, the number of tensor is %u.", value_str, num);
427 : }
428 :
429 : /* prevent invalid value, init dimensions. */
430 27633 : for (i = num; i < NNS_TENSOR_SIZE_LIMIT; ++i) {
431 27525 : _info = gst_tensors_info_get_nth_info (info, i);
432 :
433 467925 : for (j = 0; j < NNS_TENSOR_RANK_LIMIT; ++j)
434 440400 : _info->dimension[j] = 0;
435 : }
436 :
437 108 : info->num_tensors = num;
438 108 : break;
439 98 : case PROP_INPUT_TYPE:
440 98 : value_str = g_value_get_string (value);
441 98 : num = gst_tensors_info_parse_types_string (info, value_str);
442 :
443 98 : if (num == 0) {
444 0 : GST_WARNING ("%s is invalid type string.", value_str);
445 98 : } else if (info->num_tensors > 0 && info->num_tensors != num) {
446 2 : GST_WARNING ("%s, the number of tensor is %u.", value_str, num);
447 : }
448 :
449 : /* prevent invalid value, init types. */
450 25074 : for (i = num; i < NNS_TENSOR_SIZE_LIMIT; ++i) {
451 24976 : _info = gst_tensors_info_get_nth_info (info, i);
452 24976 : _info->type = _NNS_END;
453 : }
454 :
455 98 : info->num_tensors = num;
456 98 : break;
457 51 : case PROP_FRAMES_PER_TENSOR:
458 51 : self->frames_per_tensor = g_value_get_uint (value);
459 51 : silent_debug (self, "Set frames in output = %d", self->frames_per_tensor);
460 51 : break;
461 2 : case PROP_SET_TIMESTAMP:
462 2 : self->set_timestamp = g_value_get_boolean (value);
463 2 : silent_debug (self, "Set timestamp = %d", self->set_timestamp);
464 2 : break;
465 12 : case PROP_SILENT:
466 12 : self->silent = g_value_get_boolean (value);
467 12 : silent_debug (self, "Set silent = %d", self->silent);
468 12 : break;
469 14 : case PROP_MODE:
470 : {
471 14 : const gchar *param = g_value_get_string (value);
472 14 : const converter_custom_cb_s *ptr = NULL;
473 14 : gchar **strv = g_strsplit_set (param, ":", -1);
474 14 : self->custom.func = NULL;
475 :
476 14 : if (g_strv_length (strv) < 2) {
477 0 : nns_logw
478 : ("Tensor converter mode option is incorrect. Please specify mode option as <MODE>:<MODE_OPTION>. Refer to https://github.com/nnstreamer/nnstreamer/blob/main/gst/nnstreamer/elements/gsttensor_converter.md#custom-converter for detail.");
479 0 : g_strfreev (strv);
480 0 : break;
481 : }
482 :
483 14 : self->mode_option = g_strdup (strv[1]);
484 14 : if (g_ascii_strcasecmp (strv[0], "custom-code") == 0) {
485 1 : self->mode = _CONVERTER_MODE_CUSTOM_CODE;
486 1 : ptr = get_subplugin (NNS_CUSTOM_CONVERTER, self->mode_option);
487 1 : if (!ptr) {
488 0 : nns_logw
489 : ("Failed to find custom subplugin of the tensor_converter. The custom-code for tensor_converter, \"%s\" is not registered by nnstreamer_converter_custom_register() function. Refer to https://github.com/nnstreamer/nnstreamer/blob/main/gst/nnstreamer/elements/gsttensor_converter.md#custom-converter for detail.",
490 : strv[1]);
491 0 : return;
492 : }
493 1 : self->custom.func = ptr->func;
494 1 : self->custom.data = ptr->data;
495 13 : } else if (g_ascii_strcasecmp (strv[0], "custom-script") == 0) {
496 13 : self->mode = _CONVERTER_MODE_CUSTOM_SCRIPT;
497 : /** @todo detects framework based on the script extension */
498 13 : self->ext_fw = g_strdup ("python3");
499 : }
500 14 : g_strfreev (strv);
501 :
502 14 : break;
503 : }
504 0 : default:
505 0 : G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
506 0 : break;
507 : }
508 : }
509 :
510 : /**
511 : * @brief Getter for tensor_converter properties.
512 : */
513 : static void
514 37 : gst_tensor_converter_get_property (GObject * object, guint prop_id,
515 : GValue * value, GParamSpec * pspec)
516 : {
517 : GstTensorConverter *self;
518 : GstTensorsInfo *info;
519 :
520 37 : self = GST_TENSOR_CONVERTER (object);
521 37 : info = &self->tensors_info;
522 :
523 37 : switch (prop_id) {
524 7 : case PROP_INPUT_DIMENSION:
525 7 : if (info->num_tensors > 0) {
526 5 : g_value_take_string (value,
527 : gst_tensors_info_get_dimensions_string (info));
528 : } else {
529 2 : g_value_set_string (value, "");
530 : }
531 7 : break;
532 7 : case PROP_INPUT_TYPE:
533 7 : if (info->num_tensors > 0) {
534 5 : g_value_take_string (value, gst_tensors_info_get_types_string (info));
535 : } else {
536 2 : g_value_set_string (value, "");
537 : }
538 7 : break;
539 7 : case PROP_FRAMES_PER_TENSOR:
540 7 : g_value_set_uint (value, self->frames_per_tensor);
541 7 : break;
542 6 : case PROP_SET_TIMESTAMP:
543 6 : g_value_set_boolean (value, self->set_timestamp);
544 6 : break;
545 2 : case PROP_SUBPLUGINS:
546 : {
547 2 : gchar **str_array = get_all_subplugins (NNS_SUBPLUGIN_CONVERTER);
548 :
549 2 : if (str_array) {
550 2 : g_value_take_string (value, g_strjoinv (",", str_array));
551 2 : g_strfreev (str_array);
552 : } else {
553 0 : g_value_set_string (value, "");
554 : }
555 2 : break;
556 : }
557 6 : case PROP_SILENT:
558 6 : g_value_set_boolean (value, self->silent);
559 6 : break;
560 2 : case PROP_MODE:
561 : {
562 2 : gchar *mode_str = NULL;
563 2 : if (self->mode_option == NULL)
564 2 : mode_str = g_strdup ("");
565 : else {
566 0 : if (self->mode == _CONVERTER_MODE_CUSTOM_CODE)
567 : mode_str =
568 0 : g_strdup_printf ("%s:%s", "custom-code", self->mode_option);
569 0 : else if (self->mode == _CONVERTER_MODE_CUSTOM_SCRIPT)
570 : mode_str =
571 0 : g_strdup_printf ("%s:%s", "custom-script", self->mode_option);
572 : }
573 2 : g_value_take_string (value, mode_str);
574 2 : break;
575 : }
576 0 : default:
577 0 : G_OBJECT_WARN_INVALID_PROPERTY_ID (object, prop_id, pspec);
578 0 : break;
579 : }
580 37 : }
581 :
582 : /**
583 : * @brief This function handles sink event.
584 : */
585 : static gboolean
586 3385 : gst_tensor_converter_sink_event (GstPad * pad, GstObject * parent,
587 : GstEvent * event)
588 : {
589 : GstTensorConverter *self;
590 :
591 3385 : self = GST_TENSOR_CONVERTER (parent);
592 :
593 3385 : GST_DEBUG_OBJECT (self, "Received %s event: %" GST_PTR_FORMAT,
594 : GST_EVENT_TYPE_NAME (event), event);
595 :
596 3385 : switch (GST_EVENT_TYPE (event)) {
597 864 : case GST_EVENT_CAPS:
598 : {
599 : GstCaps *in_caps;
600 :
601 864 : gst_event_parse_caps (event, &in_caps);
602 864 : silent_debug_caps (self, in_caps, "in-caps");
603 :
604 864 : if (gst_tensor_converter_parse_caps (self, in_caps)) {
605 834 : gst_tensor_converter_update_caps (self);
606 834 : gst_event_unref (event);
607 864 : return TRUE;
608 : } else {
609 30 : gst_event_unref (event);
610 30 : return FALSE;
611 : }
612 : break;
613 : }
614 0 : case GST_EVENT_FLUSH_STOP:
615 0 : gst_tensor_converter_reset (self);
616 0 : break;
617 829 : case GST_EVENT_SEGMENT:
618 : {
619 : GstSegment seg;
620 :
621 829 : gst_event_copy_segment (event, &seg);
622 829 : silent_debug (self, "received seg %s", gst_format_get_name (seg.format));
623 :
624 829 : self->segment = seg;
625 829 : self->have_segment = TRUE;
626 :
627 829 : if (seg.format == GST_FORMAT_TIME) {
628 829 : return gst_pad_push_event (self->srcpad, event);
629 : }
630 :
631 100 : if (seg.format == GST_FORMAT_BYTES) {
632 : /* handle seg event in chain function */
633 100 : self->need_segment = TRUE;
634 100 : gst_event_unref (event);
635 100 : return TRUE;
636 : }
637 :
638 0 : GST_ERROR_OBJECT (self, "Unsupported format = %s\n",
639 : gst_format_get_name (seg.format));
640 0 : gst_event_unref (event);
641 0 : return FALSE;
642 : }
643 1692 : default:
644 1692 : break;
645 : }
646 :
647 1692 : return gst_pad_event_default (pad, parent, event);
648 : }
649 :
650 : /**
651 : * @brief This function handles sink pad query.
652 : */
653 : static gboolean
654 6821 : gst_tensor_converter_sink_query (GstPad * pad, GstObject * parent,
655 : GstQuery * query)
656 : {
657 : GstTensorConverter *self;
658 :
659 6821 : self = GST_TENSOR_CONVERTER (parent);
660 6821 : GST_DEBUG_OBJECT (self, "Received %s query: %" GST_PTR_FORMAT,
661 : GST_QUERY_TYPE_NAME (query), query);
662 :
663 6821 : switch (GST_QUERY_TYPE (query)) {
664 4178 : case GST_QUERY_CAPS:
665 : {
666 : GstCaps *caps;
667 : GstCaps *filter;
668 :
669 4178 : gst_query_parse_caps (query, &filter);
670 4178 : caps = gst_tensor_converter_query_caps (self, pad, filter);
671 :
672 4178 : gst_query_set_caps_result (query, caps);
673 4178 : gst_caps_unref (caps);
674 4178 : return TRUE;
675 : }
676 1669 : case GST_QUERY_ACCEPT_CAPS:
677 : {
678 : GstCaps *caps;
679 : GstCaps *template_caps;
680 1669 : gboolean res = FALSE;
681 :
682 1669 : gst_query_parse_accept_caps (query, &caps);
683 1669 : silent_debug_caps (self, caps, "accept-caps");
684 :
685 1669 : if (gst_caps_is_fixed (caps)) {
686 1669 : template_caps = gst_pad_get_pad_template_caps (pad);
687 :
688 1669 : res = gst_caps_can_intersect (template_caps, caps);
689 1669 : gst_caps_unref (template_caps);
690 : }
691 :
692 1669 : gst_query_set_accept_caps_result (query, res);
693 1669 : return TRUE;
694 : }
695 974 : default:
696 974 : break;
697 : }
698 :
699 974 : return gst_pad_query_default (pad, parent, query);
700 : }
701 :
702 : /**
703 : * @brief Internal function to get adapter.
704 : */
705 : static GstAdapter *
706 121 : gst_tensor_converter_get_adapter (GstTensorConverter * self, GstBuffer * buf)
707 : {
708 : GstMetaQuery *meta;
709 121 : guint32 key = 0;
710 :
711 121 : meta = gst_buffer_get_meta_query (buf);
712 121 : if (meta)
713 0 : key = meta->client_id;
714 :
715 121 : return gst_tensor_aggregation_get_adapter (self->adapter_table, key);
716 : }
717 :
718 : /**
719 : * @brief This function handles src pad query.
720 : */
721 : static gboolean
722 5896 : gst_tensor_converter_src_query (GstPad * pad, GstObject * parent,
723 : GstQuery * query)
724 : {
725 : GstTensorConverter *self;
726 :
727 5896 : self = GST_TENSOR_CONVERTER (parent);
728 :
729 5896 : GST_DEBUG_OBJECT (self, "Received %s query: %" GST_PTR_FORMAT,
730 : GST_QUERY_TYPE_NAME (query), query);
731 :
732 5896 : switch (GST_QUERY_TYPE (query)) {
733 3416 : case GST_QUERY_CAPS:
734 : {
735 : GstCaps *caps;
736 : GstCaps *filter;
737 :
738 3416 : gst_query_parse_caps (query, &filter);
739 3416 : caps = gst_tensor_converter_query_caps (self, pad, filter);
740 :
741 3416 : gst_query_set_caps_result (query, caps);
742 3416 : gst_caps_unref (caps);
743 3416 : return TRUE;
744 : }
745 2480 : default:
746 2480 : break;
747 : }
748 :
749 2480 : return gst_pad_query_default (pad, parent, query);
750 : }
751 :
752 : /** @brief Chain function's private routine */
753 : static void
754 33390 : _gst_tensor_converter_chain_segment (GstTensorConverter * self,
755 : gsize frame_size)
756 : {
757 33390 : if (self->need_segment) {
758 : GstTensorsConfig *config;
759 : GstSegment seg;
760 : guint64 start;
761 : gboolean have_framerate;
762 :
763 100 : config = &self->tensors_config;
764 100 : have_framerate = (config->rate_n > 0 && config->rate_d > 0);
765 :
766 : /** This is an internal logic error. */
767 100 : g_assert (self->have_segment);
768 100 : start = self->segment.start;
769 :
770 100 : gst_segment_init (&seg, GST_FORMAT_TIME);
771 :
772 100 : if (have_framerate && start > 0) {
773 0 : start = gst_util_uint64_scale_int (start * config->rate_d, GST_SECOND,
774 0 : frame_size * config->rate_n);
775 0 : seg.start = seg.time = start;
776 : }
777 :
778 100 : self->segment = seg;
779 100 : self->need_segment = FALSE;
780 :
781 100 : gst_pad_push_event (self->srcpad, gst_event_new_segment (&seg));
782 : }
783 33390 : }
784 :
785 : /** @brief Chain function's private routine */
786 : static void
787 33390 : _gst_tensor_converter_chain_timestamp (GstTensorConverter * self,
788 : GstBuffer * inbuf, guint frames_in)
789 : {
790 33390 : if (self->set_timestamp) {
791 : GstTensorsConfig *config;
792 : GstClockTime pts, duration;
793 : gboolean have_framerate;
794 :
795 33370 : config = &self->tensors_config;
796 33370 : have_framerate = (config->rate_n > 0 && config->rate_d > 0);
797 :
798 : /* set duration */
799 33370 : duration = GST_BUFFER_DURATION (inbuf);
800 :
801 33370 : if (!GST_CLOCK_TIME_IS_VALID (duration)) {
802 371 : if (have_framerate) {
803 : duration =
804 78 : gst_util_uint64_scale_int ((guint64) frames_in * config->rate_d,
805 : GST_SECOND, config->rate_n);
806 :
807 78 : GST_BUFFER_DURATION (inbuf) = duration;
808 : }
809 : }
810 :
811 : /* set timestamp if buffer has invalid timestamp */
812 33370 : pts = GST_BUFFER_TIMESTAMP (inbuf);
813 :
814 33370 : if (!GST_CLOCK_TIME_IS_VALID (pts)) {
815 194 : pts = self->segment.start;
816 :
817 194 : if (have_framerate) {
818 46 : if (GST_CLOCK_TIME_IS_VALID (self->old_timestamp)) {
819 32 : pts = self->old_timestamp + duration;
820 : }
821 : } else {
822 : GstClock *clock;
823 :
824 148 : clock = gst_element_get_clock (GST_ELEMENT (self));
825 :
826 148 : if (clock) {
827 : GstClockTime now, base;
828 :
829 70 : base = gst_element_get_base_time (GST_ELEMENT (self));
830 70 : now = gst_clock_get_time (clock);
831 :
832 70 : pts = (base < now) ? (now - base) : 0;
833 70 : gst_object_unref (clock);
834 : }
835 : }
836 :
837 194 : GST_BUFFER_TIMESTAMP (inbuf) = pts;
838 : }
839 : }
840 :
841 : /* update old timestamp */
842 33390 : self->old_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
843 33390 : }
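/*
 * Worked example of the timestamp logic above (numbers are illustrative): with
 * config->rate_n/rate_d = 30/1 and frames_in = 1, a buffer without a valid
 * duration gets duration = 1 * 1 * GST_SECOND / 30, roughly 33.3 ms; a buffer
 * without a valid pts continues from old_timestamp + duration when a previous
 * timestamp is known, and otherwise falls back to the segment start or the
 * element clock.
 */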
844 :
845 : /** @brief Chain function's private routine to process octet stream */
846 : static GstBuffer *
847 234 : _gst_tensor_converter_chain_octet (GstTensorConverter * self, GstBuffer * buf)
848 : {
849 234 : GstBuffer *buffer = buf;
850 234 : GstTensorsInfo *info = &self->tensors_config.info;
851 234 : gboolean multi = (info->num_tensors > 1);
852 :
853 : /* configure multi tensors */
854 234 : if (multi || gst_buffer_n_memory (buf) > 1) {
855 : GstTensorInfo *_info;
856 : GstMemory *mem, *new_mem;
857 : gsize offset, size;
858 : guint i;
859 :
860 18 : g_assert (self->frames_per_tensor == 1);
861 :
862 18 : offset = 0;
863 18 : buffer = gst_buffer_new ();
864 18 : mem = gst_buffer_get_all_memory (buf);
865 :
866 18 : if (multi) {
867 53 : for (i = 0; i < info->num_tensors; ++i) {
868 36 : _info = gst_tensors_info_get_nth_info (info, i);
869 :
870 36 : size = gst_tensor_info_get_size (_info);
871 36 : new_mem = gst_memory_share (mem, offset, size);
872 36 : offset += size;
873 :
874 36 : gst_tensor_buffer_append_memory (buffer, new_mem, _info);
875 : }
876 :
877 17 : gst_memory_unref (mem);
878 : } else {
879 1 : _info = gst_tensors_info_get_nth_info (info, 0);
880 1 : gst_tensor_buffer_append_memory (buffer, mem, _info);
881 : }
882 :
883 : /* copy timestamps */
884 18 : gst_buffer_copy_into (buffer, buf, GST_BUFFER_COPY_METADATA, 0, -1);
885 18 : gst_buffer_unref (buf);
886 : }
887 :
888 234 : return buffer;
889 : }
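/*
 * Worked example of the multi-tensor split above (property values are
 * illustrative): with input-dim=2:1:1:1,3:1:1:1 and input-type=uint8,uint8
 * (and frames-per-tensor=1, as asserted), a 5-byte octet buffer is shared,
 * without copying, into two memories of 2 and 3 bytes and appended as
 * tensor #0 and tensor #1 of the outgoing buffer.
 */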
890 :
891 : /** @brief Chain function's private routine to process flex tensor */
892 : static GstBuffer *
893 220 : _gst_tensor_converter_chain_flex_tensor (GstTensorConverter * self,
894 : GstBuffer * buf)
895 : {
896 : GstBuffer *buffer;
897 : GstMemory *mem, *new_mem;
898 : GstTensorsInfo *info;
899 : GstTensorInfo *_info;
900 : GstTensorMetaInfo meta;
901 : guint i;
902 :
903 220 : info = &self->tensors_config.info;
904 220 : buffer = gst_buffer_new ();
905 :
906 445 : for (i = 0; i < info->num_tensors; i++) {
907 225 : _info = gst_tensors_info_get_nth_info (info, i);
908 225 : gst_tensor_info_convert_to_meta (_info, &meta);
909 :
910 : /* set media type */
911 225 : switch (self->in_media_type) {
912 225 : case _NNS_VIDEO:
913 : case _NNS_AUDIO:
914 : case _NNS_TEXT:
915 : case _NNS_OCTET:
916 225 : meta.media_type = self->in_media_type;
917 225 : break;
918 0 : default:
919 : /* default output type is tensor */
920 0 : meta.media_type = _NNS_TENSOR;
921 0 : break;
922 : }
923 :
924 225 : mem = gst_tensor_buffer_get_nth_memory (buf, i);
925 225 : new_mem = gst_tensor_meta_info_append_header (&meta, mem);
926 225 : gst_memory_unref (mem);
927 :
928 225 : gst_tensor_buffer_append_memory (buffer, new_mem, _info);
929 : }
930 :
931 220 : gst_buffer_copy_into (buffer, buf, GST_BUFFER_COPY_METADATA, 0, -1);
932 220 : gst_buffer_unref (buf);
933 220 : return buffer;
934 : }
935 :
936 : /** @brief Chain function's private routine to push buffer into src pad */
937 : static GstFlowReturn
938 33381 : _gst_tensor_converter_chain_push (GstTensorConverter * self, GstBuffer * buf)
939 : {
940 33381 : GstBuffer *buffer = buf;
941 :
942 33381 : if (self->in_media_type == _NNS_OCTET) {
943 : /* configure multi tensors */
944 234 : buffer = _gst_tensor_converter_chain_octet (self, buffer);
945 : }
946 :
947 : /* if output is flexible, add header. */
948 33381 : if (!self->do_not_append_header
949 33363 : && gst_tensor_pad_caps_is_flexible (self->srcpad)) {
950 220 : buffer = _gst_tensor_converter_chain_flex_tensor (self, buffer);
951 : }
952 :
953 33381 : silent_debug_timestamp (self, buffer);
954 33381 : return gst_pad_push (self->srcpad, buffer);
955 : }
956 :
957 : /** @brief Chain function's private routine to push multiple buffers */
958 : static GstFlowReturn
959 121 : _gst_tensor_converter_chain_chunk (GstTensorConverter * self,
960 : GstBuffer * inbuf, guint frames_in, guint frames_out, gsize frame_size)
961 : {
962 : GstAdapter *adapter;
963 : GstTensorsConfig *config;
964 121 : GstFlowReturn ret = GST_FLOW_OK;
965 : GstClockTime pts, dts, duration;
966 : gsize avail, out_size;
967 : gboolean have_framerate;
968 :
969 121 : config = &self->tensors_config;
970 121 : adapter = gst_tensor_converter_get_adapter (self, inbuf);
971 121 : g_assert (adapter != NULL);
972 :
973 121 : have_framerate = (config->rate_n > 0 && config->rate_d > 0);
974 :
975 121 : duration = GST_BUFFER_DURATION (inbuf);
976 121 : if (GST_CLOCK_TIME_IS_VALID (duration)) {
977 : /** assume the same duration for each incoming buffer */
978 119 : duration = gst_util_uint64_scale_int (duration, frames_out, frames_in);
979 : }
980 :
981 121 : gst_adapter_push (adapter, inbuf);
982 :
983 121 : out_size = frames_out * frame_size;
984 233 : while ((avail = gst_adapter_available (adapter)) >= out_size &&
985 : ret == GST_FLOW_OK) {
986 : GstBuffer *outbuf;
987 : guint64 pts_dist, dts_dist;
988 :
989 112 : pts = gst_adapter_prev_pts (adapter, &pts_dist);
990 112 : dts = gst_adapter_prev_dts (adapter, &dts_dist);
991 :
992 : /**
993 : * Update timestamp.
994 : * If frames-in is larger than frames-out, the same timestamp (pts and dts) would be returned.
995 : */
996 112 : if (frames_in > 1 && have_framerate) {
997 47 : if (GST_CLOCK_TIME_IS_VALID (pts)) {
998 47 : pts +=
999 47 : gst_util_uint64_scale_int (pts_dist * config->rate_d, GST_SECOND,
1000 47 : config->rate_n * frame_size);
1001 : }
1002 :
1003 47 : if (GST_CLOCK_TIME_IS_VALID (dts)) {
1004 20 : dts +=
1005 20 : gst_util_uint64_scale_int (dts_dist * config->rate_d, GST_SECOND,
1006 20 : config->rate_n * frame_size);
1007 : }
1008 : }
1009 :
1010 112 : outbuf = gst_adapter_take_buffer (adapter, out_size);
1011 112 : outbuf = gst_buffer_make_writable (outbuf);
1012 :
1013 : /** set timestamp */
1014 112 : GST_BUFFER_PTS (outbuf) = pts;
1015 112 : GST_BUFFER_DTS (outbuf) = dts;
1016 112 : GST_BUFFER_DURATION (outbuf) = duration;
1017 :
1018 112 : ret = _gst_tensor_converter_chain_push (self, outbuf);
1019 : }
1020 :
1021 121 : return ret;
1022 : }
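/*
 * Worked example of the chunking above (values are illustrative): for 16 kHz
 * mono S16LE audio (frame_size = 2 bytes) with frames-per-tensor=1600,
 * out_size = 3200 bytes; incoming buffers accumulate in the adapter and one
 * output buffer is pushed per 3200 available bytes, while pts/dts are taken
 * from the adapter and, when frames_in > 1 and a framerate is known, advanced
 * by pts_dist / frame_size frames, i.e. 100 ms per chunk at this rate.
 */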
1023 :
1024 : /**
1025 : * @brief Chain function, this function does the actual processing.
1026 : */
1027 : static GstFlowReturn
1028 33392 : gst_tensor_converter_chain (GstPad * pad, GstObject * parent, GstBuffer * buf)
1029 : {
1030 : GstTensorConverter *self;
1031 : GstTensorsConfig *config;
1032 : GstTensorsConfig new_config;
1033 : GstTensorInfo *_info;
1034 : GstBuffer *inbuf;
1035 : gsize buf_size, frame_size;
1036 : guint frames_in, frames_out;
1037 : UNUSED (pad);
1038 :
1039 33392 : buf_size = gst_buffer_get_size (buf);
1040 66783 : g_return_val_if_fail (buf_size > 0, GST_FLOW_ERROR);
1041 :
1042 33392 : self = GST_TENSOR_CONVERTER (parent);
1043 :
1044 : /** This is an internal logic error. */
1045 33392 : g_assert (self->tensors_configured);
1046 33392 : config = &self->tensors_config;
1047 33392 : gst_tensors_config_init (&new_config);
1048 :
1049 33392 : frames_out = self->frames_per_tensor;
1050 33392 : inbuf = buf;
1051 :
1052 : /**
1053 : * Assume 1 frame per buffer (default).
1054 : * Update frame size for each media type.
1055 : */
1056 33392 : frame_size = self->frame_size;
1057 33392 : frames_in = 1;
1058 :
1059 33392 : switch (self->in_media_type) {
1060 32773 : case _NNS_VIDEO:
1061 : {
1062 : guint color, width, height;
1063 : gsize type;
1064 :
1065 32773 : color = config->info.info[0].dimension[0];
1066 32773 : width = config->info.info[0].dimension[1];
1067 32773 : height = config->info.info[0].dimension[2];
1068 32773 : type = gst_tensor_get_element_size (config->info.info[0].type);
1069 :
1070 : /** type * colorspace * width * height */
1071 32773 : frame_size = type * color * width * height;
1072 :
1073 : /** assume 1 frame per buffer */
1074 32773 : g_assert ((buf_size / self->frame_size) == 1);
1075 :
1076 32773 : if (self->remove_padding) {
1077 : GstMapInfo src_info, dest_info;
1078 : guint d0, d1;
1079 110 : unsigned int src_idx = 0, dest_idx = 0;
1080 : size_t size, offset;
1081 :
1082 110 : if (!gst_buffer_map (buf, &src_info, GST_MAP_READ)) {
1083 0 : ml_logf
1084 : ("tensor_converter: Cannot map src buffer at tensor_converter/video. The incoming buffer (GstBuffer) for the sinkpad of tensor_converter cannot be mapped for reading.\n");
1085 0 : goto error;
1086 : }
1087 :
1088 110 : inbuf = gst_buffer_new_and_alloc (frame_size);
1089 110 : gst_buffer_memset (inbuf, 0, 0, frame_size);
1090 110 : if (!gst_buffer_map (inbuf, &dest_info, GST_MAP_WRITE)) {
1091 0 : ml_logf
1092 : ("tensor_converter: Cannot map dest buffer at tensor_converter/video. The outgoing buffer (GstBuffer) for the srcpad of tensor_converter cannot be mapped for writing.\n");
1093 0 : gst_buffer_unmap (buf, &src_info);
1094 0 : gst_buffer_unref (inbuf); /* the new buffer is wasted. */
1095 0 : goto error;
1096 : }
1097 :
1098 : /**
1099 : * Refer: https://gstreamer.freedesktop.org/documentation/design/mediatype-video-raw.html
1100 : */
1101 110 : size = offset = type * color * width;
1102 :
1103 110 : g_assert (offset % 4); /** Internal logic error! */
1104 110 : if (offset % 4) {
1105 110 : offset += 4 - (offset % 4);
1106 : }
1107 :
1108 220 : for (d0 = 0; d0 < frames_in; d0++) {
1109 11095 : for (d1 = 0; d1 < height; d1++) {
1110 10985 : memcpy (dest_info.data + dest_idx, src_info.data + src_idx, size);
1111 10985 : dest_idx += size;
1112 10985 : src_idx += offset;
1113 : }
1114 : }
1115 :
1116 110 : gst_buffer_unmap (buf, &src_info);
1117 110 : gst_buffer_unmap (inbuf, &dest_info);
1118 :
1119 : /** copy timestamps */
1120 110 : gst_buffer_copy_into (inbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1);
1121 : }
1122 32773 : break;
1123 : }
1124 134 : case _NNS_AUDIO:
1125 : /* number of bytes for one frame */
1126 134 : frames_in = buf_size / frame_size;
1127 134 : break;
1128 36 : case _NNS_TEXT:
1129 36 : if (buf_size != frame_size) {
1130 : GstMapInfo src_info, dest_info;
1131 20 : gsize block_size = MIN (buf_size, frame_size);
1132 :
1133 20 : if (!gst_buffer_map (buf, &src_info, GST_MAP_READ)) {
1134 0 : ml_logf
1135 : ("tensor_converter: Cannot map src buffer at tensor_converter/text. The incoming buffer (GstBuffer) for the sinkpad of tensor_converter cannot be mapped for reading.\n");
1136 0 : goto error;
1137 : }
1138 :
1139 20 : inbuf = gst_buffer_new_and_alloc (frame_size);
1140 20 : gst_buffer_memset (inbuf, 0, 0, frame_size);
1141 20 : if (!gst_buffer_map (inbuf, &dest_info, GST_MAP_WRITE)) {
1142 0 : ml_logf
1143 : ("tensor_converter: Cannot map dest buffer at tensor_converter/text. The outgoing buffer (GstBuffer) for the srcpad of tensor_converter cannot be mapped for writing.\n");
1144 0 : gst_buffer_unmap (buf, &src_info);
1145 0 : gst_buffer_unref (inbuf); /* the new buffer is wasted. */
1146 0 : goto error;
1147 : }
1148 :
1149 20 : memcpy (dest_info.data, src_info.data, block_size);
1150 :
1151 20 : gst_buffer_unmap (buf, &src_info);
1152 20 : gst_buffer_unmap (inbuf, &dest_info);
1153 :
1154 : /** copy timestamps */
1155 20 : gst_buffer_copy_into (inbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1);
1156 : }
1157 36 : break;
1158 206 : case _NNS_OCTET:
1159 206 : if (gst_tensors_config_is_flexible (config)) {
1160 : /* update dimension with buffer size */
1161 3 : config->info.info[0].dimension[0] = buf_size;
1162 3 : frame_size = buf_size;
1163 : } else {
1164 : /* get frame size from the properties */
1165 203 : g_assert ((buf_size % frame_size) == 0); /** @todo need rewrite. do not use assert */
1166 203 : frames_in = buf_size / frame_size;
1167 : }
1168 206 : break;
1169 9 : case _NNS_TENSOR:
1170 : {
1171 : GstTensorMetaInfo meta;
1172 : GstTensorsConfig tmp;
1173 : GstMemory *mem, *new_mem;
1174 : gsize s1, s2, hsize;
1175 : guint n;
1176 :
1177 9 : gst_tensors_config_init (&tmp);
1178 9 : tmp.rate_n = config->rate_n;
1179 9 : tmp.rate_d = config->rate_d;
1180 :
1181 9 : tmp.info.format = _NNS_TENSOR_FORMAT_FLEXIBLE;
1182 9 : buf = gst_tensor_buffer_from_config (buf, &tmp);
1183 :
1184 : /* type and dimension from buffer */
1185 9 : tmp.info.format = _NNS_TENSOR_FORMAT_STATIC;
1186 9 : tmp.info.num_tensors = gst_tensor_buffer_get_count (buf);
1187 :
1188 : /* compare data size and append memory */
1189 9 : inbuf = gst_buffer_new ();
1190 :
1191 19 : for (n = 0; n < tmp.info.num_tensors; n++) {
1192 11 : _info = gst_tensors_info_get_nth_info (&tmp.info, n);
1193 11 : mem = gst_tensor_buffer_get_nth_memory (buf, n);
1194 11 : s1 = gst_memory_get_sizes (mem, NULL, NULL);
1195 :
1196 : /* flex-tensor has header in each mem block */
1197 11 : gst_tensor_meta_info_parse_memory (&meta, mem);
1198 11 : gst_tensor_meta_info_convert (&meta, _info);
1199 11 : hsize = gst_tensor_meta_info_get_header_size (&meta);
1200 11 : s1 -= hsize;
1201 :
1202 11 : s2 = gst_tensor_info_get_size (_info);
1203 :
1204 : /**
1205 : * @todo expand mem if given property is larger than mem size.
1206 : * Now compare same size, later we should modify mem block if developer sets different dimension.
1207 : */
1208 11 : if (s1 != s2) {
1209 1 : nns_loge
1210 : ("Cannot process an incoming buffer frame for tensor_converter (chain function). It appears that it is trying to convert other/tensors,format=flexible to other/tensors,format=static. Incoming buffer has invalid data size %zd, expected size is %zd (%u/%u).",
1211 : s1, s2, (n + 1), tmp.info.num_tensors);
1212 1 : gst_memory_unref (mem);
1213 1 : gst_buffer_unref (inbuf);
1214 2 : goto error;
1215 : }
1216 :
1217 10 : new_mem = gst_memory_share (mem, hsize, s1);
1218 10 : gst_memory_unref (mem);
1219 10 : gst_tensor_buffer_append_memory (inbuf, new_mem, _info);
1220 : }
1221 :
1222 8 : gst_buffer_copy_into (inbuf, buf, GST_BUFFER_COPY_METADATA, 0, -1);
1223 :
1224 8 : if (!gst_tensors_config_is_equal (config, &tmp)) {
1225 3 : if (gst_tensors_info_validate (&self->tensors_info)) {
1226 1 : nns_loge
1227 : ("Incoming buffer does not match with given tensors info. It appears that it is trying to convert other/tensors,format=flexible to other/tensors,format=static. The converted output appears not compatible with the given configuration.");
1228 1 : gst_buffer_unref (inbuf);
1229 1 : goto error;
1230 : }
1231 :
1232 : /* update caps with new configuration */
1233 2 : *config = tmp;
1234 2 : gst_tensor_converter_update_caps (self);
1235 : }
1236 7 : break;
1237 : }
1238 234 : case _NNS_MEDIA_ANY:
1239 : {
1240 234 : if (self->mode == _CONVERTER_MODE_CUSTOM_CODE) {
1241 1 : if (self->custom.func == NULL) {
1242 0 : nns_loge
1243 : ("Tensor converter is in custom/code mode (mode=custom-code:${funcname}), where a user code as a callback function is required. However, the required information to configure the tensor converter is not given or incorrectly given. For detail, please refer to https://github.com/nnstreamer/nnstreamer/blob/main/gst/nnstreamer/elements/gsttensor_converter.md#custom-converter. The given ${funcname} is \"%s\", which is an invalid/unregistered name.",
1244 : self->mode_option);
1245 0 : goto error;
1246 : }
1247 1 : inbuf = self->custom.func (buf, self->custom.data, &new_config);
1248 :
1249 1 : if (inbuf == NULL) {
1250 0 : nns_loge
1251 : ("Failed to convert input streams to tensors: the converted result of the incoming buffer is NULL. The converter is custom-func with %s function, which is available and loaded, but has returned NULL buffer after the conversion.",
1252 : self->mode_option);
1253 0 : goto error;
1254 : }
1255 233 : } else if (self->externalConverter && self->externalConverter->convert) {
1256 : inbuf =
1257 233 : self->externalConverter->convert (buf, &new_config,
1258 : self->priv_data);
1259 :
1260 233 : if (inbuf == NULL) {
1261 0 : nns_loge
1262 : ("Failed to convert input streams to tensors: the converted result of the incoming buffer is NULL. The converter is using external tensor_converter subplugin (%s), which is available and loaded, but has returned NULL buffer after the conversion.",
1263 : self->externalConverter->name);
1264 0 : goto error;
1265 : }
1266 0 : } else if (self->mode == _CONVERTER_MODE_CUSTOM_SCRIPT) {
1267 : /* self->externalConverter->convert should've been available! */
1268 0 : GST_ERROR_OBJECT (self,
1269 : "Tensor converter is in custom/script mode (mode=custom-script:${scriptpath}), where a path to a script file is required. However, it is not properly configured. The given ${scriptpath} is \"%s\".",
1270 : self->mode_option);
1271 0 : goto error;
1272 : } else {
1273 : /** @todo identify and print out the given input stream caps. */
1274 0 : GST_ERROR_OBJECT (self,
1275 : "Tensor converter has an undefined behavior with type _NNS_MEDIA_ANY. It should've been custom-code or custom-script mode or a corresponding external converter should've been registered (tensor_converter subplugin). However, nothing is available for the given input stream.");
1276 0 : goto error;
1277 : }
1278 234 : self->do_not_append_header =
1279 234 : (new_config.info.format == _NNS_TENSOR_FORMAT_FLEXIBLE);
1280 :
1281 234 : frames_in = 1;
1282 234 : frame_size = gst_buffer_get_size (inbuf);
1283 :
1284 234 : if (!gst_tensors_config_is_equal (config, &new_config)) {
1285 72 : gst_tensors_config_free (config);
1286 72 : *config = new_config;
1287 :
1288 72 : gst_tensor_converter_update_caps (self);
1289 : } else {
1290 162 : gst_tensors_config_free (&new_config);
1291 : }
1292 :
1293 234 : break;
1294 : }
1295 0 : case _NNS_MEDIA_INVALID:
1296 0 : GST_ERROR_OBJECT (self,
1297 : "The incoming tensor to be converted has no type defined (INVALID). This is an internal unknown error. Please report the case to https://github.com/nnstreamer/issues with the pipeline description reproducing the error.");
1298 0 : goto error;
1299 : break;
1300 0 : default:
1301 0 : GST_ERROR_OBJECT (self,
1302 : "The incoming tensor to be converted has unknown type (type value not recognized: %d). This is an internal unknown error. Please report the case to https://github.com/nnstreamer/issues with the pipeline description reproducing the error.",
1303 : self->in_media_type);
1304 0 : goto error;
1305 : }
1306 :
1307 33390 : if (inbuf != buf)
1308 371 : gst_buffer_unref (buf);
1309 :
1310 : /** Convert the segment format (bytes to time) and push a segment event
1311 : * if needed (self->need_segment is true). */
1312 33390 : _gst_tensor_converter_chain_segment (self, frame_size);
1313 :
1314 : /** configures timestamp if required (self->set_timestamp is true) */
1315 33390 : _gst_tensor_converter_chain_timestamp (self, inbuf, frames_in);
1316 :
1317 33390 : if (frames_in == frames_out) {
1318 : /** do nothing, push the incoming buffer */
1319 33269 : return _gst_tensor_converter_chain_push (self, inbuf);
1320 : }
1321 :
1322 : /* push multiple buffers */
1323 121 : return _gst_tensor_converter_chain_chunk (self, inbuf, frames_in,
1324 : frames_out, frame_size);
1325 :
1326 2 : error:
1327 2 : gst_buffer_unref (buf);
1328 2 : gst_tensors_config_free (&new_config);
1329 2 : return GST_FLOW_ERROR;
1330 : }
1331 :
1332 : /**
1333 : * @brief Called to perform state change.
1334 : */
1335 : static GstStateChangeReturn
1336 4970 : gst_tensor_converter_change_state (GstElement * element,
1337 : GstStateChange transition)
1338 : {
1339 : GstTensorConverter *self;
1340 : GstStateChangeReturn ret;
1341 :
1342 4970 : self = GST_TENSOR_CONVERTER (element);
1343 :
1344 4970 : switch (transition) {
1345 856 : case GST_STATE_CHANGE_READY_TO_PAUSED:
1346 856 : gst_tensor_converter_reset (self);
1347 856 : break;
1348 4114 : default:
1349 4114 : break;
1350 : }
1351 :
1352 4970 : ret = GST_ELEMENT_CLASS (parent_class)->change_state (element, transition);
1353 :
1354 4970 : switch (transition) {
1355 804 : case GST_STATE_CHANGE_PAUSED_TO_READY:
1356 804 : gst_tensor_converter_reset (self);
1357 804 : break;
1358 4166 : default:
1359 4166 : break;
1360 : }
1361 :
1362 4970 : return ret;
1363 : }
1364 :
1365 : /**
1366 : * @brief Clear and reset data.
1367 : */
1368 : static void
1369 3407 : gst_tensor_converter_reset (GstTensorConverter * self)
1370 : {
1371 : /* remove all buffers from adapter */
1372 3407 : gst_tensor_aggregation_clear_all (self->adapter_table);
1373 :
1374 3407 : self->have_segment = FALSE;
1375 3407 : self->need_segment = FALSE;
1376 3407 : gst_segment_init (&self->segment, GST_FORMAT_TIME);
1377 :
1378 3407 : self->old_timestamp = GST_CLOCK_TIME_NONE;
1379 3407 : }
1380 :
1381 : /**
1382 : * @brief Get supported format list.
1383 : */
1384 : static void
1385 317 : gst_tensor_converter_get_format_list (GValue * list, ...)
1386 : {
1387 317 : GValue item = G_VALUE_INIT;
1388 : gchar *str;
1389 : va_list args;
1390 :
1391 317 : g_value_init (list, GST_TYPE_LIST);
1392 :
1393 317 : va_start (args, list);
1394 960 : while ((str = va_arg (args, gchar *))) {
1395 643 : g_value_init (&item, G_TYPE_STRING);
1396 643 : g_value_set_string (&item, str);
1397 :
1398 643 : gst_value_list_append_value (list, &item);
1399 643 : g_value_unset (&item);
1400 : }
1401 317 : va_end (args);
1402 317 : }
1403 :
1404 : /**
1405 : * @brief Determine if we need zero-padding
1406 : * @return TRUE if per-row stride padding needs to be removed from (or added to) the stream data.
1407 : */
1408 : static gboolean
1409 620 : gst_tensor_converter_video_stride (GstVideoFormat format, gint width)
1410 : {
1411 : /**
1412 : * @todo The actual list is much longer, fill them.
1413 : * (read https://gstreamer.freedesktop.org/documentation/design/mediatype-video-raw.html)
1414 : */
1415 620 : switch (format) {
1416 530 : case GST_VIDEO_FORMAT_GRAY8:
1417 : case GST_VIDEO_FORMAT_RGB:
1418 : case GST_VIDEO_FORMAT_BGR:
1419 : case GST_VIDEO_FORMAT_I420:
1420 : #if GST_CHECK_VERSION(1, 20, 0)
1421 : case GST_VIDEO_FORMAT_RGBP:
1422 : case GST_VIDEO_FORMAT_BGRP:
1423 : #endif
1424 530 : if (width % 4) {
1425 32 : return TRUE;
1426 : }
1427 498 : break;
1428 90 : default:
1429 90 : break;
1430 : }
1431 :
1432 588 : return FALSE;
1433 : }
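/*
 * Worked example (widths are illustrative): for RGB with width = 157, one row
 * is 3 * 157 = 471 bytes, which GStreamer pads to a 472-byte stride, so this
 * returns TRUE and the converter later strips one padding byte per row; with
 * width = 160, a row is 480 bytes (already a multiple of 4) and no padding is
 * involved.
 */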
1434 :
1435 : /**
1436 : * @brief Set the tensors config structure from video info (internal static function)
1437 : * @param self this pointer to GstTensorConverter
1438 : * @param caps caps for media stream
1439 : * @param config tensors config structure to be filled
1440 : * @note Change dimension if tensor contains N frames.
1441 : * @return TRUE if supported type
1442 : */
1443 : static gboolean
1444 620 : gst_tensor_converter_parse_video (GstTensorConverter * self,
1445 : const GstCaps * caps, GstTensorsConfig * config)
1446 : {
1447 : /**
1448 : * Refer: https://www.tensorflow.org/api_docs/python/tf/summary/image
1449 : * A 4-D uint8 or float32 Tensor of shape [batch_size, height, width, channels]
1450 : * where channels is 1, 3, or 4.
1451 : */
1452 : GstVideoInfo vinfo;
1453 : GstVideoFormat format;
1454 : gint width, height, views;
1455 : guint i;
1456 :
1457 1240 : g_return_val_if_fail (config != NULL, FALSE);
1458 :
1459 620 : gst_tensors_config_init (config);
1460 :
1461 620 : gst_video_info_init (&vinfo);
1462 620 : if (!gst_video_info_from_caps (&vinfo, caps)) {
1463 0 : char *capstr = gst_caps_to_string (caps);
1464 0 : GST_ERROR_OBJECT (self,
1465 : "Failed to get video info from caps; gst_video_info_from_caps (&info, \"%s\") has returned FALSE, which means the given caps cannot be parsed as a video.",
1466 : capstr);
1467 0 : g_free (capstr);
1468 0 : return FALSE;
1469 : }
1470 :
1471 620 : format = GST_VIDEO_INFO_FORMAT (&vinfo);
1472 620 : width = GST_VIDEO_INFO_WIDTH (&vinfo);
1473 620 : height = GST_VIDEO_INFO_HEIGHT (&vinfo);
1474 620 : views = GST_VIDEO_INFO_VIEWS (&vinfo);
1475 :
1476 620 : if (views > 1) {
1477 0 : GST_WARNING_OBJECT (self,
1478 : "Incoming video caps should have 'views=(int)1 but has views=(int)%d - ignoring all but view #0. \n",
1479 : views);
1480 : }
1481 :
1482 620 : config->info.num_tensors = 1;
1483 :
1484 : /* [color-space][width][height][frames] */
1485 620 : switch (format) {
1486 18 : case GST_VIDEO_FORMAT_GRAY8:
1487 18 : config->info.info[0].type = _NNS_UINT8;
1488 18 : config->info.info[0].dimension[0] = 1;
1489 18 : config->info.info[0].dimension[1] = width;
1490 18 : config->info.info[0].dimension[2] = height;
1491 18 : break;
1492 6 : case GST_VIDEO_FORMAT_GRAY16_BE:
1493 : case GST_VIDEO_FORMAT_GRAY16_LE:
1494 6 : config->info.info[0].type = _NNS_UINT16;
1495 6 : config->info.info[0].dimension[0] = 1;
1496 6 : config->info.info[0].dimension[1] = width;
1497 6 : config->info.info[0].dimension[2] = height;
1498 6 : break;
1499 510 : case GST_VIDEO_FORMAT_RGB:
1500 : case GST_VIDEO_FORMAT_BGR:
1501 510 : config->info.info[0].type = _NNS_UINT8;
1502 510 : config->info.info[0].dimension[0] = 3;
1503 510 : config->info.info[0].dimension[1] = width;
1504 510 : config->info.info[0].dimension[2] = height;
1505 510 : break;
1506 84 : case GST_VIDEO_FORMAT_RGBx:
1507 : case GST_VIDEO_FORMAT_BGRx:
1508 : case GST_VIDEO_FORMAT_xRGB:
1509 : case GST_VIDEO_FORMAT_xBGR:
1510 : case GST_VIDEO_FORMAT_RGBA:
1511 : case GST_VIDEO_FORMAT_BGRA:
1512 : case GST_VIDEO_FORMAT_ARGB:
1513 : case GST_VIDEO_FORMAT_ABGR:
1514 84 : config->info.info[0].type = _NNS_UINT8;
1515 84 : config->info.info[0].dimension[0] = 4;
1516 84 : config->info.info[0].dimension[1] = width;
1517 84 : config->info.info[0].dimension[2] = height;
1518 84 : break;
1519 : #if GST_CHECK_VERSION(1, 20, 0)
1520 2 : case GST_VIDEO_FORMAT_RGBP:
1521 : case GST_VIDEO_FORMAT_BGRP:
1522 2 : config->info.info[0].type = _NNS_UINT8;
1523 2 : config->info.info[0].dimension[0] = width;
1524 2 : config->info.info[0].dimension[1] = height;
1525 2 : config->info.info[0].dimension[2] = 3;
1526 2 : break;
1527 : #endif
1528 0 : default:
1529 0 : GST_WARNING_OBJECT (self,
1530 : "The given video caps with format \"%s\" is not supported. Please use " NNS_VIDEO_FORMAT,
1531 : GST_STR_NULL (gst_video_format_to_string (format)));
1532 0 : break;
1533 : }
1534 :
1535 : /* Assume 1 frame per tensor; dimension[3] changes if the tensor contains N frames. */
1536 620 : config->info.info[0].dimension[3] = 1;
1537 8060 : for (i = 4; i < NNS_TENSOR_RANK_LIMIT; i++)
1538 7440 : config->info.info[0].dimension[i] = 0;
1539 :
1540 620 : config->rate_n = GST_VIDEO_INFO_FPS_N (&vinfo);
1541 620 : config->rate_d = GST_VIDEO_INFO_FPS_D (&vinfo);
1542 :
1543 : /**
1544 : * Emit a warning if the row stride is rounded up to a multiple of 4 (RSTRIDE = RU4, e.g., 3 BPP formats) and width % 4 > 0.
1545 : * @todo Add more conditions!
1546 : */
1547 620 : if (gst_tensor_converter_video_stride (format, width)) {
1548 32 : self->remove_padding = TRUE;
1549 32 : silent_debug (self, "Set flag to remove padding, width = %d", width);
1550 :
1551 : #if GST_CHECK_VERSION(1, 20, 0)
1552 32 : if (format == GST_VIDEO_FORMAT_RGBP || format == GST_VIDEO_FORMAT_BGRP) {
1553 0 : if (self->remove_padding) {
1554 0 : GST_ERROR_OBJECT (self,
1555 : "Padding removal is not supported for RGBP and BGRP formats. Please use width as multiple of 4. Given width: %d",
1556 : width);
1557 0 : return FALSE;
1558 : }
1559 : }
1560 : #endif
1561 :
1562 : /** @todo need rewrite. */
1563 32 : GST_WARNING_OBJECT (self,
1564 : "\nYOUR STREAM CONFIGURATION INCURS PERFORMANCE DETERIORATION!\n"
1565 : "Please use 4 x n as image width for inputs; the width of your input is %d.\n",
1566 : width);
1567 : }
1568 :
1569 620 : self->frame_size = GST_VIDEO_INFO_SIZE (&vinfo);
1570 620 : return (config->info.info[0].type != _NNS_END);
1571 : }
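/*
 * Worked example (caps values are illustrative):
 * video/x-raw,format=RGB,width=640,height=480,framerate=30/1 maps to a single
 * uint8 tensor of dimension 3:640:480:1 with rate 30/1, and
 * frame_size = GST_VIDEO_INFO_SIZE = 640 * 3 * 480 = 921600 bytes (no row
 * padding, since 640 * 3 is already a multiple of 4).
 */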
1572 :
1573 : /**
1574 : * @brief Set the tensors config structure from audio info (internal static function)
1575 : * @param self this pointer to GstTensorConverter
1576 : * @param caps caps for media stream
1577 : * @param config tensors config structure to be filled
1578 : * @note Change dimension if tensor contains N frames.
1579 : * @return TRUE if supported type
1580 : */
1581 : static gboolean
1582 37 : gst_tensor_converter_parse_audio (GstTensorConverter * self,
1583 : const GstCaps * caps, GstTensorsConfig * config)
1584 : {
1585 : /**
1586 : * Refer: https://www.tensorflow.org/api_docs/python/tf/summary/audio
1587 : * A 3-D float32 Tensor of shape [batch_size, frames, channels]
1588 : * or a 2-D float32 Tensor of shape [batch_size, frames].
1589 : */
1590 : GstAudioInfo ainfo;
1591 : GstAudioFormat format;
1592 : gint channels;
1593 : guint i;
1594 :
1595 74 : g_return_val_if_fail (config != NULL, FALSE);
1596 :
1597 37 : gst_tensors_config_init (config);
1598 :
1599 37 : gst_audio_info_init (&ainfo);
1600 37 : if (!gst_audio_info_from_caps (&ainfo, caps)) {
1601 0 : char *capstr = gst_caps_to_string (caps);
1602 0 : GST_ERROR_OBJECT (self,
1603 : "Failed to get audio info from caps; gst_audio_info_from_caps(&info, \"%s\") has returned FALSE.\n",
1604 : capstr);
1605 0 : g_free (capstr);
1606 0 : return FALSE;
1607 : }
1608 :
1609 37 : format = GST_AUDIO_INFO_FORMAT (&ainfo);
1610 37 : channels = GST_AUDIO_INFO_CHANNELS (&ainfo);
1611 :
1612 37 : config->info.num_tensors = 1;
1613 :
1614 : /* [channels][frames] */
1615 37 : switch (format) {
1616 1 : case GST_AUDIO_FORMAT_S8:
1617 1 : config->info.info[0].type = _NNS_INT8;
1618 1 : break;
1619 7 : case GST_AUDIO_FORMAT_U8:
1620 7 : config->info.info[0].type = _NNS_UINT8;
1621 7 : break;
1622 16 : case GST_AUDIO_FORMAT_S16:
1623 16 : config->info.info[0].type = _NNS_INT16;
1624 16 : break;
1625 8 : case GST_AUDIO_FORMAT_U16:
1626 8 : config->info.info[0].type = _NNS_UINT16;
1627 8 : break;
1628 2 : case GST_AUDIO_FORMAT_S32:
1629 2 : config->info.info[0].type = _NNS_INT32;
1630 2 : break;
1631 1 : case GST_AUDIO_FORMAT_U32:
1632 1 : config->info.info[0].type = _NNS_UINT32;
1633 1 : break;
1634 1 : case GST_AUDIO_FORMAT_F32:
1635 1 : config->info.info[0].type = _NNS_FLOAT32;
1636 1 : break;
1637 1 : case GST_AUDIO_FORMAT_F64:
1638 1 : config->info.info[0].type = _NNS_FLOAT64;
1639 1 : break;
1640 0 : default:
1641 0 : GST_WARNING_OBJECT (self,
1642 : "Audio format \"%s\" is not supported. Please use S8, U8, S16, U16, S32, U32, F32, or F64.\n",
1643 : GST_STR_NULL (gst_audio_format_to_string (format)));
1644 0 : break;
1645 : }
1646 :
1647 37 : config->info.info[0].dimension[0] = channels;
1648 :
1649 : /* Assume 1 frame per tensor; dimension[1] is changed if the tensor contains N frames. */
1650 37 : config->info.info[0].dimension[1] = 1;
1651 555 : for (i = 2; i < NNS_TENSOR_RANK_LIMIT; i++)
1652 518 : config->info.info[0].dimension[i] = 0;
1653 :
1654 37 : config->rate_n = GST_AUDIO_INFO_RATE (&ainfo);
1655 37 : config->rate_d = 1;
1656 :
1657 37 : self->frame_size = GST_AUDIO_INFO_BPF (&ainfo);
1658 37 : return (config->info.info[0].type != _NNS_END);
1659 : }
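
/**
 * Illustrative sketch, not part of the measured source: for 2-channel S16
 * audio the mapping above yields type = _NNS_INT16 and dimension
 * channels(2) : frames(1), while self->frame_size equals GstAudioInfo's
 * bytes-per-frame. The hypothetical helper below spells that out for S16.
 */
static gsize
example_s16_audio_frame_size (guint channels)
{
  /* one S16 audio frame: channels x 2 bytes per sample */
  return (gsize) channels * 2;
}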
1660 :
1661 : /**
1662 : * @brief Set the tensors config structure from text info (internal static function)
1663 : * @param self this pointer to GstTensorConverter
1664 : * @param config tensors config structure to be filled
1665 : * @param structure caps structure
1666 : * @note Change dimension if the tensor contains N frames.
1667 : * @return TRUE if supported type
1668 : */
1669 : static gboolean
1670 10 : gst_tensor_converter_parse_text (GstTensorConverter * self,
1671 : GstTensorsConfig * config, const GstStructure * structure)
1672 : {
1673 : /**
1674 : * Refer: https://www.tensorflow.org/api_docs/python/tf/summary/text
1675 : * A string-type Tensor
1676 : */
1677 : const gchar *format_string;
1678 : guint i, text_size;
1679 :
1680 10 : g_return_val_if_fail (config != NULL, FALSE);
1681 10 : g_return_val_if_fail (structure != NULL, FALSE);
1682 :
1683 10 : gst_tensors_config_init (config);
1684 :
1685 : /* get fixed size of text string from property */
1686 10 : text_size = self->tensors_info.info[0].dimension[0];
1687 10 : if (text_size == 0) {
1688 0 : GST_ERROR_OBJECT (self,
1689 : "Failed to get tensor info, need to update string size.");
1690 :
1691 0 : ml_loge
1692 : ("tensor_converter: Text streams, unlike video streams, require the property input-dim to be set manually. For example, set input-dim=30 to handle up to 30 bytes of string per frame.");
1693 0 : return FALSE;
1694 : }
1695 :
1696 10 : format_string = gst_structure_get_string (structure, "format");
1697 10 : if (format_string) {
1698 10 : if (g_ascii_strcasecmp (format_string, "utf8") == 0) {
1699 10 : config->info.info[0].type = _NNS_UINT8;
1700 : } else {
1701 0 : GST_WARNING_OBJECT (self,
1702 : "For text streams, only utf8 streams are supported; format = \"%s\" is not supported.\n",
1703 : format_string);
1704 0 : return FALSE;
1705 : }
1706 : }
1707 :
1708 10 : config->info.num_tensors = 1;
1709 :
1710 : /* [size][frames] */
1711 : /* Fixed size of string, we cannot get the size from caps. */
1712 10 : config->info.info[0].dimension[0] = text_size;
1713 :
1714 : /* Assume 1 frame per tensor; dimension[1] is changed if the tensor contains N frames. */
1715 10 : config->info.info[0].dimension[1] = 1;
1716 150 : for (i = 2; i < NNS_TENSOR_RANK_LIMIT; i++)
1717 140 : config->info.info[0].dimension[i] = 0;
1718 :
1719 10 : if (gst_structure_has_field (structure, "framerate")) {
1720 1 : gst_structure_get_fraction (structure, "framerate", &config->rate_n,
1721 1 : &config->rate_d);
1722 : } else {
1723 : /* cannot get the framerate for text type */
1724 9 : config->rate_n = 0;
1725 9 : config->rate_d = 1;
1726 : }
1727 :
1728 10 : self->frame_size = gst_tensor_info_get_size (&config->info.info[0]);
1729 10 : return (config->info.info[0].type != _NNS_END);
1730 : }
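
/**
 * Illustrative sketch, not part of the measured source: text caps carry no
 * size, so input-dim must be set on the element as the error message above
 * suggests. The hypothetical pipeline below (built with gst_parse_launch,
 * relying on the GStreamer headers this file already includes) reserves
 * 30 bytes of utf8 text per frame.
 */
static GstElement *
example_text_pipeline (GError ** error)
{
  /* input-dim=30 fixes the per-frame string size to 30 bytes */
  return gst_parse_launch
      ("appsrc ! text/x-raw,format=utf8 ! "
      "tensor_converter input-dim=30 ! fakesink", error);
}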
1731 :
1732 : /**
1733 : * @brief Set the tensors config structure from octet stream (internal static function)
1734 : * @param self this pointer to GstTensorConverter
1735 : * @param config tensors config structure to be filled
1736 : * @param structure caps structure
1737 : * @note Change tensors dimension and type.
1738 : * @return TRUE if supported type
1739 : */
1740 : static gboolean
1741 116 : gst_tensor_converter_parse_octet (GstTensorConverter * self,
1742 : GstTensorsConfig * config, const GstStructure * structure)
1743 : {
1744 116 : GstTensorsInfo *info = &self->tensors_info;
1745 : GstTensorsConfig peer;
1746 : gboolean flexible, configured;
1747 : guint i;
1748 :
1749 232 : g_return_val_if_fail (config != NULL, FALSE);
1750 116 : g_return_val_if_fail (structure != NULL, FALSE);
1751 :
1752 116 : gst_tensors_config_init (config);
1753 116 : flexible = configured = FALSE;
1754 :
1755 : /* get possible tensors info from peer if no property is given */
1756 116 : if (!gst_tensors_info_validate (info)) {
1757 22 : if (gst_tensors_config_from_peer (self->srcpad, &peer, NULL)) {
1758 6 : flexible = gst_tensors_config_is_flexible (&peer);
1759 6 : configured = gst_tensors_info_validate (&peer.info);
1760 :
1761 6 : if (configured)
1762 6 : info = &peer.info;
1763 : }
1764 :
1765 22 : if (!flexible && !configured) {
1766 16 : GST_ERROR_OBJECT (self,
1767 : "Failed to get tensor info, need to update dimension and type.");
1768 :
1769 16 : ml_loge
1770 : ("tensor_converter: Please set the properties input-dim and input-type to convert an application/octet-stream to non-flexible other/tensors. Use other/tensors,format=flexible if you want flexible dimensions. For static (non-flexible) tensors, you may, for example, use input-dim=30,input-type=uint8 to handle 30 bytes of binary data as a single frame.");
1771 16 : return FALSE;
1772 : }
1773 : }
1774 :
1775 100 : if (self->frames_per_tensor > 1) {
1776 : /**
1777 : * Failure case when octet-stream has multi tensors and multi frames.
1778 : */
1779 8 : if (info->num_tensors > 1) {
1780 4 : ml_loge
1781 : ("tensor_converter: Cannot configure multiple tensors (num_tensors = %u) from an application/octet-stream input with frames_per_tensor (= %u) > 1. Please set the property frames-per-tensor to 1 to convert the stream to multiple tensors (num_tensors > 1).",
1782 : info->num_tensors, self->frames_per_tensor);
1783 4 : return FALSE;
1784 : }
1785 4 : if (flexible) {
1786 4 : ml_loge
1787 : ("tensor_converter: Cannot configure flexible tensors from an application/octet-stream input with frames_per_tensor (%u) > 1. Please set the property frames-per-tensor to 1 to convert the stream to flexible tensors.",
1788 : self->frames_per_tensor);
1789 4 : return FALSE;
1790 : }
1791 : }
1792 :
1793 92 : if (gst_structure_has_field (structure, "framerate")) {
1794 4 : gst_structure_get_fraction (structure, "framerate", &config->rate_n,
1795 4 : &config->rate_d);
1796 : } else {
1797 : /* cannot get the framerate */
1798 88 : config->rate_n = 0;
1799 88 : config->rate_d = 1;
1800 : }
1801 :
1802 : /**
1803 : * Raw byte-stream (application/octet-stream)
1804 : * We cannot get the exact tensors info from caps.
1805 : * All tensors info should be updated.
1806 : * If output is flexible, dimension should be updated in chain function with buffer size.
1807 : * (data format for tensor: [size])
1808 : */
1809 92 : if (flexible) {
1810 1 : config->info.format = _NNS_TENSOR_FORMAT_FLEXIBLE;
1811 :
1812 1 : config->info.num_tensors = 1;
1813 1 : config->info.info[0].type = _NNS_UINT8;
1814 1 : config->info.info[0].dimension[0] = 1;
1815 16 : for (i = 1; i < NNS_TENSOR_RANK_LIMIT; i++)
1816 15 : config->info.info[0].dimension[i] = 0;
1817 : } else {
1818 91 : gst_tensors_info_copy (&config->info, info);
1819 91 : self->frame_size = gst_tensors_info_get_size (&config->info, -1);
1820 : }
1821 :
1822 92 : return TRUE;
1823 : }
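
/**
 * Illustrative sketch, not part of the measured source: an
 * application/octet-stream input carries no shape information, so either fix
 * the shape through properties (static tensors, as in the hypothetical
 * pipeline below) or request a flexible output downstream with
 * other/tensors,format=flexible.
 */
static GstElement *
example_octet_pipeline (GError ** error)
{
  /* every 30 incoming bytes become one static uint8 tensor frame */
  return gst_parse_launch
      ("appsrc ! application/octet-stream ! "
      "tensor_converter input-dim=30 input-type=uint8 ! fakesink", error);
}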
1824 :
1825 : /**
1826 : * @brief Set the tensors config structure from flexible tensor stream (internal static function)
1827 : * @param self this pointer to GstTensorConverter
1828 : * @param config tensors config structure to be filled
1829 : * @param structure caps structure
1830 : * @note Change tensors dimension and type.
1831 : * @return TRUE if supported type
1832 : */
1833 : static gboolean
1834 5 : gst_tensor_converter_parse_tensor (GstTensorConverter * self,
1835 : GstTensorsConfig * config, const GstStructure * structure)
1836 : {
1837 5 : GstTensorsInfo *info = &self->tensors_info;
1838 : guint i;
1839 :
1840 5 : g_return_val_if_fail (config != NULL, FALSE);
1841 5 : g_return_val_if_fail (structure != NULL, FALSE);
1842 :
1843 5 : gst_tensors_config_init (config);
1844 :
1845 5 : if (self->frames_per_tensor > 1) {
1846 0 : ml_loge
1847 : ("tensor_converter: Cannot configure multiple tensors when frames-per-tensor (%u) is not 1. Please set the property frames-per-tensor to 1 to convert the stream into a tensor stream with num_tensors > 1.",
1848 : self->frames_per_tensor);
1849 0 : return FALSE;
1850 : }
1851 :
1852 : /* update tensor info from properties */
1853 5 : if (gst_tensors_info_validate (info)) {
1854 3 : gst_tensors_info_copy (&config->info, info);
1855 3 : self->frame_size = gst_tensors_info_get_size (&config->info, -1);
1856 : } else {
1857 : /**
1858 : * We cannot get the exact tensors info from caps.
1859 : * All tensors info should be updated in chain function.
1860 : * (data format for tensor: [size])
1861 : */
1862 2 : config->info.num_tensors = 1;
1863 2 : config->info.info[0].type = _NNS_UINT8;
1864 2 : config->info.info[0].dimension[0] = 1;
1865 32 : for (i = 1; i < NNS_TENSOR_RANK_LIMIT; i++)
1866 30 : config->info.info[0].dimension[i] = 0;
1867 : }
1868 :
1869 5 : if (gst_structure_has_field (structure, "framerate")) {
1870 5 : gst_structure_get_fraction (structure, "framerate", &config->rate_n,
1871 5 : &config->rate_d);
1872 : } else {
1873 : /* cannot get the framerate */
1874 0 : config->rate_n = 0;
1875 0 : config->rate_d = 1;
1876 : }
1877 :
1878 5 : return TRUE;
1879 : }
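
/**
 * Illustrative sketch, not part of the measured source: a flexible tensor
 * stream can be converted back to a static one by supplying the expected
 * shape through properties; without input-dim/input-type the shape is taken
 * per buffer in the chain function. The dimension used below is a
 * hypothetical example.
 */
static GstElement *
example_flex_to_static_pipeline (GError ** error)
{
  return gst_parse_launch
      ("appsrc ! other/tensors,format=flexible,framerate=30/1 ! "
      "tensor_converter input-dim=3:224:224:1 input-type=uint8 ! fakesink",
      error);
}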
1880 :
1881 : /**
1882 : * @brief Set the tensors config structure from caps (internal static function for custom mode)
1883 : * @param self this pointer to GstTensorConverter
1884 : * @param config tensors config structure to be filled
1885 : * @param caps incoming caps
1886 : * @return TRUE if supported type
1887 : */
1888 : static gboolean
1889 76 : gst_tensor_converter_parse_custom (GstTensorConverter * self,
1890 : GstTensorsConfig * config, const GstCaps * caps)
1891 : {
1892 : GstStructure *structure;
1893 : const gchar *mimetype;
1894 76 : gboolean is_fixed = FALSE;
1895 :
1896 152 : g_return_val_if_fail (config != NULL, FALSE);
1897 76 : g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);
1898 :
1899 76 : gst_tensors_config_from_peer (self->srcpad, config, &is_fixed);
1900 :
1901 76 : structure = gst_caps_get_structure (caps, 0);
1902 76 : mimetype = gst_structure_get_name (structure);
1903 :
1904 76 : if (self->mode == _CONVERTER_MODE_CUSTOM_CODE) {
1905 1 : if (!is_fixed) {
1906 1 : gst_tensors_config_init (config);
1907 : /* All tensor info should be updated later in chain function. */
1908 1 : config->info.num_tensors = 1;
1909 1 : config->info.info[0].type = _NNS_UINT8;
1910 1 : gst_tensor_parse_dimension ("1:1:1:1", config->info.info[0].dimension);
1911 :
1912 1 : if (gst_structure_has_field (structure, "framerate")) {
1913 0 : gst_structure_get_fraction (structure, "framerate", &config->rate_n,
1914 0 : &config->rate_d);
1915 : } else {
1916 : /* cannot get the framerate */
1917 1 : config->rate_n = 0;
1918 1 : config->rate_d = 1;
1919 : }
1920 : }
1921 75 : } else if (!self->externalConverter) {
1922 : const NNStreamerExternalConverter *ex;
1923 75 : if (self->mode == _CONVERTER_MODE_CUSTOM_SCRIPT) {
1924 18 : mimetype = self->ext_fw;
1925 : }
1926 75 : if (!(ex = findExternalConverter (mimetype))) {
1927 0 : ml_loge
1928 : ("tensor_converter: Failed to find an external converter corresponding to the media type %s. The custom converter mode is %s with \"%s\".",
1929 : mimetype, STRING_CUSTOM_MODE (self), self->mode_option);
1930 0 : return FALSE;
1931 : }
1932 :
1933 75 : if (!is_fixed) {
1934 75 : if (!ex->get_out_config) {
1935 0 : ml_loge
1936 : ("tensor_converter: Failed to get tensors info from %s (%s:%s). Its corresponding external converter was found, but its mandatory callback, get_out_config, is not available.",
1937 : mimetype, STRING_CUSTOM_MODE (self), self->mode_option);
1938 0 : return FALSE;
1939 : }
1940 75 : if (!ex->get_out_config (caps, config)) {
1941 0 : char *capstr = gst_caps_to_string (caps);
1942 0 : ml_loge
1943 : ("tensor_converter: Failed to get tensors info from %s (%s:%s). Its corresponding external converter was found, but its mandatory callback, get_out_config(\"%s\", config), returned FALSE (cannot get config from the caps).",
1944 : mimetype, STRING_CUSTOM_MODE (self), self->mode_option, capstr);
1945 0 : g_free (capstr);
1946 0 : return FALSE;
1947 : }
1948 : }
1949 :
1950 75 : self->externalConverter = ex;
1951 75 : if (self->mode == _CONVERTER_MODE_CUSTOM_SCRIPT) {
1952 18 : int ret = 0;
1953 18 : if (self->externalConverter->open &&
1954 18 : (ret = self->externalConverter->open (self->mode_option,
1955 : &self->priv_data)) < 0) {
1956 6 : ml_loge
1957 : ("tensor_converter: Failed to open tensor converter custom subplugin: custom-script mode with \"%s\" for \"%s\" (%s) has an 'open' callback; however, it returned error %d.\n",
1958 : self->mode_option, mimetype, self->externalConverter->name, ret);
1959 6 : self->externalConverter = NULL;
1960 6 : return FALSE;
1961 : }
1962 : }
1963 : }
1964 :
1965 70 : return TRUE;
1966 : }
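
/**
 * Illustrative sketch, not part of the measured source: in custom mode the
 * converter delegates parsing either to a registered callback (custom-code)
 * or to an external converter subplugin (custom-script). The property name
 * "mode" and the "custom-script:<path>" value syntax below are assumptions
 * inferred from self->mode_option above; check the element documentation for
 * the exact form. "converter.py" is a hypothetical script path.
 */
static GstElement *
example_custom_script_pipeline (GError ** error)
{
  return gst_parse_launch
      ("appsrc ! application/octet-stream ! "
      "tensor_converter mode=custom-script:converter.py ! fakesink", error);
}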
1967 :
1968 : /**
1969 : * @brief Get possible media-caps from downstream element.
1970 : */
1971 : static GstCaps *
1972 4178 : gst_tensor_converter_get_possible_media_caps (GstTensorConverter * self)
1973 : {
1974 4178 : GstCaps *media_caps = NULL;
1975 : GstTensorsConfig config;
1976 :
1977 : /* get possible caps from downstream element */
1978 4178 : if (gst_tensors_config_from_peer (self->srcpad, &config, NULL)) {
1979 : GstStructure *st;
1980 : guint i, caps_len;
1981 : media_type type;
1982 :
1983 : /* convert peer caps to possible media caps */
1984 2137 : media_caps = gst_pad_get_pad_template_caps (self->sinkpad);
1985 2137 : media_caps = gst_caps_make_writable (media_caps);
1986 :
1987 2137 : caps_len = gst_caps_get_size (media_caps);
1988 :
1989 21370 : for (i = 0; i < caps_len; ++i) {
1990 19233 : st = gst_caps_get_structure (media_caps, i);
1991 19233 : type = gst_structure_get_media_type (st);
1992 :
1993 19233 : switch (type) {
1994 2137 : case _NNS_VIDEO:
1995 : /* video caps from tensor info */
1996 : if (is_video_supported (self)) {
1997 2137 : GValue supported_formats = G_VALUE_INIT;
1998 : gint colorspace, width, height;
1999 :
2000 2137 : colorspace = config.info.info[0].dimension[0];
2001 2137 : width = config.info.info[0].dimension[1];
2002 2137 : height = config.info.info[0].dimension[2];
2003 :
2004 2137 : switch (colorspace) {
2005 9 : case 1:
2006 9 : gst_tensor_converter_get_format_list (&supported_formats,
2007 : "GRAY8", "GRAY16_BE", "GRAY16_LE", NULL);
2008 9 : break;
2009 303 : case 3:
2010 303 : gst_tensor_converter_get_format_list (&supported_formats,
2011 : "RGB", "BGR", NULL);
2012 303 : break;
2013 0 : case 4:
2014 0 : gst_tensor_converter_get_format_list (&supported_formats,
2015 : "RGBx", "BGRx", "xRGB", "xBGR", "RGBA", "BGRA", "ARGB",
2016 : "ABGR", NULL);
2017 0 : break;
2018 1825 : default:
2019 : /* unsupported format, set default video formats */
2020 1825 : break;
2021 : }
2022 :
2023 2137 : if (G_VALUE_TYPE (&supported_formats) == GST_TYPE_LIST &&
2024 312 : gst_value_list_get_size (&supported_formats) > 0) {
2025 312 : gst_structure_set_value (st, "format", &supported_formats);
2026 : }
2027 2137 : g_value_unset (&supported_formats);
2028 :
2029 2137 : if (width > 0) {
2030 317 : gst_structure_set (st, "width", G_TYPE_INT, width, NULL);
2031 : }
2032 :
2033 2137 : if (height > 0) {
2034 308 : gst_structure_set (st, "height", G_TYPE_INT, height, NULL);
2035 : }
2036 :
2037 2137 : if (config.rate_n >= 0 && config.rate_d > 0) {
2038 315 : gst_structure_set (st, "framerate", GST_TYPE_FRACTION,
2039 : config.rate_n, config.rate_d, NULL);
2040 : }
2041 :
2042 : /* add new structure for NCHW formats */
2043 : #if GST_CHECK_VERSION(1, 20, 0)
2044 2137 : width = config.info.info[0].dimension[0];
2045 2137 : height = config.info.info[0].dimension[1];
2046 2137 : colorspace = config.info.info[0].dimension[2];
2047 :
2048 2137 : if (colorspace == 3) {
2049 5 : GValue nchw_format = G_VALUE_INIT;
2050 5 : GstStructure *nchw_st = gst_structure_copy (st);
2051 :
2052 5 : gst_tensor_converter_get_format_list (&nchw_format,
2053 : "RGBP", "BGRP", NULL);
2054 :
2055 5 : if (G_VALUE_TYPE (&nchw_format) == GST_TYPE_LIST &&
2056 5 : gst_value_list_get_size (&nchw_format) > 0) {
2057 5 : gst_structure_set_value (nchw_st, "format", &nchw_format);
2058 : }
2059 5 : g_value_unset (&nchw_format);
2060 :
2061 5 : if (width > 0) {
2062 5 : gst_structure_set (nchw_st, "width", G_TYPE_INT, width, NULL);
2063 : }
2064 :
2065 5 : if (height > 0) {
2066 5 : gst_structure_set (nchw_st, "height", G_TYPE_INT, height, NULL);
2067 : }
2068 5 : gst_caps_append_structure (media_caps, nchw_st);
2069 : }
2070 : #endif
2071 : }
2072 2137 : break;
2073 2137 : case _NNS_AUDIO:
2074 : /* audio caps from tensor info */
2075 2137 : if (is_audio_supported (self)
2076 2137 : && config.info.info[0].type != _NNS_END) {
2077 : gint ch, rate;
2078 : GstAudioFormat aformat;
2079 :
2080 320 : switch (config.info.info[0].type) {
2081 0 : case _NNS_INT8:
2082 0 : aformat = GST_AUDIO_FORMAT_S8;
2083 0 : break;
2084 263 : case _NNS_UINT8:
2085 263 : aformat = GST_AUDIO_FORMAT_U8;
2086 263 : break;
2087 0 : case _NNS_INT16:
2088 0 : aformat = GST_AUDIO_FORMAT_S16;
2089 0 : break;
2090 0 : case _NNS_UINT16:
2091 0 : aformat = GST_AUDIO_FORMAT_U16;
2092 0 : break;
2093 0 : case _NNS_INT32:
2094 0 : aformat = GST_AUDIO_FORMAT_S32;
2095 0 : break;
2096 0 : case _NNS_UINT32:
2097 0 : aformat = GST_AUDIO_FORMAT_U32;
2098 0 : break;
2099 9 : case _NNS_FLOAT16:
2100 9 : aformat = GST_AUDIO_FORMAT_UNKNOWN;
2101 9 : ml_loge
2102 : ("tensor_converter: Audio streams cannot be converted to float16 directly because GStreamer's standard audio formats do not include float16. Use float32 or float64 instead and cast it to float16 later (e.g., with tensor_transform).\n");
2103 9 : break;
2104 48 : case _NNS_FLOAT32:
2105 48 : aformat = GST_AUDIO_FORMAT_F32;
2106 48 : break;
2107 0 : case _NNS_FLOAT64:
2108 0 : aformat = GST_AUDIO_FORMAT_F64;
2109 0 : break;
2110 0 : default:
2111 : /* unsupported format */
2112 0 : aformat = GST_AUDIO_FORMAT_UNKNOWN;
2113 0 : break;
2114 : }
2115 :
2116 320 : if (aformat != GST_AUDIO_FORMAT_UNKNOWN) {
2117 311 : gst_structure_set (st, "format", G_TYPE_STRING,
2118 : gst_audio_format_to_string (aformat), NULL);
2119 :
2120 311 : if ((ch = config.info.info[0].dimension[0]) > 0) {
2121 306 : gst_structure_set (st, "channels", G_TYPE_INT, ch, NULL);
2122 : }
2123 :
2124 311 : if ((rate = config.rate_n) > 0) {
2125 3 : gst_structure_set (st, "rate", G_TYPE_INT, rate, NULL);
2126 : }
2127 : }
2128 : }
2129 2137 : break;
2130 14959 : default:
2131 : /* do nothing for text and octet stream */
2132 14959 : break;
2133 : }
2134 : }
2135 : }
2136 :
2137 4178 : return media_caps;
2138 : }
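
/**
 * Illustrative sketch, not part of the measured source: the function above
 * works backwards from the peer's tensor caps. In the hypothetical pipeline
 * below the trailing capsfilter fixes the tensor to dimension 3:640:480:1 of
 * uint8, so during negotiation the sink pad only offers RGB/BGR video of
 * 640x480 to the upstream element.
 */
static GstElement *
example_constrained_pipeline (GError ** error)
{
  return gst_parse_launch
      ("videotestsrc ! tensor_converter ! "
      "other/tensor,dimension=(string)3:640:480:1,type=(string)uint8 ! "
      "fakesink", error);
}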
2139 :
2140 : /**
2141 : * @brief Get pad caps for caps negotiation.
2142 : */
2143 : static GstCaps *
2144 7594 : gst_tensor_converter_query_caps (GstTensorConverter * self, GstPad * pad,
2145 : GstCaps * filter)
2146 : {
2147 : GstCaps *caps;
2148 :
2149 7594 : caps = gst_pad_get_current_caps (pad);
2150 7594 : if (!caps) {
2151 7074 : caps = gst_pad_get_pad_template_caps (pad);
2152 : }
2153 :
2154 7594 : if (pad == self->sinkpad) {
2155 : GstCaps *media_caps;
2156 :
2157 4178 : media_caps = gst_tensor_converter_get_possible_media_caps (self);
2158 4178 : if (media_caps) {
2159 : /* intersect with pad caps */
2160 2137 : GstCaps *tmp = gst_caps_intersect_full (media_caps, caps,
2161 : GST_CAPS_INTERSECT_FIRST);
2162 2137 : gst_caps_unref (caps);
2163 2137 : caps = tmp;
2164 :
2165 2137 : gst_caps_unref (media_caps);
2166 : }
2167 : }
2168 :
2169 7594 : silent_debug_caps (self, caps, "caps");
2170 7594 : silent_debug_caps (self, filter, "filter");
2171 :
2172 7594 : if (filter) {
2173 : GstCaps *intersection;
2174 :
2175 : intersection =
2176 2173 : gst_caps_intersect_full (filter, caps, GST_CAPS_INTERSECT_FIRST);
2177 :
2178 2173 : gst_caps_unref (caps);
2179 2173 : caps = intersection;
2180 : }
2181 :
2182 7594 : silent_debug_caps (self, caps, "result");
2183 7594 : return caps;
2184 : }
2185 :
2186 : /**
2187 : * @brief Parse caps and set tensors info.
2188 : */
2189 : static gboolean
2190 864 : gst_tensor_converter_parse_caps (GstTensorConverter * self,
2191 : const GstCaps * caps)
2192 : {
2193 : GstStructure *structure;
2194 : GstTensorsConfig config;
2195 : media_type in_type;
2196 864 : gint frames_dim = -1; /** dimension index of frames in configured tensors */
2197 :
2198 1728 : g_return_val_if_fail (caps != NULL, FALSE);
2199 864 : g_return_val_if_fail (gst_caps_is_fixed (caps), FALSE);
2200 :
2201 864 : structure = gst_caps_get_structure (caps, 0);
2202 864 : if (self->mode != _CONVERTER_MODE_NONE) {
2203 19 : in_type = _NNS_MEDIA_ANY;
2204 : } else {
2205 845 : in_type = gst_structure_get_media_type (structure);
2206 : }
2207 :
2208 864 : switch (in_type) {
2209 620 : case _NNS_VIDEO:
2210 : if (is_video_supported (self)) {
2211 620 : if (!gst_tensor_converter_parse_video (self, caps, &config)) {
2212 0 : char *capstr = gst_caps_to_string (caps);
2213 0 : GST_ERROR_OBJECT (self,
2214 : "Failed to configure tensor from gst cap \"%s\" for video streams.",
2215 : capstr);
2216 0 : g_free (capstr);
2217 0 : return FALSE;
2218 : }
2219 :
2220 620 : frames_dim = 3;
2221 : } else {
2222 : ml_loge
2223 : ("tensor_converter: This binary does not support video types. Please build NNStreamer with -Dvideo-support=enabled (the default); this binary was built with -Dvideo-support=disabled.\n");
2224 : return FALSE;
2225 : }
2226 620 : break;
2227 37 : case _NNS_AUDIO:
2228 : if (is_audio_supported (self)) {
2229 37 : if (!gst_tensor_converter_parse_audio (self, caps, &config)) {
2230 0 : char *capstr = gst_caps_to_string (caps);
2231 0 : GST_ERROR_OBJECT (self,
2232 : "Failed to configure tensor from gst cap \"%s\" for audio streams.",
2233 : capstr);
2234 0 : g_free (capstr);
2235 0 : return FALSE;
2236 : }
2237 :
2238 37 : frames_dim = 1;
2239 : } else {
2240 : ml_loge
2241 : ("tensor_converter: This binary does not support audio types. Please build NNStreamer with -Daudio-support=enabled (the default); this binary was built with -Daudio-support=disabled.\n");
2242 : return FALSE;
2243 : }
2244 37 : break;
2245 10 : case _NNS_TEXT:
2246 10 : if (!gst_tensor_converter_parse_text (self, &config, structure)) {
2247 0 : char *capstr = gst_caps_to_string (caps);
2248 0 : GST_ERROR_OBJECT (self,
2249 : "Failed to configure tensor from gst cap \"%s\" for text streams.",
2250 : capstr);
2251 0 : g_free (capstr);
2252 0 : return FALSE;
2253 : }
2254 :
2255 10 : frames_dim = 1;
2256 10 : break;
2257 116 : case _NNS_OCTET:
2258 116 : if (!gst_tensor_converter_parse_octet (self, &config, structure)) {
2259 24 : char *capstr = gst_caps_to_string (caps);
2260 24 : GST_ERROR_OBJECT (self,
2261 : "Failed to configure tensors from gst cap \"%s\" for octet streams.",
2262 : capstr);
2263 24 : g_free (capstr);
2264 24 : return FALSE;
2265 : }
2266 92 : break;
2267 5 : case _NNS_TENSOR:
2268 : /* flexible tensor to static tensor stream */
2269 5 : if (!gst_tensor_converter_parse_tensor (self, &config, structure)) {
2270 0 : char *capstr = gst_caps_to_string (caps);
2271 0 : GST_ERROR_OBJECT (self,
2272 : "Failed to configure a static tensor from the flexible tensor stream with gst cap (%s).",
2273 : capstr);
2274 0 : g_free (capstr);
2275 0 : return FALSE;
2276 : }
2277 5 : break;
2278 76 : default:
2279 76 : if (!gst_tensor_converter_parse_custom (self, &config, caps)) {
2280 6 : char *capstr = gst_caps_to_string (caps);
2281 6 : GST_ERROR_OBJECT (self,
2282 : "Failed to configure tensors in custom mode from a stream with gst cap (%s) using custom converter subplugins.",
2283 : capstr);
2284 6 : g_free (capstr);
2285 6 : return FALSE;
2286 : }
2287 70 : in_type = _NNS_MEDIA_ANY;
2288 70 : break;
2289 : }
2290 :
2291 : /** set the number of frames in dimension */
2292 834 : if (frames_dim >= 0) {
2293 667 : config.info.info[0].dimension[frames_dim] = self->frames_per_tensor;
2294 : }
2295 :
2296 834 : if (!gst_tensors_config_validate (&config)) {
2297 : /** not fully configured. the resulting config is weird. */
2298 0 : char *capstr = gst_caps_to_string (caps);
2299 0 : char *cfgstr = gst_tensors_config_to_string (&config);
2300 0 : GST_ERROR_OBJECT (self,
2301 : "Failed to configure tensors info with gst cap (%s). Cannot validate tensor configuration acquired from the given gst cap. The resulting invalid tensor config is: %s\n",
2302 : capstr, cfgstr);
2303 0 : g_free (capstr);
2304 0 : g_free (cfgstr);
2305 0 : return FALSE;
2306 : }
2307 :
2308 834 : if (gst_tensors_info_validate (&self->tensors_info)) {
2309 : /** compare tensor info */
2310 93 : if (!gst_tensors_info_is_equal (&self->tensors_info, &config.info)) {
2311 0 : gchar *str1 = gst_tensors_info_to_string (&self->tensors_info);
2312 0 : gchar *str2 = gst_tensors_info_to_string (&config.info);
2313 0 : GST_ERROR_OBJECT (self,
2314 : "Failed, mismatched tensor info. The two tensor configurations should match: \"configured by properties and tensor output\": %s and \"configured by input stream\": %s\n",
2315 : str1, str2);
2316 0 : g_free (str1);
2317 0 : g_free (str2);
2318 0 : return FALSE;
2319 : }
2320 : }
2321 :
2322 834 : self->in_media_type = in_type;
2323 834 : self->tensors_configured = TRUE;
2324 834 : self->tensors_config = config;
2325 :
2326 834 : return TRUE;
2327 : }
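
/**
 * Illustrative sketch, not part of the measured source: frames_dim above
 * selects which dimension carries the frame count, so the frames-per-tensor
 * property batches several media frames into one tensor. In the hypothetical
 * pipeline below each output tensor holds two RGB frames, i.e. dimension
 * 3:320:240:2.
 */
static GstElement *
example_batched_video_pipeline (GError ** error)
{
  return gst_parse_launch
      ("videotestsrc ! video/x-raw,format=RGB,width=320,height=240 ! "
      "tensor_converter frames-per-tensor=2 ! fakesink", error);
}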
2328 :
2329 : /**
2330 : * @brief Update src pad caps from tensors config.
2331 : */
2332 : static void
2333 908 : gst_tensor_converter_update_caps (GstTensorConverter * self)
2334 : {
2335 : GstTensorsConfig *config;
2336 : GstCaps *curr_caps, *out_caps;
2337 :
2338 908 : config = &self->tensors_config;
2339 908 : out_caps = gst_tensor_pad_caps_from_config (self->srcpad, config);
2340 :
2341 : /* Update src pad caps if it is different. */
2342 908 : curr_caps = gst_pad_get_current_caps (self->srcpad);
2343 908 : if (curr_caps == NULL || !gst_caps_is_equal (curr_caps, out_caps)) {
2344 908 : silent_debug_caps (self, out_caps, "set out-caps");
2345 908 : gst_pad_set_caps (self->srcpad, out_caps);
2346 : }
2347 :
2348 908 : if (curr_caps)
2349 74 : gst_caps_unref (curr_caps);
2350 :
2351 908 : gst_caps_unref (out_caps);
2352 908 : }
2353 :
2354 : /**
2355 : * @brief Find converter sub-plugin with the name.
2356 : * @param[in] name The name of converter sub-plugin.
2357 : * @return NULL if not found or the sub-plugin object has an error.
2358 : */
2359 : const NNStreamerExternalConverter *
2360 1884 : nnstreamer_converter_find (const char *name)
2361 : {
2362 1884 : return get_subplugin (NNS_SUBPLUGIN_CONVERTER, name);
2363 : }
2364 :
2365 : /**
2366 : * @brief Validate converter sub-plugin's data.
2367 : */
2368 : static gboolean
2369 1687 : nnstreamer_converter_validate (const NNStreamerExternalConverter * converter)
2370 : {
2371 1687 : if (!converter || !converter->name) {
2372 : /* invalid name */
2373 2 : return FALSE;
2374 : }
2375 :
2376 1685 : if (!converter->query_caps || !converter->get_out_config
2377 1683 : || !converter->convert) {
2378 : /* invalid methods in converter sub-plugin */
2379 3 : return FALSE;
2380 : }
2381 :
2382 1682 : return TRUE;
2383 : }
2384 :
2385 : /**
2386 : * @brief Converter's external subplugins should call this at init.
2387 : */
2388 : int
2389 1687 : registerExternalConverter (NNStreamerExternalConverter * ex)
2390 : {
2391 1687 : g_return_val_if_fail (nnstreamer_converter_validate (ex), FALSE);
2392 1682 : return register_subplugin (NNS_SUBPLUGIN_CONVERTER, ex->name, ex);
2393 : }
2394 :
2395 : /**
2396 : * @brief Converter's external subplugins should call this at exit.
2397 : */
2398 : void
2399 1681 : unregisterExternalConverter (const char *name)
2400 : {
2401 1681 : unregister_subplugin (NNS_SUBPLUGIN_CONVERTER, name);
2402 1681 : }
2403 :
2404 : /**
2405 : * @brief Internal static function to find registered subplugins.
2406 : */
2407 : static const NNStreamerExternalConverter *
2408 75 : findExternalConverter (const char *media_type)
2409 : {
2410 : gchar **str_array;
2411 : guint total, i, j, caps_size;
2412 : GstCaps *caps;
2413 : const gchar *caps_name;
2414 : const NNStreamerExternalConverter *ex;
2415 :
2416 75 : str_array = get_all_subplugins (NNS_SUBPLUGIN_CONVERTER);
2417 75 : if (str_array) {
2418 75 : total = g_strv_length (str_array);
2419 :
2420 188 : for (i = 0; i < total; i++) {
2421 188 : ex = nnstreamer_converter_find (str_array[i]);
2422 :
2423 188 : if (g_strcmp0 (media_type, str_array[i]) == 0) {
2424 : /* found matched media type */
2425 18 : g_strfreev (str_array);
2426 18 : return ex;
2427 : }
2428 :
2429 170 : if (ex && ex->query_caps) {
2430 170 : caps = ex->query_caps (NULL);
2431 170 : caps_size = gst_caps_get_size (caps);
2432 :
2433 283 : for (j = 0; j < caps_size; j++) {
2434 170 : caps_name = gst_structure_get_name (gst_caps_get_structure (caps, j));
2435 170 : if (g_strcmp0 (media_type, caps_name) == 0) {
2436 : /* found matched media type */
2437 57 : gst_caps_unref (caps);
2438 57 : g_strfreev (str_array);
2439 57 : return ex;
2440 : }
2441 : }
2442 :
2443 113 : gst_caps_unref (caps);
2444 : }
2445 : }
2446 :
2447 0 : g_strfreev (str_array);
2448 : }
2449 :
2450 0 : return NULL;
2451 : }
2452 :
2453 : /**
2454 : * @brief set custom property description for tensor converter sub-plugin
2455 : */
2456 : void
2457 0 : nnstreamer_converter_set_custom_property_desc (const char *name,
2458 : const char *prop, ...)
2459 : {
2460 : va_list varargs;
2461 :
2462 0 : va_start (varargs, prop);
2463 0 : subplugin_set_custom_property_desc (NNS_SUBPLUGIN_CONVERTER, name, prop,
2464 : varargs);
2465 0 : va_end (varargs);
2466 0 : }
2467 :
2468 : /**
2469 : * @brief Registers a callback for tensor_converter custom condition
2470 : * @return 0 if success. -ERRNO if error.
2471 : */
2472 : int
2473 5 : nnstreamer_converter_custom_register (const gchar * name,
2474 : tensor_converter_custom func, void *data)
2475 : {
2476 : converter_custom_cb_s *ptr;
2477 :
2478 5 : g_return_val_if_fail (name && strlen (name), -EINVAL);
2479 4 : g_return_val_if_fail (func, -EINVAL);
2480 :
2481 3 : ptr = g_new0 (converter_custom_cb_s, 1);
2482 3 : ptr->func = func;
2483 3 : ptr->data = data;
2484 :
2485 3 : if (register_subplugin (NNS_CUSTOM_CONVERTER, name, ptr))
2486 2 : return 0;
2487 :
2488 1 : g_free (ptr);
2489 1 : ml_loge
2490 : ("tensor_converter: cannot register the custom converter callback \"%s\"; register_subplugin () failed to register \"%s\".",
2491 : name, name);
2492 1 : return -EINVAL;
2493 : }
2494 :
2495 : /**
2496 : * @brief Unregisters a callback for tensor_converter custom condition
2497 : * @return 0 if success. -ERRNO if error.
2498 : */
2499 : int
2500 4 : nnstreamer_converter_custom_unregister (const gchar * name)
2501 : {
2502 : converter_custom_cb_s *ptr;
2503 :
2504 4 : ptr = (converter_custom_cb_s *) get_subplugin (NNS_CUSTOM_CONVERTER, name);
2505 4 : if (!unregister_subplugin (NNS_CUSTOM_CONVERTER, name)) {
2506 2 : ml_loge ("tensor_converter: Failed to unregister custom callback %s.",
2507 : name);
2508 2 : return -EINVAL;
2509 : }
2510 2 : g_free (ptr);
2511 :
2512 2 : return 0;
2513 : }
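
/**
 * Illustrative sketch, not part of the measured source: typical lifecycle of
 * a custom converter callback. The callback is assumed to be implemented
 * elsewhere with the tensor_converter_custom prototype from the converter
 * plugin API header; only the register/unregister calls shown here are
 * defined in this file, and "my-octet-converter" is a hypothetical name.
 */
static int
example_register_my_converter (tensor_converter_custom cb, void *ctx)
{
  /* returns 0 on success, -EINVAL on invalid arguments or failed registration */
  return nnstreamer_converter_custom_register ("my-octet-converter", cb, ctx);
}

static int
example_unregister_my_converter (void)
{
  /* returns 0 on success, -EINVAL if the name was not registered */
  return nnstreamer_converter_custom_unregister ("my-octet-converter");
}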
|