Line data Source code
1 : /* SPDX-License-Identifier: Apache-2.0 */
2 : /**
3 : * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
4 : *
5 : * @file ml-api-inference-single.c
6 : * @date 29 Aug 2019
7 : * @brief NNStreamer/Single C-API Wrapper.
8 : * This allows to invoke individual input frame with NNStreamer.
9 : * @see https://github.com/nnstreamer/nnstreamer
10 : * @author MyungJoo Ham <myungjoo.ham@samsung.com>
11 : * @author Parichay Kapoor <pk.kapoor@samsung.com>
12 : * @bug No known bugs except for NYI items
13 : */
14 :
15 : #include <string.h>
16 : #include <nnstreamer-single.h>
17 : #include <nnstreamer-tizen-internal.h> /* Tizen platform header */
18 : #include <nnstreamer_internal.h>
19 : #include <nnstreamer_plugin_api_util.h>
20 : #include <tensor_filter_single.h>
21 :
22 : #include "ml-api-inference-internal.h"
23 : #include "ml-api-internal.h"
24 : #include "ml-api-inference-single-internal.h"
25 :
26 : #define ML_SINGLE_MAGIC 0xfeedfeed
27 :
28 : /**
29 : * @brief Default time to wait for an output in milliseconds (0 will wait for the output).
30 : */
31 : #define SINGLE_DEFAULT_TIMEOUT 0
32 :
33 : /**
34 : * @brief Global lock for single shot API
35 : * @detail This lock ensures that ml_single_close is thread safe. All other API
36 : * functions use the mutex from the single handle. However for close,
37 : * single handle mutex cannot be used as single handle is destroyed at
38 : * close
39 : * @note This mutex is automatically initialized as it is statically declared
40 : */
41 : G_LOCK_DEFINE_STATIC (magic);
42 :
43 : /**
44 : * @brief Get valid handle after magic verification
45 : * @note handle's mutex (single_h->mutex) is acquired after this
46 : * @param[out] single_h The handle properly casted: (ml_single *).
47 : * @param[in] single The handle to be validated: (void *).
48 : * @param[in] reset Set TRUE if the handle is to be reset (magic = 0).
49 : */
50 : #define ML_SINGLE_GET_VALID_HANDLE_LOCKED(single_h, single, reset) do { \
51 : G_LOCK (magic); \
52 : single_h = (ml_single *) single; \
53 : if (G_UNLIKELY(single_h->magic != ML_SINGLE_MAGIC)) { \
54 : _ml_error_report \
55 : ("The given param, %s (ml_single_h), is invalid. It is not a single_h instance or the user thread has modified it.", \
56 : #single); \
57 : G_UNLOCK (magic); \
58 : return ML_ERROR_INVALID_PARAMETER; \
59 : } \
60 : if (G_UNLIKELY(reset)) \
61 : single_h->magic = 0; \
62 : g_mutex_lock (&single_h->mutex); \
63 : G_UNLOCK (magic); \
64 : } while (0)
65 :
66 : /**
67 : * @brief This is for the symmetricity with ML_SINGLE_GET_VALID_HANDLE_LOCKED
68 : * @param[in] single_h The casted handle (ml_single *).
69 : */
70 : #define ML_SINGLE_HANDLE_UNLOCK(single_h) g_mutex_unlock (&single_h->mutex);
71 :
72 : /** define string names for input/output */
73 : #define INPUT_STR "input"
74 : #define OUTPUT_STR "output"
75 : #define TYPE_STR "type"
76 : #define NAME_STR "name"
77 :
78 : /** concat string from #define */
79 : #define CONCAT_MACRO_STR(STR1,STR2) STR1 STR2
80 :
81 : /** States for invoke thread */
82 : typedef enum
83 : {
84 : IDLE = 0, /**< ready to accept next input */
85 : RUNNING, /**< running an input, cannot accept more input */
86 : JOIN_REQUESTED /**< should join the thread, will exit soon */
87 : } thread_state;
88 :
89 : /**
90 : * @brief The name of sub-plugin for defined neural net frameworks.
91 : * @note The sub-plugin for Android is not declared (e.g., snap)
92 : */
93 : static const char *ml_nnfw_subplugin_name[] = {
94 : [ML_NNFW_TYPE_ANY] = "any", /* DO NOT use this name ('any') to get the sub-plugin */
95 : [ML_NNFW_TYPE_CUSTOM_FILTER] = "custom",
96 : [ML_NNFW_TYPE_TENSORFLOW_LITE] = "tensorflow-lite",
97 : [ML_NNFW_TYPE_TENSORFLOW] = "tensorflow",
98 : [ML_NNFW_TYPE_NNFW] = "nnfw",
99 : [ML_NNFW_TYPE_MVNC] = "movidius-ncsdk2",
100 : [ML_NNFW_TYPE_OPENVINO] = "openvino",
101 : [ML_NNFW_TYPE_VIVANTE] = "vivante",
102 : [ML_NNFW_TYPE_EDGE_TPU] = "edgetpu",
103 : [ML_NNFW_TYPE_ARMNN] = "armnn",
104 : [ML_NNFW_TYPE_SNPE] = "snpe",
105 : [ML_NNFW_TYPE_PYTORCH] = "pytorch",
106 : [ML_NNFW_TYPE_NNTR_INF] = "nntrainer",
107 : [ML_NNFW_TYPE_VD_AIFW] = "vd_aifw",
108 : [ML_NNFW_TYPE_TRIX_ENGINE] = "trix-engine",
109 : [ML_NNFW_TYPE_MXNET] = "mxnet",
110 : [ML_NNFW_TYPE_TVM] = "tvm",
111 : [ML_NNFW_TYPE_ONNX_RUNTIME] = "onnxruntime",
112 : [ML_NNFW_TYPE_NCNN] = "ncnn",
113 : [ML_NNFW_TYPE_TENSORRT] = "tensorrt",
114 : [ML_NNFW_TYPE_QNN] = "qnn",
115 : [ML_NNFW_TYPE_LLAMACPP] = "llamacpp",
116 : [ML_NNFW_TYPE_TIZEN_HAL] = "tizen-hal",
117 : NULL
118 : };
119 :
120 : /** ML single api data structure for handle */
121 : typedef struct
122 : {
123 : GTensorFilterSingleClass *klass; /**< tensor filter class structure*/
124 : GTensorFilterSingle *filter; /**< tensor filter element */
125 : GstTensorsInfo in_info; /**< info about input */
126 : GstTensorsInfo out_info; /**< info about output */
127 : ml_nnfw_type_e nnfw; /**< nnfw type for this filter */
128 : guint magic; /**< code to verify valid handle */
129 :
130 : GThread *thread; /**< thread for invoking */
131 : GMutex mutex; /**< mutex for synchronization */
132 : GCond cond; /**< condition for synchronization */
133 : ml_tensors_data_h input; /**< input received from user */
134 : ml_tensors_data_h output; /**< output to be sent back to user */
135 : guint timeout; /**< timeout for invoking */
136 : thread_state state; /**< current state of the thread */
137 : gboolean free_output; /**< true if output tensors are allocated in single-shot */
138 : int status; /**< status of processing */
139 : gboolean invoking; /**< invoke running flag */
140 : ml_tensors_data_h in_tensors; /**< input tensor wrapper for processing */
141 : ml_tensors_data_h out_tensors; /**< output tensor wrapper for processing */
142 :
143 : GList *destroy_data_list; /**< data to be freed by filter */
144 : gboolean invoke_dynamic; /**< true to invoke flexible tensor */
145 : gboolean invoke_async; /**< true to invoke and return result asynchronously */
146 : ml_tensors_data_cb invoke_async_cb; /**< Callback function to be called when the sub-plugin generates an output asynchronously. */
147 : void *invoke_async_pdata; /**< Private data to be passed to async callback. */
148 : } ml_single;
149 :
150 : /**
151 : * @brief Internal function to get the nnfw type.
152 : */
153 : ml_nnfw_type_e
154 96 : _ml_get_nnfw_type_by_subplugin_name (const char *name)
155 : {
156 96 : ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY;
157 96 : int idx = -1;
158 :
159 96 : if (name == NULL)
160 2 : return ML_NNFW_TYPE_ANY;
161 :
162 94 : idx = find_key_strv (ml_nnfw_subplugin_name, name);
163 94 : if (idx < 0) {
164 : /* check sub-plugin for android */
165 2 : if (g_ascii_strcasecmp (name, "snap") == 0)
166 1 : nnfw_type = ML_NNFW_TYPE_SNAP;
167 : else
168 1 : _ml_error_report ("Cannot find nnfw, %s is an invalid name.",
169 : _STR_NULL (name));
170 : } else {
171 92 : nnfw_type = (ml_nnfw_type_e) idx;
172 : }
173 :
174 94 : return nnfw_type;
175 : }
176 :
177 : /**
178 : * @brief Internal function to get the sub-plugin name.
179 : */
180 : const char *
181 370 : _ml_get_nnfw_subplugin_name (ml_nnfw_type_e nnfw)
182 : {
183 : /* check sub-plugin for android */
184 370 : if (nnfw == ML_NNFW_TYPE_SNAP)
185 1 : return "snap";
186 :
187 369 : return ml_nnfw_subplugin_name[nnfw];
188 : }
189 :
190 : /**
191 : * @brief Convert c-api based hw to internal representation
192 : */
193 : accl_hw
194 271 : _ml_nnfw_to_accl_hw (const ml_nnfw_hw_e hw)
195 : {
196 271 : switch (hw) {
197 249 : case ML_NNFW_HW_ANY:
198 249 : return ACCL_DEFAULT;
199 3 : case ML_NNFW_HW_AUTO:
200 3 : return ACCL_AUTO;
201 5 : case ML_NNFW_HW_CPU:
202 5 : return ACCL_CPU;
203 : #if defined (__aarch64__) || defined (__arm__)
204 : case ML_NNFW_HW_CPU_NEON:
205 : return ACCL_CPU_NEON;
206 : #else
207 2 : case ML_NNFW_HW_CPU_SIMD:
208 2 : return ACCL_CPU_SIMD;
209 : #endif
210 3 : case ML_NNFW_HW_GPU:
211 3 : return ACCL_GPU;
212 2 : case ML_NNFW_HW_NPU:
213 2 : return ACCL_NPU;
214 2 : case ML_NNFW_HW_NPU_MOVIDIUS:
215 2 : return ACCL_NPU_MOVIDIUS;
216 1 : case ML_NNFW_HW_NPU_EDGE_TPU:
217 1 : return ACCL_NPU_EDGE_TPU;
218 1 : case ML_NNFW_HW_NPU_VIVANTE:
219 1 : return ACCL_NPU_VIVANTE;
220 1 : case ML_NNFW_HW_NPU_SLSI:
221 1 : return ACCL_NPU_SLSI;
222 2 : case ML_NNFW_HW_NPU_SR:
223 : /** @todo how to get srcn npu */
224 2 : return ACCL_NPU_SR;
225 0 : default:
226 0 : return ACCL_AUTO;
227 : }
228 : }
229 :
230 : /**
231 : * @brief Checks the availability of the given execution environments with custom option.
232 : */
233 : int
234 193 : ml_check_nnfw_availability_full (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
235 : const char *custom, bool *available)
236 : {
237 193 : const char *fw_name = NULL;
238 :
239 193 : check_feature_state (ML_FEATURE_INFERENCE);
240 :
241 193 : if (!available)
242 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
243 : "The parameter, available (bool *), is NULL. It should be a valid pointer of bool. E.g., bool a; ml_check_nnfw_availability_full (..., &a);");
244 :
245 : /* init false */
246 191 : *available = false;
247 :
248 191 : if (nnfw == ML_NNFW_TYPE_ANY)
249 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
250 : "The parameter, nnfw (ml_nnfw_type_e), is ML_NNFW_TYPE_ANY. It should specify the framework to be probed for the hardware availability.");
251 :
252 190 : fw_name = _ml_get_nnfw_subplugin_name (nnfw);
253 :
254 190 : if (fw_name) {
255 190 : if (nnstreamer_filter_find (fw_name) != NULL) {
256 189 : accl_hw accl = _ml_nnfw_to_accl_hw (hw);
257 :
258 189 : if (gst_tensor_filter_check_hw_availability (fw_name, accl, custom)) {
259 180 : *available = true;
260 : } else {
261 9 : _ml_logi ("%s is supported but not with the specified hardware.",
262 : fw_name);
263 : }
264 : } else {
265 1 : _ml_logi ("%s is not supported.", fw_name);
266 : }
267 : } else {
268 0 : _ml_logw ("Cannot get the name of sub-plugin for given nnfw.");
269 : }
270 :
271 190 : return ML_ERROR_NONE;
272 : }
273 :
274 : /**
275 : * @brief Checks the availability of the given execution environments.
276 : */
277 : int
278 191 : ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
279 : bool *available)
280 : {
281 191 : return ml_check_nnfw_availability_full (nnfw, hw, NULL, available);
282 : }
283 :
284 : /**
285 : * @brief setup input and output tensor memory to pass to the tensor_filter.
286 : * @note this tensor memory wrapper will be reused for each invoke.
287 : */
288 : static void
289 98 : __setup_in_out_tensors (ml_single * single_h)
290 : {
291 : guint i;
292 98 : ml_tensors_data_s *in_tensors = (ml_tensors_data_s *) single_h->in_tensors;
293 98 : ml_tensors_data_s *out_tensors = (ml_tensors_data_s *) single_h->out_tensors;
294 :
295 : /* Setup input buffer */
296 98 : if (in_tensors) {
297 20 : _ml_tensors_info_free (in_tensors->info);
298 20 : _ml_tensors_info_copy_from_gst (in_tensors->info, &single_h->in_info);
299 : } else {
300 : ml_tensors_info_h info;
301 :
302 78 : _ml_tensors_info_create_from_gst (&info, &single_h->in_info);
303 78 : _ml_tensors_data_create_no_alloc (info, &single_h->in_tensors);
304 :
305 78 : ml_tensors_info_destroy (info);
306 78 : in_tensors = (ml_tensors_data_s *) single_h->in_tensors;
307 : }
308 :
309 98 : in_tensors->num_tensors = single_h->in_info.num_tensors;
310 229 : for (i = 0; i < in_tensors->num_tensors; i++) {
311 : /** memory will be allocated by tensor_filter_single */
312 131 : in_tensors->tensors[i].data = NULL;
313 131 : in_tensors->tensors[i].size =
314 131 : gst_tensors_info_get_size (&single_h->in_info, i);
315 : }
316 :
317 : /* Setup output buffer */
318 98 : if (out_tensors) {
319 20 : _ml_tensors_info_free (out_tensors->info);
320 20 : _ml_tensors_info_copy_from_gst (out_tensors->info, &single_h->out_info);
321 : } else {
322 : ml_tensors_info_h info;
323 :
324 78 : _ml_tensors_info_create_from_gst (&info, &single_h->out_info);
325 78 : _ml_tensors_data_create_no_alloc (info, &single_h->out_tensors);
326 :
327 78 : ml_tensors_info_destroy (info);
328 78 : out_tensors = (ml_tensors_data_s *) single_h->out_tensors;
329 : }
330 :
331 98 : out_tensors->num_tensors = single_h->out_info.num_tensors;
332 227 : for (i = 0; i < out_tensors->num_tensors; i++) {
333 : /** memory will be allocated by tensor_filter_single */
334 129 : out_tensors->tensors[i].data = NULL;
335 129 : out_tensors->tensors[i].size =
336 129 : gst_tensors_info_get_size (&single_h->out_info, i);
337 : }
338 98 : }
339 :
340 : /**
341 : * @brief To call the framework to destroy the allocated output data
342 : */
343 : static inline void
344 0 : __destroy_notify (gpointer data_h, gpointer single_data)
345 : {
346 : ml_single *single_h;
347 : ml_tensors_data_s *data;
348 :
349 0 : data = (ml_tensors_data_s *) data_h;
350 0 : single_h = (ml_single *) single_data;
351 :
352 0 : if (G_LIKELY (single_h->filter)) {
353 0 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
354 0 : single_h->klass->destroy_notify (single_h->filter, data->tensors);
355 : }
356 : }
357 :
358 : /* reset callback function */
359 0 : data->destroy = NULL;
360 0 : }
361 :
362 : /**
363 : * @brief Wrapper function for __destroy_notify
364 : */
365 : static int
366 0 : ml_single_destroy_notify_cb (void *handle, void *user_data)
367 : {
368 0 : ml_tensors_data_h data = (ml_tensors_data_h) handle;
369 0 : ml_single_h single = (ml_single_h) user_data;
370 : ml_single *single_h;
371 0 : int status = ML_ERROR_NONE;
372 :
373 0 : if (G_UNLIKELY (!single))
374 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
375 : "Failed to destroy data buffer. Callback function argument from _ml_tensors_data_destroy_internal is invalid. The given 'user_data' is NULL. It appears to be an internal error of ML-API or the user thread has touched private data structure.");
376 0 : if (G_UNLIKELY (!data))
377 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
378 : "Failed to destroy data buffer. Callback function argument from _ml_tensors_data_destroy_internal is invalid. The given 'handle' is NULL. It appears to be an internal error of ML-API or the user thread has touched private data structure.");
379 :
380 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
381 :
382 0 : if (G_UNLIKELY (!single_h->filter)) {
383 0 : status = ML_ERROR_INVALID_PARAMETER;
384 0 : _ml_error_report
385 : ("Failed to destroy the data buffer. The handle instance (single_h) is invalid. It appears to be an internal error of ML-API of the user thread has touched private data structure.");
386 0 : goto exit;
387 : }
388 :
389 0 : single_h->destroy_data_list =
390 0 : g_list_remove (single_h->destroy_data_list, data);
391 0 : __destroy_notify (data, single_h);
392 :
393 0 : exit:
394 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
395 :
396 0 : return status;
397 : }
398 :
399 : /**
400 : * @brief setup the destroy notify for the allocated output data.
401 : * @note this stores the data entry in the single list.
402 : * @note this has not overhead if the allocation of output is not performed by
403 : * the framework but by tensor filter element.
404 : */
405 : static void
406 78 : set_destroy_notify (ml_single * single_h, ml_tensors_data_s * data,
407 : gboolean add)
408 : {
409 78 : if (single_h->klass->allocate_in_invoke (single_h->filter)) {
410 0 : data->destroy = ml_single_destroy_notify_cb;
411 0 : data->user_data = single_h;
412 0 : add = TRUE;
413 : }
414 :
415 78 : if (add) {
416 4 : single_h->destroy_data_list = g_list_append (single_h->destroy_data_list,
417 : (gpointer) data);
418 : }
419 78 : }
420 :
421 : /**
422 : * @brief Internal function to call subplugin's invoke
423 : */
424 : static inline int
425 80 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out,
426 : gboolean alloc_output)
427 : {
428 : ml_tensors_data_s *in_data, *out_data;
429 80 : int status = ML_ERROR_NONE;
430 :
431 80 : in_data = (ml_tensors_data_s *) in;
432 80 : out_data = (ml_tensors_data_s *) out;
433 :
434 : /* Prevent error case when input or output is null in invoke thread. */
435 80 : if (!in_data || !out_data) {
436 0 : _ml_error_report ("Failed to invoke a model, invalid data handle.");
437 0 : return ML_ERROR_STREAMS_PIPE;
438 : }
439 :
440 : /* Invoke the thread. */
441 80 : if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
442 80 : out_data->tensors, alloc_output)) {
443 0 : const char *fw_name = _ml_get_nnfw_subplugin_name (single_h->nnfw);
444 0 : _ml_error_report
445 : ("Failed to invoke the tensors. The invoke callback of the tensor-filter subplugin '%s' has failed. Please contact the author of tensor-filter-%s (nnstreamer-%s) or review its source code. Note that this usually happens when the designated framework does not support the given model (e.g., trying to run tf-lite 2.6 model with tf-lite 1.13).",
446 : fw_name, fw_name, fw_name);
447 0 : status = ML_ERROR_STREAMS_PIPE;
448 : }
449 :
450 80 : return status;
451 : }
452 :
453 : /**
454 : * @brief Internal function to post-process given output.
455 : * @note Do not call this if single_h->free_output is false (output data is not allocated in single-shot).
456 : */
457 : static inline void
458 75 : __process_output (ml_single * single_h, ml_tensors_data_h output)
459 : {
460 : ml_tensors_data_s *out_data;
461 :
462 75 : if (g_list_find (single_h->destroy_data_list, output)) {
463 : /**
464 : * Caller of the invoke thread has returned back with timeout.
465 : * So, free the memory allocated by the invoke as their is no receiver.
466 : */
467 1 : single_h->destroy_data_list =
468 1 : g_list_remove (single_h->destroy_data_list, output);
469 1 : ml_tensors_data_destroy (output);
470 : } else {
471 74 : out_data = (ml_tensors_data_s *) output;
472 74 : set_destroy_notify (single_h, out_data, FALSE);
473 : }
474 75 : }
475 :
476 : /**
477 : * @brief thread to execute calls to invoke
478 : *
479 : * @details The thread behavior is detailed as below:
480 : * - Starting with IDLE state, the thread waits for an input or change
481 : * in state externally.
482 : * - If state is not RUNNING, exit this thread, else process the
483 : * request.
484 : * - Process input, call invoke, process output. Any error in this
485 : * state sets the status to be used by ml_single_invoke().
486 : * - State is set back to IDLE and thread moves back to start.
487 : *
488 : * State changes performed by this function when:
489 : * RUNNING -> IDLE - processing is finished.
490 : * JOIN_REQUESTED -> IDLE - close is requested.
491 : *
492 : * @note Error while processing an input is provided back to requesting
493 : * function, and further processing of invoke_thread is not affected.
494 : */
495 : static void *
496 82 : invoke_thread (void *arg)
497 : {
498 : ml_single *single_h;
499 : ml_tensors_data_h input, output;
500 82 : gboolean alloc_output = FALSE;
501 :
502 82 : single_h = (ml_single *) arg;
503 :
504 82 : g_mutex_lock (&single_h->mutex);
505 :
506 101 : while (single_h->state <= RUNNING) {
507 100 : int status = ML_ERROR_NONE;
508 :
509 : /** wait for data */
510 123 : while (single_h->state != RUNNING) {
511 100 : g_cond_wait (&single_h->cond, &single_h->mutex);
512 98 : if (single_h->state == JOIN_REQUESTED)
513 75 : goto exit;
514 : }
515 :
516 23 : input = single_h->input;
517 23 : output = single_h->output;
518 : /* Set null to prevent double-free. */
519 23 : single_h->input = single_h->output = NULL;
520 :
521 23 : single_h->invoking = TRUE;
522 23 : alloc_output = single_h->free_output;
523 23 : g_mutex_unlock (&single_h->mutex);
524 23 : status = __invoke (single_h, input, output, alloc_output);
525 23 : g_mutex_lock (&single_h->mutex);
526 : /* Clear input data after invoke is done. */
527 23 : ml_tensors_data_destroy (input);
528 23 : single_h->invoking = FALSE;
529 :
530 23 : if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) {
531 4 : if (alloc_output) {
532 4 : single_h->destroy_data_list =
533 4 : g_list_remove (single_h->destroy_data_list, output);
534 4 : ml_tensors_data_destroy (output);
535 : }
536 :
537 4 : if (single_h->state == JOIN_REQUESTED)
538 4 : goto exit;
539 0 : goto wait_for_next;
540 : }
541 :
542 19 : if (alloc_output)
543 19 : __process_output (single_h, output);
544 :
545 : /** loop over to wait for the next element */
546 0 : wait_for_next:
547 19 : single_h->status = status;
548 19 : if (single_h->state == RUNNING)
549 19 : single_h->state = IDLE;
550 19 : g_cond_broadcast (&single_h->cond);
551 : }
552 :
553 1 : exit:
554 : /* Do not set IDLE if JOIN_REQUESTED */
555 80 : if (single_h->state == JOIN_REQUESTED) {
556 : /* Release input and output data */
557 80 : if (single_h->input)
558 0 : ml_tensors_data_destroy (single_h->input);
559 :
560 80 : if (alloc_output && single_h->output) {
561 0 : single_h->destroy_data_list =
562 0 : g_list_remove (single_h->destroy_data_list, single_h->output);
563 0 : ml_tensors_data_destroy (single_h->output);
564 : }
565 :
566 80 : single_h->input = single_h->output = NULL;
567 0 : } else if (single_h->state == RUNNING)
568 0 : single_h->state = IDLE;
569 80 : g_mutex_unlock (&single_h->mutex);
570 80 : return NULL;
571 : }
572 :
573 : /**
574 : * @brief Internal function to get the asynchronous invoke.
575 : */
576 : static int
577 0 : ml_single_async_cb (GstTensorMemory * data, GstTensorsInfo * info,
578 : void *user_data)
579 : {
580 0 : ml_single_h single = (ml_single_h) user_data;
581 : ml_single *single_h;
582 0 : ml_tensors_info_h _info = NULL;
583 0 : ml_tensors_data_h _data = NULL;
584 : unsigned int i;
585 0 : int ret = ML_ERROR_NONE;
586 :
587 0 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
588 :
589 0 : if (!single_h->invoke_async_cb) {
590 : /* No callback, do nothing. Internal state changing? */
591 0 : goto done;
592 : }
593 :
594 0 : ret = _ml_tensors_info_create_from_gst (&_info, info);
595 0 : if (ret != ML_ERROR_NONE) {
596 0 : _ml_error_report
597 : ("Cannot handle tensor data stream. Failed to create ml information.");
598 0 : goto done;
599 : }
600 :
601 0 : ret = ml_tensors_data_create (_info, &_data);
602 0 : if (ret != ML_ERROR_NONE) {
603 0 : _ml_error_report
604 : ("Cannot handle tensor data stream. Failed to create ml data.");
605 0 : goto done;
606 : }
607 :
608 0 : for (i = 0; i < info->num_tensors; ++i) {
609 0 : ret = ml_tensors_data_set_tensor_data (_data, i,
610 0 : data[i].data, data[i].size);
611 0 : if (ret != ML_ERROR_NONE) {
612 0 : _ml_error_report
613 : ("Cannot handle tensor data stream. Failed to update ml data of index %u, size is %zu.",
614 : i, data[i].size);
615 0 : goto done;
616 : }
617 : }
618 :
619 0 : ret = single_h->invoke_async_cb (_data, single_h->invoke_async_pdata);
620 0 : if (ret != ML_ERROR_NONE) {
621 0 : _ml_error_report
622 : ("Cannot handle tensor data stream. The callback function returns error '%d'.",
623 : ret);
624 : }
625 :
626 0 : done:
627 0 : if (_info) {
628 0 : ml_tensors_info_destroy (_info);
629 : }
630 :
631 0 : if (_data) {
632 0 : ml_tensors_data_destroy (_data);
633 : }
634 :
635 0 : ML_SINGLE_HANDLE_UNLOCK (single_h);
636 0 : return (ret == ML_ERROR_NONE) ? 0 : -1;
637 : }
638 :
639 : /**
640 : * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model, and get updated output data information.
641 : * @details Note that a model/framework may not support setting such information.
642 : * @since_tizen 6.0
643 : * @param[in] single The model handle.
644 : * @param[in] in_info The handle of input tensors information.
645 : * @param[out] out_info The handle of output tensors information. The caller is responsible for freeing the information with ml_tensors_info_destroy().
646 : * @return @c 0 on success. Otherwise a negative error value.
647 : * @retval #ML_ERROR_NONE Successful
648 : * @retval #ML_ERROR_NOT_SUPPORTED This implies that the given framework does not support dynamic dimensions.
649 : * Use ml_single_get_input_info() and ml_single_get_output_info() instead for this framework.
650 : * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
651 : */
652 : static int
653 7 : ml_single_update_info (ml_single_h single,
654 : const ml_tensors_info_h in_info, ml_tensors_info_h * out_info)
655 : {
656 7 : if (!single)
657 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
658 : "The parameter, single (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
659 7 : if (!in_info)
660 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
661 : "The parameter, in_info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensors_info_h, usually created by ml_tensors_info_create() and configured by the application.");
662 7 : if (!out_info)
663 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
664 : "The parameter, out_info (ml_tensors_info_h *), is NULL. It should be a valid pointer to an instance ml_tensors_info_h, usually created by ml_tensors_info_h(). Note that out_info is supposed to be overwritten by this API call.");
665 :
666 : /* init null */
667 7 : *out_info = NULL;
668 :
669 7 : _ml_error_report_return_continue_iferr (ml_single_set_input_info (single,
670 : in_info),
671 : "Configuring the neural network model with the given input information has failed with %d error code. The given input information ('in_info' parameter) might be invalid or the given neural network cannot accept it as its input data.",
672 : _ERRNO);
673 :
674 5 : __setup_in_out_tensors (single);
675 5 : _ml_error_report_return_continue_iferr (ml_single_get_output_info (single,
676 : out_info),
677 : "Fetching output info after configuring input information has failed with %d error code.",
678 : _ERRNO);
679 :
680 5 : return ML_ERROR_NONE;
681 : }
682 :
683 : /**
684 : * @brief Internal function to get the gst info from tensor-filter.
685 : */
686 : static void
687 169 : ml_single_get_gst_info (ml_single * single_h, gboolean is_input,
688 : GstTensorsInfo * gst_info)
689 : {
690 : const gchar *prop_prefix, *prop_name, *prop_type;
691 : gchar *val;
692 : guint num;
693 :
694 169 : if (is_input) {
695 89 : prop_prefix = INPUT_STR;
696 89 : prop_type = CONCAT_MACRO_STR (INPUT_STR, TYPE_STR);
697 89 : prop_name = CONCAT_MACRO_STR (INPUT_STR, NAME_STR);
698 : } else {
699 80 : prop_prefix = OUTPUT_STR;
700 80 : prop_type = CONCAT_MACRO_STR (OUTPUT_STR, TYPE_STR);
701 80 : prop_name = CONCAT_MACRO_STR (OUTPUT_STR, NAME_STR);
702 : }
703 :
704 169 : gst_tensors_info_init (gst_info);
705 :
706 : /* get dimensions */
707 169 : g_object_get (single_h->filter, prop_prefix, &val, NULL);
708 169 : num = gst_tensors_info_parse_dimensions_string (gst_info, val);
709 169 : g_free (val);
710 :
711 : /* set the number of tensors */
712 169 : gst_info->num_tensors = num;
713 :
714 : /* get types */
715 169 : g_object_get (single_h->filter, prop_type, &val, NULL);
716 169 : num = gst_tensors_info_parse_types_string (gst_info, val);
717 169 : g_free (val);
718 :
719 169 : if (gst_info->num_tensors != num) {
720 0 : _ml_logw ("The number of tensor type is mismatched in filter.");
721 : }
722 :
723 : /* get names */
724 169 : g_object_get (single_h->filter, prop_name, &val, NULL);
725 169 : num = gst_tensors_info_parse_names_string (gst_info, val);
726 169 : g_free (val);
727 :
728 169 : if (gst_info->num_tensors != num) {
729 8 : _ml_logw ("The number of tensor name is mismatched in filter.");
730 : }
731 :
732 169 : if (single_h->invoke_dynamic) {
733 : /* flexible tensor stream */
734 0 : gst_info->format = _NNS_TENSOR_FORMAT_FLEXIBLE;
735 :
736 : /** @todo Consider multiple input tensors while invoking a model. */
737 0 : if (gst_info->num_tensors == 0) {
738 0 : gst_info->num_tensors = 1;
739 : }
740 : }
741 169 : }
742 :
743 : /**
744 : * @brief Internal function to set the gst info in tensor-filter.
745 : */
746 : static int
747 21 : ml_single_set_gst_info (ml_single * single_h, const GstTensorsInfo * in_info)
748 : {
749 : GstTensorsInfo out_info;
750 21 : int status = ML_ERROR_NONE;
751 21 : int ret = -EINVAL;
752 :
753 21 : gst_tensors_info_init (&out_info);
754 21 : ret = single_h->klass->set_input_info (single_h->filter, in_info, &out_info);
755 21 : if (ret == 0) {
756 15 : gst_tensors_info_free (&single_h->in_info);
757 15 : gst_tensors_info_free (&single_h->out_info);
758 15 : gst_tensors_info_copy (&single_h->in_info, in_info);
759 15 : gst_tensors_info_copy (&single_h->out_info, &out_info);
760 :
761 15 : __setup_in_out_tensors (single_h);
762 6 : } else if (ret == -ENOENT) {
763 0 : status = ML_ERROR_NOT_SUPPORTED;
764 : } else {
765 6 : status = ML_ERROR_INVALID_PARAMETER;
766 : }
767 :
768 21 : gst_tensors_info_free (&out_info);
769 :
770 21 : return status;
771 : }
772 :
773 : /**
774 : * @brief Set the info for input/output tensors
775 : */
776 : static int
777 0 : ml_single_set_inout_tensors_info (GObject * object,
778 : const gboolean is_input, ml_tensors_info_s * tensors_info)
779 : {
780 0 : int status = ML_ERROR_NONE;
781 : GstTensorsInfo info;
782 : gchar *str_dim, *str_type, *str_name;
783 : const gchar *str_type_name, *str_name_name;
784 : const gchar *prefix;
785 :
786 0 : if (is_input) {
787 0 : prefix = INPUT_STR;
788 0 : str_type_name = CONCAT_MACRO_STR (INPUT_STR, TYPE_STR);
789 0 : str_name_name = CONCAT_MACRO_STR (INPUT_STR, NAME_STR);
790 : } else {
791 0 : prefix = OUTPUT_STR;
792 0 : str_type_name = CONCAT_MACRO_STR (OUTPUT_STR, TYPE_STR);
793 0 : str_name_name = CONCAT_MACRO_STR (OUTPUT_STR, NAME_STR);
794 : }
795 :
796 0 : _ml_error_report_return_continue_iferr
797 : (_ml_tensors_info_copy_from_ml (&info, tensors_info),
798 : "Cannot fetch tensor-info from the given information. Error code: %d",
799 : _ERRNO);
800 :
801 : /* Set input option */
802 0 : str_dim = gst_tensors_info_get_dimensions_string (&info);
803 0 : str_type = gst_tensors_info_get_types_string (&info);
804 0 : str_name = gst_tensors_info_get_names_string (&info);
805 :
806 0 : if (!str_dim || !str_type || !str_name) {
807 0 : if (!str_dim)
808 0 : _ml_error_report
809 : ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor dimension information.");
810 0 : if (!str_type)
811 0 : _ml_error_report
812 : ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor type information.");
813 0 : if (!str_name)
814 0 : _ml_error_report
815 : ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor name information. Even if tensor names are not defined, this should be able to fetch a list of empty strings.");
816 :
817 0 : status = ML_ERROR_INVALID_PARAMETER;
818 : } else {
819 0 : g_object_set (object, prefix, str_dim, str_type_name, str_type,
820 : str_name_name, str_name, NULL);
821 : }
822 :
823 0 : g_free (str_dim);
824 0 : g_free (str_type);
825 0 : g_free (str_name);
826 :
827 0 : gst_tensors_info_free (&info);
828 :
829 0 : return status;
830 : }
831 :
832 : /**
833 : * @brief Internal static function to set tensors info in the handle.
834 : */
835 : static gboolean
836 162 : ml_single_set_info_in_handle (ml_single_h single, gboolean is_input,
837 : ml_tensors_info_s * tensors_info)
838 : {
839 : int status;
840 : ml_single *single_h;
841 : GstTensorsInfo *dest;
842 162 : gboolean configured = FALSE;
843 162 : gboolean is_valid = FALSE;
844 : GObject *filter_obj;
845 :
846 162 : single_h = (ml_single *) single;
847 162 : filter_obj = G_OBJECT (single_h->filter);
848 :
849 162 : if (is_input) {
850 82 : dest = &single_h->in_info;
851 82 : configured = single_h->klass->input_configured (single_h->filter);
852 : } else {
853 80 : dest = &single_h->out_info;
854 80 : configured = single_h->klass->output_configured (single_h->filter);
855 : }
856 :
857 162 : if (configured) {
858 : /* get configured info and compare with input info */
859 : GstTensorsInfo gst_info;
860 162 : ml_tensors_info_h info = NULL;
861 :
862 162 : ml_single_get_gst_info (single_h, is_input, &gst_info);
863 162 : _ml_tensors_info_create_from_gst (&info, &gst_info);
864 :
865 162 : gst_tensors_info_free (&gst_info);
866 :
867 162 : if (tensors_info && !ml_tensors_info_is_equal (tensors_info, info)) {
868 : /* given input info is not matched with configured */
869 5 : ml_tensors_info_destroy (info);
870 5 : if (is_input) {
871 : /* try to update tensors info */
872 3 : status = ml_single_update_info (single, tensors_info, &info);
873 3 : if (status != ML_ERROR_NONE)
874 4 : goto done;
875 : } else {
876 2 : goto done;
877 : }
878 : }
879 :
880 158 : gst_tensors_info_free (dest);
881 158 : _ml_tensors_info_copy_from_ml (dest, info);
882 158 : ml_tensors_info_destroy (info);
883 0 : } else if (tensors_info) {
884 : status =
885 0 : ml_single_set_inout_tensors_info (filter_obj, is_input, tensors_info);
886 0 : if (status != ML_ERROR_NONE)
887 0 : goto done;
888 :
889 0 : gst_tensors_info_free (dest);
890 0 : _ml_tensors_info_copy_from_ml (dest, tensors_info);
891 : }
892 :
893 158 : is_valid = gst_tensors_info_validate (dest);
894 :
895 162 : done:
896 162 : return is_valid;
897 : }
898 :
899 : /**
900 : * @brief Internal function to create and initialize the single handle.
 : * @param[in] nnfw The neural network framework type recorded in the new handle.
 : * @return Newly allocated and initialized handle, or NULL on failure
 : *         (class ref failure or invoke-thread creation failure).
901 : */
902 : static ml_single *
903 82 : ml_single_create_handle (ml_nnfw_type_e nnfw)
904 : {
905 : ml_single *single_h;
 : /* GError out-parameters must point to a NULL GError* before the call;
 : * g_thread_try_new() only fills *error and reading an uninitialized
 : * pointer afterwards would be undefined behavior. */
906 : GError *error = NULL;
907 82 : gboolean created = FALSE;
908 :
909 82 : single_h = g_new0 (ml_single, 1);
910 82 : if (single_h == NULL)
911 82 : _ml_error_report_return (NULL,
912 : "Failed to allocate memory for the single_h handle. Out of memory?");
913 :
914 82 : single_h->filter = g_object_new (G_TYPE_TENSOR_FILTER_SINGLE, NULL);
915 82 : if (single_h->filter == NULL) {
916 0 : _ml_error_report
917 : ("Failed to create a new instance for filter. Out of memory?");
918 0 : g_free (single_h);
919 0 : return NULL;
920 : }
921 :
922 82 : single_h->magic = ML_SINGLE_MAGIC;
923 82 : single_h->timeout = SINGLE_DEFAULT_TIMEOUT;
924 82 : single_h->nnfw = nnfw;
925 82 : single_h->state = IDLE;
926 82 : single_h->thread = NULL;
927 82 : single_h->input = NULL;
928 82 : single_h->output = NULL;
929 82 : single_h->destroy_data_list = NULL;
930 82 : single_h->invoking = FALSE;
931 :
932 82 : gst_tensors_info_init (&single_h->in_info);
933 82 : gst_tensors_info_init (&single_h->out_info);
934 82 : g_mutex_init (&single_h->mutex);
935 82 : g_cond_init (&single_h->cond);
936 :
937 82 : single_h->klass = g_type_class_ref (G_TYPE_TENSOR_FILTER_SINGLE);
938 82 : if (single_h->klass == NULL) {
939 0 : _ml_error_report
940 : ("Failed to get class of the tensor-filter of single API. This binary is not compiled properly or required libraries are not loaded.");
941 0 : goto done;
942 : }
943 :
944 82 : single_h->thread =
945 82 : g_thread_try_new (NULL, invoke_thread, (gpointer) single_h, &error);
946 82 : if (single_h->thread == NULL) {
947 0 : _ml_error_report
948 : ("Failed to create the invoke thread of single API, g_thread_try_new has reported an error: %s.",
949 : error->message);
950 0 : g_clear_error (&error);
951 0 : goto done;
952 : }
953 :
954 82 : created = TRUE;
955 :
 : /* Partial-failure cleanup: ml_single_close() tears down whatever was set up. */
956 82 : done:
957 82 : if (!created) {
958 0 : ml_single_close (single_h);
959 0 : single_h = NULL;
960 : }
961 :
962 82 : return single_h;
963 : }
964 :
965 : /**
966 : * @brief Validate arguments for open
 : * @param[in] single Destination pointer for the new handle; must not be NULL.
 : * @param[in] info Preset describing the model(s) to open; must not be NULL.
 : * @return ML_ERROR_NONE on success, ML_ERROR_INVALID_PARAMETER otherwise.
967 : */
968 : static int
969 91 : _ml_single_open_custom_validate_arguments (ml_single_h * single,
970 : ml_single_preset * info)
971 : {
972 91 : if (!single)
973 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
974 : "The parameter, 'single' (ml_single_h *), is NULL. It should be a valid pointer to an instance of ml_single_h.");
975 90 : if (!info)
976 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
977 : "The parameter, 'info' (ml_single_preset *), is NULL. It should be a valid pointer to a valid instance of ml_single_preset.");
978 :
979 : /* Validate input tensor info. */
980 90 : if (info->input_info && !ml_tensors_info_is_valid (info->input_info))
981 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
982 : "The parameter, 'info' (ml_single_preset *), is not valid. It has 'input_info' entry that cannot be validated. ml_tensors_info_is_valid(info->input_info) has failed while info->input_info exists.");
983 :
984 : /* Validate output tensor info. */
985 89 : if (info->output_info && !ml_tensors_info_is_valid (info->output_info))
986 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
987 : "The parameter, 'info' (ml_single_preset *), is not valid. It has 'output_info' entry that cannot be validated. ml_tensors_info_is_valid(info->output_info) has failed while info->output_info exists.");
988 :
 : /* Fixed grammar in the message below ("entry if NULL" -> "entry is NULL"). */
989 88 : if (!info->models)
990 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
991 : "The parameter, 'info' (ml_single_preset *), is not valid. Its models entry is NULL (info->models is NULL).");
992 :
993 86 : if (info->invoke_async && !info->invoke_async_cb)
994 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
995 : "The parameter, 'info' (ml_single_preset *), is not valid. It has 'invoke_async' entry but its callback 'invoke_async_cb' is NULL");
996 :
997 86 : return ML_ERROR_NONE;
998 : }
999 :
1000 : /**
1001 : * @brief Internal function to convert accelerator as tensor_filter property format.
1002 : * @note returned value must be freed by the caller
1003 : * @note More details on format can be found in gst_tensor_filter_install_properties() in tensor_filter_common.c.
 : * @param[in] hw The accelerator enum to translate.
 : * @return Newly allocated string of the form "true:<hw-name>"; caller owns it (g_free).
1004 : */
1005 : char *
1006 82 : _ml_nnfw_to_str_prop (const ml_nnfw_hw_e hw)
1007 : {
1008 : const gchar *hw_name;
1009 82 : const gchar *use_accl = "true:";
1010 82 : gchar *str_prop = NULL;
1011 :
1012 82 : hw_name = get_accl_hw_str (_ml_nnfw_to_accl_hw (hw));
1013 82 : str_prop = g_strdup_printf ("%s%s", use_accl, hw_name);
1014 :
1015 82 : return str_prop;
1016 : }
1017 :
1018 : /**
1019 : * @brief Opens an ML model with the custom options and returns the instance as a handle.
 : * @param[out] single On success, receives the new handle; set to NULL first.
 : * @param[in] info Preset holding models, nnfw/hw selection, tensor info and async options.
 : * @return ML_ERROR_NONE on success, otherwise a negative ml error code.
 : * @note On any failure after handle creation, control jumps to 'error:' which
 : *       closes the partially-built handle via ml_single_close().
1020 : */
1021 : int
1022 91 : ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
1023 : {
1024 : ml_single *single_h;
1025 : GObject *filter_obj;
1026 91 : int status = ML_ERROR_NONE;
1027 : ml_tensors_info_s *in_tensors_info, *out_tensors_info;
1028 : ml_nnfw_type_e nnfw;
1029 : ml_nnfw_hw_e hw;
1030 : const gchar *fw_name;
1031 91 : g_autofree gchar *converted_models = NULL;
1032 : gchar **list_models;
1033 : guint i, num_models;
1034 : char *hw_name;
1035 :
1036 91 : check_feature_state (ML_FEATURE_INFERENCE);
1037 :
1038 : /* Validate the params */
1039 91 : _ml_error_report_return_continue_iferr
1040 : (_ml_single_open_custom_validate_arguments (single, info),
1041 : "The parameter, 'info' (ml_single_preset *), cannot be validated. Please provide valid information for this object.");
1042 :
1043 : /* init null */
1044 86 : *single = NULL;
1045 :
1046 86 : in_tensors_info = (ml_tensors_info_s *) info->input_info;
1047 86 : out_tensors_info = (ml_tensors_info_s *) info->output_info;
1048 86 : nnfw = info->nnfw;
1049 86 : hw = info->hw;
1050 86 : fw_name = _ml_get_nnfw_subplugin_name (nnfw);
1051 86 : converted_models = _ml_convert_predefined_entity (info->models);
1052 :
1053 : /**
1054 : * 1. Determine nnfw and validate model file
1055 : */
 : /* 'list_models' is owned here; freed on both the error path and success path. */
1056 86 : list_models = g_strsplit (converted_models, ",", -1);
1057 86 : num_models = g_strv_length (list_models);
1058 172 : for (i = 0; i < num_models; i++)
1059 86 : g_strstrip (list_models[i]);
1060 :
1061 86 : status = _ml_validate_model_file ((const char **) list_models, num_models,
1062 : &nnfw);
1063 86 : if (status != ML_ERROR_NONE) {
1064 4 : _ml_error_report_continue
1065 : ("Cannot validate the model (1st model: %s. # models: %d). Error code: %d",
1066 : list_models[0], num_models, status);
1067 4 : g_strfreev (list_models);
1068 4 : return status;
1069 : }
1070 :
1071 82 : g_strfreev (list_models);
1072 :
1073 : /**
1074 : * 2. Determine hw
1075 : * (Supposed CPU only) Support others later.
1076 : */
1077 82 : if (!_ml_nnfw_is_available (nnfw, hw)) {
1078 0 : _ml_error_report_return (ML_ERROR_NOT_SUPPORTED,
1079 : "The given nnfw, '%s', is not supported. There is no corresponding tensor-filter subplugin available or the given hardware requirement is not supported for the given nnfw.",
1080 : fw_name);
1081 : }
1082 :
1083 : /* Create ml_single object */
1084 82 : if ((single_h = ml_single_create_handle (nnfw)) == NULL) {
1085 0 : _ml_error_report_return_continue (ML_ERROR_OUT_OF_MEMORY,
1086 : "Cannot create handle for the given nnfw, %s", fw_name);
1087 : }
1088 :
1089 82 : single_h->invoke_dynamic = info->invoke_dynamic;
1090 82 : single_h->invoke_async = info->invoke_async;
1091 82 : single_h->invoke_async_cb = info->invoke_async_cb;
1092 82 : single_h->invoke_async_pdata = info->invoke_async_pdata;
1093 :
1094 82 : filter_obj = G_OBJECT (single_h->filter);
1095 :
1096 : /**
1097 : * 3. Construct a direct connection with the nnfw.
1098 : * Note that we do not construct a pipeline since 2019.12.
1099 : */
 : /* These frameworks require both input and output info up front. */
1100 82 : if (nnfw == ML_NNFW_TYPE_TENSORFLOW || nnfw == ML_NNFW_TYPE_SNAP ||
1101 82 : nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIX_ENGINE ||
1102 82 : nnfw == ML_NNFW_TYPE_NCNN) {
1103 : /* set input and output tensors information */
1104 0 : if (in_tensors_info && out_tensors_info) {
1105 : status =
1106 0 : ml_single_set_inout_tensors_info (filter_obj, TRUE, in_tensors_info);
1107 0 : if (status != ML_ERROR_NONE) {
1108 0 : _ml_error_report_continue
1109 : ("Input tensors info is given; however, failed to set input tensors info. Error code: %d",
1110 : status);
1111 0 : goto error;
1112 : }
1113 :
1114 : status =
1115 0 : ml_single_set_inout_tensors_info (filter_obj, FALSE,
1116 : out_tensors_info);
1117 0 : if (status != ML_ERROR_NONE) {
1118 0 : _ml_error_report_continue
1119 : ("Output tensors info is given; however, failed to set output tensors info. Error code: %d",
1120 : status);
1121 0 : goto error;
1122 : }
1123 : } else {
1124 0 : _ml_error_report
1125 : ("To run the given nnfw, '%s', with a neural network model, both input and output information should be provided.",
1126 : fw_name);
1127 0 : status = ML_ERROR_INVALID_PARAMETER;
1128 0 : goto error;
1129 : }
1130 82 : } else if (nnfw == ML_NNFW_TYPE_ARMNN) {
1131 : /* set input and output tensors information, if available */
1132 0 : if (in_tensors_info) {
1133 : status =
1134 0 : ml_single_set_inout_tensors_info (filter_obj, TRUE, in_tensors_info);
1135 0 : if (status != ML_ERROR_NONE) {
1136 0 : _ml_error_report_continue
1137 : ("With nnfw '%s', input tensors info is optional. However, the user has provided an invalid input tensors info. Error code: %d",
1138 : fw_name, status);
1139 0 : goto error;
1140 : }
1141 : }
1142 0 : if (out_tensors_info) {
1143 : status =
1144 0 : ml_single_set_inout_tensors_info (filter_obj, FALSE,
1145 : out_tensors_info);
1146 0 : if (status != ML_ERROR_NONE) {
1147 0 : _ml_error_report_continue
1148 : ("With nnfw '%s', output tensors info is optional. However, the user has provided an invalid output tensors info. Error code: %d",
1149 : fw_name, status);
1150 0 : goto error;
1151 : }
1152 : }
1153 : }
1154 :
1155 : /* set accelerator, framework, model files and custom option */
 : /* A user-supplied framework name overrides the auto-detected subplugin name. */
1156 82 : if (info->fw_name) {
1157 33 : fw_name = (const char *) info->fw_name;
1158 : } else {
1159 49 : fw_name = _ml_get_nnfw_subplugin_name (nnfw); /* retry for "auto" */
1160 : }
1161 82 : hw_name = _ml_nnfw_to_str_prop (hw);
1162 :
1163 82 : g_object_set (filter_obj, "framework", fw_name, "accelerator", hw_name,
1164 : "model", converted_models, "invoke-dynamic", single_h->invoke_dynamic,
1165 : "invoke-async", single_h->invoke_async, NULL);
1166 82 : g_free (hw_name);
1167 :
1168 82 : if (info->custom_option) {
1169 0 : g_object_set (filter_obj, "custom", info->custom_option, NULL);
1170 : }
1171 :
1172 : /* Set async callback. */
1173 82 : if (single_h->invoke_async) {
1174 0 : single_h->klass->set_invoke_async_callback (single_h->filter,
1175 : ml_single_async_cb, single_h);
1176 : }
1177 :
1178 : /* 4. Start the nnfw to get inout configurations if needed */
1179 82 : if (!single_h->klass->start (single_h->filter)) {
1180 0 : _ml_error_report
1181 : ("Failed to start NNFW, '%s', to get inout configurations. Subplugin class method has failed to start.",
1182 : fw_name);
1183 0 : status = ML_ERROR_STREAMS_PIPE;
1184 0 : goto error;
1185 : }
1186 :
1187 82 : if (nnfw == ML_NNFW_TYPE_NNTR_INF) {
1188 0 : if (!in_tensors_info || !out_tensors_info) {
1189 0 : if (!in_tensors_info) {
1190 : GstTensorsInfo in_info;
1191 :
1192 0 : gst_tensors_info_init (&in_info);
1193 :
1194 : /* ml_single_set_input_info() can't be done as it checks num_tensors */
1195 0 : status = ml_single_set_gst_info (single_h, &in_info);
1196 0 : if (status != ML_ERROR_NONE) {
1197 0 : _ml_error_report_continue
1198 : ("NNTrainer-inference-single cannot configure single_h handle instance with the given in_info. This might be an ML-API / NNTrainer internal error. Error Code: %d",
1199 : status);
1200 0 : goto error;
1201 : }
1202 : } else {
1203 0 : status = ml_single_set_input_info (single_h, in_tensors_info);
1204 0 : if (status != ML_ERROR_NONE) {
1205 0 : _ml_error_report_continue
1206 : ("NNTrainer-inference-single cannot configure single_h handle instance with the given in_info from the user. Error code: %d",
1207 : status);
1208 0 : goto error;
1209 : }
1210 : }
1211 : }
1212 : }
1213 :
1214 : /* 5. Set in/out configs and metadata */
1215 82 : if (!ml_single_set_info_in_handle (single_h, TRUE, in_tensors_info)) {
1216 2 : _ml_error_report
1217 : ("The input tensors info is invalid. Cannot configure single_h handle with the given input tensors info.");
1218 2 : status = ML_ERROR_INVALID_PARAMETER;
1219 2 : goto error;
1220 : }
1221 :
1222 80 : if (!ml_single_set_info_in_handle (single_h, FALSE, out_tensors_info)) {
1223 2 : _ml_error_report
1224 : ("The output tensors info is invalid. Cannot configure single_h handle with the given output tensors info.");
1225 2 : status = ML_ERROR_INVALID_PARAMETER;
1226 2 : goto error;
1227 : }
1228 :
1229 : /* Setup input and output memory buffers for invoke */
1230 78 : __setup_in_out_tensors (single_h);
1231 :
1232 78 : *single = single_h;
1233 78 : return ML_ERROR_NONE;
1234 :
1235 4 : error:
1236 4 : ml_single_close (single_h);
1237 4 : return status;
1238 : }
1239 :
1240 : /**
1241 : * @brief Opens an ML model and returns the instance as a handle.
 : * @details Convenience wrapper over ml_single_open_full() with no custom option.
1242 : */
1243 : int
1244 53 : ml_single_open (ml_single_h * single, const char *model,
1245 : const ml_tensors_info_h input_info, const ml_tensors_info_h output_info,
1246 : ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw)
1247 : {
1248 53 : return ml_single_open_full (single, model, input_info, output_info, nnfw, hw,
1249 : NULL);
1250 : }
1251 :
1252 : /**
1253 : * @brief Opens an ML model and returns the instance as a handle.
 : * @details Packs the arguments into an ml_single_preset and delegates to
 : *          ml_single_open_custom(), which performs all validation.
1254 : */
1255 : int
1256 53 : ml_single_open_full (ml_single_h * single, const char *model,
1257 : const ml_tensors_info_h input_info, const ml_tensors_info_h output_info,
1258 : ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, const char *custom_option)
1259 : {
1260 53 : ml_single_preset info = { 0, };
1261 :
1262 53 : info.input_info = input_info;
1263 53 : info.output_info = output_info;
1264 53 : info.nnfw = nnfw;
1265 53 : info.hw = hw;
1266 53 : info.models = (char *) model;
1267 53 : info.custom_option = (char *) custom_option;
1268 :
1269 53 : return ml_single_open_custom (single, &info);
1270 : }
1271 :
1272 : /**
1273 : * @brief Open new single handle with given option.
 : * @details Recognized option keys: "input_info", "output_info", "nnfw", "hw",
 : *          "models", "custom", "framework_name"/"framework", "invoke_dynamic",
 : *          "invoke_async", "async_callback", "async_data". Values are borrowed
 : *          from the option handle, not copied here.
1274 : */
1275 : int
1276 39 : ml_single_open_with_option (ml_single_h * single, const ml_option_h option)
1277 : {
1278 : void *value;
1279 39 : ml_single_preset info = { 0, };
1280 :
1281 78 : check_feature_state (ML_FEATURE_INFERENCE);
1282 :
1283 39 : if (!option) {
1284 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1285 : "The parameter, 'option' is NULL. It should be a valid ml_option_h, which should be created by ml_option_create().");
1286 : }
1287 :
1288 38 : if (!single)
1289 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1290 : "The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
1291 :
1292 38 : if (ML_ERROR_NONE == ml_option_get (option, "input_info", &value))
1293 16 : info.input_info = value;
1294 38 : if (ML_ERROR_NONE == ml_option_get (option, "output_info", &value))
1295 16 : info.output_info = value;
1296 38 : if (ML_ERROR_NONE == ml_option_get (option, "nnfw", &value))
1297 2 : info.nnfw = *((ml_nnfw_type_e *) value);
1298 38 : if (ML_ERROR_NONE == ml_option_get (option, "hw", &value))
1299 0 : info.hw = *((ml_nnfw_hw_e *) value);
1300 38 : if (ML_ERROR_NONE == ml_option_get (option, "models", &value))
1301 37 : info.models = (gchar *) value;
1302 38 : if (ML_ERROR_NONE == ml_option_get (option, "custom", &value))
1303 0 : info.custom_option = (gchar *) value;
 : /* "framework_name" takes precedence; "framework" is accepted as an alias. */
1304 38 : if (ML_ERROR_NONE == ml_option_get (option, "framework_name", &value) ||
1305 5 : ML_ERROR_NONE == ml_option_get (option, "framework", &value))
1306 33 : info.fw_name = (gchar *) value;
 : /* Boolean-ish options arrive as strings; only "true" (case-insensitive) enables them. */
1307 38 : if (ML_ERROR_NONE == ml_option_get (option, "invoke_dynamic", &value)) {
1308 0 : if (g_ascii_strcasecmp ((gchar *) value, "true") == 0)
1309 0 : info.invoke_dynamic = TRUE;
1310 : }
1311 38 : if (ML_ERROR_NONE == ml_option_get (option, "invoke_async", &value)) {
1312 0 : if (g_ascii_strcasecmp ((gchar *) value, "true") == 0)
1313 0 : info.invoke_async = TRUE;
1314 : }
1315 38 : if (ML_ERROR_NONE == ml_option_get (option, "async_callback", &value)) {
1316 0 : info.invoke_async_cb = (ml_tensors_data_cb) value;
1317 : }
1318 38 : if (ML_ERROR_NONE == ml_option_get (option, "async_data", &value)) {
1319 0 : info.invoke_async_pdata = value;
1320 : }
1321 :
1322 38 : return ml_single_open_custom (single, &info);
1323 : }
1324 :
1325 : /**
1326 : * @brief Closes the opened model handle.
1327 : *
1328 : * @details State changes performed by this function:
1329 : * ANY STATE -> JOIN REQUESTED - on receiving a request to close
1330 : *
1331 : * Once requested to close, invoke_thread() will exit after processing
1332 : * the current input (if any).
 : * @param[in] single The handle to destroy; invalid after this call returns.
 : * @return ML_ERROR_NONE on success, ML_ERROR_INVALID_PARAMETER for a NULL/invalid handle.
1333 : */
1334 : int
1335 82 : ml_single_close (ml_single_h single)
1336 : {
1337 : ml_single *single_h;
1338 : gboolean invoking;
1339 :
1340 82 : check_feature_state (ML_FEATURE_INFERENCE);
1341 :
1342 82 : if (!single)
1343 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1344 : "The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
1345 :
 : /* 'reset' = 1: the magic is cleared so no other thread can re-acquire this handle. */
1346 81 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 1);
1347 :
1348 : /* First, clear all callbacks. */
1349 80 : single_h->invoke_async_cb = NULL;
1350 :
1351 80 : single_h->state = JOIN_REQUESTED;
1352 80 : g_cond_broadcast (&single_h->cond);
1353 80 : invoking = single_h->invoking;
1354 80 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1355 :
1356 : /** Wait until invoke process is finished */
1357 1686 : while (invoking) {
1358 1606 : _ml_logd ("Wait 1 ms until invoke is finished and close the handle.");
1359 1606 : g_usleep (1000);
1360 1606 : invoking = single_h->invoking;
1361 : /**
1362 : * single_h->invoking is the only protected value here and we are
1363 : * doing a read-only operation and do not need to project its value
1364 : * after the assignment.
1365 : * Thus, we do not need to lock single_h here.
1366 : */
1367 : }
1368 :
1369 80 : if (single_h->thread != NULL)
1370 80 : g_thread_join (single_h->thread);
1371 :
1372 : /** locking ensures correctness with parallel calls on close */
1373 80 : if (single_h->filter) {
 : /* Release any pending output buffers registered via set_destroy_notify(). */
1374 80 : g_list_foreach (single_h->destroy_data_list, __destroy_notify, single_h);
1375 80 : g_list_free (single_h->destroy_data_list);
1376 :
1377 80 : if (single_h->klass)
1378 80 : single_h->klass->stop (single_h->filter);
1379 :
1380 80 : g_object_unref (single_h->filter);
1381 80 : single_h->filter = NULL;
1382 : }
1383 :
1384 80 : if (single_h->klass) {
1385 80 : g_type_class_unref (single_h->klass);
1386 80 : single_h->klass = NULL;
1387 : }
1388 :
1389 80 : gst_tensors_info_free (&single_h->in_info);
1390 80 : gst_tensors_info_free (&single_h->out_info);
1391 :
1392 80 : ml_tensors_data_destroy (single_h->in_tensors);
1393 80 : ml_tensors_data_destroy (single_h->out_tensors);
1394 :
1395 80 : g_cond_clear (&single_h->cond);
1396 80 : g_mutex_clear (&single_h->mutex);
1397 :
1398 80 : g_free (single_h);
1399 80 : return ML_ERROR_NONE;
1400 : }
1401 :
1402 : /**
1403 : * @brief Internal function to validate input/output data.
 : * @param[in] single The single handle (caller holds its lock).
 : * @param[in] data The user data to validate against the handle's template tensors.
 : * @param[in] is_input TRUE to validate against in_tensors, FALSE against out_tensors.
 : * @return ML_ERROR_NONE when tensor count and each tensor size match the model.
1404 : */
1405 : static int
1406 92 : _ml_single_invoke_validate_data (ml_single_h single,
1407 : const ml_tensors_data_h data, const gboolean is_input)
1408 : {
1409 : ml_single *single_h;
1410 : ml_tensors_data_s *_data;
1411 : ml_tensors_data_s *_model;
1412 : guint i;
1413 : size_t raw_size;
1414 :
1415 92 : single_h = (ml_single *) single;
1416 92 : _data = (ml_tensors_data_s *) data;
1417 :
1418 92 : if (G_UNLIKELY (!_data))
1419 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1420 : "(internal function) The parameter, 'data' (const ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h.");
1421 :
1422 92 : if (is_input)
1423 91 : _model = (ml_tensors_data_s *) single_h->in_tensors;
1424 : else
1425 1 : _model = (ml_tensors_data_s *) single_h->out_tensors;
1426 :
1427 92 : if (G_UNLIKELY (_data->num_tensors != _model->num_tensors))
1428 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1429 : "(internal function) The number of %s tensors is not compatible with model. Given: %u, Expected: %u.",
1430 : (is_input) ? "input" : "output", _data->num_tensors,
1431 : _model->num_tensors);
1432 :
1433 335 : for (i = 0; i < _data->num_tensors; i++) {
1434 247 : if (G_UNLIKELY (!_data->tensors[i].data))
1435 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1436 : "The %d-th input tensor is not valid. There is no valid dimension metadata for this tensor.",
1437 : i);
1438 :
1439 246 : if (single_h->invoke_dynamic) {
1440 : /* If tensor is not static, we cannot check tensor data size. */
1441 0 : continue;
1442 : }
1443 :
1444 246 : raw_size = _model->tensors[i].size;
1445 246 : if (G_UNLIKELY (_data->tensors[i].size != raw_size))
1446 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1447 : "The size of %d-th %s tensor is not compatible with model. Given: %zu, Expected: %zu.",
1448 : i, (is_input) ? "input" : "output", _data->tensors[i].size, raw_size);
1449 : }
1450 :
1451 88 : return ML_ERROR_NONE;
1452 : }
1453 :
1454 : /**
1455 : * @brief Internal function to invoke the model.
1456 : *
1457 : * @details State changes performed by this function:
1458 : * IDLE -> RUNNING - on receiving a valid request
1459 : *
1460 : * Invoke returns error if the current state is not IDLE.
1461 : * If IDLE, then invoke is requested to the thread.
1462 : * Invoke waits for the processing to be complete, and returns back
1463 : * the result once notified by the processing thread.
1464 : *
1465 : * @note IDLE is the valid thread state before and after this function call.
 : * @param[in] single The single handle.
 : * @param[in] input Input data; cloned internally before the actual invoke.
 : * @param[in,out] output When need_alloc is TRUE, receives a newly allocated
 : *                result; when FALSE, must already point to a caller buffer.
 : * @param[in] need_alloc TRUE to allocate the output here, FALSE to reuse *output.
 : * @return ML_ERROR_NONE, or ML_ERROR_TRY_AGAIN / ML_ERROR_TIMED_OUT /
 : *         ML_ERROR_STREAMS_PIPE / ML_ERROR_INVALID_PARAMETER on failure.
1466 : */
1467 : static int
1468 104 : _ml_single_invoke_internal (ml_single_h single,
1469 : const ml_tensors_data_h input, ml_tensors_data_h * output,
1470 : const gboolean need_alloc)
1471 : {
1472 : ml_single *single_h;
1473 : ml_tensors_data_h _in, _out;
1474 : gint64 end_time;
1475 104 : int status = ML_ERROR_NONE;
1476 :
1477 208 : check_feature_state (ML_FEATURE_INFERENCE);
1478 :
1479 104 : if (G_UNLIKELY (!single))
1480 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1481 : "(internal function) The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, usually created by ml_single_open().");
1482 :
1483 102 : if (G_UNLIKELY (!input))
1484 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1485 : "(internal function) The parameter, input (ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h.");
1486 :
1487 101 : if (G_UNLIKELY (!output))
1488 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1489 : "(internal function) The parameter, output (ml_tensors_data_h *), is NULL. It should be a valid pointer to an instance of ml_tensors_data_h to store the inference results.");
1490 :
1491 100 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1492 :
1493 91 : if (G_UNLIKELY (!single_h->filter)) {
1494 0 : _ml_error_report
1495 : ("The tensor_filter element of this single handle (single_h) is not valid. It appears that the handle (ml_single_h single) is not appropriately created by ml_single_open(), user thread has touched its internal data, or the handle is already closed or freed by user.");
1496 0 : status = ML_ERROR_INVALID_PARAMETER;
1497 0 : goto exit;
1498 : }
1499 :
1500 : /* Validate input/output data */
1501 91 : status = _ml_single_invoke_validate_data (single, input, TRUE);
1502 91 : if (status != ML_ERROR_NONE) {
1503 4 : _ml_error_report_continue
1504 : ("The input data for the inference is not valid: error code %d. Please check the dimensions, type, number-of-tensors, and size information of the input data.",
1505 : status);
1506 4 : goto exit;
1507 : }
1508 :
1509 87 : if (!need_alloc) {
1510 1 : status = _ml_single_invoke_validate_data (single, *output, FALSE);
1511 1 : if (status != ML_ERROR_NONE) {
1512 0 : _ml_error_report_continue
1513 : ("The output data buffer provided by the user is not valid for the given neural network mode: error code %d. Please check the dimensions, type, number-of-tensors, and size information of the output data buffer.",
1514 : status);
1515 0 : goto exit;
1516 : }
1517 : }
1518 :
1519 87 : if (single_h->state != IDLE) {
1520 7 : if (G_UNLIKELY (single_h->state == JOIN_REQUESTED)) {
1521 0 : _ml_error_report
1522 : ("The handle (single_h single) is closed or being closed awaiting for the last ongoing invocation. Invoking with such a handle is not allowed. Please open another single_h handle to invoke.");
1523 0 : status = ML_ERROR_STREAMS_PIPE;
1524 0 : goto exit;
1525 : }
1526 7 : _ml_error_report
1527 : ("The handle (single_h single) is busy. There is another thread waiting for inference results with this handle. Please retry invoking again later when the handle becomes idle after completing the current inference task.");
1528 7 : status = ML_ERROR_TRY_AGAIN;
1529 7 : goto exit;
1530 : }
1531 :
1532 : /* prepare output data */
1533 80 : if (need_alloc) {
1534 79 : *output = NULL;
1535 :
1536 79 : status = _ml_tensors_data_clone_no_alloc (single_h->out_tensors, &_out);
1537 79 : if (status != ML_ERROR_NONE)
1538 0 : goto exit;
1539 : } else {
1540 1 : _out = *output;
1541 : }
1542 :
1543 : /**
1544 : * Clone input data here to prevent use-after-free case.
1545 : * We should release single_h->input after calling __invoke() function.
1546 : */
1547 80 : status = ml_tensors_data_clone (input, &_in);
1548 80 : if (status != ML_ERROR_NONE)
1549 0 : goto exit;
1550 :
1551 80 : single_h->state = RUNNING;
1552 80 : single_h->free_output = need_alloc;
1553 80 : single_h->input = _in;
1554 80 : single_h->output = _out;
1555 :
 : /* timeout > 0: delegate to invoke_thread and wait with a deadline;
 : * timeout == 0: invoke synchronously on this thread (no wait). */
1556 80 : if (single_h->timeout > 0) {
1557 : /* Wake up "invoke_thread" */
1558 23 : g_cond_broadcast (&single_h->cond);
1559 :
1560 : /* set timeout */
1561 23 : end_time = g_get_monotonic_time () +
1562 23 : single_h->timeout * G_TIME_SPAN_MILLISECOND;
1563 :
1564 23 : if (g_cond_wait_until (&single_h->cond, &single_h->mutex, end_time)) {
1565 19 : status = single_h->status;
1566 : } else {
1567 4 : _ml_logw ("Wait for invoke has timed out");
1568 4 : status = ML_ERROR_TIMED_OUT;
1569 : /** This is set to notify invoke_thread to not process if timed out */
1570 4 : if (need_alloc)
1571 4 : set_destroy_notify (single_h, _out, TRUE);
1572 : }
1573 : } else {
1574 : /**
1575 : * Don't worry. We have locked single_h->mutex, thus there is no
1576 : * other thread with ml_single_invoke function on the same handle
1577 : * that are in this if-then-else block, which means that there is
1578 : * no other thread with active invoke-thread (calling __invoke())
1579 : * with the same handle. Thus we can call __invoke without
1580 : * having yet another mutex for __invoke.
1581 : */
1582 57 : single_h->invoking = TRUE;
1583 57 : status = __invoke (single_h, _in, _out, need_alloc);
1584 57 : ml_tensors_data_destroy (_in);
1585 57 : single_h->invoking = FALSE;
1586 57 : single_h->state = IDLE;
1587 :
1588 57 : if (status != ML_ERROR_NONE) {
1589 0 : if (need_alloc)
1590 0 : ml_tensors_data_destroy (_out);
1591 0 : goto exit;
1592 : }
1593 :
1594 57 : if (need_alloc)
1595 56 : __process_output (single_h, _out);
1596 : }
1597 :
1598 1 : exit:
1599 91 : if (status == ML_ERROR_NONE) {
1600 76 : if (need_alloc)
1601 75 : *output = _out;
1602 : }
1603 :
1604 91 : single_h->input = single_h->output = NULL;
1605 91 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1606 91 : return status;
1607 : }
1608 :
1609 : /**
1610 : * @brief Invokes the model with the given input data.
 : * @details Allocates the output data internally (need_alloc = TRUE); the caller
 : *          owns and must destroy *output on success.
1611 : */
1612 : int
1613 103 : ml_single_invoke (ml_single_h single,
1614 : const ml_tensors_data_h input, ml_tensors_data_h * output)
1615 : {
1616 103 : return _ml_single_invoke_internal (single, input, output, TRUE);
1617 : }
1618 :
1619 : /**
1620 : * @brief Invokes the model with the given input data and fills the output data handle.
 : * @details Uses the caller-provided output buffer (need_alloc = FALSE); the
 : *          buffer is validated against the model's output tensors first.
1621 : */
1622 : int
1623 1 : ml_single_invoke_fast (ml_single_h single,
1624 : const ml_tensors_data_h input, ml_tensors_data_h output)
1625 : {
1626 1 : return _ml_single_invoke_internal (single, input, &output, FALSE);
1627 : }
1628 :
1629 : /**
1630 : * @brief Gets the tensors info for the given handle.
1631 : * @param[out] info A pointer to a NULL (unallocated) instance.
 : * @param[in] single The single handle to query.
 : * @param[in] is_input TRUE for input info, FALSE for output info.
 : * @return ML_ERROR_NONE on success; caller owns *info and must destroy it.
1632 : */
1633 : static int
1634 61 : ml_single_get_tensors_info (ml_single_h single, gboolean is_input,
1635 : ml_tensors_info_h * info)
1636 : {
1637 : ml_single *single_h;
1638 61 : int status = ML_ERROR_NONE;
1639 :
1640 61 : check_feature_state (ML_FEATURE_INFERENCE);
1641 :
1642 61 : if (!single)
1643 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1644 : "(internal function) The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
1645 61 : if (!info)
1646 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1647 : "(internal function) The parameter, 'info' (ml_tensors_info_h *) is NULL. It should be a valid pointer to an empty (NULL) instance of ml_tensor_info_h, which is supposed to be filled with the fetched info by this function.");
1648 :
1649 61 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1650 :
 : /* Copy out of the handle-owned GstTensorsInfo into a fresh ml_tensors_info_h. */
1651 61 : if (is_input)
1652 39 : status = _ml_tensors_info_create_from_gst (info, &single_h->in_info);
1653 : else
1654 22 : status = _ml_tensors_info_create_from_gst (info, &single_h->out_info);
1655 :
1656 61 : if (status != ML_ERROR_NONE) {
1657 0 : _ml_error_report_continue
1658 : ("(internal function) Failed to create an entry for the ml_tensors_info_h instance. Error code: %d",
1659 : status);
1660 : }
1661 :
1662 61 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1663 61 : return status;
1664 : }
1665 :
1666 : /**
1667 : * @brief Gets the information of required input data for the given handle.
1668 : * @note information = (tensor dimension, type, name and so on)
 : * @details Thin wrapper over ml_single_get_tensors_info() with is_input = TRUE.
1669 : */
1670 : int
1671 39 : ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info)
1672 : {
1673 39 : return ml_single_get_tensors_info (single, TRUE, info);
1674 : }
1675 :
1676 : /**
1677 : * @brief Gets the information of output data for the given handle.
1678 : * @note information = (tensor dimension, type, name and so on)
 : * @details Thin wrapper over ml_single_get_tensors_info() with is_input = FALSE.
1679 : */
1680 : int
1681 22 : ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
1682 : {
1683 22 : return ml_single_get_tensors_info (single, FALSE, info);
1684 : }
1685 :
1686 : /**
1687 : * @brief Sets the maximum amount of time to wait for an output, in milliseconds.
 : * @details A timeout of 0 (the default, SINGLE_DEFAULT_TIMEOUT) makes invoke run
 : *          synchronously without a deadline; a positive value routes invocation
 : *          through the invoke thread with a g_cond_wait_until() deadline.
1688 : */
1689 : int
1690 19 : ml_single_set_timeout (ml_single_h single, unsigned int timeout)
1691 : {
1692 : ml_single *single_h;
1693 :
1694 19 : check_feature_state (ML_FEATURE_INFERENCE);
1695 :
1696 19 : if (!single)
1697 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1698 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1699 :
1700 19 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1701 :
1702 19 : single_h->timeout = (guint) timeout;
1703 :
1704 19 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1705 19 : return ML_ERROR_NONE;
1706 : }
1707 :
1708 : /**
1709 : * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model.
1710 : */
1711 : int
1712 17 : ml_single_set_input_info (ml_single_h single, const ml_tensors_info_h info)
1713 : {
1714 : ml_single *single_h;
1715 : GstTensorsInfo gst_info;
1716 17 : int status = ML_ERROR_NONE;
1717 :
1718 34 : check_feature_state (ML_FEATURE_INFERENCE);
1719 :
1720 17 : if (!single)
1721 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1722 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1723 17 : if (!info)
1724 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1725 : "The parameter, info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensors_info_h, which is usually created by ml_tensors_info_create() or other APIs.");
1726 :
1727 15 : if (!ml_tensors_info_is_valid (info))
1728 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1729 : "The parameter, info (const ml_tensors_info_h), is not valid. Although it is not NULL, the content of 'info' is invalid. If it is created by ml_tensors_info_create(), which creates an empty instance, it should be filled by users afterwards. Please check if 'info' has all elements filled with valid values.");
1730 :
1731 14 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1732 14 : _ml_tensors_info_copy_from_ml (&gst_info, info);
1733 14 : status = ml_single_set_gst_info (single_h, &gst_info);
1734 14 : gst_tensors_info_free (&gst_info);
1735 14 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1736 :
1737 14 : if (status != ML_ERROR_NONE)
1738 5 : _ml_error_report_continue
1739 : ("ml_single_set_gst_info() has failed to configure the single_h handle with the given info. Error code: %d",
1740 : status);
1741 :
1742 14 : return status;
1743 : }
1744 :
/**
 * @brief Invokes the model with the given input data with the given info.
 * @details Temporarily reconfigures the opened handle with @a in_info, runs a
 *          single inference, and returns the result with its metadata.
 *          If the invoke itself fails, the previous input configuration is
 *          restored before returning.
 * @param[in] single The single handle, opened by ml_single_open().
 * @param[in] input The input data frame to be inferred.
 * @param[in] in_info The metadata describing @a input.
 * @param[out] output The inference result; on success the caller owns it and
 *             should destroy it with ml_tensors_data_destroy().
 * @param[out] out_info The output metadata; on success the caller owns it and
 *             should destroy it with ml_tensors_info_destroy(). Set to NULL
 *             again if this function fails after allocating it.
 * @return 0 on success. Otherwise a negative error value.
 */
int
ml_single_invoke_dynamic (ml_single_h single,
    const ml_tensors_data_h input, const ml_tensors_info_h in_info,
    ml_tensors_data_h * output, ml_tensors_info_h * out_info)
{
  int status;
  ml_tensors_info_h cur_in_info = NULL;

  if (!single)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
  if (!input)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, input (const ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h with input data frame for inference.");
  if (!in_info)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, in_info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensor_info_h that describes metadata of the given input for inference (input).");
  if (!output)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, output (ml_tensors_data_h *), is NULL. It should be a pointer to an empty (NULL or do-not-care) instance of ml_tensors_data_h, which is filled by this API with the result of inference.");
  if (!out_info)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, out_info (ml_tensors_info_h *), is NULL. It should be a pointer to an empty (NULL or do-not-care) instance of ml_tensors_info_h, which is filled by this API with the neural network model info.");

  /* init null */
  *output = NULL;
  *out_info = NULL;

  /* Snapshot the current input config so it can be restored on invoke failure. */
  status = ml_single_get_input_info (single, &cur_in_info);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_continue
        ("Failed to get input metadata configured by the opened single_h handle instance. Error code: %d.",
        status);
    goto exit;
  }
  /* Apply the caller-supplied input config; this also fills *out_info. */
  status = ml_single_update_info (single, in_info, out_info);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_continue
        ("Failed to reconfigure the opened single_h handle instance with the updated input/output metadata. Error code: %d.",
        status);
    goto exit;
  }

  status = ml_single_invoke (single, input, output);
  if (status != ML_ERROR_NONE) {
    /* Roll back to the configuration captured above. */
    ml_single_set_input_info (single, cur_in_info);
    if (status != ML_ERROR_TRY_AGAIN) {
      /* If it's TRY_AGAIN, ml_single_invoke() has already given enough info. */
      _ml_error_report_continue
          ("Invoking the given neural network has failed. Error code: %d.",
          status);
    }
  }

exit:
  if (cur_in_info)
    ml_tensors_info_destroy (cur_in_info);

  /* On any failure, do not leak a partially-filled out_info to the caller. */
  if (status != ML_ERROR_NONE) {
    if (*out_info) {
      ml_tensors_info_destroy (*out_info);
      *out_info = NULL;
    }
  }

  return status;
}
1815 :
/**
 * @brief Sets the property value for the given model.
 * @details Supported keys: "is-updatable" (boolean string "true"/"false"),
 *          the tensor-config keys "input", "inputtype", "inputname",
 *          "output", "outputtype", "outputname" (parsed and applied via
 *          ml_single_set_gst_info()), and any other key is forwarded
 *          directly to the underlying filter object.
 * @param[in] single The single handle, opened by ml_single_open().
 * @param[in] name The property key.
 * @param[in] value The property value as a string; must not be NULL for the
 *            keys handled explicitly below.
 * @return 0 on success. Otherwise a negative error value.
 */
int
ml_single_set_property (ml_single_h single, const char *name, const char *value)
{
  ml_single *single_h;
  int status = ML_ERROR_NONE;
  char *old_value = NULL;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (!single)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
  if (!name)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, name (const char *), is NULL. It should be a valid string representing a property key.");

  /* get old value, also check the property is updatable. */
  _ml_error_report_return_continue_iferr
      (ml_single_get_property (single, name, &old_value),
      "Cannot fetch the previous value for the given property name, '%s'. It appears that the property key, '%s', is invalid (not supported).",
      name, name);

  /* if sets same value, do not change. */
  if (old_value && value && g_ascii_strcasecmp (old_value, value) == 0) {
    g_free (old_value);
    return ML_ERROR_NONE;
  }

  ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);

  /* update property */
  if (g_str_equal (name, "is-updatable")) {
    if (!value)
      goto error;

    /* boolean: only touch the filter when the value actually changes. */
    if (g_ascii_strcasecmp (value, "true") == 0) {
      if (g_ascii_strcasecmp (old_value, "true") != 0)
        g_object_set (G_OBJECT (single_h->filter), name, (gboolean) TRUE, NULL);
    } else if (g_ascii_strcasecmp (value, "false") == 0) {
      if (g_ascii_strcasecmp (old_value, "false") != 0)
        g_object_set (G_OBJECT (single_h->filter), name, (gboolean) FALSE,
            NULL);
    } else {
      _ml_error_report
          ("The property value, '%s', is not appropriate for a boolean property 'is-updatable'. It should be either 'true' or 'false'.",
          value);
      status = ML_ERROR_INVALID_PARAMETER;
    }
  } else if (g_str_equal (name, "input") || g_str_equal (name, "inputtype")
      || g_str_equal (name, "inputname") || g_str_equal (name, "output")
      || g_str_equal (name, "outputtype") || g_str_equal (name, "outputname")) {
    GstTensorsInfo gst_info;
    gboolean is_input = g_str_has_prefix (name, "input");
    guint num;

    if (!value)
      goto error;

    /* Start from the current config, then overwrite the requested field. */
    ml_single_get_gst_info (single_h, is_input, &gst_info);

    if (g_str_has_suffix (name, "type"))
      num = gst_tensors_info_parse_types_string (&gst_info, value);
    else if (g_str_has_suffix (name, "name"))
      num = gst_tensors_info_parse_names_string (&gst_info, value);
    else
      num = gst_tensors_info_parse_dimensions_string (&gst_info, value);

    /* The parse is accepted only if it covered every declared tensor. */
    if (num == gst_info.num_tensors) {
      /* change configuration */
      status = ml_single_set_gst_info (single_h, &gst_info);
    } else {
      _ml_error_report
          ("The property value, '%s', is not appropriate for the given property key, '%s'. The API has failed to parse the given property value.",
          value, name);
      status = ML_ERROR_INVALID_PARAMETER;
    }

    gst_tensors_info_free (&gst_info);
  } else {
    /* Unrecognized keys are passed through to the filter as-is. */
    g_object_set (G_OBJECT (single_h->filter), name, value, NULL);
  }
  goto done;
error:
  _ml_error_report
      ("The parameter, value (const char *), is NULL. It should be a valid string representing the value to be set for the given property key, '%s'",
      name);
  status = ML_ERROR_INVALID_PARAMETER;
done:
  ML_SINGLE_HANDLE_UNLOCK (single_h);

  g_free (old_value);
  return status;
}
1913 :
1914 : /**
1915 : * @brief Gets the property value for the given model.
1916 : */
1917 : int
1918 27 : ml_single_get_property (ml_single_h single, const char *name, char **value)
1919 : {
1920 : ml_single *single_h;
1921 27 : int status = ML_ERROR_NONE;
1922 :
1923 27 : check_feature_state (ML_FEATURE_INFERENCE);
1924 :
1925 27 : if (!single)
1926 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1927 : "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
1928 27 : if (!name)
1929 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1930 : "The parameter, name (const char *), is NULL. It should be a valid string representing a property key.");
1931 26 : if (!value)
1932 1 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1933 : "The parameter, value (const char *), is NULL. It should be a valid string representing the value to be set for the given property key, '%s'",
1934 : name);
1935 :
1936 : /* init null */
1937 25 : *value = NULL;
1938 :
1939 25 : ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
1940 :
1941 25 : if (g_str_equal (name, "input") || g_str_equal (name, "output") ||
1942 8 : g_str_equal (name, "inputtype") || g_str_equal (name, "inputname") ||
1943 8 : g_str_equal (name, "inputlayout") || g_str_equal (name, "outputtype") ||
1944 7 : g_str_equal (name, "outputname") || g_str_equal (name, "outputlayout") ||
1945 7 : g_str_equal (name, "accelerator") || g_str_equal (name, "custom")) {
1946 : /* string */
1947 18 : g_object_get (G_OBJECT (single_h->filter), name, value, NULL);
1948 7 : } else if (g_str_equal (name, "is-updatable")) {
1949 5 : gboolean bool_value = FALSE;
1950 :
1951 : /* boolean */
1952 5 : g_object_get (G_OBJECT (single_h->filter), name, &bool_value, NULL);
1953 10 : *value = (bool_value) ? g_strdup ("true") : g_strdup ("false");
1954 : } else {
1955 2 : _ml_error_report
1956 : ("The property key, '%s', is not available for get_property and not recognized by the API. It should be one of {input, inputtype, inputname, inputlayout, output, outputtype, outputname, outputlayout, accelerator, custom, is-updatable}.",
1957 : name);
1958 2 : status = ML_ERROR_NOT_SUPPORTED;
1959 : }
1960 :
1961 25 : ML_SINGLE_HANDLE_UNLOCK (single_h);
1962 25 : return status;
1963 : }
1964 :
1965 : /**
1966 : * @brief Internal helper function to validate model files.
1967 : */
1968 : static int
1969 90 : __ml_validate_model_file (const char *const *model,
1970 : const unsigned int num_models, gboolean * is_dir)
1971 : {
1972 : guint i;
1973 :
1974 90 : if (!model)
1975 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1976 : "The parameter, model, is NULL. It should be a valid array of strings, where each string is a valid file path for a neural network model file.");
1977 90 : if (num_models < 1)
1978 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1979 : "The parameter, num_models, is 0. It should be the number of files for the given neural network model.");
1980 :
1981 90 : if (g_file_test (model[0], G_FILE_TEST_IS_DIR)) {
1982 4 : *is_dir = TRUE;
1983 4 : return ML_ERROR_NONE;
1984 : }
1985 :
1986 169 : for (i = 0; i < num_models; i++) {
1987 86 : if (!model[i] ||
1988 86 : !g_file_test (model[i], G_FILE_TEST_EXISTS | G_FILE_TEST_IS_REGULAR)) {
1989 3 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
1990 : "The given param, model path [%d] = \"%s\" is invalid or the file is not found or accessible.",
1991 : i, _STR_NULL (model[i]));
1992 : }
1993 : }
1994 :
1995 83 : *is_dir = FALSE;
1996 :
1997 83 : return ML_ERROR_NONE;
1998 : }
1999 :
/**
 * @brief Validates the nnfw model file.
 * @since_tizen 5.5
 * @param[in] model The path of model file.
 * @param[in] num_models The number of model file paths in @a model.
 * @param[in/out] nnfw The type of NNFW. If ML_NNFW_TYPE_ANY is given, it is
 *                replaced with the framework auto-detected from the model
 *                files; otherwise it is validated against the detected type
 *                and the file extensions.
 * @return @c 0 on success. Otherwise a negative error value.
 * @retval #ML_ERROR_NONE Successful
 * @retval #ML_ERROR_NOT_SUPPORTED Not supported, or framework to support this model file is unavailable in the environment.
 * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
 */
int
_ml_validate_model_file (const char *const *model,
    const unsigned int num_models, ml_nnfw_type_e * nnfw)
{
  int status = ML_ERROR_NONE;
  ml_nnfw_type_e detected = ML_NNFW_TYPE_ANY;
  gboolean is_dir = FALSE;
  gchar *pos, *fw_name;
  gchar **file_ext = NULL;
  guint i;

  if (!nnfw)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, nnfw, is NULL. It should be a valid pointer of ml_nnfw_type_e.");

  _ml_error_report_return_continue_iferr (__ml_validate_model_file (model,
          num_models, &is_dir),
      "The parameters, model and num_models, are not valid.");

  /**
   * @note detect-fw checks the file ext and returns proper fw name for given models.
   * If detected fw and given nnfw are same, we don't need to check the file extension.
   * If any condition for auto detection is added later, below code also should be updated.
   */
  fw_name = gst_tensor_filter_detect_framework (model, num_models, TRUE);
  detected = _ml_get_nnfw_type_by_subplugin_name (fw_name);
  g_free (fw_name);

  if (*nnfw == ML_NNFW_TYPE_ANY) {
    /* Caller asked for auto-detection; fail if nothing was detected. */
    if (detected == ML_NNFW_TYPE_ANY) {
      _ml_error_report
          ("The given neural network model (1st path is \"%s\", and there are %d paths declared) has unknown or unsupported extension. Please check its corresponding neural network framework and try to specify it instead of \"ML_NNFW_TYPE_ANY\".",
          model[0], num_models);
      status = ML_ERROR_INVALID_PARAMETER;
    } else {
      _ml_logi ("The given model is supposed a %s model.",
          _ml_get_nnfw_subplugin_name (detected));
      *nnfw = detected;
    }

    goto done;
  } else if (is_dir && *nnfw != ML_NNFW_TYPE_NNFW) {
    /* supposed it is ONE if given model is directory */
    _ml_error_report
        ("The given model (1st path is \"%s\", and there are %d paths declared) is directory, which is allowed by \"NNFW (One Runtime)\" only, Please check the model and framework.",
        model[0], num_models);
    status = ML_ERROR_INVALID_PARAMETER;
    goto done;
  } else if (detected == *nnfw) {
    /* Expected framework, nothing to do. */
    goto done;
  }

  /* Handle mismatched case, check file extension. */
  /* One extra slot keeps the array NULL-terminated for g_strfreev(). */
  file_ext = g_malloc0 (sizeof (char *) * (num_models + 1));
  for (i = 0; i < num_models; i++) {
    if ((pos = strrchr (model[i], '.')) == NULL) {
      _ml_error_report ("The given model [%d]=\"%s\" has invalid extension.", i,
          model[i]);
      status = ML_ERROR_INVALID_PARAMETER;
      goto done;
    }

    /* Lower-cased extension (including the dot) for case-insensitive match. */
    file_ext[i] = g_ascii_strdown (pos, -1);
  }

  /** @todo Make sure num_models is correct for each nnfw type */
  switch (*nnfw) {
    case ML_NNFW_TYPE_NNFW:
    case ML_NNFW_TYPE_TVM:
    case ML_NNFW_TYPE_ONNX_RUNTIME:
    case ML_NNFW_TYPE_NCNN:
    case ML_NNFW_TYPE_TENSORRT:
    case ML_NNFW_TYPE_QNN:
    case ML_NNFW_TYPE_LLAMACPP:
    case ML_NNFW_TYPE_TIZEN_HAL:
      /**
       * We cannot check the file ext with NNFW.
       * NNFW itself will validate metadata and model file.
       */
      break;
    case ML_NNFW_TYPE_MVNC:
    case ML_NNFW_TYPE_OPENVINO:
    case ML_NNFW_TYPE_EDGE_TPU:
      /**
       * @todo Need to check method to validate model
       * Although nnstreamer supports these frameworks,
       * ML-API implementation is not ready.
       */
      _ml_error_report
          ("Given NNFW is not supported by ML-API Inference.Single, yet, although it is supported by NNStreamer. If you have such NNFW integrated into your machine and want to access via ML-API, please update the corresponding implementation or report and discuss at github.com/nnstreamer/nnstreamer/issues.");
      status = ML_ERROR_NOT_SUPPORTED;
      break;
    case ML_NNFW_TYPE_VD_AIFW:
      /* Accepted extensions for VD_AIFW models. */
      if (!g_str_equal (file_ext[0], ".nb") &&
          !g_str_equal (file_ext[0], ".ncp") &&
          !g_str_equal (file_ext[0], ".tvn") &&
          !g_str_equal (file_ext[0], ".bin")) {
        status = ML_ERROR_INVALID_PARAMETER;
      }
      break;
    case ML_NNFW_TYPE_SNAP:
#if !defined (__ANDROID__)
      _ml_error_report ("SNAP is supported by Android/arm64-v8a devices only.");
      status = ML_ERROR_NOT_SUPPORTED;
#endif
      /* SNAP requires multiple files, set supported if model file exists. */
      break;
    case ML_NNFW_TYPE_ARMNN:
      if (!g_str_equal (file_ext[0], ".caffemodel") &&
          !g_str_equal (file_ext[0], ".tflite") &&
          !g_str_equal (file_ext[0], ".pb") &&
          !g_str_equal (file_ext[0], ".prototxt")) {
        _ml_error_report
            ("ARMNN accepts .caffemodel, .tflite, .pb, and .prototxt files only. Please support correct file extension. You have specified: \"%s\"",
            file_ext[0]);
        status = ML_ERROR_INVALID_PARAMETER;
      }
      break;
    case ML_NNFW_TYPE_MXNET:
      /* Accepted extensions for MXNet models. */
      if (!g_str_equal (file_ext[0], ".params") &&
          !g_str_equal (file_ext[0], ".json")) {
        status = ML_ERROR_INVALID_PARAMETER;
      }
      break;
    default:
      _ml_error_report
          ("You have designated an incorrect neural network framework (out of bound).");
      status = ML_ERROR_INVALID_PARAMETER;
      break;
  }

done:
  if (status == ML_ERROR_NONE) {
    /* Extension checks passed; now confirm the subplugin is installed. */
    if (!_ml_nnfw_is_available (*nnfw, ML_NNFW_HW_ANY)) {
      status = ML_ERROR_NOT_SUPPORTED;
      _ml_error_report
          ("The subplugin for tensor-filter \"%s\" is not available. Please install the corresponding tensor-filter subplugin file (usually, \"libnnstreamer_filter_${NAME}.so\") at the correct path. Please use \"nnstreamer-check\" utility to check related configurations. If you do not have the utility ready, build and install \"confchk\", which is located at ${nnstreamer_source}/tools/development/confchk/ .",
          _ml_get_nnfw_subplugin_name (*nnfw));
    }
  } else {
    _ml_error_report
        ("The given model file, \"%s\" (1st of %d files), is invalid.",
        model[0], num_models);
  }

  /* file_ext may be NULL here (early goto); g_strfreev(NULL) is a no-op. */
  g_strfreev (file_ext);
  return status;
}
|