LCOV - code coverage report
Current view: top level - capi-machine-learning-inference-1.8.6/c/src - ml-api-inference-single.c (source / functions) Coverage Total Hit
Test: ML API 1.8.6-0 nnstreamer/api#abde31caf90ada0ea14929b563b6d19f563740eb Lines: 74.6 % 871 650
Test Date: 2025-08-15 05:27:32 Functions: 92.3 % 39 36

            Line data    Source code
       1              : /* SPDX-License-Identifier: Apache-2.0 */
       2              : /**
       3              :  * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved.
       4              :  *
       5              :  * @file ml-api-inference-single.c
       6              :  * @date 29 Aug 2019
       7              :  * @brief NNStreamer/Single C-API Wrapper.
       8              :  *        This allows to invoke individual input frame with NNStreamer.
       9              :  * @see https://github.com/nnstreamer/nnstreamer
      10              :  * @author MyungJoo Ham <myungjoo.ham@samsung.com>
      11              :  * @author Parichay Kapoor <pk.kapoor@samsung.com>
      12              :  * @bug No known bugs except for NYI items
      13              :  */
      14              : 
      15              : #include <string.h>
      16              : #include <nnstreamer-single.h>
      17              : #include <nnstreamer-tizen-internal.h>  /* Tizen platform header */
      18              : #include <nnstreamer_internal.h>
      19              : #include <nnstreamer_plugin_api_util.h>
      20              : #include <tensor_filter_single.h>
      21              : 
      22              : #include "ml-api-inference-internal.h"
      23              : #include "ml-api-internal.h"
      24              : #include "ml-api-inference-single-internal.h"
      25              : 
/** Magic value stored in every live handle; cleared to 0 on close so stale or foreign pointers are rejected by ML_SINGLE_GET_VALID_HANDLE_LOCKED. */
#define ML_SINGLE_MAGIC 0xfeedfeed

/**
 * @brief Default time to wait for an output in milliseconds (0 will wait for the output).
 */
#define SINGLE_DEFAULT_TIMEOUT 0

/**
 * @brief Global lock for single shot API
 * @detail This lock ensures that ml_single_close is thread safe. All other API
 *         functions use the mutex from the single handle. However for close,
 *         single handle mutex cannot be used as single handle is destroyed at
 *         close
 * @note This mutex is automatically initialized as it is statically declared
 */
G_LOCK_DEFINE_STATIC (magic);
      42              : 
/**
 * @brief Get valid handle after magic verification
 * @note handle's mutex (single_h->mutex) is acquired after this; pair every
 *       use with ML_SINGLE_HANDLE_UNLOCK.
 * @note On magic mismatch this macro expands to a statement that does
 *       'return ML_ERROR_INVALID_PARAMETER;', so it may only appear in
 *       functions returning int.
 * @note The global 'magic' lock is held only while the magic is verified
 *       (and optionally reset); it is released after the handle mutex is taken.
 * @param[out] single_h The handle properly casted: (ml_single *).
 * @param[in] single The handle to be validated: (void *).
 * @param[in] reset Set TRUE if the handle is to be reset (magic = 0).
 */
#define ML_SINGLE_GET_VALID_HANDLE_LOCKED(single_h, single, reset) do { \
  G_LOCK (magic); \
  single_h = (ml_single *) single; \
  if (G_UNLIKELY(single_h->magic != ML_SINGLE_MAGIC)) { \
    _ml_error_report \
        ("The given param, %s (ml_single_h), is invalid. It is not a single_h instance or the user thread has modified it.", \
        #single); \
    G_UNLOCK (magic); \
    return ML_ERROR_INVALID_PARAMETER; \
  } \
  if (G_UNLIKELY(reset)) \
    single_h->magic = 0; \
  g_mutex_lock (&single_h->mutex); \
  G_UNLOCK (magic); \
} while (0)
      65              : 
/**
 * @brief This is for the symmetry with ML_SINGLE_GET_VALID_HANDLE_LOCKED
 * @param[in] single_h The casted handle (ml_single *).
 */
#define ML_SINGLE_HANDLE_UNLOCK(single_h) g_mutex_unlock (&single_h->mutex);

/** define string names for input/output */
#define INPUT_STR "input"
#define OUTPUT_STR "output"
#define TYPE_STR "type"
#define NAME_STR "name"

/** concat string from #define (relies on C adjacent string-literal concatenation) */
#define CONCAT_MACRO_STR(STR1,STR2) STR1 STR2
      80              : 
/**
 * States for invoke thread.
 * @note The numeric ordering matters: invoke_thread loops while
 *       'state <= RUNNING', so JOIN_REQUESTED must stay the largest value.
 */
typedef enum
{
  IDLE = 0,           /**< ready to accept next input */
  RUNNING,            /**< running an input, cannot accept more input */
  JOIN_REQUESTED      /**< should join the thread, will exit soon */
} thread_state;
      88              : 
/**
 * @brief The name of sub-plugin for defined neural net frameworks.
 * @note The sub-plugin for Android is not declared (e.g., snap)
 * @note Indexed by ml_nnfw_type_e through designated initializers; keep the
 *       entries in sync with that enum. The trailing NULL terminates the
 *       table for find_key_strv() lookups.
 */
static const char *ml_nnfw_subplugin_name[] = {
  [ML_NNFW_TYPE_ANY] = "any",   /* DO NOT use this name ('any') to get the sub-plugin */
  [ML_NNFW_TYPE_CUSTOM_FILTER] = "custom",
  [ML_NNFW_TYPE_TENSORFLOW_LITE] = "tensorflow-lite",
  [ML_NNFW_TYPE_TENSORFLOW] = "tensorflow",
  [ML_NNFW_TYPE_NNFW] = "nnfw",
  [ML_NNFW_TYPE_MVNC] = "movidius-ncsdk2",
  [ML_NNFW_TYPE_OPENVINO] = "openvino",
  [ML_NNFW_TYPE_VIVANTE] = "vivante",
  [ML_NNFW_TYPE_EDGE_TPU] = "edgetpu",
  [ML_NNFW_TYPE_ARMNN] = "armnn",
  [ML_NNFW_TYPE_SNPE] = "snpe",
  [ML_NNFW_TYPE_PYTORCH] = "pytorch",
  [ML_NNFW_TYPE_NNTR_INF] = "nntrainer",
  [ML_NNFW_TYPE_VD_AIFW] = "vd_aifw",
  [ML_NNFW_TYPE_TRIX_ENGINE] = "trix-engine",
  [ML_NNFW_TYPE_MXNET] = "mxnet",
  [ML_NNFW_TYPE_TVM] = "tvm",
  [ML_NNFW_TYPE_ONNX_RUNTIME] = "onnxruntime",
  [ML_NNFW_TYPE_NCNN] = "ncnn",
  [ML_NNFW_TYPE_TENSORRT] = "tensorrt",
  [ML_NNFW_TYPE_QNN] = "qnn",
  [ML_NNFW_TYPE_LLAMACPP] = "llamacpp",
  [ML_NNFW_TYPE_TIZEN_HAL] = "tizen-hal",
  NULL
};
     119              : 
/**
 * ML single api data structure for handle.
 * @note 'magic' is verified/cleared under the global 'magic' G_LOCK (see
 *       ML_SINGLE_GET_VALID_HANDLE_LOCKED); the remaining mutable fields are
 *       accessed with 'mutex' held, and 'cond' signals state changes between
 *       the API caller and invoke_thread.
 */
typedef struct
{
  GTensorFilterSingleClass *klass;    /**< tensor filter class structure*/
  GTensorFilterSingle *filter;        /**< tensor filter element */
  GstTensorsInfo in_info;             /**< info about input */
  GstTensorsInfo out_info;            /**< info about output */
  ml_nnfw_type_e nnfw;                /**< nnfw type for this filter */
  guint magic;                        /**< code to verify valid handle */

  GThread *thread;                    /**< thread for invoking */
  GMutex mutex;                       /**< mutex for synchronization */
  GCond cond;                         /**< condition for synchronization */
  ml_tensors_data_h input;            /**< input received from user */
  ml_tensors_data_h output;           /**< output to be sent back to user */
  guint timeout;                      /**< timeout for invoking */
  thread_state state;                 /**< current state of the thread */
  gboolean free_output;               /**< true if output tensors are allocated in single-shot */
  int status;                         /**< status of processing */
  gboolean invoking;                  /**< invoke running flag */
  ml_tensors_data_h in_tensors;       /**< input tensor wrapper for processing */
  ml_tensors_data_h out_tensors;      /**< output tensor wrapper for processing */

  GList *destroy_data_list;           /**< data to be freed by filter */
  gboolean invoke_dynamic;            /**< true to invoke flexible tensor */
  gboolean invoke_async;              /**< true to invoke and return result asynchronously */
} ml_single;
     147              : 
     148              : /**
     149              :  * @brief Internal function to get the nnfw type.
     150              :  */
     151              : ml_nnfw_type_e
     152           96 : _ml_get_nnfw_type_by_subplugin_name (const char *name)
     153              : {
     154           96 :   ml_nnfw_type_e nnfw_type = ML_NNFW_TYPE_ANY;
     155           96 :   int idx = -1;
     156              : 
     157           96 :   if (name == NULL)
     158            2 :     return ML_NNFW_TYPE_ANY;
     159              : 
     160           94 :   idx = find_key_strv (ml_nnfw_subplugin_name, name);
     161           94 :   if (idx < 0) {
     162              :     /* check sub-plugin for android */
     163            2 :     if (g_ascii_strcasecmp (name, "snap") == 0)
     164            1 :       nnfw_type = ML_NNFW_TYPE_SNAP;
     165              :     else
     166            1 :       _ml_error_report ("Cannot find nnfw, %s is an invalid name.",
     167              :           _STR_NULL (name));
     168              :   } else {
     169           92 :     nnfw_type = (ml_nnfw_type_e) idx;
     170              :   }
     171              : 
     172           94 :   return nnfw_type;
     173              : }
     174              : 
     175              : /**
     176              :  * @brief Internal function to get the sub-plugin name.
     177              :  */
     178              : const char *
     179          370 : _ml_get_nnfw_subplugin_name (ml_nnfw_type_e nnfw)
     180              : {
     181              :   /* check sub-plugin for android */
     182          370 :   if (nnfw == ML_NNFW_TYPE_SNAP)
     183            1 :     return "snap";
     184              : 
     185          369 :   return ml_nnfw_subplugin_name[nnfw];
     186              : }
     187              : 
/**
 * @brief Convert c-api based hw to internal representation
 * @param[in] hw The C-API hardware hint (ml_nnfw_hw_e).
 * @return The corresponding internal accl_hw value; unrecognized values fall
 *         back to ACCL_AUTO.
 */
accl_hw
_ml_nnfw_to_accl_hw (const ml_nnfw_hw_e hw)
{
  switch (hw) {
    case ML_NNFW_HW_ANY:
      return ACCL_DEFAULT;
    case ML_NNFW_HW_AUTO:
      return ACCL_AUTO;
    case ML_NNFW_HW_CPU:
      return ACCL_CPU;
#if defined (__aarch64__) || defined (__arm__)
    /* NEON is only exposed on ARM builds; other builds map the SIMD hint. */
    case ML_NNFW_HW_CPU_NEON:
      return ACCL_CPU_NEON;
#else
    case ML_NNFW_HW_CPU_SIMD:
      return ACCL_CPU_SIMD;
#endif
    case ML_NNFW_HW_GPU:
      return ACCL_GPU;
    case ML_NNFW_HW_NPU:
      return ACCL_NPU;
    case ML_NNFW_HW_NPU_MOVIDIUS:
      return ACCL_NPU_MOVIDIUS;
    case ML_NNFW_HW_NPU_EDGE_TPU:
      return ACCL_NPU_EDGE_TPU;
    case ML_NNFW_HW_NPU_VIVANTE:
      return ACCL_NPU_VIVANTE;
    case ML_NNFW_HW_NPU_SLSI:
      return ACCL_NPU_SLSI;
    case ML_NNFW_HW_NPU_SR:
      /** @todo how to get srcn npu */
      return ACCL_NPU_SR;
    default:
      return ACCL_AUTO;
  }
}
     227              : 
     228              : /**
     229              :  * @brief Checks the availability of the given execution environments with custom option.
     230              :  */
     231              : int
     232          193 : ml_check_nnfw_availability_full (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
     233              :     const char *custom, bool *available)
     234              : {
     235          193 :   const char *fw_name = NULL;
     236              : 
     237          193 :   check_feature_state (ML_FEATURE_INFERENCE);
     238              : 
     239          193 :   if (!available)
     240            2 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     241              :         "The parameter, available (bool *), is NULL. It should be a valid pointer of bool. E.g., bool a; ml_check_nnfw_availability_full (..., &a);");
     242              : 
     243              :   /* init false */
     244          191 :   *available = false;
     245              : 
     246          191 :   if (nnfw == ML_NNFW_TYPE_ANY)
     247            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     248              :         "The parameter, nnfw (ml_nnfw_type_e), is ML_NNFW_TYPE_ANY. It should specify the framework to be probed for the hardware availability.");
     249              : 
     250          190 :   fw_name = _ml_get_nnfw_subplugin_name (nnfw);
     251              : 
     252          190 :   if (fw_name) {
     253          190 :     if (nnstreamer_filter_find (fw_name) != NULL) {
     254          189 :       accl_hw accl = _ml_nnfw_to_accl_hw (hw);
     255              : 
     256          189 :       if (gst_tensor_filter_check_hw_availability (fw_name, accl, custom)) {
     257          180 :         *available = true;
     258              :       } else {
     259            9 :         _ml_logi ("%s is supported but not with the specified hardware.",
     260              :             fw_name);
     261              :       }
     262              :     } else {
     263            1 :       _ml_logi ("%s is not supported.", fw_name);
     264              :     }
     265              :   } else {
     266            0 :     _ml_logw ("Cannot get the name of sub-plugin for given nnfw.");
     267              :   }
     268              : 
     269          190 :   return ML_ERROR_NONE;
     270              : }
     271              : 
/**
 * @brief Checks the availability of the given execution environments.
 * @details Thin wrapper over ml_check_nnfw_availability_full() without a
 *          custom option string.
 * @param[in] nnfw The framework to probe.
 * @param[in] hw The hardware hint to probe.
 * @param[out] available Set to true when the framework/hardware is available.
 * @return 0 on success; negative error value for invalid parameters.
 */
int
ml_check_nnfw_availability (ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw,
    bool *available)
{
  return ml_check_nnfw_availability_full (nnfw, hw, NULL, available);
}
     281              : 
     282              : /**
     283              :  * @brief setup input and output tensor memory to pass to the tensor_filter.
     284              :  * @note this tensor memory wrapper will be reused for each invoke.
     285              :  */
     286              : static void
     287           98 : __setup_in_out_tensors (ml_single * single_h)
     288              : {
     289              :   guint i;
     290           98 :   ml_tensors_data_s *in_tensors = (ml_tensors_data_s *) single_h->in_tensors;
     291           98 :   ml_tensors_data_s *out_tensors = (ml_tensors_data_s *) single_h->out_tensors;
     292              : 
     293              :   /* Setup input buffer */
     294           98 :   if (in_tensors) {
     295           20 :     _ml_tensors_info_free (in_tensors->info);
     296           20 :     _ml_tensors_info_copy_from_gst (in_tensors->info, &single_h->in_info);
     297              :   } else {
     298              :     ml_tensors_info_h info;
     299              : 
     300           78 :     _ml_tensors_info_create_from_gst (&info, &single_h->in_info);
     301           78 :     _ml_tensors_data_create_no_alloc (info, &single_h->in_tensors);
     302              : 
     303           78 :     ml_tensors_info_destroy (info);
     304           78 :     in_tensors = (ml_tensors_data_s *) single_h->in_tensors;
     305              :   }
     306              : 
     307           98 :   in_tensors->num_tensors = single_h->in_info.num_tensors;
     308          229 :   for (i = 0; i < in_tensors->num_tensors; i++) {
     309              :     /** memory will be allocated by tensor_filter_single */
     310          131 :     in_tensors->tensors[i].data = NULL;
     311          131 :     in_tensors->tensors[i].size =
     312          131 :         gst_tensors_info_get_size (&single_h->in_info, i);
     313              :   }
     314              : 
     315              :   /* Setup output buffer */
     316           98 :   if (out_tensors) {
     317           20 :     _ml_tensors_info_free (out_tensors->info);
     318           20 :     _ml_tensors_info_copy_from_gst (out_tensors->info, &single_h->out_info);
     319              :   } else {
     320              :     ml_tensors_info_h info;
     321              : 
     322           78 :     _ml_tensors_info_create_from_gst (&info, &single_h->out_info);
     323           78 :     _ml_tensors_data_create_no_alloc (info, &single_h->out_tensors);
     324              : 
     325           78 :     ml_tensors_info_destroy (info);
     326           78 :     out_tensors = (ml_tensors_data_s *) single_h->out_tensors;
     327              :   }
     328              : 
     329           98 :   out_tensors->num_tensors = single_h->out_info.num_tensors;
     330          227 :   for (i = 0; i < out_tensors->num_tensors; i++) {
     331              :     /** memory will be allocated by tensor_filter_single */
     332          129 :     out_tensors->tensors[i].data = NULL;
     333          129 :     out_tensors->tensors[i].size =
     334          129 :         gst_tensors_info_get_size (&single_h->out_info, i);
     335              :   }
     336           98 : }
     337              : 
     338              : /**
     339              :  * @brief To call the framework to destroy the allocated output data
     340              :  */
     341              : static inline void
     342            0 : __destroy_notify (gpointer data_h, gpointer single_data)
     343              : {
     344              :   ml_single *single_h;
     345              :   ml_tensors_data_s *data;
     346              : 
     347            0 :   data = (ml_tensors_data_s *) data_h;
     348            0 :   single_h = (ml_single *) single_data;
     349              : 
     350            0 :   if (G_LIKELY (single_h->filter)) {
     351            0 :     if (single_h->klass->allocate_in_invoke (single_h->filter)) {
     352            0 :       single_h->klass->destroy_notify (single_h->filter, data->tensors);
     353              :     }
     354              :   }
     355              : 
     356              :   /* reset callback function */
     357            0 :   data->destroy = NULL;
     358            0 : }
     359              : 
/**
 * @brief Wrapper function for __destroy_notify
 * @details Registered as ml_tensors_data_s::destroy for framework-allocated
 *          outputs (see set_destroy_notify). Validates both arguments,
 *          unlinks the data from destroy_data_list under the handle lock,
 *          then lets the framework release the memory.
 * @param[in] handle The tensors data handle being destroyed.
 * @param[in] user_data The ml_single_h that produced the data.
 * @return 0 on success; ML_ERROR_INVALID_PARAMETER on any invalid argument.
 */
static int
ml_single_destroy_notify_cb (void *handle, void *user_data)
{
  ml_tensors_data_h data = (ml_tensors_data_h) handle;
  ml_single_h single = (ml_single_h) user_data;
  ml_single *single_h;
  int status = ML_ERROR_NONE;

  if (G_UNLIKELY (!single))
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "Failed to destroy data buffer. Callback function argument from _ml_tensors_data_destroy_internal is invalid. The given 'user_data' is NULL. It appears to be an internal error of ML-API or the user thread has touched private data structure.");
  if (G_UNLIKELY (!data))
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "Failed to destroy data buffer. Callback function argument from _ml_tensors_data_destroy_internal is invalid. The given 'handle' is NULL. It appears to be an internal error of ML-API or the user thread has touched private data structure.");

  /* Acquires single_h->mutex; returns on bad magic (stale/closed handle). */
  ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);

  if (G_UNLIKELY (!single_h->filter)) {
    status = ML_ERROR_INVALID_PARAMETER;
    _ml_error_report
        ("Failed to destroy the data buffer. The handle instance (single_h) is invalid. It appears to be an internal error of ML-API of the user thread has touched private data structure.");
    goto exit;
  }

  single_h->destroy_data_list =
      g_list_remove (single_h->destroy_data_list, data);
  __destroy_notify (data, single_h);

exit:
  ML_SINGLE_HANDLE_UNLOCK (single_h);

  return status;
}
     396              : 
     397              : /**
     398              :  * @brief setup the destroy notify for the allocated output data.
     399              :  * @note this stores the data entry in the single list.
     400              :  * @note this has not overhead if the allocation of output is not performed by
     401              :  * the framework but by tensor filter element.
     402              :  */
     403              : static void
     404           78 : set_destroy_notify (ml_single * single_h, ml_tensors_data_s * data,
     405              :     gboolean add)
     406              : {
     407           78 :   if (single_h->klass->allocate_in_invoke (single_h->filter)) {
     408            0 :     data->destroy = ml_single_destroy_notify_cb;
     409            0 :     data->user_data = single_h;
     410            0 :     add = TRUE;
     411              :   }
     412              : 
     413           78 :   if (add) {
     414            4 :     single_h->destroy_data_list = g_list_append (single_h->destroy_data_list,
     415              :         (gpointer) data);
     416              :   }
     417           78 : }
     418              : 
     419              : /**
     420              :  * @brief Internal function to call subplugin's invoke
     421              :  */
     422              : static inline int
     423           80 : __invoke (ml_single * single_h, ml_tensors_data_h in, ml_tensors_data_h out,
     424              :     gboolean alloc_output)
     425              : {
     426              :   ml_tensors_data_s *in_data, *out_data;
     427           80 :   int status = ML_ERROR_NONE;
     428              : 
     429           80 :   in_data = (ml_tensors_data_s *) in;
     430           80 :   out_data = (ml_tensors_data_s *) out;
     431              : 
     432              :   /* Prevent error case when input or output is null in invoke thread. */
     433           80 :   if (!in_data || !out_data) {
     434            0 :     _ml_error_report ("Failed to invoke a model, invalid data handle.");
     435            0 :     return ML_ERROR_STREAMS_PIPE;
     436              :   }
     437              : 
     438              :   /* Invoke the thread. */
     439           80 :   if (!single_h->klass->invoke (single_h->filter, in_data->tensors,
     440           80 :           out_data->tensors, alloc_output)) {
     441            0 :     const char *fw_name = _ml_get_nnfw_subplugin_name (single_h->nnfw);
     442            0 :     _ml_error_report
     443              :         ("Failed to invoke the tensors. The invoke callback of the tensor-filter subplugin '%s' has failed. Please contact the author of tensor-filter-%s (nnstreamer-%s) or review its source code. Note that this usually happens when the designated framework does not support the given model (e.g., trying to run tf-lite 2.6 model with tf-lite 1.13).",
     444              :         fw_name, fw_name, fw_name);
     445            0 :     status = ML_ERROR_STREAMS_PIPE;
     446              :   }
     447              : 
     448           80 :   return status;
     449              : }
     450              : 
     451              : /**
     452              :  * @brief Internal function to post-process given output.
     453              :  * @note Do not call this if single_h->free_output is false (output data is not allocated in single-shot).
     454              :  */
     455              : static inline void
     456           75 : __process_output (ml_single * single_h, ml_tensors_data_h output)
     457              : {
     458              :   ml_tensors_data_s *out_data;
     459              : 
     460           75 :   if (g_list_find (single_h->destroy_data_list, output)) {
     461              :     /**
     462              :      * Caller of the invoke thread has returned back with timeout.
     463              :      * So, free the memory allocated by the invoke as their is no receiver.
     464              :      */
     465            1 :     single_h->destroy_data_list =
     466            1 :         g_list_remove (single_h->destroy_data_list, output);
     467            1 :     ml_tensors_data_destroy (output);
     468              :   } else {
     469           74 :     out_data = (ml_tensors_data_s *) output;
     470           74 :     set_destroy_notify (single_h, out_data, FALSE);
     471              :   }
     472           75 : }
     473              : 
/**
 * @brief thread to execute calls to invoke
 *
 * @details The thread behavior is detailed as below:
 *          - Starting with IDLE state, the thread waits for an input or change
 *          in state externally.
 *          - If state is not RUNNING, exit this thread, else process the
 *          request.
 *          - Process input, call invoke, process output. Any error in this
 *          state sets the status to be used by ml_single_invoke().
 *          - State is set back to IDLE and thread moves back to start.
 *
 *          State changes performed by this function when:
 *          RUNNING -> IDLE - processing is finished.
 *          JOIN_REQUESTED -> IDLE - close is requested.
 *
 * @note Error while processing an input is provided back to requesting
 *       function, and further processing of invoke_thread is not affected.
 * @note single_h->mutex is held for the whole loop except around __invoke(),
 *       where it is dropped so close/timeout paths can make progress;
 *       'invoking' marks the in-flight call during that window.
 * @param[in] arg The ml_single handle (void *).
 * @return Always NULL.
 */
static void *
invoke_thread (void *arg)
{
  ml_single *single_h;
  ml_tensors_data_h input, output;
  gboolean alloc_output = FALSE;

  single_h = (ml_single *) arg;

  g_mutex_lock (&single_h->mutex);

  while (single_h->state <= RUNNING) {
    int status = ML_ERROR_NONE;

    /** wait for data (cond signaled when state or input changes) */
    while (single_h->state != RUNNING) {
      g_cond_wait (&single_h->cond, &single_h->mutex);
      if (single_h->state == JOIN_REQUESTED)
        goto exit;
    }

    /* Take ownership of the pending request. */
    input = single_h->input;
    output = single_h->output;
    /* Set null to prevent double-free. */
    single_h->input = single_h->output = NULL;

    single_h->invoking = TRUE;
    alloc_output = single_h->free_output;
    /* Unlock while invoking so other API calls (e.g., close) are not blocked. */
    g_mutex_unlock (&single_h->mutex);
    status = __invoke (single_h, input, output, alloc_output);
    g_mutex_lock (&single_h->mutex);
    /* Clear input data after invoke is done. */
    ml_tensors_data_destroy (input);
    single_h->invoking = FALSE;

    if (status != ML_ERROR_NONE || single_h->state == JOIN_REQUESTED) {
      /* On failure or pending close, drop the (possibly allocated) output. */
      if (alloc_output) {
        single_h->destroy_data_list =
            g_list_remove (single_h->destroy_data_list, output);
        ml_tensors_data_destroy (output);
      }

      if (single_h->state == JOIN_REQUESTED)
        goto exit;
      goto wait_for_next;
    }

    if (alloc_output)
      __process_output (single_h, output);

    /** loop over to wait for the next element */
  wait_for_next:
    single_h->status = status;
    if (single_h->state == RUNNING)
      single_h->state = IDLE;
    /* Wake the caller waiting in ml_single_invoke() for this result. */
    g_cond_broadcast (&single_h->cond);
  }

exit:
  /* Do not set IDLE if JOIN_REQUESTED */
  if (single_h->state == JOIN_REQUESTED) {
    /* Release input and output data */
    if (single_h->input)
      ml_tensors_data_destroy (single_h->input);

    if (alloc_output && single_h->output) {
      single_h->destroy_data_list =
          g_list_remove (single_h->destroy_data_list, single_h->output);
      ml_tensors_data_destroy (single_h->output);
    }

    single_h->input = single_h->output = NULL;
  } else if (single_h->state == RUNNING)
    single_h->state = IDLE;
  g_mutex_unlock (&single_h->mutex);
  return NULL;
}
     570              : 
     571              : /**
     572              :  * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model, and get updated output data information.
     573              :  * @details Note that a model/framework may not support setting such information.
     574              :  * @since_tizen 6.0
     575              :  * @param[in] single The model handle.
     576              :  * @param[in] in_info The handle of input tensors information.
     577              :  * @param[out] out_info The handle of output tensors information. The caller is responsible for freeing the information with ml_tensors_info_destroy().
     578              :  * @return @c 0 on success. Otherwise a negative error value.
     579              :  * @retval #ML_ERROR_NONE Successful
     580              :  * @retval #ML_ERROR_NOT_SUPPORTED This implies that the given framework does not support dynamic dimensions.
     581              :  *         Use ml_single_get_input_info() and ml_single_get_output_info() instead for this framework.
     582              :  * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid.
     583              :  */
     584              : static int
     585            7 : ml_single_update_info (ml_single_h single,
     586              :     const ml_tensors_info_h in_info, ml_tensors_info_h * out_info)
     587              : {
     588            7 :   if (!single)
     589            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     590              :         "The parameter, single (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
     591            7 :   if (!in_info)
     592            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     593              :         "The parameter, in_info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensors_info_h, usually created by ml_tensors_info_create() and configured by the application.");
     594            7 :   if (!out_info)
     595            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     596              :         "The parameter, out_info (ml_tensors_info_h *), is NULL. It should be a valid pointer to an instance ml_tensors_info_h, usually created by ml_tensors_info_h(). Note that out_info is supposed to be overwritten by this API call.");
     597              : 
     598              :   /* init null */
     599            7 :   *out_info = NULL;
     600              : 
     601            7 :   _ml_error_report_return_continue_iferr (ml_single_set_input_info (single,
     602              :           in_info),
     603              :       "Configuring the neural network model with the given input information has failed with %d error code. The given input information ('in_info' parameter) might be invalid or the given neural network cannot accept it as its input data.",
     604              :       _ERRNO);
     605              : 
     606            5 :   __setup_in_out_tensors (single);
     607            5 :   _ml_error_report_return_continue_iferr (ml_single_get_output_info (single,
     608              :           out_info),
     609              :       "Fetching output info after configuring input information has failed with %d error code.",
     610              :       _ERRNO);
     611              : 
     612            5 :   return ML_ERROR_NONE;
     613              : }
     614              : 
     615              : /**
     616              :  * @brief Internal function to get the gst info from tensor-filter.
     617              :  */
     618              : static void
     619          169 : ml_single_get_gst_info (ml_single * single_h, gboolean is_input,
     620              :     GstTensorsInfo * gst_info)
     621              : {
     622              :   const gchar *prop_prefix, *prop_name, *prop_type;
     623              :   gchar *val;
     624              :   guint num;
     625              : 
     626          169 :   if (is_input) {
     627           89 :     prop_prefix = INPUT_STR;
     628           89 :     prop_type = CONCAT_MACRO_STR (INPUT_STR, TYPE_STR);
     629           89 :     prop_name = CONCAT_MACRO_STR (INPUT_STR, NAME_STR);
     630              :   } else {
     631           80 :     prop_prefix = OUTPUT_STR;
     632           80 :     prop_type = CONCAT_MACRO_STR (OUTPUT_STR, TYPE_STR);
     633           80 :     prop_name = CONCAT_MACRO_STR (OUTPUT_STR, NAME_STR);
     634              :   }
     635              : 
     636          169 :   gst_tensors_info_init (gst_info);
     637              : 
     638              :   /* get dimensions */
     639          169 :   g_object_get (single_h->filter, prop_prefix, &val, NULL);
     640          169 :   num = gst_tensors_info_parse_dimensions_string (gst_info, val);
     641          169 :   g_free (val);
     642              : 
     643              :   /* set the number of tensors */
     644          169 :   gst_info->num_tensors = num;
     645              : 
     646              :   /* get types */
     647          169 :   g_object_get (single_h->filter, prop_type, &val, NULL);
     648          169 :   num = gst_tensors_info_parse_types_string (gst_info, val);
     649          169 :   g_free (val);
     650              : 
     651          169 :   if (gst_info->num_tensors != num) {
     652            0 :     _ml_logw ("The number of tensor type is mismatched in filter.");
     653              :   }
     654              : 
     655              :   /* get names */
     656          169 :   g_object_get (single_h->filter, prop_name, &val, NULL);
     657          169 :   num = gst_tensors_info_parse_names_string (gst_info, val);
     658          169 :   g_free (val);
     659              : 
     660          169 :   if (gst_info->num_tensors != num) {
     661            8 :     _ml_logw ("The number of tensor name is mismatched in filter.");
     662              :   }
     663              : 
     664          169 :   if (single_h->invoke_dynamic) {
     665              :     /* flexible tensor stream */
     666            0 :     gst_info->format = _NNS_TENSOR_FORMAT_FLEXIBLE;
     667              : 
     668              :     /** @todo Consider multiple input tensors while invoking a model. */
     669            0 :     if (gst_info->num_tensors == 0) {
     670            0 :       gst_info->num_tensors = 1;
     671              :     }
     672              :   }
     673          169 : }
     674              : 
     675              : /**
     676              :  * @brief Internal function to set the gst info in tensor-filter.
     677              :  */
     678              : static int
     679           21 : ml_single_set_gst_info (ml_single * single_h, const GstTensorsInfo * in_info)
     680              : {
     681              :   GstTensorsInfo out_info;
     682           21 :   int status = ML_ERROR_NONE;
     683           21 :   int ret = -EINVAL;
     684              : 
     685           21 :   gst_tensors_info_init (&out_info);
     686           21 :   ret = single_h->klass->set_input_info (single_h->filter, in_info, &out_info);
     687           21 :   if (ret == 0) {
     688           15 :     gst_tensors_info_free (&single_h->in_info);
     689           15 :     gst_tensors_info_free (&single_h->out_info);
     690           15 :     gst_tensors_info_copy (&single_h->in_info, in_info);
     691           15 :     gst_tensors_info_copy (&single_h->out_info, &out_info);
     692              : 
     693           15 :     __setup_in_out_tensors (single_h);
     694            6 :   } else if (ret == -ENOENT) {
     695            0 :     status = ML_ERROR_NOT_SUPPORTED;
     696              :   } else {
     697            6 :     status = ML_ERROR_INVALID_PARAMETER;
     698              :   }
     699              : 
     700           21 :   gst_tensors_info_free (&out_info);
     701              : 
     702           21 :   return status;
     703              : }
     704              : 
     705              : /**
     706              :  * @brief Set the info for input/output tensors
     707              :  */
     708              : static int
     709            0 : ml_single_set_inout_tensors_info (GObject * object,
     710              :     const gboolean is_input, ml_tensors_info_s * tensors_info)
     711              : {
     712            0 :   int status = ML_ERROR_NONE;
     713              :   GstTensorsInfo info;
     714              :   gchar *str_dim, *str_type, *str_name;
     715              :   const gchar *str_type_name, *str_name_name;
     716              :   const gchar *prefix;
     717              : 
     718            0 :   if (is_input) {
     719            0 :     prefix = INPUT_STR;
     720            0 :     str_type_name = CONCAT_MACRO_STR (INPUT_STR, TYPE_STR);
     721            0 :     str_name_name = CONCAT_MACRO_STR (INPUT_STR, NAME_STR);
     722              :   } else {
     723            0 :     prefix = OUTPUT_STR;
     724            0 :     str_type_name = CONCAT_MACRO_STR (OUTPUT_STR, TYPE_STR);
     725            0 :     str_name_name = CONCAT_MACRO_STR (OUTPUT_STR, NAME_STR);
     726              :   }
     727              : 
     728            0 :   _ml_error_report_return_continue_iferr
     729              :       (_ml_tensors_info_copy_from_ml (&info, tensors_info),
     730              :       "Cannot fetch tensor-info from the given information. Error code: %d",
     731              :       _ERRNO);
     732              : 
     733              :   /* Set input option */
     734            0 :   str_dim = gst_tensors_info_get_dimensions_string (&info);
     735            0 :   str_type = gst_tensors_info_get_types_string (&info);
     736            0 :   str_name = gst_tensors_info_get_names_string (&info);
     737              : 
     738            0 :   if (!str_dim || !str_type || !str_name) {
     739            0 :     if (!str_dim)
     740            0 :       _ml_error_report
     741              :           ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor dimension information.");
     742            0 :     if (!str_type)
     743            0 :       _ml_error_report
     744              :           ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor type information.");
     745            0 :     if (!str_name)
     746            0 :       _ml_error_report
     747              :           ("Cannot fetch specific tensor-info from the given information: cannot fetch tensor name information. Even if tensor names are not defined, this should be able to fetch a list of empty strings.");
     748              : 
     749            0 :     status = ML_ERROR_INVALID_PARAMETER;
     750              :   } else {
     751            0 :     g_object_set (object, prefix, str_dim, str_type_name, str_type,
     752              :         str_name_name, str_name, NULL);
     753              :   }
     754              : 
     755            0 :   g_free (str_dim);
     756            0 :   g_free (str_type);
     757            0 :   g_free (str_name);
     758              : 
     759            0 :   gst_tensors_info_free (&info);
     760              : 
     761            0 :   return status;
     762              : }
     763              : 
     764              : /**
     765              :  * @brief Internal static function to set tensors info in the handle.
     766              :  */
     767              : static gboolean
     768          162 : ml_single_set_info_in_handle (ml_single_h single, gboolean is_input,
     769              :     ml_tensors_info_s * tensors_info)
     770              : {
     771              :   int status;
     772              :   ml_single *single_h;
     773              :   GstTensorsInfo *dest;
     774          162 :   gboolean configured = FALSE;
     775          162 :   gboolean is_valid = FALSE;
     776              :   GObject *filter_obj;
     777              : 
     778          162 :   single_h = (ml_single *) single;
     779          162 :   filter_obj = G_OBJECT (single_h->filter);
     780              : 
     781          162 :   if (is_input) {
     782           82 :     dest = &single_h->in_info;
     783           82 :     configured = single_h->klass->input_configured (single_h->filter);
     784              :   } else {
     785           80 :     dest = &single_h->out_info;
     786           80 :     configured = single_h->klass->output_configured (single_h->filter);
     787              :   }
     788              : 
     789          162 :   if (configured) {
     790              :     /* get configured info and compare with input info */
     791              :     GstTensorsInfo gst_info;
     792          162 :     ml_tensors_info_h info = NULL;
     793              : 
     794          162 :     ml_single_get_gst_info (single_h, is_input, &gst_info);
     795          162 :     _ml_tensors_info_create_from_gst (&info, &gst_info);
     796              : 
     797          162 :     gst_tensors_info_free (&gst_info);
     798              : 
     799          162 :     if (tensors_info && !ml_tensors_info_is_equal (tensors_info, info)) {
     800              :       /* given input info is not matched with configured */
     801            5 :       ml_tensors_info_destroy (info);
     802            5 :       if (is_input) {
     803              :         /* try to update tensors info */
     804            3 :         status = ml_single_update_info (single, tensors_info, &info);
     805            3 :         if (status != ML_ERROR_NONE)
     806            4 :           goto done;
     807              :       } else {
     808            2 :         goto done;
     809              :       }
     810              :     }
     811              : 
     812          158 :     gst_tensors_info_free (dest);
     813          158 :     _ml_tensors_info_copy_from_ml (dest, info);
     814          158 :     ml_tensors_info_destroy (info);
     815            0 :   } else if (tensors_info) {
     816              :     status =
     817            0 :         ml_single_set_inout_tensors_info (filter_obj, is_input, tensors_info);
     818            0 :     if (status != ML_ERROR_NONE)
     819            0 :       goto done;
     820              : 
     821            0 :     gst_tensors_info_free (dest);
     822            0 :     _ml_tensors_info_copy_from_ml (dest, tensors_info);
     823              :   }
     824              : 
     825          158 :   is_valid = gst_tensors_info_validate (dest);
     826              : 
     827          162 : done:
     828          162 :   return is_valid;
     829              : }
     830              : 
     831              : /**
     832              :  * @brief Internal function to create and initialize the single handle.
     833              :  */
     834              : static ml_single *
     835           82 : ml_single_create_handle (ml_nnfw_type_e nnfw)
     836              : {
     837              :   ml_single *single_h;
     838              :   GError *error;
     839           82 :   gboolean created = FALSE;
     840              : 
     841           82 :   single_h = g_new0 (ml_single, 1);
     842           82 :   if (single_h == NULL)
     843           82 :     _ml_error_report_return (NULL,
     844              :         "Failed to allocate memory for the single_h handle. Out of memory?");
     845              : 
     846           82 :   single_h->filter = g_object_new (G_TYPE_TENSOR_FILTER_SINGLE, NULL);
     847           82 :   if (single_h->filter == NULL) {
     848            0 :     _ml_error_report
     849              :         ("Failed to create a new instance for filter. Out of memory?");
     850            0 :     g_free (single_h);
     851            0 :     return NULL;
     852              :   }
     853              : 
     854           82 :   single_h->magic = ML_SINGLE_MAGIC;
     855           82 :   single_h->timeout = SINGLE_DEFAULT_TIMEOUT;
     856           82 :   single_h->nnfw = nnfw;
     857           82 :   single_h->state = IDLE;
     858           82 :   single_h->thread = NULL;
     859           82 :   single_h->input = NULL;
     860           82 :   single_h->output = NULL;
     861           82 :   single_h->destroy_data_list = NULL;
     862           82 :   single_h->invoking = FALSE;
     863              : 
     864           82 :   gst_tensors_info_init (&single_h->in_info);
     865           82 :   gst_tensors_info_init (&single_h->out_info);
     866           82 :   g_mutex_init (&single_h->mutex);
     867           82 :   g_cond_init (&single_h->cond);
     868              : 
     869           82 :   single_h->klass = g_type_class_ref (G_TYPE_TENSOR_FILTER_SINGLE);
     870           82 :   if (single_h->klass == NULL) {
     871            0 :     _ml_error_report
     872              :         ("Failed to get class of the tensor-filter of single API. This binary is not compiled properly or required libraries are not loaded.");
     873            0 :     goto done;
     874              :   }
     875              : 
     876           82 :   single_h->thread =
     877           82 :       g_thread_try_new (NULL, invoke_thread, (gpointer) single_h, &error);
     878           82 :   if (single_h->thread == NULL) {
     879            0 :     _ml_error_report
     880              :         ("Failed to create the invoke thread of single API, g_thread_try_new has reported an error: %s.",
     881              :         error->message);
     882            0 :     g_clear_error (&error);
     883            0 :     goto done;
     884              :   }
     885              : 
     886           82 :   created = TRUE;
     887              : 
     888           82 : done:
     889           82 :   if (!created) {
     890            0 :     ml_single_close (single_h);
     891            0 :     single_h = NULL;
     892              :   }
     893              : 
     894           82 :   return single_h;
     895              : }
     896              : 
     897              : /**
     898              :  * @brief Validate arguments for open
     899              :  */
     900              : static int
     901           91 : _ml_single_open_custom_validate_arguments (ml_single_h * single,
     902              :     ml_single_preset * info)
     903              : {
     904           91 :   if (!single)
     905            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     906              :         "The parameter, 'single' (ml_single_h *), is NULL. It should be a valid pointer to an instance of ml_single_h.");
     907           90 :   if (!info)
     908            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     909              :         "The parameter, 'info' (ml_single_preset *), is NULL. It should be a valid pointer to a valid instance of ml_single_preset.");
     910              : 
     911              :   /* Validate input tensor info. */
     912           90 :   if (info->input_info && !ml_tensors_info_is_valid (info->input_info))
     913            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     914              :         "The parameter, 'info' (ml_single_preset *), is not valid. It has 'input_info' entry that cannot be validated. ml_tensors_info_is_valid(info->input_info) has failed while info->input_info exists.");
     915              : 
     916              :   /* Validate output tensor info. */
     917           89 :   if (info->output_info && !ml_tensors_info_is_valid (info->output_info))
     918            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     919              :         "The parameter, 'info' (ml_single_preset *), is not valid. It has 'output_info' entry that cannot be validated. ml_tensors_info_is_valid(info->output_info) has failed while info->output_info exists.");
     920              : 
     921           88 :   if (!info->models)
     922            2 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     923              :         "The parameter, 'info' (ml_single_preset *), is not valid. Its models entry if NULL (info->models is NULL).");
     924              : 
     925           86 :   return ML_ERROR_NONE;
     926              : }
     927              : 
     928              : /**
     929              :  * @brief Internal function to convert accelerator as tensor_filter property format.
     930              :  * @note returned value must be freed by the caller
     931              :  * @note More details on format can be found in gst_tensor_filter_install_properties() in tensor_filter_common.c.
     932              :  */
     933              : char *
     934           82 : _ml_nnfw_to_str_prop (const ml_nnfw_hw_e hw)
     935              : {
     936              :   const gchar *hw_name;
     937           82 :   const gchar *use_accl = "true:";
     938           82 :   gchar *str_prop = NULL;
     939              : 
     940           82 :   hw_name = get_accl_hw_str (_ml_nnfw_to_accl_hw (hw));
     941           82 :   str_prop = g_strdup_printf ("%s%s", use_accl, hw_name);
     942              : 
     943           82 :   return str_prop;
     944              : }
     945              : 
     946              : /**
     947              :  * @brief Opens an ML model with the custom options and returns the instance as a handle.
     948              :  */
     949              : int
     950           91 : ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
     951              : {
     952              :   ml_single *single_h;
     953              :   GObject *filter_obj;
     954           91 :   int status = ML_ERROR_NONE;
     955              :   ml_tensors_info_s *in_tensors_info, *out_tensors_info;
     956              :   ml_nnfw_type_e nnfw;
     957              :   ml_nnfw_hw_e hw;
     958              :   const gchar *fw_name;
     959           91 :   g_autofree gchar *converted_models = NULL;
     960              :   gchar **list_models;
     961              :   guint i, num_models;
     962              :   char *hw_name;
     963              : 
     964           91 :   check_feature_state (ML_FEATURE_INFERENCE);
     965              : 
     966              :   /* Validate the params */
     967           91 :   _ml_error_report_return_continue_iferr
     968              :       (_ml_single_open_custom_validate_arguments (single, info),
     969              :       "The parameter, 'info' (ml_single_preset *), cannot be validated. Please provide valid information for this object.");
     970              : 
     971              :   /* init null */
     972           86 :   *single = NULL;
     973              : 
     974           86 :   in_tensors_info = (ml_tensors_info_s *) info->input_info;
     975           86 :   out_tensors_info = (ml_tensors_info_s *) info->output_info;
     976           86 :   nnfw = info->nnfw;
     977           86 :   hw = info->hw;
     978           86 :   fw_name = _ml_get_nnfw_subplugin_name (nnfw);
     979           86 :   converted_models = _ml_convert_predefined_entity (info->models);
     980              : 
     981              :   /**
     982              :    * 1. Determine nnfw and validate model file
     983              :    */
     984           86 :   list_models = g_strsplit (converted_models, ",", -1);
     985           86 :   num_models = g_strv_length (list_models);
     986          172 :   for (i = 0; i < num_models; i++)
     987           86 :     g_strstrip (list_models[i]);
     988              : 
     989           86 :   status = _ml_validate_model_file ((const char **) list_models, num_models,
     990              :       &nnfw);
     991           86 :   if (status != ML_ERROR_NONE) {
     992            4 :     _ml_error_report_continue
     993              :         ("Cannot validate the model (1st model: %s. # models: %d). Error code: %d",
     994              :         list_models[0], num_models, status);
     995            4 :     g_strfreev (list_models);
     996            4 :     return status;
     997              :   }
     998              : 
     999           82 :   g_strfreev (list_models);
    1000              : 
    1001              :   /**
    1002              :    * 2. Determine hw
    1003              :    * (Supposed CPU only) Support others later.
    1004              :    */
    1005           82 :   if (!_ml_nnfw_is_available (nnfw, hw)) {
    1006            0 :     _ml_error_report_return (ML_ERROR_NOT_SUPPORTED,
    1007              :         "The given nnfw, '%s', is not supported. There is no corresponding tensor-filter subplugin available or the given hardware requirement is not supported for the given nnfw.",
    1008              :         fw_name);
    1009              :   }
    1010              : 
    1011              :   /* Create ml_single object */
    1012           82 :   if ((single_h = ml_single_create_handle (nnfw)) == NULL) {
    1013            0 :     _ml_error_report_return_continue (ML_ERROR_OUT_OF_MEMORY,
    1014              :         "Cannot create handle for the given nnfw, %s", fw_name);
    1015              :   }
    1016              : 
    1017           82 :   single_h->invoke_dynamic = info->invoke_dynamic;
    1018           82 :   single_h->invoke_async = info->invoke_async;
    1019              : 
    1020           82 :   filter_obj = G_OBJECT (single_h->filter);
    1021              : 
    1022              :   /**
    1023              :    * 3. Construct a direct connection with the nnfw.
    1024              :    * Note that we do not construct a pipeline since 2019.12.
    1025              :    */
    1026           82 :   if (nnfw == ML_NNFW_TYPE_TENSORFLOW || nnfw == ML_NNFW_TYPE_SNAP ||
    1027           82 :       nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIX_ENGINE ||
    1028           82 :       nnfw == ML_NNFW_TYPE_NCNN) {
    1029              :     /* set input and output tensors information */
    1030            0 :     if (in_tensors_info && out_tensors_info) {
    1031              :       status =
    1032            0 :           ml_single_set_inout_tensors_info (filter_obj, TRUE, in_tensors_info);
    1033            0 :       if (status != ML_ERROR_NONE) {
    1034            0 :         _ml_error_report_continue
    1035              :             ("Input tensors info is given; however, failed to set input tensors info. Error code: %d",
    1036              :             status);
    1037            0 :         goto error;
    1038              :       }
    1039              : 
    1040              :       status =
    1041            0 :           ml_single_set_inout_tensors_info (filter_obj, FALSE,
    1042              :           out_tensors_info);
    1043            0 :       if (status != ML_ERROR_NONE) {
    1044            0 :         _ml_error_report_continue
    1045              :             ("Output tensors info is given; however, failed to set output tensors info. Error code: %d",
    1046              :             status);
    1047            0 :         goto error;
    1048              :       }
    1049              :     } else {
    1050            0 :       _ml_error_report
    1051              :           ("To run the given nnfw, '%s', with a neural network model, both input and output information should be provided.",
    1052              :           fw_name);
    1053            0 :       status = ML_ERROR_INVALID_PARAMETER;
    1054            0 :       goto error;
    1055              :     }
    1056           82 :   } else if (nnfw == ML_NNFW_TYPE_ARMNN) {
    1057              :     /* set input and output tensors information, if available */
    1058            0 :     if (in_tensors_info) {
    1059              :       status =
    1060            0 :           ml_single_set_inout_tensors_info (filter_obj, TRUE, in_tensors_info);
    1061            0 :       if (status != ML_ERROR_NONE) {
    1062            0 :         _ml_error_report_continue
    1063              :             ("With nnfw '%s', input tensors info is optional. However, the user has provided an invalid input tensors info. Error code: %d",
    1064              :             fw_name, status);
    1065            0 :         goto error;
    1066              :       }
    1067              :     }
    1068            0 :     if (out_tensors_info) {
    1069              :       status =
    1070            0 :           ml_single_set_inout_tensors_info (filter_obj, FALSE,
    1071              :           out_tensors_info);
    1072            0 :       if (status != ML_ERROR_NONE) {
    1073            0 :         _ml_error_report_continue
    1074              :             ("With nnfw '%s', output tensors info is optional. However, the user has provided an invalid output tensors info. Error code: %d",
    1075              :             fw_name, status);
    1076            0 :         goto error;
    1077              :       }
    1078              :     }
    1079              :   }
    1080              : 
    1081              :   /* set accelerator, framework, model files and custom option */
    1082           82 :   if (info->fw_name) {
    1083           33 :     fw_name = (const char *) info->fw_name;
    1084              :   } else {
    1085           49 :     fw_name = _ml_get_nnfw_subplugin_name (nnfw);       /* retry for "auto" */
    1086              :   }
    1087           82 :   hw_name = _ml_nnfw_to_str_prop (hw);
    1088              : 
    1089           82 :   g_object_set (filter_obj, "framework", fw_name, "accelerator", hw_name,
    1090              :       "model", converted_models, "invoke-dynamic", single_h->invoke_dynamic,
    1091              :       "invoke-async", single_h->invoke_async, NULL);
    1092           82 :   g_free (hw_name);
    1093              : 
    1094           82 :   if (info->custom_option) {
    1095            0 :     g_object_set (filter_obj, "custom", info->custom_option, NULL);
    1096              :   }
    1097              : 
    1098              :   /* 4. Start the nnfw to get inout configurations if needed */
    1099           82 :   if (!single_h->klass->start (single_h->filter)) {
    1100            0 :     _ml_error_report
    1101              :         ("Failed to start NNFW, '%s', to get inout configurations. Subplugin class method has failed to start.",
    1102              :         fw_name);
    1103            0 :     status = ML_ERROR_STREAMS_PIPE;
    1104            0 :     goto error;
    1105              :   }
    1106              : 
    1107           82 :   if (nnfw == ML_NNFW_TYPE_NNTR_INF) {
    1108            0 :     if (!in_tensors_info || !out_tensors_info) {
    1109            0 :       if (!in_tensors_info) {
    1110              :         GstTensorsInfo in_info;
    1111              : 
    1112            0 :         gst_tensors_info_init (&in_info);
    1113              : 
    1114              :         /* ml_single_set_input_info() can't be done as it checks num_tensors */
    1115            0 :         status = ml_single_set_gst_info (single_h, &in_info);
    1116            0 :         if (status != ML_ERROR_NONE) {
    1117            0 :           _ml_error_report_continue
    1118              :               ("NNTrainer-inference-single cannot configure single_h handle instance with the given in_info. This might be an ML-API / NNTrainer internal error. Error Code: %d",
    1119              :               status);
    1120            0 :           goto error;
    1121              :         }
    1122              :       } else {
    1123            0 :         status = ml_single_set_input_info (single_h, in_tensors_info);
    1124            0 :         if (status != ML_ERROR_NONE) {
    1125            0 :           _ml_error_report_continue
    1126              :               ("NNTrainer-inference-single cannot configure single_h handle instance with the given in_info from the user. Error code: %d",
    1127              :               status);
    1128            0 :           goto error;
    1129              :         }
    1130              :       }
    1131              :     }
    1132              :   }
    1133              : 
    1134              :   /* 5. Set in/out configs and metadata */
    1135           82 :   if (!ml_single_set_info_in_handle (single_h, TRUE, in_tensors_info)) {
    1136            2 :     _ml_error_report
    1137              :         ("The input tensors info is invalid. Cannot configure single_h handle with the given input tensors info.");
    1138            2 :     status = ML_ERROR_INVALID_PARAMETER;
    1139            2 :     goto error;
    1140              :   }
    1141              : 
    1142           80 :   if (!ml_single_set_info_in_handle (single_h, FALSE, out_tensors_info)) {
    1143            2 :     _ml_error_report
    1144              :         ("The output tensors info is invalid. Cannot configure single_h handle with the given output tensors info.");
    1145            2 :     status = ML_ERROR_INVALID_PARAMETER;
    1146            2 :     goto error;
    1147              :   }
    1148              : 
    1149              :   /* Setup input and output memory buffers for invoke */
    1150           78 :   __setup_in_out_tensors (single_h);
    1151              : 
    1152           78 :   *single = single_h;
    1153           78 :   return ML_ERROR_NONE;
    1154              : 
    1155            4 : error:
    1156            4 :   ml_single_close (single_h);
    1157            4 :   return status;
    1158              : }
    1159              : 
    1160              : /**
    1161              :  * @brief Opens an ML model and returns the instance as a handle.
    1162              :  */
    1163              : int
    1164           53 : ml_single_open (ml_single_h * single, const char *model,
    1165              :     const ml_tensors_info_h input_info, const ml_tensors_info_h output_info,
    1166              :     ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw)
    1167              : {
    1168           53 :   return ml_single_open_full (single, model, input_info, output_info, nnfw, hw,
    1169              :       NULL);
    1170              : }
    1171              : 
    1172              : /**
    1173              :  * @brief Opens an ML model and returns the instance as a handle.
    1174              :  */
    1175              : int
    1176           53 : ml_single_open_full (ml_single_h * single, const char *model,
    1177              :     const ml_tensors_info_h input_info, const ml_tensors_info_h output_info,
    1178              :     ml_nnfw_type_e nnfw, ml_nnfw_hw_e hw, const char *custom_option)
    1179              : {
    1180           53 :   ml_single_preset info = { 0, };
    1181              : 
    1182           53 :   info.input_info = input_info;
    1183           53 :   info.output_info = output_info;
    1184           53 :   info.nnfw = nnfw;
    1185           53 :   info.hw = hw;
    1186           53 :   info.models = (char *) model;
    1187           53 :   info.custom_option = (char *) custom_option;
    1188              : 
    1189           53 :   return ml_single_open_custom (single, &info);
    1190              : }
    1191              : 
    1192              : /**
    1193              :  * @brief Open new single handle with given option.
    1194              :  */
    1195              : int
    1196           39 : ml_single_open_with_option (ml_single_h * single, const ml_option_h option)
    1197              : {
    1198              :   void *value;
    1199           39 :   ml_single_preset info = { 0, };
    1200              : 
    1201           78 :   check_feature_state (ML_FEATURE_INFERENCE);
    1202              : 
    1203           39 :   if (!option) {
    1204            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1205              :         "The parameter, 'option' is NULL. It should be a valid ml_option_h, which should be created by ml_option_create().");
    1206              :   }
    1207              : 
    1208           38 :   if (!single)
    1209            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1210              :         "The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
    1211              : 
    1212           38 :   if (ML_ERROR_NONE == ml_option_get (option, "input_info", &value))
    1213           16 :     info.input_info = value;
    1214           38 :   if (ML_ERROR_NONE == ml_option_get (option, "output_info", &value))
    1215           16 :     info.output_info = value;
    1216           38 :   if (ML_ERROR_NONE == ml_option_get (option, "nnfw", &value))
    1217            2 :     info.nnfw = *((ml_nnfw_type_e *) value);
    1218           38 :   if (ML_ERROR_NONE == ml_option_get (option, "hw", &value))
    1219            0 :     info.hw = *((ml_nnfw_hw_e *) value);
    1220           38 :   if (ML_ERROR_NONE == ml_option_get (option, "models", &value))
    1221           37 :     info.models = (gchar *) value;
    1222           38 :   if (ML_ERROR_NONE == ml_option_get (option, "custom", &value))
    1223            0 :     info.custom_option = (gchar *) value;
    1224           38 :   if (ML_ERROR_NONE == ml_option_get (option, "framework_name", &value) ||
    1225            5 :       ML_ERROR_NONE == ml_option_get (option, "framework", &value))
    1226           33 :     info.fw_name = (gchar *) value;
    1227           38 :   if (ML_ERROR_NONE == ml_option_get (option, "invoke_dynamic", &value)) {
    1228            0 :     if (g_ascii_strcasecmp ((gchar *) value, "true") == 0)
    1229            0 :       info.invoke_dynamic = TRUE;
    1230              :   }
    1231           38 :   if (ML_ERROR_NONE == ml_option_get (option, "invoke_async", &value)) {
    1232            0 :     if (g_ascii_strcasecmp ((gchar *) value, "true") == 0)
    1233            0 :       info.invoke_async = TRUE;
    1234              :   }
    1235              : 
    1236           38 :   return ml_single_open_custom (single, &info);
    1237              : }
    1238              : 
/**
 * @brief Closes the opened model handle.
 *
 * @details State changes performed by this function:
 *          ANY STATE -> JOIN REQUESTED - on receiving a request to close
 *
 *          Once requested to close, invoke_thread() will exit after processing
 *          the current input (if any).
 *
 * @param[in] single The handle to close; created by ml_single_open().
 * @return ML_ERROR_NONE on success, ML_ERROR_INVALID_PARAMETER for a NULL
 *         or invalid (bad-magic) handle.
 */
int
ml_single_close (ml_single_h single)
{
  ml_single *single_h;
  gboolean invoking;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (!single)
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");

  /* Validates the magic and locks the handle; also invalidates the magic
   * (3rd argument == 1) so no new API call can acquire this handle. */
  ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 1);

  /* Ask invoke_thread() to terminate and wake it up if it is cond-waiting. */
  single_h->state = JOIN_REQUESTED;
  g_cond_broadcast (&single_h->cond);
  invoking = single_h->invoking;
  ML_SINGLE_HANDLE_UNLOCK (single_h);

  /** Wait until invoke process is finished */
  while (invoking) {
    _ml_logd ("Wait 1 ms until invoke is finished and close the handle.");
    g_usleep (1000);
    invoking = single_h->invoking;
    /**
     * single_h->invoking is the only protected value here and we are
     * doing a read-only operation and do not need to protect its value
     * after the assignment.
     * Thus, we do not need to lock single_h here.
     */
  }

  /* Reap the worker thread; it exits after observing JOIN_REQUESTED. */
  if (single_h->thread != NULL)
    g_thread_join (single_h->thread);

  /** locking ensures correctness with parallel calls on close */
  if (single_h->filter) {
    /* Release any output buffers still registered for deferred destruction. */
    g_list_foreach (single_h->destroy_data_list, __destroy_notify, single_h);
    g_list_free (single_h->destroy_data_list);

    if (single_h->klass)
      single_h->klass->stop (single_h->filter);

    g_object_unref (single_h->filter);
    single_h->filter = NULL;
  }

  if (single_h->klass) {
    g_type_class_unref (single_h->klass);
    single_h->klass = NULL;
  }

  /* Free cached in/out tensor metadata and the template data buffers. */
  gst_tensors_info_free (&single_h->in_info);
  gst_tensors_info_free (&single_h->out_info);

  ml_tensors_data_destroy (single_h->in_tensors);
  ml_tensors_data_destroy (single_h->out_tensors);

  g_cond_clear (&single_h->cond);
  g_mutex_clear (&single_h->mutex);

  g_free (single_h);
  return ML_ERROR_NONE;
}
    1312              : 
    1313              : /**
    1314              :  * @brief Internal function to validate input/output data.
    1315              :  */
    1316              : static int
    1317           92 : _ml_single_invoke_validate_data (ml_single_h single,
    1318              :     const ml_tensors_data_h data, const gboolean is_input)
    1319              : {
    1320              :   ml_single *single_h;
    1321              :   ml_tensors_data_s *_data;
    1322              :   ml_tensors_data_s *_model;
    1323              :   guint i;
    1324              :   size_t raw_size;
    1325              : 
    1326           92 :   single_h = (ml_single *) single;
    1327           92 :   _data = (ml_tensors_data_s *) data;
    1328              : 
    1329           92 :   if (G_UNLIKELY (!_data))
    1330            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1331              :         "(internal function) The parameter, 'data' (const ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h.");
    1332              : 
    1333           92 :   if (is_input)
    1334           91 :     _model = (ml_tensors_data_s *) single_h->in_tensors;
    1335              :   else
    1336            1 :     _model = (ml_tensors_data_s *) single_h->out_tensors;
    1337              : 
    1338           92 :   if (G_UNLIKELY (_data->num_tensors != _model->num_tensors))
    1339            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1340              :         "(internal function) The number of %s tensors is not compatible with model. Given: %u, Expected: %u.",
    1341              :         (is_input) ? "input" : "output", _data->num_tensors,
    1342              :         _model->num_tensors);
    1343              : 
    1344          335 :   for (i = 0; i < _data->num_tensors; i++) {
    1345          247 :     if (G_UNLIKELY (!_data->tensors[i].data))
    1346            1 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1347              :           "The %d-th input tensor is not valid. There is no valid dimension metadata for this tensor.",
    1348              :           i);
    1349              : 
    1350          246 :     if (single_h->invoke_dynamic) {
    1351              :       /* If tensor is not static, we cannot check tensor data size. */
    1352            0 :       continue;
    1353              :     }
    1354              : 
    1355          246 :     raw_size = _model->tensors[i].size;
    1356          246 :     if (G_UNLIKELY (_data->tensors[i].size != raw_size))
    1357            2 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1358              :           "The size of %d-th %s tensor is not compatible with model. Given: %zu, Expected: %zu.",
    1359              :           i, (is_input) ? "input" : "output", _data->tensors[i].size, raw_size);
    1360              :   }
    1361              : 
    1362           88 :   return ML_ERROR_NONE;
    1363              : }
    1364              : 
/**
 * @brief Internal function to invoke the model.
 *
 * @details State changes performed by this function:
 *          IDLE -> RUNNING - on receiving a valid request
 *
 *          Invoke returns error if the current state is not IDLE.
 *          If IDLE, then invoke is requested to the thread.
 *          Invoke waits for the processing to be complete, and returns back
 *          the result once notified by the processing thread.
 *
 * @param[in] single The single handle to invoke.
 * @param[in] input Input data frame; cloned internally before invocation.
 * @param[in,out] output With need_alloc TRUE, set to a newly allocated result;
 *                       with need_alloc FALSE, must point to a pre-allocated,
 *                       size-validated buffer that is filled in place.
 * @param[in] need_alloc Whether this function allocates the output buffer.
 * @return ML_ERROR_NONE on success; ML_ERROR_INVALID_PARAMETER,
 *         ML_ERROR_TRY_AGAIN, ML_ERROR_TIMED_OUT, or ML_ERROR_STREAMS_PIPE
 *         on failure.
 *
 * @note IDLE is the valid thread state before and after this function call.
 */
static int
_ml_single_invoke_internal (ml_single_h single,
    const ml_tensors_data_h input, ml_tensors_data_h * output,
    const gboolean need_alloc)
{
  ml_single *single_h;
  ml_tensors_data_h _in, _out;
  gint64 end_time;
  int status = ML_ERROR_NONE;

  check_feature_state (ML_FEATURE_INFERENCE);

  if (G_UNLIKELY (!single))
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "(internal function) The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, usually created by ml_single_open().");

  if (G_UNLIKELY (!input))
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "(internal function) The parameter, input (ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h.");

  if (G_UNLIKELY (!output))
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "(internal function) The parameter, output (ml_tensors_data_h *), is NULL. It should be a valid pointer to an instance of ml_tensors_data_h to store the inference results.");

  /* Validates the handle magic and takes single_h->mutex (held until exit). */
  ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);

  if (G_UNLIKELY (!single_h->filter)) {
    _ml_error_report
        ("The tensor_filter element of this single handle (single_h) is not valid. It appears that the handle (ml_single_h single) is not appropriately created by ml_single_open(), user thread has touched its internal data, or the handle is already closed or freed by user.");
    status = ML_ERROR_INVALID_PARAMETER;
    goto exit;
  }

  /* Validate input/output data */
  status = _ml_single_invoke_validate_data (single, input, TRUE);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_continue
        ("The input data for the inference is not valid: error code %d. Please check the dimensions, type, number-of-tensors, and size information of the input data.",
        status);
    goto exit;
  }

  /* With a caller-provided output buffer, its layout must also match. */
  if (!need_alloc) {
    status = _ml_single_invoke_validate_data (single, *output, FALSE);
    if (status != ML_ERROR_NONE) {
      _ml_error_report_continue
          ("The output data buffer provided by the user is not valid for the given neural network mode: error code %d. Please check the dimensions, type, number-of-tensors, and size information of the output data buffer.",
          status);
      goto exit;
    }
  }

  /* Reject concurrent or post-close invocations: only IDLE may proceed. */
  if (single_h->state != IDLE) {
    if (G_UNLIKELY (single_h->state == JOIN_REQUESTED)) {
      _ml_error_report
          ("The handle (single_h single) is closed or being closed awaiting for the last ongoing invocation. Invoking with such a handle is not allowed. Please open another single_h handle to invoke.");
      status = ML_ERROR_STREAMS_PIPE;
      goto exit;
    }
    _ml_error_report
        ("The handle (single_h single) is busy. There is another thread waiting for inference results with this handle. Please retry invoking again later when the handle becomes idle after completing the current inference task.");
    status = ML_ERROR_TRY_AGAIN;
    goto exit;
  }

  /* prepare output data */
  if (need_alloc) {
    *output = NULL;

    /* Clone metadata only; the actual buffers are allocated at invoke. */
    status = _ml_tensors_data_clone_no_alloc (single_h->out_tensors, &_out);
    if (status != ML_ERROR_NONE)
      goto exit;
  } else {
    _out = *output;
  }

  /**
   * Clone input data here to prevent use-after-free case.
   * We should release single_h->input after calling __invoke() function.
   */
  status = ml_tensors_data_clone (input, &_in);
  if (status != ML_ERROR_NONE)
    goto exit;

  single_h->state = RUNNING;
  single_h->free_output = need_alloc;
  single_h->input = _in;
  single_h->output = _out;

  if (single_h->timeout > 0) {
    /* Wake up "invoke_thread" */
    g_cond_broadcast (&single_h->cond);

    /* set timeout */
    end_time = g_get_monotonic_time () +
        single_h->timeout * G_TIME_SPAN_MILLISECOND;

    /* Wait for invoke_thread to signal completion, up to the timeout. */
    if (g_cond_wait_until (&single_h->cond, &single_h->mutex, end_time)) {
      status = single_h->status;
    } else {
      _ml_logw ("Wait for invoke has timed out");
      status = ML_ERROR_TIMED_OUT;
      /** This is set to notify invoke_thread to not process if timed out */
      if (need_alloc)
        set_destroy_notify (single_h, _out, TRUE);
    }
  } else {
    /**
     * Don't worry. We have locked single_h->mutex, thus there is no
     * other thread with ml_single_invoke function on the same handle
     * that are in this if-then-else block, which means that there is
     * no other thread with active invoke-thread (calling __invoke())
     * with the same handle. Thus we can call __invoke without
     * having yet another mutex for __invoke.
     */
    single_h->invoking = TRUE;
    status = __invoke (single_h, _in, _out, need_alloc);
    ml_tensors_data_destroy (_in);
    single_h->invoking = FALSE;
    single_h->state = IDLE;

    if (status != ML_ERROR_NONE) {
      if (need_alloc)
        ml_tensors_data_destroy (_out);
      goto exit;
    }

    if (need_alloc)
      __process_output (single_h, _out);
  }

exit:
  /* Hand the result to the caller only on success. */
  if (status == ML_ERROR_NONE) {
    if (need_alloc)
      *output = _out;
  }

  single_h->input = single_h->output = NULL;
  ML_SINGLE_HANDLE_UNLOCK (single_h);
  return status;
}
    1519              : 
    1520              : /**
    1521              :  * @brief Invokes the model with the given input data.
    1522              :  */
    1523              : int
    1524          103 : ml_single_invoke (ml_single_h single,
    1525              :     const ml_tensors_data_h input, ml_tensors_data_h * output)
    1526              : {
    1527          103 :   return _ml_single_invoke_internal (single, input, output, TRUE);
    1528              : }
    1529              : 
    1530              : /**
    1531              :  * @brief Invokes the model with the given input data and fills the output data handle.
    1532              :  */
    1533              : int
    1534            1 : ml_single_invoke_fast (ml_single_h single,
    1535              :     const ml_tensors_data_h input, ml_tensors_data_h output)
    1536              : {
    1537            1 :   return _ml_single_invoke_internal (single, input, &output, FALSE);
    1538              : }
    1539              : 
    1540              : /**
    1541              :  * @brief Gets the tensors info for the given handle.
    1542              :  * @param[out] info A pointer to a NULL (unallocated) instance.
    1543              :  */
    1544              : static int
    1545           61 : ml_single_get_tensors_info (ml_single_h single, gboolean is_input,
    1546              :     ml_tensors_info_h * info)
    1547              : {
    1548              :   ml_single *single_h;
    1549           61 :   int status = ML_ERROR_NONE;
    1550              : 
    1551           61 :   check_feature_state (ML_FEATURE_INFERENCE);
    1552              : 
    1553           61 :   if (!single)
    1554            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1555              :         "(internal function) The parameter, 'single' (ml_single_h), is NULL. It should be a valid ml_single_h instance, usually created by ml_single_open().");
    1556           61 :   if (!info)
    1557            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1558              :         "(internal function) The parameter, 'info' (ml_tensors_info_h *) is NULL. It should be a valid pointer to an empty (NULL) instance of ml_tensor_info_h, which is supposed to be filled with the fetched info by this function.");
    1559              : 
    1560           61 :   ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
    1561              : 
    1562           61 :   if (is_input)
    1563           39 :     status = _ml_tensors_info_create_from_gst (info, &single_h->in_info);
    1564              :   else
    1565           22 :     status = _ml_tensors_info_create_from_gst (info, &single_h->out_info);
    1566              : 
    1567           61 :   if (status != ML_ERROR_NONE) {
    1568            0 :     _ml_error_report_continue
    1569              :         ("(internal function) Failed to create an entry for the ml_tensors_info_h instance. Error code: %d",
    1570              :         status);
    1571              :   }
    1572              : 
    1573           61 :   ML_SINGLE_HANDLE_UNLOCK (single_h);
    1574           61 :   return status;
    1575              : }
    1576              : 
    1577              : /**
    1578              :  * @brief Gets the information of required input data for the given handle.
    1579              :  * @note information = (tensor dimension, type, name and so on)
    1580              :  */
    1581              : int
    1582           39 : ml_single_get_input_info (ml_single_h single, ml_tensors_info_h * info)
    1583              : {
    1584           39 :   return ml_single_get_tensors_info (single, TRUE, info);
    1585              : }
    1586              : 
    1587              : /**
    1588              :  * @brief Gets the information of output data for the given handle.
    1589              :  * @note information = (tensor dimension, type, name and so on)
    1590              :  */
    1591              : int
    1592           22 : ml_single_get_output_info (ml_single_h single, ml_tensors_info_h * info)
    1593              : {
    1594           22 :   return ml_single_get_tensors_info (single, FALSE, info);
    1595              : }
    1596              : 
    1597              : /**
    1598              :  * @brief Sets the maximum amount of time to wait for an output, in milliseconds.
    1599              :  */
    1600              : int
    1601           19 : ml_single_set_timeout (ml_single_h single, unsigned int timeout)
    1602              : {
    1603              :   ml_single *single_h;
    1604              : 
    1605           19 :   check_feature_state (ML_FEATURE_INFERENCE);
    1606              : 
    1607           19 :   if (!single)
    1608            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1609              :         "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
    1610              : 
    1611           19 :   ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
    1612              : 
    1613           19 :   single_h->timeout = (guint) timeout;
    1614              : 
    1615           19 :   ML_SINGLE_HANDLE_UNLOCK (single_h);
    1616           19 :   return ML_ERROR_NONE;
    1617              : }
    1618              : 
    1619              : /**
    1620              :  * @brief Sets the information (tensor dimension, type, name and so on) of required input data for the given model.
    1621              :  */
    1622              : int
    1623           17 : ml_single_set_input_info (ml_single_h single, const ml_tensors_info_h info)
    1624              : {
    1625              :   ml_single *single_h;
    1626              :   GstTensorsInfo gst_info;
    1627           17 :   int status = ML_ERROR_NONE;
    1628              : 
    1629           34 :   check_feature_state (ML_FEATURE_INFERENCE);
    1630              : 
    1631           17 :   if (!single)
    1632            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1633              :         "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
    1634           17 :   if (!info)
    1635            2 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1636              :         "The parameter, info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensors_info_h, which is usually created by ml_tensors_info_create() or other APIs.");
    1637              : 
    1638           15 :   if (!ml_tensors_info_is_valid (info))
    1639            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1640              :         "The parameter, info (const ml_tensors_info_h), is not valid. Although it is not NULL, the content of 'info' is invalid. If it is created by ml_tensors_info_create(), which creates an empty instance, it should be filled by users afterwards. Please check if 'info' has all elements filled with valid values.");
    1641              : 
    1642           14 :   ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
    1643           14 :   _ml_tensors_info_copy_from_ml (&gst_info, info);
    1644           14 :   status = ml_single_set_gst_info (single_h, &gst_info);
    1645           14 :   gst_tensors_info_free (&gst_info);
    1646           14 :   ML_SINGLE_HANDLE_UNLOCK (single_h);
    1647              : 
    1648           14 :   if (status != ML_ERROR_NONE)
    1649            5 :     _ml_error_report_continue
    1650              :         ("ml_single_set_gst_info() has failed to configure the single_h handle with the given info. Error code: %d",
    1651              :         status);
    1652              : 
    1653           14 :   return status;
    1654              : }
    1655              : 
    1656              : /**
    1657              :  * @brief Invokes the model with the given input data with the given info.
    1658              :  */
    1659              : int
    1660            9 : ml_single_invoke_dynamic (ml_single_h single,
    1661              :     const ml_tensors_data_h input, const ml_tensors_info_h in_info,
    1662              :     ml_tensors_data_h * output, ml_tensors_info_h * out_info)
    1663              : {
    1664              :   int status;
    1665            9 :   ml_tensors_info_h cur_in_info = NULL;
    1666              : 
    1667            9 :   if (!single)
    1668            9 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1669              :         "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
    1670            8 :   if (!input)
    1671            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1672              :         "The parameter, input (const ml_tensors_data_h), is NULL. It should be a valid instance of ml_tensors_data_h with input data frame for inference.");
    1673            7 :   if (!in_info)
    1674            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1675              :         "The parameter, in_info (const ml_tensors_info_h), is NULL. It should be a valid instance of ml_tensor_info_h that describes metadata of the given input for inference (input).");
    1676            6 :   if (!output)
    1677            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1678              :         "The parameter, output (ml_tensors_data_h *), is NULL. It should be a pointer to an empty (NULL or do-not-care) instance of ml_tensors_data_h, which is filled by this API with the result of inference.");
    1679            5 :   if (!out_info)
    1680            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1681              :         "The parameter, out_info (ml_tensors_info_h *), is NULL. It should be a pointer to an empty (NULL or do-not-care) instance of ml_tensors_info_h, which is filled by this API with the neural network model info.");
    1682              : 
    1683              :   /* init null */
    1684            4 :   *output = NULL;
    1685            4 :   *out_info = NULL;
    1686              : 
    1687            4 :   status = ml_single_get_input_info (single, &cur_in_info);
    1688            4 :   if (status != ML_ERROR_NONE) {
    1689            0 :     _ml_error_report_continue
    1690              :         ("Failed to get input metadata configured by the opened single_h handle instance. Error code: %d.",
    1691              :         status);
    1692            0 :     goto exit;
    1693              :   }
    1694            4 :   status = ml_single_update_info (single, in_info, out_info);
    1695            4 :   if (status != ML_ERROR_NONE) {
    1696            0 :     _ml_error_report_continue
    1697              :         ("Failed to reconfigure the opened single_h handle instance with the updated input/output metadata. Error code: %d.",
    1698              :         status);
    1699            0 :     goto exit;
    1700              :   }
    1701              : 
    1702            4 :   status = ml_single_invoke (single, input, output);
    1703            4 :   if (status != ML_ERROR_NONE) {
    1704            0 :     ml_single_set_input_info (single, cur_in_info);
    1705            0 :     if (status != ML_ERROR_TRY_AGAIN) {
    1706              :       /* If it's TRY_AGAIN, ml_single_invoke() has already gave enough info. */
    1707            0 :       _ml_error_report_continue
    1708              :           ("Invoking the given neural network has failed. Error code: %d.",
    1709              :           status);
    1710              :     }
    1711              :   }
    1712              : 
    1713            4 : exit:
    1714            4 :   if (cur_in_info)
    1715            4 :     ml_tensors_info_destroy (cur_in_info);
    1716              : 
    1717            4 :   if (status != ML_ERROR_NONE) {
    1718            0 :     if (*out_info) {
    1719            0 :       ml_tensors_info_destroy (*out_info);
    1720            0 :       *out_info = NULL;
    1721              :     }
    1722              :   }
    1723              : 
    1724            4 :   return status;
    1725              : }
    1726              : 
    1727              : /**
    1728              :  * @brief Sets the property value for the given model.
    1729              :  */
    1730              : int
    1731           13 : ml_single_set_property (ml_single_h single, const char *name, const char *value)
    1732              : {
    1733              :   ml_single *single_h;
    1734           13 :   int status = ML_ERROR_NONE;
    1735           13 :   char *old_value = NULL;
    1736              : 
    1737           26 :   check_feature_state (ML_FEATURE_INFERENCE);
    1738              : 
    1739           13 :   if (!single)
    1740            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1741              :         "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
    1742           13 :   if (!name)
    1743            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1744              :         "The parameter, name (const char *), is NULL. It should be a valid string representing a property key.");
    1745              : 
    1746              :   /* get old value, also check the property is updatable. */
    1747           12 :   _ml_error_report_return_continue_iferr
    1748              :       (ml_single_get_property (single, name, &old_value),
    1749              :       "Cannot fetch the previous value for the given property name, '%s'. It appears that the property key, '%s', is invalid (not supported).",
    1750              :       name, name);
    1751              : 
    1752              :   /* if sets same value, do not change. */
    1753           11 :   if (old_value && value && g_ascii_strcasecmp (old_value, value) == 0) {
    1754            1 :     g_free (old_value);
    1755            1 :     return ML_ERROR_NONE;
    1756              :   }
    1757              : 
    1758           10 :   ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
    1759              : 
    1760              :   /* update property */
    1761           10 :   if (g_str_equal (name, "is-updatable")) {
    1762            2 :     if (!value)
    1763            0 :       goto error;
    1764              : 
    1765              :     /* boolean */
    1766            2 :     if (g_ascii_strcasecmp (value, "true") == 0) {
    1767            1 :       if (g_ascii_strcasecmp (old_value, "true") != 0)
    1768            1 :         g_object_set (G_OBJECT (single_h->filter), name, (gboolean) TRUE, NULL);
    1769            1 :     } else if (g_ascii_strcasecmp (value, "false") == 0) {
    1770            1 :       if (g_ascii_strcasecmp (old_value, "false") != 0)
    1771            1 :         g_object_set (G_OBJECT (single_h->filter), name, (gboolean) FALSE,
    1772              :             NULL);
    1773              :     } else {
    1774            0 :       _ml_error_report
    1775              :           ("The property value, '%s', is not appropriate for a boolean property 'is-updatable'. It should be either 'true' or 'false'.",
    1776              :           value);
    1777            0 :       status = ML_ERROR_INVALID_PARAMETER;
    1778              :     }
    1779            8 :   } else if (g_str_equal (name, "input") || g_str_equal (name, "inputtype")
    1780            0 :       || g_str_equal (name, "inputname") || g_str_equal (name, "output")
    1781            7 :       || g_str_equal (name, "outputtype") || g_str_equal (name, "outputname")) {
    1782              :     GstTensorsInfo gst_info;
    1783            8 :     gboolean is_input = g_str_has_prefix (name, "input");
    1784              :     guint num;
    1785              : 
    1786            8 :     if (!value)
    1787            1 :       goto error;
    1788              : 
    1789            7 :     ml_single_get_gst_info (single_h, is_input, &gst_info);
    1790              : 
    1791            7 :     if (g_str_has_suffix (name, "type"))
    1792            0 :       num = gst_tensors_info_parse_types_string (&gst_info, value);
    1793            7 :     else if (g_str_has_suffix (name, "name"))
    1794            0 :       num = gst_tensors_info_parse_names_string (&gst_info, value);
    1795              :     else
    1796            7 :       num = gst_tensors_info_parse_dimensions_string (&gst_info, value);
    1797              : 
    1798            7 :     if (num == gst_info.num_tensors) {
    1799              :       /* change configuration */
    1800            7 :       status = ml_single_set_gst_info (single_h, &gst_info);
    1801              :     } else {
    1802            0 :       _ml_error_report
    1803              :           ("The property value, '%s', is not appropriate for the given property key, '%s'. The API has failed to parse the given property value.",
    1804              :           value, name);
    1805            0 :       status = ML_ERROR_INVALID_PARAMETER;
    1806              :     }
    1807              : 
    1808            7 :     gst_tensors_info_free (&gst_info);
    1809              :   } else {
    1810            0 :     g_object_set (G_OBJECT (single_h->filter), name, value, NULL);
    1811              :   }
    1812            9 :   goto done;
    1813            1 : error:
    1814            1 :   _ml_error_report
    1815              :       ("The parameter, value (const char *), is NULL. It should be a valid string representing the value to be set for the given property key, '%s'",
    1816              :       name);
    1817            1 :   status = ML_ERROR_INVALID_PARAMETER;
    1818           10 : done:
    1819           10 :   ML_SINGLE_HANDLE_UNLOCK (single_h);
    1820              : 
    1821           10 :   g_free (old_value);
    1822           10 :   return status;
    1823              : }
    1824              : 
    1825              : /**
    1826              :  * @brief Gets the property value for the given model.
    1827              :  */
    1828              : int
    1829           27 : ml_single_get_property (ml_single_h single, const char *name, char **value)
    1830              : {
    1831              :   ml_single *single_h;
    1832           27 :   int status = ML_ERROR_NONE;
    1833              : 
    1834           27 :   check_feature_state (ML_FEATURE_INFERENCE);
    1835              : 
    1836           27 :   if (!single)
    1837            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1838              :         "The parameter, single (ml_single_h), is NULL. It should be a valid instance of ml_single_h, which is usually created by ml_single_open().");
    1839           27 :   if (!name)
    1840            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1841              :         "The parameter, name (const char *), is NULL. It should be a valid string representing a property key.");
    1842           26 :   if (!value)
    1843            1 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1844              :         "The parameter, value (const char *), is NULL. It should be a valid string representing the value to be set for the given property key, '%s'",
    1845              :         name);
    1846              : 
    1847              :   /* init null */
    1848           25 :   *value = NULL;
    1849              : 
    1850           25 :   ML_SINGLE_GET_VALID_HANDLE_LOCKED (single_h, single, 0);
    1851              : 
    1852           25 :   if (g_str_equal (name, "input") || g_str_equal (name, "output") ||
    1853            8 :       g_str_equal (name, "inputtype") || g_str_equal (name, "inputname") ||
    1854            8 :       g_str_equal (name, "inputlayout") || g_str_equal (name, "outputtype") ||
    1855            7 :       g_str_equal (name, "outputname") || g_str_equal (name, "outputlayout") ||
    1856            7 :       g_str_equal (name, "accelerator") || g_str_equal (name, "custom")) {
    1857              :     /* string */
    1858           18 :     g_object_get (G_OBJECT (single_h->filter), name, value, NULL);
    1859            7 :   } else if (g_str_equal (name, "is-updatable")) {
    1860            5 :     gboolean bool_value = FALSE;
    1861              : 
    1862              :     /* boolean */
    1863            5 :     g_object_get (G_OBJECT (single_h->filter), name, &bool_value, NULL);
    1864           10 :     *value = (bool_value) ? g_strdup ("true") : g_strdup ("false");
    1865              :   } else {
    1866            2 :     _ml_error_report
    1867              :         ("The property key, '%s', is not available for get_property and not recognized by the API. It should be one of {input, inputtype, inputname, inputlayout, output, outputtype, outputname, outputlayout, accelerator, custom, is-updatable}.",
    1868              :         name);
    1869            2 :     status = ML_ERROR_NOT_SUPPORTED;
    1870              :   }
    1871              : 
    1872           25 :   ML_SINGLE_HANDLE_UNLOCK (single_h);
    1873           25 :   return status;
    1874              : }
    1875              : 
    1876              : /**
    1877              :  * @brief Internal helper function to validate model files.
    1878              :  */
    1879              : static int
    1880           90 : __ml_validate_model_file (const char *const *model,
    1881              :     const unsigned int num_models, gboolean * is_dir)
    1882              : {
    1883              :   guint i;
    1884              : 
    1885           90 :   if (!model)
    1886            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1887              :         "The parameter, model, is NULL. It should be a valid array of strings, where each string is a valid file path for a neural network model file.");
    1888           90 :   if (num_models < 1)
    1889            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1890              :         "The parameter, num_models, is 0. It should be the number of files for the given neural network model.");
    1891              : 
    1892           90 :   if (g_file_test (model[0], G_FILE_TEST_IS_DIR)) {
    1893            4 :     *is_dir = TRUE;
    1894            4 :     return ML_ERROR_NONE;
    1895              :   }
    1896              : 
    1897          169 :   for (i = 0; i < num_models; i++) {
    1898           86 :     if (!model[i] ||
    1899           86 :         !g_file_test (model[i], G_FILE_TEST_EXISTS | G_FILE_TEST_IS_REGULAR)) {
    1900            3 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1901              :           "The given param, model path [%d] = \"%s\" is invalid or the file is not found or accessible.",
    1902              :           i, _STR_NULL (model[i]));
    1903              :     }
    1904              :   }
    1905              : 
    1906           83 :   *is_dir = FALSE;
    1907              : 
    1908           83 :   return ML_ERROR_NONE;
    1909              : }
    1910              : 
    1911              : /**
    1912              :  * @brief Validates the nnfw model file.
    1913              :  * @since_tizen 5.5
    1914              :  * @param[in] model The path of model file.
    1915              :  * @param[in/out] nnfw The type of NNFW.
    1916              :  * @return @c 0 on success. Otherwise a negative error value.
    1917              :  * @retval #ML_ERROR_NONE Successful
    1918              :  * @retval #ML_ERROR_NOT_SUPPORTED Not supported, or framework to support this model file is unavailable in the environment.
    1919              :  * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
    1920              :  */
    1921              : int
    1922           90 : _ml_validate_model_file (const char *const *model,
    1923              :     const unsigned int num_models, ml_nnfw_type_e * nnfw)
    1924              : {
    1925           90 :   int status = ML_ERROR_NONE;
    1926           90 :   ml_nnfw_type_e detected = ML_NNFW_TYPE_ANY;
    1927           90 :   gboolean is_dir = FALSE;
    1928              :   gchar *pos, *fw_name;
    1929           90 :   gchar **file_ext = NULL;
    1930              :   guint i;
    1931              : 
    1932           90 :   if (!nnfw)
    1933           90 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
    1934              :         "The parameter, nnfw, is NULL. It should be a valid pointer of ml_nnfw_type_e.");
    1935              : 
    1936           90 :   _ml_error_report_return_continue_iferr (__ml_validate_model_file (model,
    1937              :           num_models, &is_dir),
    1938              :       "The parameters, model and num_models, are not valid.");
    1939              : 
    1940              :   /**
    1941              :    * @note detect-fw checks the file ext and returns proper fw name for given models.
    1942              :    * If detected fw and given nnfw are same, we don't need to check the file extension.
    1943              :    * If any condition for auto detection is added later, below code also should be updated.
    1944              :    */
    1945           87 :   fw_name = gst_tensor_filter_detect_framework (model, num_models, TRUE);
    1946           87 :   detected = _ml_get_nnfw_type_by_subplugin_name (fw_name);
    1947           87 :   g_free (fw_name);
    1948              : 
    1949           87 :   if (*nnfw == ML_NNFW_TYPE_ANY) {
    1950           37 :     if (detected == ML_NNFW_TYPE_ANY) {
    1951            0 :       _ml_error_report
    1952              :           ("The given neural network model (1st path is \"%s\", and there are %d paths declared) has unknown or unsupported extension. Please check its corresponding neural network framework and try to specify it instead of \"ML_NNFW_TYPE_ANY\".",
    1953              :           model[0], num_models);
    1954            0 :       status = ML_ERROR_INVALID_PARAMETER;
    1955              :     } else {
    1956           37 :       _ml_logi ("The given model is supposed a %s model.",
    1957              :           _ml_get_nnfw_subplugin_name (detected));
    1958           37 :       *nnfw = detected;
    1959              :     }
    1960              : 
    1961           37 :     goto done;
    1962           50 :   } else if (is_dir && *nnfw != ML_NNFW_TYPE_NNFW) {
    1963              :     /* supposed it is ONE if given model is directory */
    1964            2 :     _ml_error_report
    1965              :         ("The given model (1st path is \"%s\", and there are %d paths declared) is directory, which is allowed by \"NNFW (One Runtime)\" only, Please check the model and framework.",
    1966              :         model[0], num_models);
    1967            2 :     status = ML_ERROR_INVALID_PARAMETER;
    1968            2 :     goto done;
    1969           48 :   } else if (detected == *nnfw) {
    1970              :     /* Expected framework, nothing to do. */
    1971           43 :     goto done;
    1972              :   }
    1973              : 
    1974              :   /* Handle mismatched case, check file extension. */
    1975            5 :   file_ext = g_malloc0 (sizeof (char *) * (num_models + 1));
    1976           10 :   for (i = 0; i < num_models; i++) {
    1977            5 :     if ((pos = strrchr (model[i], '.')) == NULL) {
    1978            0 :       _ml_error_report ("The given model [%d]=\"%s\" has invalid extension.", i,
    1979              :           model[i]);
    1980            0 :       status = ML_ERROR_INVALID_PARAMETER;
    1981            0 :       goto done;
    1982              :     }
    1983              : 
    1984            5 :     file_ext[i] = g_ascii_strdown (pos, -1);
    1985              :   }
    1986              : 
    1987              :   /** @todo Make sure num_models is correct for each nnfw type */
    1988            5 :   switch (*nnfw) {
    1989            4 :     case ML_NNFW_TYPE_NNFW:
    1990              :     case ML_NNFW_TYPE_TVM:
    1991              :     case ML_NNFW_TYPE_ONNX_RUNTIME:
    1992              :     case ML_NNFW_TYPE_NCNN:
    1993              :     case ML_NNFW_TYPE_TENSORRT:
    1994              :     case ML_NNFW_TYPE_QNN:
    1995              :     case ML_NNFW_TYPE_LLAMACPP:
    1996              :     case ML_NNFW_TYPE_TIZEN_HAL:
    1997              :       /**
    1998              :        * We cannot check the file ext with NNFW.
    1999              :        * NNFW itself will validate metadata and model file.
    2000              :        */
    2001            4 :       break;
    2002            0 :     case ML_NNFW_TYPE_MVNC:
    2003              :     case ML_NNFW_TYPE_OPENVINO:
    2004              :     case ML_NNFW_TYPE_EDGE_TPU:
    2005              :       /**
    2006              :        * @todo Need to check method to validate model
    2007              :        * Although nnstreamer supports these frameworks,
    2008              :        * ML-API implementation is not ready.
    2009              :        */
    2010            0 :       _ml_error_report
    2011              :           ("Given NNFW is not supported by ML-API Inference.Single, yet, although it is supported by NNStreamer. If you have such NNFW integrated into your machine and want to access via ML-API, please update the corresponding implementation or report and discuss at github.com/nnstreamer/nnstreamer/issues.");
    2012            0 :       status = ML_ERROR_NOT_SUPPORTED;
    2013            0 :       break;
    2014            0 :     case ML_NNFW_TYPE_VD_AIFW:
    2015            0 :       if (!g_str_equal (file_ext[0], ".nb") &&
    2016            0 :           !g_str_equal (file_ext[0], ".ncp") &&
    2017            0 :           !g_str_equal (file_ext[0], ".tvn") &&
    2018            0 :           !g_str_equal (file_ext[0], ".bin")) {
    2019            0 :         status = ML_ERROR_INVALID_PARAMETER;
    2020              :       }
    2021            0 :       break;
    2022            0 :     case ML_NNFW_TYPE_SNAP:
    2023              : #if !defined (__ANDROID__)
    2024            0 :       _ml_error_report ("SNAP is supported by Android/arm64-v8a devices only.");
    2025            0 :       status = ML_ERROR_NOT_SUPPORTED;
    2026              : #endif
    2027              :       /* SNAP requires multiple files, set supported if model file exists. */
    2028            0 :       break;
    2029            0 :     case ML_NNFW_TYPE_ARMNN:
    2030            0 :       if (!g_str_equal (file_ext[0], ".caffemodel") &&
    2031            0 :           !g_str_equal (file_ext[0], ".tflite") &&
    2032            0 :           !g_str_equal (file_ext[0], ".pb") &&
    2033            0 :           !g_str_equal (file_ext[0], ".prototxt")) {
    2034            0 :         _ml_error_report
    2035              :             ("ARMNN accepts .caffemodel, .tflite, .pb, and .prototxt files only. Please support correct file extension. You have specified: \"%s\"",
    2036              :             file_ext[0]);
    2037            0 :         status = ML_ERROR_INVALID_PARAMETER;
    2038              :       }
    2039            0 :       break;
    2040            0 :     case ML_NNFW_TYPE_MXNET:
    2041            0 :       if (!g_str_equal (file_ext[0], ".params") &&
    2042            0 :           !g_str_equal (file_ext[0], ".json")) {
    2043            0 :         status = ML_ERROR_INVALID_PARAMETER;
    2044              :       }
    2045            0 :       break;
    2046            1 :     default:
    2047            1 :       _ml_error_report
    2048              :           ("You have designated an incorrect neural network framework (out of bound).");
    2049            1 :       status = ML_ERROR_INVALID_PARAMETER;
    2050            1 :       break;
    2051              :   }
    2052              : 
    2053           87 : done:
    2054           87 :   if (status == ML_ERROR_NONE) {
    2055           84 :     if (!_ml_nnfw_is_available (*nnfw, ML_NNFW_HW_ANY)) {
    2056            1 :       status = ML_ERROR_NOT_SUPPORTED;
    2057            1 :       _ml_error_report
    2058              :           ("The subplugin for tensor-filter \"%s\" is not available. Please install the corresponding tensor-filter subplugin file (usually, \"libnnstreamer_filter_${NAME}.so\") at the correct path. Please use \"nnstreamer-check\" utility to check related configurations. If you do not have the utility ready, build and install \"confchk\", which is located at ${nnstreamer_source}/tools/development/confchk/ .",
    2059              :           _ml_get_nnfw_subplugin_name (*nnfw));
    2060              :     }
    2061              :   } else {
    2062            3 :     _ml_error_report
    2063              :         ("The given model file, \"%s\" (1st of %d files), is invalid.",
    2064              :         model[0], num_models);
    2065              :   }
    2066              : 
    2067           87 :   g_strfreev (file_ext);
    2068           87 :   return status;
    2069              : }
        

Generated by: LCOV version 2.0-1