LCOV - code coverage report
Current view:  top level - capi-machine-learning-inference-1.8.6/c/src - ml-api-service-extension.c (source / functions)
Test:          ML API 1.8.6-0 nnstreamer/api#7f8530c294f86ec880b29347a861499239d358a1
Test Date:     2025-06-06 05:24:38

Coverage summary:      Coverage      Total      Hit
  Lines:                 84.3 %        344      290
  Functions:            100.0 %         18       18

            Line data    Source code
       1              : /* SPDX-License-Identifier: Apache-2.0 */
       2              : /**
       3              :  * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved.
       4              :  *
       5              :  * @file        ml-api-service-extension.c
       6              :  * @date        1 September 2023
       7              :  * @brief       ML service extension C-API.
       8              :  * @see         https://github.com/nnstreamer/api
       9              :  * @author      Jaeyun Jung <jy1210.jung@samsung.com>
      10              :  * @bug         No known bugs except for NYI items
      11              :  */
      12              : 
      13              : #include "ml-api-service-extension.h"
      14              : 
      15              : /**
       16              :  * @brief The time to wait for new input data in the message thread, in milliseconds.
      17              :  */
      18              : #define DEFAULT_TIMEOUT 200
      19              : 
      20              : /**
       21              :  * @brief The maximum number of input data items in the message queue (0 for no limit).
      22              :  */
      23              : #define DEFAULT_MAX_INPUT 5
      24              : 
      25              : /**
      26              :  * @brief Internal enumeration for ml-service extension types.
      27              :  */
      28              : typedef enum
      29              : {
      30              :   ML_EXTENSION_TYPE_UNKNOWN = 0,
      31              :   ML_EXTENSION_TYPE_SINGLE = 1,
      32              :   ML_EXTENSION_TYPE_PIPELINE = 2,
      33              : 
      34              :   ML_EXTENSION_TYPE_MAX
      35              : } ml_extension_type_e;
      36              : 
      37              : /**
      38              :  * @brief Internal structure of the message in ml-service extension handle.
      39              :  */
      40              : typedef struct
      41              : {
      42              :   gchar *name;
      43              :   ml_tensors_data_h input;
      44              :   ml_tensors_data_h output;
      45              : } ml_extension_msg_s;
      46              : 
      47              : /**
      48              :  * @brief Internal structure for ml-service extension handle.
      49              :  */
      50              : typedef struct
      51              : {
      52              :   ml_extension_type_e type;
      53              :   gboolean running;
       54              :   guint timeout; /**< The time to wait for new input data in the message thread, in milliseconds (see DEFAULT_TIMEOUT). */
       55              :   guint max_input; /**< The maximum number of input data items in the message queue (see DEFAULT_MAX_INPUT). */
      56              :   GThread *msg_thread;
      57              :   GAsyncQueue *msg_queue;
      58              : 
      59              :   /**
      60              :    * Handles for each ml-service extension type.
       61              :    * - single : Default. Opens the model file and prepares it for invocation. The configuration should include the model information.
       62              :    * - pipeline : Constructs a pipeline from the configuration. The configuration should include the pipeline description.
      63              :    */
      64              :   ml_single_h single;
      65              : 
      66              :   ml_pipeline_h pipeline;
      67              :   GHashTable *node_table;
      68              : } ml_extension_s;
      69              : 
      70              : /**
      71              :  * @brief Internal function to create node info in pipeline.
      72              :  */
      73              : static ml_service_node_info_s *
      74           42 : _ml_extension_node_info_new (ml_service_s * mls, const gchar * name,
      75              :     ml_service_node_type_e type)
      76              : {
      77           42 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
      78              :   ml_service_node_info_s *node_info;
      79              : 
      80           42 :   if (!STR_IS_VALID (name)) {
      81            2 :     _ml_error_report_return (NULL,
      82              :         "Cannot add new node info, invalid node name '%s'.", name);
      83              :   }
      84              : 
      85           40 :   if (g_hash_table_lookup (ext->node_table, name)) {
      86            2 :     _ml_error_report_return (NULL,
      87              :         "Cannot add duplicated node '%s' in ml-service pipeline.", name);
      88              :   }
      89              : 
      90           38 :   node_info = g_try_new0 (ml_service_node_info_s, 1);
      91           38 :   if (!node_info) {
      92            0 :     _ml_error_report_return (NULL,
      93              :         "Failed to allocate new memory for node info in ml-service pipeline. Out of memory?");
      94              :   }
      95              : 
      96           38 :   node_info->name = g_strdup (name);
      97           38 :   node_info->type = type;
      98           38 :   node_info->mls = mls;
      99              : 
     100           38 :   g_hash_table_insert (ext->node_table, g_strdup (name), node_info);
     101              : 
     102           38 :   return node_info;
     103              : }
     104              : 
     105              : /**
     106              :  * @brief Internal function to release pipeline node info.
     107              :  */
     108              : static void
     109           38 : _ml_extension_node_info_free (gpointer data)
     110              : {
     111           38 :   ml_service_node_info_s *node_info = (ml_service_node_info_s *) data;
     112              : 
     113           38 :   if (!node_info)
     114            0 :     return;
     115              : 
     116           38 :   if (node_info->info)
     117           34 :     ml_tensors_info_destroy (node_info->info);
     118              : 
     119           38 :   g_free (node_info->name);
     120           38 :   g_free (node_info);
     121              : }
     122              : 
     123              : /**
     124              :  * @brief Internal function to get the node info in ml-service extension.
     125              :  */
     126              : static ml_service_node_info_s *
     127           58 : _ml_extension_node_info_get (ml_extension_s * ext, const gchar * name)
     128              : {
     129           58 :   if (!STR_IS_VALID (name))
     130            8 :     return NULL;
     131              : 
     132           50 :   return g_hash_table_lookup (ext->node_table, name);
     133              : }
     134              : 
     135              : /**
     136              :  * @brief Internal function to release ml-service extension message.
     137              :  */
     138              : static void
     139           62 : _ml_extension_msg_free (gpointer data)
     140              : {
     141           62 :   ml_extension_msg_s *msg = (ml_extension_msg_s *) data;
     142              : 
     143           62 :   if (!msg)
     144            0 :     return;
     145              : 
     146           62 :   if (msg->input)
     147           47 :     ml_tensors_data_destroy (msg->input);
     148           62 :   if (msg->output)
     149           32 :     ml_tensors_data_destroy (msg->output);
     150              : 
     151           62 :   g_free (msg->name);
     152           62 :   g_free (msg);
     153              : }
     154              : 
     155              : /**
     156              :  * @brief Internal function to process ml-service extension message.
     157              :  */
     158              : static gpointer
     159           48 : _ml_extension_msg_thread (gpointer data)
     160              : {
     161           48 :   ml_service_s *mls = (ml_service_s *) data;
     162           48 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     163              :   int status;
     164              : 
     165           48 :   g_mutex_lock (&mls->lock);
     166           48 :   ext->running = TRUE;
     167           48 :   g_cond_signal (&mls->cond);
     168           48 :   g_mutex_unlock (&mls->lock);
     169              : 
     170          187 :   while (ext->running) {
     171              :     ml_extension_msg_s *msg;
     172              : 
     173          182 :     msg = g_async_queue_timeout_pop (ext->msg_queue,
     174           91 :         ext->timeout * G_TIME_SPAN_MILLISECOND);
     175              : 
     176           91 :     if (msg) {
     177           47 :       switch (ext->type) {
     178           32 :         case ML_EXTENSION_TYPE_SINGLE:
     179              :         {
     180           32 :           status = ml_single_invoke (ext->single, msg->input, &msg->output);
     181              : 
     182           32 :           if (status == ML_ERROR_NONE) {
     183           32 :             _ml_service_invoke_event_new_data (mls, NULL, msg->output);
     184              :           } else {
     185            0 :             _ml_error_report
     186              :                 ("Failed to invoke the model in ml-service extension thread.");
     187              :           }
     188           32 :           break;
     189              :         }
     190           15 :         case ML_EXTENSION_TYPE_PIPELINE:
     191              :         {
     192              :           ml_service_node_info_s *node_info;
     193              : 
     194           15 :           node_info = _ml_extension_node_info_get (ext, msg->name);
     195              : 
     196           15 :           if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
     197              :             /* The input data will be released in the pipeline. */
     198           15 :             status = ml_pipeline_src_input_data (node_info->handle, msg->input,
     199              :                 ML_PIPELINE_BUF_POLICY_AUTO_FREE);
     200           15 :             msg->input = NULL;
     201              : 
     202           15 :             if (status != ML_ERROR_NONE) {
     203            0 :               _ml_error_report
     204              :                   ("Failed to push input data into the pipeline in ml-service extension thread.");
     205              :             }
     206              :           } else {
     207            0 :             _ml_error_report
     208              :                 ("Failed to push input data into the pipeline, cannot find input node '%s'.",
     209              :                 msg->name);
     210              :           }
     211           15 :           break;
     212              :         }
     213            0 :         default:
     214              :           /* Unknown ml-service extension type, skip this. */
     215            0 :           break;
     216              :       }
     217              : 
     218           47 :       _ml_extension_msg_free (msg);
     219              :     }
     220              :   }
     221              : 
     222           48 :   return NULL;
     223              : }
     224              : 
     225              : /**
     226              :  * @brief Wrapper to release tensors-info handle.
     227              :  */
     228              : static void
     229           30 : _ml_extension_destroy_tensors_info (void *data)
     230              : {
     231           30 :   ml_tensors_info_h info = (ml_tensors_info_h) data;
     232              : 
     233           30 :   if (info)
     234           30 :     ml_tensors_info_destroy (info);
     235           30 : }
     236              : 
     237              : /**
     238              :  * @brief Internal function to parse single-shot info from json.
     239              :  */
     240              : static int
     241           39 : _ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single)
     242              : {
     243           39 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     244              :   ml_option_h option;
     245              :   int status;
     246              : 
     247           39 :   status = ml_option_create (&option);
     248           39 :   if (status != ML_ERROR_NONE) {
     249           39 :     _ml_error_report_return (status,
     250              :         "Failed to parse configuration file, cannot create ml-option handle.");
     251              :   }
     252              : 
     253              :   /**
     254              :    * 1. "key" : load model info from ml-service agent.
     255              :    * 2. "model" : configuration file includes model path.
     256              :    */
     257           39 :   if (json_object_has_member (single, "key")) {
     258            1 :     const gchar *key = json_object_get_string_member (single, "key");
     259              : 
     260            1 :     if (STR_IS_VALID (key)) {
     261              :       ml_information_h model_info;
     262              : 
     263            1 :       status = ml_service_model_get_activated (key, &model_info);
     264            1 :       if (status == ML_ERROR_NONE) {
     265            1 :         gchar *paths = NULL;
     266              : 
     267              :         /** @todo parse desc and other information if necessary. */
     268            1 :         ml_information_get (model_info, "path", (void **) (&paths));
     269            2 :         ml_option_set (option, "models", g_strdup (paths), g_free);
     270              : 
     271            1 :         ml_information_destroy (model_info);
     272              :       } else {
     273            0 :         _ml_error_report
     274              :             ("Failed to parse configuration file, cannot get the model of '%s'.",
     275              :             key);
     276            0 :         goto error;
     277              :       }
     278              :     }
     279           38 :   } else if (json_object_has_member (single, "model")) {
     280           36 :     JsonNode *file_node = json_object_get_member (single, "model");
     281           36 :     gchar *paths = NULL;
     282              : 
     283           36 :     status = _ml_service_conf_parse_string (file_node, ",", &paths);
     284           36 :     if (status != ML_ERROR_NONE) {
     285            0 :       _ml_error_report
     286              :           ("Failed to parse configuration file, it should have valid model path.");
     287            0 :       goto error;
     288              :     }
     289              : 
     290           36 :     ml_option_set (option, "models", paths, g_free);
     291              :   } else {
     292            2 :     status = ML_ERROR_INVALID_PARAMETER;
     293            2 :     _ml_error_report
     294              :         ("Failed to parse configuration file, cannot get the model path.");
     295            2 :     goto error;
     296              :   }
     297              : 
     298           37 :   if (json_object_has_member (single, "framework")) {
     299           34 :     const gchar *fw = json_object_get_string_member (single, "framework");
     300              : 
     301           34 :     if (STR_IS_VALID (fw))
     302           34 :       ml_option_set (option, "framework_name", g_strdup (fw), g_free);
     303              :   }
     304              : 
     305           37 :   if (json_object_has_member (single, "input_info")) {
     306           16 :     JsonNode *info_node = json_object_get_member (single, "input_info");
     307              :     ml_tensors_info_h in_info;
     308              : 
     309           16 :     status = _ml_service_conf_parse_tensors_info (info_node, &in_info);
     310           16 :     if (status != ML_ERROR_NONE) {
     311            0 :       _ml_error_report
     312              :           ("Failed to parse configuration file, cannot parse input information.");
     313            0 :       goto error;
     314              :     }
     315              : 
     316           16 :     ml_option_set (option, "input_info", in_info,
     317              :         _ml_extension_destroy_tensors_info);
     318              :   }
     319              : 
     320           37 :   if (json_object_has_member (single, "output_info")) {
     321           16 :     JsonNode *info_node = json_object_get_member (single, "output_info");
     322              :     ml_tensors_info_h out_info;
     323              : 
     324           16 :     status = _ml_service_conf_parse_tensors_info (info_node, &out_info);
     325           16 :     if (status != ML_ERROR_NONE) {
     326            2 :       _ml_error_report
     327              :           ("Failed to parse configuration file, cannot parse output information.");
     328            2 :       goto error;
     329              :     }
     330              : 
     331           14 :     ml_option_set (option, "output_info", out_info,
     332              :         _ml_extension_destroy_tensors_info);
     333              :   }
     334              : 
     335           35 :   if (json_object_has_member (single, "custom")) {
     336            0 :     const gchar *custom = json_object_get_string_member (single, "custom");
     337              : 
     338            0 :     if (STR_IS_VALID (custom))
     339            0 :       ml_option_set (option, "custom", g_strdup (custom), g_free);
     340              :   }
     341              : 
     342           35 : error:
     343           39 :   if (status == ML_ERROR_NONE)
     344           35 :     status = ml_single_open_with_option (&ext->single, option);
     345              : 
     346           39 :   ml_option_destroy (option);
     347           39 :   return status;
     348              : }
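
For reference, below is a minimal sketch of the "single" configuration that this parser accepts. The member names ("single", "key", "model", "framework", "input_info", "output_info", "custom") come from the code above; the model path, framework name, custom string, and the exact field layout inside "input_info" / "output_info" (handled by _ml_service_conf_parse_tensors_info, which is not part of this file) are illustrative assumptions.

    {
      "single" : {
        "model" : "/path/to/model.tflite",
        "framework" : "tensorflow-lite",
        "input_info" : [ { "type" : "float32", "dimension" : "3:224:224:1" } ],
        "output_info" : [ { "type" : "float32", "dimension" : "1001:1" } ],
        "custom" : "NumThreads:2"
      }
    }

Instead of "model", the object may contain "key" so that the model path is resolved through the ml-service agent (ml_service_model_get_activated); "model" itself may be a single string or an array of paths, which the parser joins with ','.
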
     349              : 
     350              : /**
     351              :  * @brief Internal function to parse the node info in pipeline.
     352              :  */
     353              : static int
     354           42 : _ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node,
     355              :     ml_service_node_type_e type)
     356              : {
     357           42 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     358           42 :   JsonArray *array = NULL;
     359              :   JsonObject *object;
     360              :   guint i, n;
     361              :   int status;
     362              : 
     363           42 :   n = 1;
     364           42 :   if (JSON_NODE_HOLDS_ARRAY (node)) {
     365           42 :     array = json_node_get_array (node);
     366           42 :     n = json_array_get_length (array);
     367              :   }
     368              : 
     369           76 :   for (i = 0; i < n; i++) {
     370           42 :     const gchar *name = NULL;
     371              :     ml_service_node_info_s *node_info;
     372              : 
     373           42 :     if (array)
     374           42 :       object = json_array_get_object_element (array, i);
     375              :     else
     376            0 :       object = json_node_get_object (node);
     377              : 
     378           42 :     name = _ml_service_get_json_string_member (object, "name");
     379              : 
     380           42 :     node_info = _ml_extension_node_info_new (mls, name, type);
     381           42 :     if (!node_info) {
     382            4 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     383              :           "Failed to parse configuration file, cannot add new node information.");
     384              :     }
     385              : 
     386           38 :     if (json_object_has_member (object, "info")) {
     387           36 :       JsonNode *info_node = json_object_get_member (object, "info");
     388              : 
     389           36 :       status = _ml_service_conf_parse_tensors_info (info_node,
     390              :           &node_info->info);
     391           36 :       if (status != ML_ERROR_NONE) {
     392            2 :         _ml_error_report_return (status,
     393              :             "Failed to parse configuration file, cannot parse the information.");
     394              :       }
     395              :     } else {
     396            2 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     397              :           "Failed to parse configuration file, cannot find node information.");
     398              :     }
     399              : 
     400           34 :     switch (type) {
     401           21 :       case ML_SERVICE_NODE_TYPE_INPUT:
     402           42 :         status = ml_pipeline_src_get_handle (ext->pipeline, name,
     403           21 :             &node_info->handle);
     404           21 :         break;
     405           13 :       case ML_SERVICE_NODE_TYPE_OUTPUT:
     406           26 :         status = ml_pipeline_sink_register (ext->pipeline, name,
     407           13 :             _ml_service_pipeline_sink_cb, node_info, &node_info->handle);
     408           13 :         break;
     409            0 :       default:
     410            0 :         status = ML_ERROR_INVALID_PARAMETER;
     411            0 :         break;
     412              :     }
     413              : 
     414           34 :     if (status != ML_ERROR_NONE) {
     415            0 :       _ml_error_report_return (status,
     416              :           "Failed to parse configuration file, cannot get the handle for pipeline node.");
     417              :     }
     418              :   }
     419              : 
     420           34 :   return ML_ERROR_NONE;
     421              : }
     422              : 
     423              : /**
     424              :  * @brief Internal function to parse pipeline info from json.
     425              :  */
     426              : static int
     427           21 : _ml_extension_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe)
     428              : {
     429           21 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     430           21 :   g_autofree gchar *desc = NULL;
     431              :   int status;
     432              : 
     433              :   /**
     434              :    * 1. "key" : load pipeline from ml-service agent.
     435              :    * 2. "description" : configuration file includes pipeline description.
     436              :    */
     437           21 :   if (json_object_has_member (pipe, "key")) {
     438            1 :     const gchar *key = json_object_get_string_member (pipe, "key");
     439              : 
     440            1 :     if (STR_IS_VALID (key)) {
     441            1 :       status = ml_service_pipeline_get (key, &desc);
     442            1 :       if (status != ML_ERROR_NONE) {
     443            0 :         _ml_error_report_return (status,
     444              :             "Failed to parse configuration file, cannot get the pipeline of '%s'.",
     445              :             key);
     446              :       }
     447              :     }
     448           20 :   } else if (json_object_has_member (pipe, "description")) {
     449           40 :     desc = g_strdup (json_object_get_string_member (pipe, "description"));
     450              :   } else {
     451            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     452              :         "Failed to parse configuration file, cannot get the pipeline description.");
     453              :   }
     454              : 
     455           21 :   status = ml_pipeline_construct (desc, NULL, NULL, &ext->pipeline);
     456           21 :   if (status != ML_ERROR_NONE) {
     457            0 :     _ml_error_report_return (status,
     458              :         "Failed to parse configuration file, cannot construct the pipeline.");
     459              :   }
     460              : 
     461           21 :   if (json_object_has_member (pipe, "input_node")) {
     462           21 :     JsonNode *node = json_object_get_member (pipe, "input_node");
     463              : 
     464           21 :     status = _ml_extension_conf_parse_pipeline_node (mls, node,
     465              :         ML_SERVICE_NODE_TYPE_INPUT);
     466           21 :     if (status != ML_ERROR_NONE) {
     467            0 :       _ml_error_report_return (status,
     468              :           "Failed to parse configuration file, cannot get the input node.");
     469              :     }
     470              :   } else {
     471            0 :     _ml_logw
      472              :         ("No input node is defined in the pipeline. Perhaps a non-appsrc element is used?");
     473              :   }
     474              : 
     475           21 :   if (json_object_has_member (pipe, "output_node")) {
     476           21 :     JsonNode *node = json_object_get_member (pipe, "output_node");
     477              : 
     478           21 :     status = _ml_extension_conf_parse_pipeline_node (mls, node,
     479              :         ML_SERVICE_NODE_TYPE_OUTPUT);
     480           21 :     if (status != ML_ERROR_NONE) {
     481            8 :       _ml_error_report_return (status,
     482              :           "Failed to parse configuration file, cannot get the output node.");
     483              :     }
     484              :   } else {
     485            0 :     _ml_logw ("No output node is defined in the pipeline.");
     486              :   }
     487              : 
      488              :   /* Start the pipeline when creating the ml-service handle to validate the pipeline description. */
     489           13 :   status = ml_pipeline_start (ext->pipeline);
     490           13 :   if (status != ML_ERROR_NONE) {
     491            0 :     _ml_error_report_return (status,
     492              :         "Failed to parse configuration file, cannot start the pipeline.");
     493              :   }
     494              : 
     495           13 :   return ML_ERROR_NONE;
     496              : }
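
Below is a minimal sketch of the "pipeline" configuration handled by the two functions above. The member names ("pipeline", "key", "description", "input_node", "output_node", "name", "info") are taken from the code; the GStreamer description and the tensor "info" fields are illustrative assumptions. Each node "name" must match an appsrc / tensor_sink element name in the description so that ml_pipeline_src_get_handle () and ml_pipeline_sink_register () can resolve it.

    {
      "pipeline" : {
        "description" : "appsrc name=input_img ! other/tensors,format=static,num_tensors=1,types=float32,dimensions=3:224:224:1,framerate=(fraction)0/1 ! tensor_filter framework=tensorflow-lite model=/path/to/model.tflite ! tensor_sink name=result",
        "input_node" : [
          { "name" : "input_img", "info" : [ { "type" : "float32", "dimension" : "3:224:224:1" } ] }
        ],
        "output_node" : [
          { "name" : "result", "info" : [ { "type" : "float32", "dimension" : "1001:1" } ] }
        ]
      }
    }

As in the single case, "key" may replace "description" to fetch a registered pipeline from the ml-service agent (ml_service_pipeline_get).
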
     497              : 
     498              : /**
     499              :  * @brief Internal function to parse configuration file.
     500              :  */
     501              : static int
     502           60 : _ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object)
     503              : {
     504           60 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     505              :   int status;
     506              : 
     507           60 :   if (json_object_has_member (object, "single")) {
     508           39 :     JsonObject *single = json_object_get_object_member (object, "single");
     509              : 
     510           39 :     status = _ml_extension_conf_parse_single (mls, single);
     511           39 :     if (status != ML_ERROR_NONE)
     512            4 :       return status;
     513              : 
     514           35 :     ext->type = ML_EXTENSION_TYPE_SINGLE;
     515           21 :   } else if (json_object_has_member (object, "pipeline")) {
     516           21 :     JsonObject *pipe = json_object_get_object_member (object, "pipeline");
     517              : 
     518           21 :     status = _ml_extension_conf_parse_pipeline (mls, pipe);
     519           21 :     if (status != ML_ERROR_NONE)
     520            8 :       return status;
     521              : 
     522           13 :     ext->type = ML_EXTENSION_TYPE_PIPELINE;
     523              :   } else {
     524            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     525              :         "Failed to parse configuration file, cannot get the valid type from configuration.");
     526              :   }
     527              : 
     528           48 :   return ML_ERROR_NONE;
     529              : }
     530              : 
     531              : /**
     532              :  * @brief Internal function to create ml-service extension.
     533              :  */
     534              : int
     535           60 : _ml_service_extension_create (ml_service_s * mls, JsonObject * object)
     536              : {
     537              :   ml_extension_s *ext;
     538           60 :   g_autofree gchar *thread_name = g_strdup_printf ("ml-ext-msg-%d", getpid ());
     539              :   int status;
     540              : 
     541           60 :   mls->priv = ext = g_try_new0 (ml_extension_s, 1);
     542           60 :   if (ext == NULL) {
     543            0 :     _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
     544              :         "Failed to allocate memory for ml-service extension. Out of memory?");
     545              :   }
     546              : 
     547           60 :   ext->type = ML_EXTENSION_TYPE_UNKNOWN;
     548           60 :   ext->running = FALSE;
     549           60 :   ext->timeout = DEFAULT_TIMEOUT;
     550           60 :   ext->max_input = DEFAULT_MAX_INPUT;
     551           60 :   ext->node_table = g_hash_table_new_full (g_str_hash, g_str_equal, g_free,
     552              :       _ml_extension_node_info_free);
     553              : 
     554           60 :   status = _ml_extension_conf_parse_json (mls, object);
     555           60 :   if (status != ML_ERROR_NONE) {
     556           12 :     _ml_error_report_return (status,
     557              :         "Failed to parse the ml-service extension configuration.");
     558              :   }
     559              : 
     560           48 :   g_mutex_lock (&mls->lock);
     561              : 
     562           48 :   ext->msg_queue = g_async_queue_new_full (_ml_extension_msg_free);
     563           48 :   ext->msg_thread = g_thread_new (thread_name, _ml_extension_msg_thread, mls);
     564              : 
     565              :   /* Wait until the message thread has been initialized. */
     566           48 :   g_cond_wait (&mls->cond, &mls->lock);
     567           48 :   g_mutex_unlock (&mls->lock);
     568              : 
     569           48 :   return ML_ERROR_NONE;
     570              : }
     571              : 
     572              : /**
     573              :  * @brief Internal function to release ml-service extension.
     574              :  */
     575              : int
     576           60 : _ml_service_extension_destroy (ml_service_s * mls)
     577              : {
     578           60 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     579              : 
      580              :   /* Expected to be called internally to release the handle. */
     581           60 :   if (!ext)
     582            0 :     return ML_ERROR_NONE;
     583              : 
     584              :   /**
     585              :    * Close message thread.
      586              :    * If model inference is running, the message thread may be waiting for the result.
      587              :    * This takes time, so do not join the thread while holding the extension lock.
     588              :    */
     589           60 :   ext->running = FALSE;
     590           60 :   if (ext->msg_thread) {
     591           48 :     g_thread_join (ext->msg_thread);
     592           48 :     ext->msg_thread = NULL;
     593              :   }
     594              : 
     595           60 :   if (ext->msg_queue) {
     596           48 :     g_async_queue_unref (ext->msg_queue);
     597           48 :     ext->msg_queue = NULL;
     598              :   }
     599              : 
     600           60 :   if (ext->single) {
     601           35 :     ml_single_close (ext->single);
     602           35 :     ext->single = NULL;
     603              :   }
     604              : 
     605           60 :   if (ext->pipeline) {
     606           21 :     ml_pipeline_stop (ext->pipeline);
     607           21 :     ml_pipeline_destroy (ext->pipeline);
     608           21 :     ext->pipeline = NULL;
     609              :   }
     610              : 
     611           60 :   if (ext->node_table) {
     612           60 :     g_hash_table_destroy (ext->node_table);
     613           60 :     ext->node_table = NULL;
     614              :   }
     615              : 
     616           60 :   g_free (ext);
     617           60 :   mls->priv = NULL;
     618              : 
     619           60 :   return ML_ERROR_NONE;
     620              : }
     621              : 
     622              : /**
     623              :  * @brief Internal function to start ml-service extension.
     624              :  */
     625              : int
     626            2 : _ml_service_extension_start (ml_service_s * mls)
     627              : {
     628            2 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     629            2 :   int status = ML_ERROR_NONE;
     630              : 
     631            2 :   switch (ext->type) {
     632            2 :     case ML_EXTENSION_TYPE_PIPELINE:
     633            2 :       status = ml_pipeline_start (ext->pipeline);
     634            2 :       break;
     635            0 :     case ML_EXTENSION_TYPE_SINGLE:
     636              :       /* Do nothing. */
     637            0 :       break;
     638            0 :     default:
     639            0 :       status = ML_ERROR_NOT_SUPPORTED;
     640            0 :       break;
     641              :   }
     642              : 
     643            2 :   return status;
     644              : }
     645              : 
     646              : /**
     647              :  * @brief Internal function to stop ml-service extension.
     648              :  */
     649              : int
     650            2 : _ml_service_extension_stop (ml_service_s * mls)
     651              : {
     652            2 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     653            2 :   int status = ML_ERROR_NONE;
     654              : 
     655            2 :   switch (ext->type) {
     656            2 :     case ML_EXTENSION_TYPE_PIPELINE:
     657            2 :       status = ml_pipeline_stop (ext->pipeline);
     658            2 :       break;
     659            0 :     case ML_EXTENSION_TYPE_SINGLE:
     660              :       /* Do nothing. */
     661            0 :       break;
     662            0 :     default:
     663            0 :       status = ML_ERROR_NOT_SUPPORTED;
     664            0 :       break;
     665              :   }
     666              : 
     667            2 :   return status;
     668              : }
     669              : 
     670              : /**
     671              :  * @brief Internal function to get the information of required input data.
     672              :  */
     673              : int
     674           26 : _ml_service_extension_get_input_information (ml_service_s * mls,
     675              :     const char *name, ml_tensors_info_h * info)
     676              : {
     677           26 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     678              :   int status;
     679              : 
     680           26 :   switch (ext->type) {
     681           13 :     case ML_EXTENSION_TYPE_SINGLE:
     682           13 :       status = ml_single_get_input_info (ext->single, info);
     683           13 :       break;
     684           13 :     case ML_EXTENSION_TYPE_PIPELINE:
     685              :     {
     686              :       ml_service_node_info_s *node_info;
     687              : 
     688           13 :       node_info = _ml_extension_node_info_get (ext, name);
     689              : 
     690           13 :       if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
     691            5 :         status = _ml_tensors_info_create_from (node_info->info, info);
     692              :       } else {
     693            8 :         status = ML_ERROR_INVALID_PARAMETER;
     694              :       }
     695           13 :       break;
     696              :     }
     697            0 :     default:
     698            0 :       status = ML_ERROR_NOT_SUPPORTED;
     699            0 :       break;
     700              :   }
     701              : 
     702           26 :   return status;
     703              : }
     704              : 
     705              : /**
     706              :  * @brief Internal function to get the information of output data.
     707              :  */
     708              : int
     709           18 : _ml_service_extension_get_output_information (ml_service_s * mls,
     710              :     const char *name, ml_tensors_info_h * info)
     711              : {
     712           18 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     713              :   int status;
     714              : 
     715           18 :   switch (ext->type) {
     716            7 :     case ML_EXTENSION_TYPE_SINGLE:
     717            7 :       status = ml_single_get_output_info (ext->single, info);
     718            7 :       break;
     719           11 :     case ML_EXTENSION_TYPE_PIPELINE:
     720              :     {
     721              :       ml_service_node_info_s *node_info;
     722              : 
     723           11 :       node_info = _ml_extension_node_info_get (ext, name);
     724              : 
     725           11 :       if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_OUTPUT) {
     726            3 :         status = _ml_tensors_info_create_from (node_info->info, info);
     727              :       } else {
     728            8 :         status = ML_ERROR_INVALID_PARAMETER;
     729              :       }
     730           11 :       break;
     731              :     }
     732            0 :     default:
     733            0 :       status = ML_ERROR_NOT_SUPPORTED;
     734            0 :       break;
     735              :   }
     736              : 
     737           18 :   if (status != ML_ERROR_NONE) {
     738            8 :     if (*info) {
     739            0 :       ml_tensors_info_destroy (*info);
     740            0 :       *info = NULL;
     741              :     }
     742              :   }
     743              : 
     744           18 :   return status;
     745              : }
     746              : 
     747              : /**
     748              :  * @brief Internal function to set the information for ml-service extension.
     749              :  */
     750              : int
     751           34 : _ml_service_extension_set_information (ml_service_s * mls, const char *name,
     752              :     const char *value)
     753              : {
     754           34 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     755              : 
      756              :   /* Check limits for the message queue and other options. */
     757           34 :   if (g_ascii_strcasecmp (name, "input_queue_size") == 0 ||
     758           34 :       g_ascii_strcasecmp (name, "max_input") == 0) {
     759            2 :     ext->max_input = (guint) g_ascii_strtoull (value, NULL, 10);
     760           32 :   } else if (g_ascii_strcasecmp (name, "timeout") == 0) {
     761            0 :     ext->timeout = (guint) g_ascii_strtoull (value, NULL, 10);
     762              :   }
     763              : 
     764           34 :   return ML_ERROR_NONE;
     765              : }
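
A minimal usage sketch for the options above, assuming ml_service_set_information () is the public wrapper that reaches this function (the wrapper itself is outside this file). Values are plain decimal strings, parsed with g_ascii_strtoull ().

    /* 'handle' is assumed to be an ml_service_h created from an extension configuration. */
    ml_service_set_information (handle, "max_input", "10");  /* queue at most 10 pending requests; "input_queue_size" is accepted as an alias, 0 means no limit */
    ml_service_set_information (handle, "timeout", "500");   /* wait up to 500 ms for new input data in the message thread */
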
     766              : 
     767              : /**
     768              :  * @brief Internal function to add an input data to process the model in ml-service extension handle.
     769              :  */
     770              : int
     771           72 : _ml_service_extension_request (ml_service_s * mls, const char *name,
     772              :     const ml_tensors_data_h data)
     773              : {
     774           72 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     775              :   ml_extension_msg_s *msg;
     776              :   int status, len;
     777              : 
     778           72 :   if (ext->type == ML_EXTENSION_TYPE_PIPELINE) {
     779              :     ml_service_node_info_s *node_info;
     780              : 
     781           23 :     if (!STR_IS_VALID (name)) {
     782            4 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     783              :           "The parameter, name '%s', is invalid.", name);
     784              :     }
     785              : 
     786           19 :     node_info = _ml_extension_node_info_get (ext, name);
     787              : 
     788           19 :     if (!node_info || node_info->type != ML_SERVICE_NODE_TYPE_INPUT) {
     789            4 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     790              :           "The parameter, name '%s', is invalid, cannot find the input node from pipeline.",
     791              :           name);
     792              :     }
     793              :   }
     794              : 
     795           64 :   len = g_async_queue_length (ext->msg_queue);
     796              : 
     797           64 :   if (ext->max_input > 0 && len > 0 && ext->max_input <= len) {
     798            2 :     _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
      799              :         "Failed to push input data into the queue, the maximum number of inputs is %u.",
     800              :         ext->max_input);
     801              :   }
     802              : 
     803           62 :   msg = g_try_new0 (ml_extension_msg_s, 1);
     804           62 :   if (!msg) {
     805            0 :     _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
     806              :         "Failed to allocate the ml-service extension message. Out of memory?");
     807              :   }
     808              : 
     809           62 :   msg->name = g_strdup (name);
     810           62 :   status = ml_tensors_data_clone (data, &msg->input);
     811              : 
     812           62 :   if (status != ML_ERROR_NONE) {
     813            0 :     _ml_extension_msg_free (msg);
     814            0 :     _ml_error_report_return (status, "Failed to clone input data.");
     815              :   }
     816              : 
     817           62 :   g_async_queue_push (ext->msg_queue, msg);
     818              : 
     819           62 :   return ML_ERROR_NONE;
     820              : }
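
To tie the pieces together, here is a minimal end-to-end sketch for the single-shot case, assuming the public ml_service_* wrappers that eventually call _ml_service_extension_create / _ml_service_extension_request and deliver results through _ml_service_invoke_event_new_data. The wrappers, the configuration path, and the exact event payload key ("data") are assumptions not confirmed by this file; error handling is omitted.

    #include <nnstreamer.h>
    #include <ml-api-service.h>

    /* Called when a new inference result is available (ML_SERVICE_EVENT_NEW_DATA). */
    static void
    event_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
    {
      ml_tensors_data_h output = NULL;
      (void) user_data;

      if (event == ML_SERVICE_EVENT_NEW_DATA) {
        ml_information_get (event_data, "data", (void **) &output);
        /* ... consume the output tensors here ... */
      }
    }

    int
    main (void)
    {
      ml_service_h handle = NULL;
      ml_tensors_info_h in_info = NULL;
      ml_tensors_data_h in_data = NULL;

      /* Parses the JSON configuration and spawns the message thread (see _ml_service_extension_create). */
      ml_service_new ("/path/to/single-config.json", &handle);
      ml_service_set_event_cb (handle, event_cb, NULL);

      /* Allocate input matching the model; for the single type the node name may be NULL. */
      ml_service_get_input_information (handle, NULL, &in_info);
      ml_tensors_data_create (in_info, &in_data);

      /* The input is cloned into the message queue and processed asynchronously. */
      ml_service_request (handle, NULL, in_data);

      /* ... wait for event_cb, e.g. with a GMainLoop or a condition variable ... */

      ml_tensors_data_destroy (in_data);
      ml_tensors_info_destroy (in_info);
      ml_service_destroy (handle);
      return 0;
    }
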
        

Generated by: LCOV version 2.0-1