LCOV - code coverage report
Current view: top level - capi-machine-learning-inference-1.8.6/c/src - ml-api-service-extension.c (source / functions)
Test: ML API 1.8.6-0 nnstreamer/api#abde31caf90ada0ea14929b563b6d19f563740eb
Test Date: 2025-08-15 05:27:32
Coverage:   Lines: 83.0 % (292 of 352)   Functions: 100.0 % (18 of 18)

            Line data    Source code
       1              : /* SPDX-License-Identifier: Apache-2.0 */
       2              : /**
       3              :  * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved.
       4              :  *
       5              :  * @file        ml-api-service-extension.c
       6              :  * @date        1 September 2023
       7              :  * @brief       ML service extension C-API.
       8              :  * @see         https://github.com/nnstreamer/api
       9              :  * @author      Jaeyun Jung <jy1210.jung@samsung.com>
      10              :  * @bug         No known bugs except for NYI items
      11              :  */
      12              : 
      13              : #include "ml-api-service-extension.h"
      14              : 
      15              : /**
      16              :  * @brief The time to wait for new input data in the message thread, in milliseconds.
      17              :  */
      18              : #define DEFAULT_TIMEOUT 200
      19              : 
      20              : /**
      21              :  * @brief The max number of input data in message queue (0 for no limit).
      22              :  */
      23              : #define DEFAULT_MAX_INPUT 5
      24              : 
      25              : /**
      26              :  * @brief Internal enumeration for ml-service extension types.
      27              :  */
      28              : typedef enum
      29              : {
      30              :   ML_EXTENSION_TYPE_UNKNOWN = 0,
      31              :   ML_EXTENSION_TYPE_SINGLE = 1,
      32              :   ML_EXTENSION_TYPE_PIPELINE = 2,
      33              : 
      34              :   ML_EXTENSION_TYPE_MAX
      35              : } ml_extension_type_e;
      36              : 
      37              : /**
      38              :  * @brief Internal structure of the message in ml-service extension handle.
      39              :  */
      40              : typedef struct
      41              : {
      42              :   gchar *name;
      43              :   ml_tensors_data_h input;
      44              :   ml_tensors_data_h output;
      45              : } ml_extension_msg_s;
      46              : 
      47              : /**
      48              :  * @brief Internal structure for ml-service extension handle.
      49              :  */
      50              : typedef struct
      51              : {
      52              :   ml_extension_type_e type;
      53              :   gboolean running;
      54              :   guint timeout; /**< The time to wait for new input data in the message thread, in milliseconds (see DEFAULT_TIMEOUT). */
      55              :   guint max_input; /**< The max number of input data in the message queue (see DEFAULT_MAX_INPUT). */
      56              :   GThread *msg_thread;
      57              :   GAsyncQueue *msg_queue;
      58              : 
      59              :   /**
      60              :    * Handles for each ml-service extension type.
      61              :    * - single : Default. Opens the model file and prepares invocation. The configuration should include the model information.
      62              :    * - pipeline : Constructs a pipeline from the configuration. The configuration should include the pipeline description.
      63              :    */
      64              :   ml_single_h single;
      65              : 
      66              :   ml_pipeline_h pipeline;
      67              :   GHashTable *node_table;
      68              : } ml_extension_s;
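                      : 
                      : /**
                      :  * The configuration object handed to _ml_service_extension_create() selects the
                      :  * extension type by its top-level member. An illustrative shape of the two forms
                      :  * (member contents are sketched below, not normative):
                      :  *   { "single"   : { ... } }   --> ML_EXTENSION_TYPE_SINGLE
                      :  *   { "pipeline" : { ... } }   --> ML_EXTENSION_TYPE_PIPELINE
                      :  */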
      69              : 
      70              : /**
      71              :  * @brief Internal function to create node info in pipeline.
      72              :  */
      73              : static ml_service_node_info_s *
      74           42 : _ml_extension_node_info_new (ml_service_s * mls, const gchar * name,
      75              :     ml_service_node_type_e type)
      76              : {
      77           42 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
      78              :   ml_service_node_info_s *node_info;
      79              : 
      80           42 :   if (!STR_IS_VALID (name)) {
      81            2 :     _ml_error_report_return (NULL,
      82              :         "Cannot add new node info, invalid node name '%s'.", name);
      83              :   }
      84              : 
      85           40 :   if (g_hash_table_lookup (ext->node_table, name)) {
      86            2 :     _ml_error_report_return (NULL,
      87              :         "Cannot add duplicated node '%s' in ml-service pipeline.", name);
      88              :   }
      89              : 
      90           38 :   node_info = g_try_new0 (ml_service_node_info_s, 1);
      91           38 :   if (!node_info) {
      92            0 :     _ml_error_report_return (NULL,
      93              :         "Failed to allocate new memory for node info in ml-service pipeline. Out of memory?");
      94              :   }
      95              : 
      96           38 :   node_info->name = g_strdup (name);
      97           38 :   node_info->type = type;
      98           38 :   node_info->mls = mls;
      99              : 
     100           38 :   g_hash_table_insert (ext->node_table, g_strdup (name), node_info);
     101              : 
     102           38 :   return node_info;
     103              : }
     104              : 
     105              : /**
     106              :  * @brief Internal function to release pipeline node info.
     107              :  */
     108              : static void
     109           38 : _ml_extension_node_info_free (gpointer data)
     110              : {
     111           38 :   ml_service_node_info_s *node_info = (ml_service_node_info_s *) data;
     112              : 
     113           38 :   if (!node_info)
     114            0 :     return;
     115              : 
     116           38 :   if (node_info->info)
     117           34 :     ml_tensors_info_destroy (node_info->info);
     118              : 
     119           38 :   g_free (node_info->name);
     120           38 :   g_free (node_info);
     121              : }
     122              : 
     123              : /**
     124              :  * @brief Internal function to get the node info in ml-service extension.
     125              :  */
     126              : static ml_service_node_info_s *
     127           58 : _ml_extension_node_info_get (ml_extension_s * ext, const gchar * name)
     128              : {
     129           58 :   if (!STR_IS_VALID (name))
     130            8 :     return NULL;
     131              : 
     132           50 :   return g_hash_table_lookup (ext->node_table, name);
     133              : }
     134              : 
     135              : /**
     136              :  * @brief Internal function to release ml-service extension message.
     137              :  */
     138              : static void
     139           62 : _ml_extension_msg_free (gpointer data)
     140              : {
     141           62 :   ml_extension_msg_s *msg = (ml_extension_msg_s *) data;
     142              : 
     143           62 :   if (!msg)
     144            0 :     return;
     145              : 
     146           62 :   if (msg->input)
     147           47 :     ml_tensors_data_destroy (msg->input);
     148           62 :   if (msg->output)
     149           32 :     ml_tensors_data_destroy (msg->output);
     150              : 
     151           62 :   g_free (msg->name);
     152           62 :   g_free (msg);
     153              : }
     154              : 
     155              : /**
     156              :  * @brief Internal function to process ml-service extension message.
     157              :  */
     158              : static gpointer
     159           48 : _ml_extension_msg_thread (gpointer data)
     160              : {
     161           48 :   ml_service_s *mls = (ml_service_s *) data;
     162           48 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     163              :   int status;
     164              : 
     165           48 :   g_mutex_lock (&mls->lock);
     166           48 :   ext->running = TRUE;
     167           48 :   g_cond_signal (&mls->cond);
     168           48 :   g_mutex_unlock (&mls->lock);
     169              : 
     170          187 :   while (ext->running) {
     171              :     ml_extension_msg_s *msg;
     172              : 
     173          182 :     msg = g_async_queue_timeout_pop (ext->msg_queue,
     174           91 :         ext->timeout * G_TIME_SPAN_MILLISECOND);
     175              : 
     176           91 :     if (msg) {
     177           47 :       switch (ext->type) {
     178           32 :         case ML_EXTENSION_TYPE_SINGLE:
     179              :         {
     180           32 :           status = ml_single_invoke (ext->single, msg->input, &msg->output);
     181              : 
     182           32 :           if (status == ML_ERROR_NONE) {
     183           32 :             _ml_service_invoke_event_new_data (mls, NULL, msg->output);
     184              :           } else {
     185            0 :             _ml_error_report
     186              :                 ("Failed to invoke the model in ml-service extension thread.");
     187              :           }
     188           32 :           break;
     189              :         }
     190           15 :         case ML_EXTENSION_TYPE_PIPELINE:
     191              :         {
     192              :           ml_service_node_info_s *node_info;
     193              : 
     194           15 :           node_info = _ml_extension_node_info_get (ext, msg->name);
     195              : 
     196           15 :           if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
     197              :             /* The input data will be released in the pipeline. */
     198           15 :             status = ml_pipeline_src_input_data (node_info->handle, msg->input,
     199              :                 ML_PIPELINE_BUF_POLICY_AUTO_FREE);
     200           15 :             msg->input = NULL;
     201              : 
     202           15 :             if (status != ML_ERROR_NONE) {
     203            0 :               _ml_error_report
     204              :                   ("Failed to push input data into the pipeline in ml-service extension thread.");
     205              :             }
     206              :           } else {
     207            0 :             _ml_error_report
     208              :                 ("Failed to push input data into the pipeline, cannot find input node '%s'.",
     209              :                 msg->name);
     210              :           }
     211           15 :           break;
     212              :         }
     213            0 :         default:
     214              :           /* Unknown ml-service extension type, skip this. */
     215            0 :           break;
     216              :       }
     217              : 
     218           47 :       _ml_extension_msg_free (msg);
     219              :     }
     220              :   }
     221              : 
     222           48 :   return NULL;
     223              : }
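                      : 
                      : /**
                      :  * Outputs produced in the thread above reach the application as a new-data
                      :  * event. A minimal consumer-side sketch, assuming the public ml_service_event_cb
                      :  * signature from ml-api-service.h and assuming the event information exposes the
                      :  * output tensors under the "data" key:
                      :  *
                      :  *   static void
                      :  *   example_event_cb (ml_service_event_e event, ml_information_h event_data,
                      :  *       void *user_data)
                      :  *   {
                      :  *     if (event == ML_SERVICE_EVENT_NEW_DATA) {
                      :  *       ml_tensors_data_h output = NULL;
                      :  *
                      :  *       ml_information_get (event_data, "data", (void **) &output);
                      :  *       ... consume 'output' here; it is owned by the ml-service handle, do not destroy it ...
                      :  *     }
                      :  *   }
                      :  */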
     224              : 
     225              : /**
     226              :  * @brief Wrapper to release tensors-info handle.
     227              :  */
     228              : static void
     229           30 : _ml_extension_destroy_tensors_info (void *data)
     230              : {
     231           30 :   ml_tensors_info_h info = (ml_tensors_info_h) data;
     232              : 
     233           30 :   if (info)
     234           30 :     ml_tensors_info_destroy (info);
     235           30 : }
     236              : 
     237              : /**
     238              :  * @brief Internal function to parse single-shot info from json.
     239              :  */
     240              : static int
     241           39 : _ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single)
     242              : {
     243           39 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     244              :   ml_option_h option;
     245              :   int status;
     246              : 
     247           39 :   status = ml_option_create (&option);
     248           39 :   if (status != ML_ERROR_NONE) {
     249           39 :     _ml_error_report_return (status,
     250              :         "Failed to parse configuration file, cannot create ml-option handle.");
     251              :   }
     252              : 
     253              :   /**
     254              :    * 1. "key" : load model info from ml-service agent.
     255              :    * 2. "model" : configuration file includes model path.
     256              :    */
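                      :   /**
                      :    * Illustrative fragments of the two alternatives (the key and the model path
                      :    * below are placeholders, not normative values):
                      :    *   1. "single" : { "key" : "registered-model-name", ... }
                      :    *   2. "single" : { "model" : "/path/to/model.tflite", ... }
                      :    * With "model", an array of paths also appears to be accepted and is merged by
                      :    * _ml_service_conf_parse_string() with ',' as the separator.
                      :    */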
     257           39 :   if (json_object_has_member (single, "key")) {
     258            1 :     const gchar *key = json_object_get_string_member (single, "key");
     259              : 
     260            1 :     if (STR_IS_VALID (key)) {
     261              :       ml_information_h model_info;
     262              : 
     263            1 :       status = ml_service_model_get_activated (key, &model_info);
     264            1 :       if (status == ML_ERROR_NONE) {
     265            1 :         gchar *paths = NULL;
     266              : 
     267              :         /** @todo parse desc and other information if necessary. */
     268            1 :         ml_information_get (model_info, "path", (void **) (&paths));
     269            2 :         ml_option_set (option, "models", g_strdup (paths), g_free);
     270              : 
     271            1 :         ml_information_destroy (model_info);
     272              :       } else {
     273            0 :         _ml_error_report
     274              :             ("Failed to parse configuration file, cannot get the model of '%s'.",
     275              :             key);
     276            0 :         goto error;
     277              :       }
     278              :     }
     279           38 :   } else if (json_object_has_member (single, "model")) {
     280           36 :     JsonNode *file_node = json_object_get_member (single, "model");
     281           36 :     gchar *paths = NULL;
     282              : 
     283           36 :     status = _ml_service_conf_parse_string (file_node, ",", &paths);
     284           36 :     if (status != ML_ERROR_NONE) {
     285            0 :       _ml_error_report
     286              :           ("Failed to parse configuration file, it should have valid model path.");
     287            0 :       goto error;
     288              :     }
     289              : 
     290           36 :     ml_option_set (option, "models", paths, g_free);
     291              :   } else {
     292            2 :     status = ML_ERROR_INVALID_PARAMETER;
     293            2 :     _ml_error_report
     294              :         ("Failed to parse configuration file, cannot get the model path.");
     295            2 :     goto error;
     296              :   }
     297              : 
     298           37 :   if (json_object_has_member (single, "framework")) {
     299           34 :     const gchar *fw = json_object_get_string_member (single, "framework");
     300              : 
     301           34 :     if (STR_IS_VALID (fw))
     302           34 :       ml_option_set (option, "framework_name", g_strdup (fw), g_free);
     303              :   }
     304              : 
     305           37 :   if (json_object_has_member (single, "input_info")) {
     306           16 :     JsonNode *info_node = json_object_get_member (single, "input_info");
     307              :     ml_tensors_info_h in_info;
     308              : 
     309           16 :     status = _ml_service_conf_parse_tensors_info (info_node, &in_info);
     310           16 :     if (status != ML_ERROR_NONE) {
     311            0 :       _ml_error_report
     312              :           ("Failed to parse configuration file, cannot parse input information.");
     313            0 :       goto error;
     314              :     }
     315              : 
     316           16 :     ml_option_set (option, "input_info", in_info,
     317              :         _ml_extension_destroy_tensors_info);
     318              :   }
     319              : 
     320           37 :   if (json_object_has_member (single, "output_info")) {
     321           16 :     JsonNode *info_node = json_object_get_member (single, "output_info");
     322              :     ml_tensors_info_h out_info;
     323              : 
     324           16 :     status = _ml_service_conf_parse_tensors_info (info_node, &out_info);
     325           16 :     if (status != ML_ERROR_NONE) {
     326            2 :       _ml_error_report
     327              :           ("Failed to parse configuration file, cannot parse output information.");
     328            2 :       goto error;
     329              :     }
     330              : 
     331           14 :     ml_option_set (option, "output_info", out_info,
     332              :         _ml_extension_destroy_tensors_info);
     333              :   }
     334              : 
     335           35 :   if (json_object_has_member (single, "custom")) {
     336            0 :     const gchar *custom = json_object_get_string_member (single, "custom");
     337              : 
     338            0 :     if (STR_IS_VALID (custom))
     339            0 :       ml_option_set (option, "custom", g_strdup (custom), g_free);
     340              :   }
     341              : 
     342           35 :   if (json_object_has_member (single, "invoke_dynamic")) {
     343              :     const gchar *invoke_dynamic =
     344            0 :         json_object_get_string_member (single, "invoke_dynamic");
     345              : 
     346            0 :     if (STR_IS_VALID (invoke_dynamic)) {
     347            0 :       ml_option_set (option, "invoke_dynamic", g_strdup (invoke_dynamic),
     348              :           g_free);
     349              :     }
     350              :   }
     351              : 
     352           35 :   if (json_object_has_member (single, "invoke_async")) {
     353              :     const gchar *invoke_async =
     354            0 :         json_object_get_string_member (single, "invoke_async");
     355              : 
     356            0 :     if (STR_IS_VALID (invoke_async)) {
     357            0 :       ml_option_set (option, "invoke_async", g_strdup (invoke_async), g_free);
     358              :     }
     359              :   }
     360              : 
     361           35 : error:
     362           39 :   if (status == ML_ERROR_NONE)
     363           35 :     status = ml_single_open_with_option (&ext->single, option);
     364              : 
     365           39 :   ml_option_destroy (option);
     366           39 :   return status;
     367              : }
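                      : 
                      : /**
                      :  * A fuller illustrative "single" section covering the optional members parsed
                      :  * above. All values are placeholders, and the inner layout of "input_info" /
                      :  * "output_info" ("type", "dimension") is an assumption about
                      :  * _ml_service_conf_parse_tensors_info(), which is defined elsewhere:
                      :  *   "single" : {
                      :  *     "model" : "/path/to/model.tflite",
                      :  *     "framework" : "tensorflow-lite",
                      :  *     "input_info" : [ { "type" : "float32", "dimension" : "3:224:224:1" } ],
                      :  *     "output_info" : [ { "type" : "float32", "dimension" : "1001:1" } ],
                      :  *     "custom" : "<framework-specific option string>",
                      :  *     "invoke_dynamic" : "false",
                      :  *     "invoke_async" : "false"
                      :  *   }
                      :  */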
     368              : 
     369              : /**
     370              :  * @brief Internal function to parse the node info in pipeline.
     371              :  */
     372              : static int
     373           42 : _ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node,
     374              :     ml_service_node_type_e type)
     375              : {
     376           42 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     377           42 :   JsonArray *array = NULL;
     378              :   JsonObject *object;
     379              :   guint i, n;
     380              :   int status;
     381              : 
     382           42 :   n = 1;
     383           42 :   if (JSON_NODE_HOLDS_ARRAY (node)) {
     384           42 :     array = json_node_get_array (node);
     385           42 :     n = json_array_get_length (array);
     386              :   }
     387              : 
     388           76 :   for (i = 0; i < n; i++) {
     389           42 :     const gchar *name = NULL;
     390              :     ml_service_node_info_s *node_info;
     391              : 
     392           42 :     if (array)
     393           42 :       object = json_array_get_object_element (array, i);
     394              :     else
     395            0 :       object = json_node_get_object (node);
     396              : 
     397           42 :     name = _ml_service_get_json_string_member (object, "name");
     398              : 
     399           42 :     node_info = _ml_extension_node_info_new (mls, name, type);
     400           42 :     if (!node_info) {
     401            4 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     402              :           "Failed to parse configuration file, cannot add new node information.");
     403              :     }
     404              : 
     405           38 :     if (json_object_has_member (object, "info")) {
     406           36 :       JsonNode *info_node = json_object_get_member (object, "info");
     407              : 
     408           36 :       status = _ml_service_conf_parse_tensors_info (info_node,
     409              :           &node_info->info);
     410           36 :       if (status != ML_ERROR_NONE) {
     411            2 :         _ml_error_report_return (status,
     412              :             "Failed to parse configuration file, cannot parse the information.");
     413              :       }
     414              :     } else {
     415            2 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     416              :           "Failed to parse configuration file, cannot find node information.");
     417              :     }
     418              : 
     419           34 :     switch (type) {
     420           21 :       case ML_SERVICE_NODE_TYPE_INPUT:
     421           42 :         status = ml_pipeline_src_get_handle (ext->pipeline, name,
     422           21 :             &node_info->handle);
     423           21 :         break;
     424           13 :       case ML_SERVICE_NODE_TYPE_OUTPUT:
     425           26 :         status = ml_pipeline_sink_register (ext->pipeline, name,
     426           13 :             _ml_service_pipeline_sink_cb, node_info, &node_info->handle);
     427           13 :         break;
     428            0 :       default:
     429            0 :         status = ML_ERROR_INVALID_PARAMETER;
     430            0 :         break;
     431              :     }
     432              : 
     433           34 :     if (status != ML_ERROR_NONE) {
     434            0 :       _ml_error_report_return (status,
     435              :           "Failed to parse configuration file, cannot get the handle for pipeline node.");
     436              :     }
     437              :   }
     438              : 
     439           34 :   return ML_ERROR_NONE;
     440              : }
     441              : 
     442              : /**
     443              :  * @brief Internal function to parse pipeline info from json.
     444              :  */
     445              : static int
     446           21 : _ml_extension_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe)
     447              : {
     448           21 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     449           21 :   g_autofree gchar *desc = NULL;
     450              :   int status;
     451              : 
     452              :   /**
     453              :    * 1. "key" : load pipeline from ml-service agent.
     454              :    * 2. "description" : configuration file includes pipeline description.
     455              :    */
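                      :   /**
                      :    * Illustrative "pipeline" section (the GStreamer description, node names and
                      :    * tensors-info layout are placeholders):
                      :    *   "pipeline" : {
                      :    *     "description" : "appsrc name=input_img ! ... ! tensor_sink name=result",
                      :    *     "input_node"  : [ { "name" : "input_img",
                      :    *                         "info" : [ { "type" : "uint8", "dimension" : "3:224:224:1" } ] } ],
                      :    *     "output_node" : [ { "name" : "result",
                      :    *                         "info" : [ { "type" : "float32", "dimension" : "1001:1" } ] } ]
                      :    *   }
                      :    */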
     456           21 :   if (json_object_has_member (pipe, "key")) {
     457            1 :     const gchar *key = json_object_get_string_member (pipe, "key");
     458              : 
     459            1 :     if (STR_IS_VALID (key)) {
     460            1 :       status = ml_service_pipeline_get (key, &desc);
     461            1 :       if (status != ML_ERROR_NONE) {
     462            0 :         _ml_error_report_return (status,
     463              :             "Failed to parse configuration file, cannot get the pipeline of '%s'.",
     464              :             key);
     465              :       }
     466              :     }
     467           20 :   } else if (json_object_has_member (pipe, "description")) {
     468           40 :     desc = g_strdup (json_object_get_string_member (pipe, "description"));
     469              :   } else {
     470            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     471              :         "Failed to parse configuration file, cannot get the pipeline description.");
     472              :   }
     473              : 
     474           21 :   status = ml_pipeline_construct (desc, NULL, NULL, &ext->pipeline);
     475           21 :   if (status != ML_ERROR_NONE) {
     476            0 :     _ml_error_report_return (status,
     477              :         "Failed to parse configuration file, cannot construct the pipeline.");
     478              :   }
     479              : 
     480           21 :   if (json_object_has_member (pipe, "input_node")) {
     481           21 :     JsonNode *node = json_object_get_member (pipe, "input_node");
     482              : 
     483           21 :     status = _ml_extension_conf_parse_pipeline_node (mls, node,
     484              :         ML_SERVICE_NODE_TYPE_INPUT);
     485           21 :     if (status != ML_ERROR_NONE) {
     486            0 :       _ml_error_report_return (status,
     487              :           "Failed to parse configuration file, cannot get the input node.");
     488              :     }
     489              :   } else {
     490            0 :     _ml_logw
     491              :         ("No input node is defined in the pipeline. Perhaps a non-appsrc element is used as the input?");
     492              :   }
     493              : 
     494           21 :   if (json_object_has_member (pipe, "output_node")) {
     495           21 :     JsonNode *node = json_object_get_member (pipe, "output_node");
     496              : 
     497           21 :     status = _ml_extension_conf_parse_pipeline_node (mls, node,
     498              :         ML_SERVICE_NODE_TYPE_OUTPUT);
     499           21 :     if (status != ML_ERROR_NONE) {
     500            8 :       _ml_error_report_return (status,
     501              :           "Failed to parse configuration file, cannot get the output node.");
     502              :     }
     503              :   } else {
     504            0 :     _ml_logw ("No output node is defined in the pipeline.");
     505              :   }
     506              : 
     507              :   /* Start the pipeline when creating the ml-service handle so that the pipeline description is validated. */
     508           13 :   status = ml_pipeline_start (ext->pipeline);
     509           13 :   if (status != ML_ERROR_NONE) {
     510            0 :     _ml_error_report_return (status,
     511              :         "Failed to parse configuration file, cannot start the pipeline.");
     512              :   }
     513              : 
     514           13 :   return ML_ERROR_NONE;
     515              : }
     516              : 
     517              : /**
     518              :  * @brief Internal function to parse configuration file.
     519              :  */
     520              : static int
     521           60 : _ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object)
     522              : {
     523           60 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     524              :   int status;
     525              : 
     526           60 :   if (json_object_has_member (object, "single")) {
     527           39 :     JsonObject *single = json_object_get_object_member (object, "single");
     528              : 
     529           39 :     status = _ml_extension_conf_parse_single (mls, single);
     530           39 :     if (status != ML_ERROR_NONE)
     531            4 :       return status;
     532              : 
     533           35 :     ext->type = ML_EXTENSION_TYPE_SINGLE;
     534           21 :   } else if (json_object_has_member (object, "pipeline")) {
     535           21 :     JsonObject *pipe = json_object_get_object_member (object, "pipeline");
     536              : 
     537           21 :     status = _ml_extension_conf_parse_pipeline (mls, pipe);
     538           21 :     if (status != ML_ERROR_NONE)
     539            8 :       return status;
     540              : 
     541           13 :     ext->type = ML_EXTENSION_TYPE_PIPELINE;
     542              :   } else {
     543            0 :     _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     544              :         "Failed to parse configuration file, cannot get the valid type from configuration.");
     545              :   }
     546              : 
     547           48 :   return ML_ERROR_NONE;
     548              : }
     549              : 
     550              : /**
     551              :  * @brief Internal function to create ml-service extension.
     552              :  */
     553              : int
     554           60 : _ml_service_extension_create (ml_service_s * mls, JsonObject * object)
     555              : {
     556              :   ml_extension_s *ext;
     557           60 :   g_autofree gchar *thread_name = g_strdup_printf ("ml-ext-msg-%d", getpid ());
     558              :   int status;
     559              : 
     560           60 :   mls->priv = ext = g_try_new0 (ml_extension_s, 1);
     561           60 :   if (ext == NULL) {
     562            0 :     _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
     563              :         "Failed to allocate memory for ml-service extension. Out of memory?");
     564              :   }
     565              : 
     566           60 :   ext->type = ML_EXTENSION_TYPE_UNKNOWN;
     567           60 :   ext->running = FALSE;
     568           60 :   ext->timeout = DEFAULT_TIMEOUT;
     569           60 :   ext->max_input = DEFAULT_MAX_INPUT;
     570           60 :   ext->node_table = g_hash_table_new_full (g_str_hash, g_str_equal, g_free,
     571              :       _ml_extension_node_info_free);
     572              : 
     573           60 :   status = _ml_extension_conf_parse_json (mls, object);
     574           60 :   if (status != ML_ERROR_NONE) {
     575           12 :     _ml_error_report_return (status,
     576              :         "Failed to parse the ml-service extension configuration.");
     577              :   }
     578              : 
     579           48 :   g_mutex_lock (&mls->lock);
     580              : 
     581           48 :   ext->msg_queue = g_async_queue_new_full (_ml_extension_msg_free);
     582           48 :   ext->msg_thread = g_thread_new (thread_name, _ml_extension_msg_thread, mls);
     583              : 
     584              :   /* Wait until the message thread has been initialized. */
     585           48 :   g_cond_wait (&mls->cond, &mls->lock);
     586           48 :   g_mutex_unlock (&mls->lock);
     587              : 
     588           48 :   return ML_ERROR_NONE;
     589              : }
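                      : 
                      : /**
                      :  * A minimal application-side sketch of the handle lifecycle, assuming the public
                      :  * wrappers declared in ml-api-service.h (ml_service_new() takes the path of a
                      :  * configuration file such as the illustrative examples above):
                      :  *
                      :  *   ml_service_h handle = NULL;
                      :  *
                      :  *   if (ml_service_new ("/path/to/config.conf", &handle) == ML_ERROR_NONE) {
                      :  *     ml_service_set_event_cb (handle, example_event_cb, NULL);
                      :  *     ... push input data, see the sketch after _ml_service_extension_request() ...
                      :  *     ml_service_destroy (handle);
                      :  *   }
                      :  */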
     590              : 
     591              : /**
     592              :  * @brief Internal function to release ml-service extension.
     593              :  */
     594              : int
     595           60 : _ml_service_extension_destroy (ml_service_s * mls)
     596              : {
     597           60 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     598              : 
     599              :   /* This function is supposed to be called internally to release the handle; nothing to do if it is already cleared. */
     600           60 :   if (!ext)
     601            0 :     return ML_ERROR_NONE;
     602              : 
     603              :   /**
     604              :    * Close the message thread.
     605              :    * If model inference is running, the message thread may be waiting for the result.
     606              :    * This can take time, so do not join the thread while holding the extension lock.
     607              :    */
     608           60 :   ext->running = FALSE;
     609           60 :   if (ext->msg_thread) {
     610           48 :     g_thread_join (ext->msg_thread);
     611           48 :     ext->msg_thread = NULL;
     612              :   }
     613              : 
     614           60 :   if (ext->msg_queue) {
     615           48 :     g_async_queue_unref (ext->msg_queue);
     616           48 :     ext->msg_queue = NULL;
     617              :   }
     618              : 
     619           60 :   if (ext->single) {
     620           35 :     ml_single_close (ext->single);
     621           35 :     ext->single = NULL;
     622              :   }
     623              : 
     624           60 :   if (ext->pipeline) {
     625           21 :     ml_pipeline_stop (ext->pipeline);
     626           21 :     ml_pipeline_destroy (ext->pipeline);
     627           21 :     ext->pipeline = NULL;
     628              :   }
     629              : 
     630           60 :   if (ext->node_table) {
     631           60 :     g_hash_table_destroy (ext->node_table);
     632           60 :     ext->node_table = NULL;
     633              :   }
     634              : 
     635           60 :   g_free (ext);
     636           60 :   mls->priv = NULL;
     637              : 
     638           60 :   return ML_ERROR_NONE;
     639              : }
     640              : 
     641              : /**
     642              :  * @brief Internal function to start ml-service extension.
     643              :  */
     644              : int
     645            2 : _ml_service_extension_start (ml_service_s * mls)
     646              : {
     647            2 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     648            2 :   int status = ML_ERROR_NONE;
     649              : 
     650            2 :   switch (ext->type) {
     651            2 :     case ML_EXTENSION_TYPE_PIPELINE:
     652            2 :       status = ml_pipeline_start (ext->pipeline);
     653            2 :       break;
     654            0 :     case ML_EXTENSION_TYPE_SINGLE:
     655              :       /* Do nothing. */
     656            0 :       break;
     657            0 :     default:
     658            0 :       status = ML_ERROR_NOT_SUPPORTED;
     659            0 :       break;
     660              :   }
     661              : 
     662            2 :   return status;
     663              : }
     664              : 
     665              : /**
     666              :  * @brief Internal function to stop ml-service extension.
     667              :  */
     668              : int
     669            2 : _ml_service_extension_stop (ml_service_s * mls)
     670              : {
     671            2 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     672            2 :   int status = ML_ERROR_NONE;
     673              : 
     674            2 :   switch (ext->type) {
     675            2 :     case ML_EXTENSION_TYPE_PIPELINE:
     676            2 :       status = ml_pipeline_stop (ext->pipeline);
     677            2 :       break;
     678            0 :     case ML_EXTENSION_TYPE_SINGLE:
     679              :       /* Do nothing. */
     680            0 :       break;
     681            0 :     default:
     682            0 :       status = ML_ERROR_NOT_SUPPORTED;
     683            0 :       break;
     684              :   }
     685              : 
     686            2 :   return status;
     687              : }
     688              : 
     689              : /**
     690              :  * @brief Internal function to get the information of required input data.
     691              :  */
     692              : int
     693           26 : _ml_service_extension_get_input_information (ml_service_s * mls,
     694              :     const char *name, ml_tensors_info_h * info)
     695              : {
     696           26 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     697              :   int status;
     698              : 
     699           26 :   switch (ext->type) {
     700           13 :     case ML_EXTENSION_TYPE_SINGLE:
     701           13 :       status = ml_single_get_input_info (ext->single, info);
     702           13 :       break;
     703           13 :     case ML_EXTENSION_TYPE_PIPELINE:
     704              :     {
     705              :       ml_service_node_info_s *node_info;
     706              : 
     707           13 :       node_info = _ml_extension_node_info_get (ext, name);
     708              : 
     709           13 :       if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
     710            5 :         status = _ml_tensors_info_create_from (node_info->info, info);
     711              :       } else {
     712            8 :         status = ML_ERROR_INVALID_PARAMETER;
     713              :       }
     714           13 :       break;
     715              :     }
     716            0 :     default:
     717            0 :       status = ML_ERROR_NOT_SUPPORTED;
     718            0 :       break;
     719              :   }
     720              : 
     721           26 :   return status;
     722              : }
     723              : 
     724              : /**
     725              :  * @brief Internal function to get the information of output data.
     726              :  */
     727              : int
     728           18 : _ml_service_extension_get_output_information (ml_service_s * mls,
     729              :     const char *name, ml_tensors_info_h * info)
     730              : {
     731           18 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     732              :   int status;
     733              : 
     734           18 :   switch (ext->type) {
     735            7 :     case ML_EXTENSION_TYPE_SINGLE:
     736            7 :       status = ml_single_get_output_info (ext->single, info);
     737            7 :       break;
     738           11 :     case ML_EXTENSION_TYPE_PIPELINE:
     739              :     {
     740              :       ml_service_node_info_s *node_info;
     741              : 
     742           11 :       node_info = _ml_extension_node_info_get (ext, name);
     743              : 
     744           11 :       if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_OUTPUT) {
     745            3 :         status = _ml_tensors_info_create_from (node_info->info, info);
     746              :       } else {
     747            8 :         status = ML_ERROR_INVALID_PARAMETER;
     748              :       }
     749           11 :       break;
     750              :     }
     751            0 :     default:
     752            0 :       status = ML_ERROR_NOT_SUPPORTED;
     753            0 :       break;
     754              :   }
     755              : 
     756           18 :   if (status != ML_ERROR_NONE) {
     757            8 :     if (*info) {
     758            0 :       ml_tensors_info_destroy (*info);
     759            0 :       *info = NULL;
     760              :     }
     761              :   }
     762              : 
     763           18 :   return status;
     764              : }
     765              : 
     766              : /**
     767              :  * @brief Internal function to set the information for ml-service extension.
     768              :  */
     769              : int
     770           34 : _ml_service_extension_set_information (ml_service_s * mls, const char *name,
     771              :     const char *value)
     772              : {
     773           34 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     774              : 
     775              :   /* Check the message queue limit and other options. */
     776           34 :   if (g_ascii_strcasecmp (name, "input_queue_size") == 0 ||
     777           34 :       g_ascii_strcasecmp (name, "max_input") == 0) {
     778            2 :     ext->max_input = (guint) g_ascii_strtoull (value, NULL, 10);
     779           32 :   } else if (g_ascii_strcasecmp (name, "timeout") == 0) {
     780            0 :     ext->timeout = (guint) g_ascii_strtoull (value, NULL, 10);
     781              :   }
     782              : 
     783           34 :   return ML_ERROR_NONE;
     784              : }
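                      : 
                      : /**
                      :  * For example, through the public wrapper (the values are decimal strings; this
                      :  * assumes ml_service_set_information() from ml-api-service.h):
                      :  *   ml_service_set_information (handle, "max_input", "10");
                      :  *   ml_service_set_information (handle, "timeout", "500");
                      :  */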
     785              : 
     786              : /**
     787              :  * @brief Internal function to add an input data to process the model in ml-service extension handle.
     788              :  */
     789              : int
     790           72 : _ml_service_extension_request (ml_service_s * mls, const char *name,
     791              :     const ml_tensors_data_h data)
     792              : {
     793           72 :   ml_extension_s *ext = (ml_extension_s *) mls->priv;
     794              :   ml_extension_msg_s *msg;
     795              :   int status, len;
     796              : 
     797           72 :   if (ext->type == ML_EXTENSION_TYPE_PIPELINE) {
     798              :     ml_service_node_info_s *node_info;
     799              : 
     800           23 :     if (!STR_IS_VALID (name)) {
     801            4 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     802              :           "The parameter, name '%s', is invalid.", name);
     803              :     }
     804              : 
     805           19 :     node_info = _ml_extension_node_info_get (ext, name);
     806              : 
     807           19 :     if (!node_info || node_info->type != ML_SERVICE_NODE_TYPE_INPUT) {
     808            4 :       _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
     809              :           "The parameter, name '%s', is invalid, cannot find the input node from pipeline.",
     810              :           name);
     811              :     }
     812              :   }
     813              : 
     814           64 :   len = g_async_queue_length (ext->msg_queue);
     815              : 
     816           64 :   if (ext->max_input > 0 && len > 0 && ext->max_input <= len) {
     817            2 :     _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
     818              :         "Failed to push input data into the queue, the max number of input is %u.",
     819              :         ext->max_input);
     820              :   }
     821              : 
     822           62 :   msg = g_try_new0 (ml_extension_msg_s, 1);
     823           62 :   if (!msg) {
     824            0 :     _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
     825              :         "Failed to allocate the ml-service extension message. Out of memory?");
     826              :   }
     827              : 
     828           62 :   msg->name = g_strdup (name);
     829           62 :   status = ml_tensors_data_clone (data, &msg->input);
     830              : 
     831           62 :   if (status != ML_ERROR_NONE) {
     832            0 :     _ml_extension_msg_free (msg);
     833            0 :     _ml_error_report_return (status, "Failed to clone input data.");
     834              :   }
     835              : 
     836           62 :   g_async_queue_push (ext->msg_queue, msg);
     837              : 
     838           62 :   return ML_ERROR_NONE;
     839              : }
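                      : 
                      : /**
                      :  * A minimal request sketch for the pipeline type, assuming the public wrappers
                      :  * ml_service_get_input_information(), ml_tensors_data_create() and
                      :  * ml_service_request(), and reusing the illustrative node name "input_img" from
                      :  * the pipeline configuration example above (for the single type, the node name
                      :  * may be NULL):
                      :  *
                      :  *   ml_tensors_info_h in_info = NULL;
                      :  *   ml_tensors_data_h in_data = NULL;
                      :  *
                      :  *   ml_service_get_input_information (handle, "input_img", &in_info);
                      :  *   ml_tensors_data_create (in_info, &in_data);
                      :  *   ... fill in_data with ml_tensors_data_set_tensor_data() ...
                      :  *   ml_service_request (handle, "input_img", in_data);
                      :  *
                      :  *   ml_tensors_data_destroy (in_data);   (safe here, the request clones the input)
                      :  *   ml_tensors_info_destroy (in_info);
                      :  */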
        

Generated by: LCOV version 2.0-1