Line data Source code
1 : /* SPDX-License-Identifier: Apache-2.0 */
2 : /**
3 : * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved.
4 : *
5 : * @file ml-api-service-extension.c
6 : * @date 1 September 2023
7 : * @brief ML service extension C-API.
8 : * @see https://github.com/nnstreamer/api
9 : * @author Jaeyun Jung <jy1210.jung@samsung.com>
10 : * @bug No known bugs except for NYI items
11 : */
12 :
13 : #include "ml-api-service-extension.h"
14 :
15 : /**
16 : * @brief The time to wait for new input data in the message thread, in milliseconds.
17 : */
18 : #define DEFAULT_TIMEOUT 200
19 :
20 : /**
21 : * @brief The maximum number of input data entries in the message queue (0 for no limit).
22 : */
23 : #define DEFAULT_MAX_INPUT 5
24 :
25 : /**
26 : * @brief Internal enumeration for ml-service extension types.
27 : */
28 : typedef enum
29 : {
30 : ML_EXTENSION_TYPE_UNKNOWN = 0,
31 : ML_EXTENSION_TYPE_SINGLE = 1,
32 : ML_EXTENSION_TYPE_PIPELINE = 2,
33 :
34 : ML_EXTENSION_TYPE_MAX
35 : } ml_extension_type_e;
36 :
37 : /**
38 : * @brief Internal structure of the message in ml-service extension handle.
39 : */
40 : typedef struct
41 : {
42 : gchar *name;
43 : ml_tensors_data_h input;
44 : ml_tensors_data_h output;
45 : } ml_extension_msg_s;
46 :
47 : /**
48 : * @brief Internal structure for ml-service extension handle.
49 : */
50 : typedef struct
51 : {
52 : ml_extension_type_e type;
53 : gboolean running;
54 : guint timeout; /**< The time to wait for new input data in the message thread, in milliseconds (see DEFAULT_TIMEOUT). */
55 : guint max_input; /**< The maximum number of input data entries in the message queue (see DEFAULT_MAX_INPUT). */
56 : GThread *msg_thread;
57 : GAsyncQueue *msg_queue;
58 :
59 : /**
60 : * Handles for each ml-service extension type.
61 : * - single : Default. Open the model file and prepare for invoke. The configuration should include the model information.
62 : * - pipeline : Construct a pipeline from the configuration. The configuration should include the pipeline description.
63 : */
64 : ml_single_h single;
65 :
66 : ml_pipeline_h pipeline;
67 : GHashTable *node_table;
68 : } ml_extension_s;
69 :
70 : /**
71 : * @brief Internal function to handle the asynchronous invoke.
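 *
 * Registered as the "async_callback" option when the single-shot handle is
 * opened with "invoke_async" enabled; it forwards each asynchronous output
 * to the ml-service new-data event handler.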
72 : */
73 : static int
74 0 : _ml_extension_async_cb (const ml_tensors_data_h data, void *user_data)
75 : {
76 0 : ml_service_s *mls = (ml_service_s *) user_data;
77 :
78 0 : return _ml_service_invoke_event_new_data (mls, NULL, data);
79 : }
80 :
81 : /**
82 : * @brief Internal function to create node info in pipeline.
83 : */
84 : static ml_service_node_info_s *
85 42 : _ml_extension_node_info_new (ml_service_s * mls, const gchar * name,
86 : ml_service_node_type_e type)
87 : {
88 42 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
89 : ml_service_node_info_s *node_info;
90 :
91 42 : if (!STR_IS_VALID (name)) {
92 2 : _ml_error_report_return (NULL,
93 : "Cannot add new node info, invalid node name '%s'.", name);
94 : }
95 :
96 40 : if (g_hash_table_lookup (ext->node_table, name)) {
97 2 : _ml_error_report_return (NULL,
98 : "Cannot add duplicated node '%s' in ml-service pipeline.", name);
99 : }
100 :
101 38 : node_info = g_try_new0 (ml_service_node_info_s, 1);
102 38 : if (!node_info) {
103 0 : _ml_error_report_return (NULL,
104 : "Failed to allocate new memory for node info in ml-service pipeline. Out of memory?");
105 : }
106 :
107 38 : node_info->name = g_strdup (name);
108 38 : node_info->type = type;
109 38 : node_info->mls = mls;
110 :
111 38 : g_hash_table_insert (ext->node_table, g_strdup (name), node_info);
112 :
113 38 : return node_info;
114 : }
115 :
116 : /**
117 : * @brief Internal function to release pipeline node info.
118 : */
119 : static void
120 38 : _ml_extension_node_info_free (gpointer data)
121 : {
122 38 : ml_service_node_info_s *node_info = (ml_service_node_info_s *) data;
123 :
124 38 : if (!node_info)
125 0 : return;
126 :
127 38 : if (node_info->info)
128 34 : ml_tensors_info_destroy (node_info->info);
129 :
130 38 : g_free (node_info->name);
131 38 : g_free (node_info);
132 : }
133 :
134 : /**
135 : * @brief Internal function to get the node info in ml-service extension.
136 : */
137 : static ml_service_node_info_s *
138 58 : _ml_extension_node_info_get (ml_extension_s * ext, const gchar * name)
139 : {
140 58 : if (!STR_IS_VALID (name))
141 8 : return NULL;
142 :
143 50 : return g_hash_table_lookup (ext->node_table, name);
144 : }
145 :
146 : /**
147 : * @brief Internal function to release ml-service extension message.
148 : */
149 : static void
150 62 : _ml_extension_msg_free (gpointer data)
151 : {
152 62 : ml_extension_msg_s *msg = (ml_extension_msg_s *) data;
153 :
154 62 : if (!msg)
155 0 : return;
156 :
157 62 : if (msg->input)
158 47 : ml_tensors_data_destroy (msg->input);
159 62 : if (msg->output)
160 32 : ml_tensors_data_destroy (msg->output);
161 :
162 62 : g_free (msg->name);
163 62 : g_free (msg);
164 : }
165 :
166 : /**
167 : * @brief Internal function to process ml-service extension message.
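 *
 * The thread pops messages from the queue, waiting up to 'timeout'
 * milliseconds per iteration. For the single type it invokes the model and
 * reports the output as a new-data event; for the pipeline type it pushes
 * the input data into the corresponding source (input) node.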
168 : */
169 : static gpointer
170 48 : _ml_extension_msg_thread (gpointer data)
171 : {
172 48 : ml_service_s *mls = (ml_service_s *) data;
173 48 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
174 : int status;
175 :
176 48 : g_mutex_lock (&mls->lock);
177 48 : ext->running = TRUE;
178 48 : g_cond_signal (&mls->cond);
179 48 : g_mutex_unlock (&mls->lock);
180 :
181 187 : while (ext->running) {
182 : ml_extension_msg_s *msg;
183 :
184 182 : msg = g_async_queue_timeout_pop (ext->msg_queue,
185 91 : ext->timeout * G_TIME_SPAN_MILLISECOND);
186 :
187 91 : if (msg) {
188 47 : switch (ext->type) {
189 32 : case ML_EXTENSION_TYPE_SINGLE:
190 : {
191 32 : status = ml_single_invoke (ext->single, msg->input, &msg->output);
192 :
193 32 : if (status == ML_ERROR_NONE) {
194 32 : _ml_service_invoke_event_new_data (mls, NULL, msg->output);
195 : } else {
196 0 : _ml_error_report
197 : ("Failed to invoke the model in ml-service extension thread.");
198 : }
199 32 : break;
200 : }
201 15 : case ML_EXTENSION_TYPE_PIPELINE:
202 : {
203 : ml_service_node_info_s *node_info;
204 :
205 15 : node_info = _ml_extension_node_info_get (ext, msg->name);
206 :
207 15 : if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
208 : /* The input data will be released in the pipeline. */
209 15 : status = ml_pipeline_src_input_data (node_info->handle, msg->input,
210 : ML_PIPELINE_BUF_POLICY_AUTO_FREE);
211 15 : msg->input = NULL;
212 :
213 15 : if (status != ML_ERROR_NONE) {
214 0 : _ml_error_report
215 : ("Failed to push input data into the pipeline in ml-service extension thread.");
216 : }
217 : } else {
218 0 : _ml_error_report
219 : ("Failed to push input data into the pipeline, cannot find input node '%s'.",
220 : msg->name);
221 : }
222 15 : break;
223 : }
224 0 : default:
225 : /* Unknown ml-service extension type, skip this. */
226 0 : break;
227 : }
228 :
229 47 : _ml_extension_msg_free (msg);
230 : }
231 : }
232 :
233 48 : return NULL;
234 : }
235 :
236 : /**
237 : * @brief Wrapper to release tensors-info handle.
238 : */
239 : static void
240 30 : _ml_extension_destroy_tensors_info (void *data)
241 : {
242 30 : ml_tensors_info_h info = (ml_tensors_info_h) data;
243 :
244 30 : if (info)
245 30 : ml_tensors_info_destroy (info);
246 30 : }
247 :
248 : /**
249 : * @brief Internal function to parse single-shot info from json.
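 *
 * A minimal "single" configuration might look like the following sketch
 * (file names and field values are illustrative only):
 *
 *   {
 *     "single" : {
 *       "model" : "model.tflite",
 *       "framework" : "tensorflow-lite",
 *       "input_info" : { ... },
 *       "output_info" : { ... }
 *     }
 *   }
 *
 * Alternatively, "key" may be given instead of "model" to load the model
 * registered and activated in the ml-service agent.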
250 : */
251 : static int
252 39 : _ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single)
253 : {
254 39 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
255 : ml_option_h option;
256 : int status;
257 :
258 39 : status = ml_option_create (&option);
259 39 : if (status != ML_ERROR_NONE) {
260 39 : _ml_error_report_return (status,
261 : "Failed to parse configuration file, cannot create ml-option handle.");
262 : }
263 :
264 : /**
265 : * 1. "key" : load model info from ml-service agent.
266 : * 2. "model" : configuration file includes model path.
267 : */
268 39 : if (json_object_has_member (single, "key")) {
269 1 : const gchar *key = json_object_get_string_member (single, "key");
270 :
271 1 : if (STR_IS_VALID (key)) {
272 : ml_information_h model_info;
273 :
274 1 : status = ml_service_model_get_activated (key, &model_info);
275 1 : if (status == ML_ERROR_NONE) {
276 1 : gchar *paths = NULL;
277 :
278 : /** @todo parse desc and other information if necessary. */
279 1 : ml_information_get (model_info, "path", (void **) (&paths));
280 2 : ml_option_set (option, "models", g_strdup (paths), g_free);
281 :
282 1 : ml_information_destroy (model_info);
283 : } else {
284 0 : _ml_error_report
285 : ("Failed to parse configuration file, cannot get the model of '%s'.",
286 : key);
287 0 : goto error;
288 : }
289 : }
290 38 : } else if (json_object_has_member (single, "model")) {
291 36 : JsonNode *file_node = json_object_get_member (single, "model");
292 36 : gchar *paths = NULL;
293 :
294 36 : status = _ml_service_conf_parse_string (file_node, ",", &paths);
295 36 : if (status != ML_ERROR_NONE) {
296 0 : _ml_error_report
297 : ("Failed to parse configuration file, it should have valid model path.");
298 0 : goto error;
299 : }
300 :
301 36 : ml_option_set (option, "models", paths, g_free);
302 : } else {
303 2 : status = ML_ERROR_INVALID_PARAMETER;
304 2 : _ml_error_report
305 : ("Failed to parse configuration file, cannot get the model path.");
306 2 : goto error;
307 : }
308 :
309 37 : if (json_object_has_member (single, "framework")) {
310 34 : const gchar *fw = json_object_get_string_member (single, "framework");
311 :
312 34 : if (STR_IS_VALID (fw))
313 34 : ml_option_set (option, "framework_name", g_strdup (fw), g_free);
314 : }
315 :
316 37 : if (json_object_has_member (single, "input_info")) {
317 16 : JsonNode *info_node = json_object_get_member (single, "input_info");
318 : ml_tensors_info_h in_info;
319 :
320 16 : status = _ml_service_conf_parse_tensors_info (info_node, &in_info);
321 16 : if (status != ML_ERROR_NONE) {
322 0 : _ml_error_report
323 : ("Failed to parse configuration file, cannot parse input information.");
324 0 : goto error;
325 : }
326 :
327 16 : ml_option_set (option, "input_info", in_info,
328 : _ml_extension_destroy_tensors_info);
329 : }
330 :
331 37 : if (json_object_has_member (single, "output_info")) {
332 16 : JsonNode *info_node = json_object_get_member (single, "output_info");
333 : ml_tensors_info_h out_info;
334 :
335 16 : status = _ml_service_conf_parse_tensors_info (info_node, &out_info);
336 16 : if (status != ML_ERROR_NONE) {
337 2 : _ml_error_report
338 : ("Failed to parse configuration file, cannot parse output information.");
339 2 : goto error;
340 : }
341 :
342 14 : ml_option_set (option, "output_info", out_info,
343 : _ml_extension_destroy_tensors_info);
344 : }
345 :
346 35 : if (json_object_has_member (single, "custom")) {
347 0 : const gchar *custom = json_object_get_string_member (single, "custom");
348 :
349 0 : if (STR_IS_VALID (custom))
350 0 : ml_option_set (option, "custom", g_strdup (custom), g_free);
351 : }
352 :
353 35 : if (json_object_has_member (single, "invoke_dynamic")) {
354 : const gchar *invoke_dynamic =
355 0 : json_object_get_string_member (single, "invoke_dynamic");
356 :
357 0 : if (STR_IS_VALID (invoke_dynamic)) {
358 0 : ml_option_set (option, "invoke_dynamic", g_strdup (invoke_dynamic),
359 : g_free);
360 : }
361 : }
362 :
363 35 : if (json_object_has_member (single, "invoke_async")) {
364 : const gchar *invoke_async =
365 0 : json_object_get_string_member (single, "invoke_async");
366 :
367 0 : if (STR_IS_VALID (invoke_async)) {
368 0 : ml_option_set (option, "invoke_async", g_strdup (invoke_async), g_free);
369 :
370 0 : if (g_ascii_strcasecmp (invoke_async, "true") == 0) {
371 0 : ml_option_set (option, "async_callback", _ml_extension_async_cb, NULL);
372 0 : ml_option_set (option, "async_data", mls, NULL);
373 : }
374 : }
375 : }
376 :
377 35 : error:
378 39 : if (status == ML_ERROR_NONE)
379 35 : status = ml_single_open_with_option (&ext->single, option);
380 :
381 39 : ml_option_destroy (option);
382 39 : return status;
383 : }
384 :
385 : /**
386 : * @brief Internal function to parse the node info in pipeline.
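 *
 * Each node entry is expected to provide "name" (matching an element in the
 * pipeline description) and "info" (the tensors information of that node),
 * e.g. a sketch with illustrative names: { "name" : "input_src", "info" : { ... } }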
387 : */
388 : static int
389 42 : _ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node,
390 : ml_service_node_type_e type)
391 : {
392 42 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
393 42 : JsonArray *array = NULL;
394 : JsonObject *object;
395 : guint i, n;
396 : int status;
397 :
398 42 : n = 1;
399 42 : if (JSON_NODE_HOLDS_ARRAY (node)) {
400 42 : array = json_node_get_array (node);
401 42 : n = json_array_get_length (array);
402 : }
403 :
404 76 : for (i = 0; i < n; i++) {
405 42 : const gchar *name = NULL;
406 : ml_service_node_info_s *node_info;
407 :
408 42 : if (array)
409 42 : object = json_array_get_object_element (array, i);
410 : else
411 0 : object = json_node_get_object (node);
412 :
413 42 : name = _ml_service_get_json_string_member (object, "name");
414 :
415 42 : node_info = _ml_extension_node_info_new (mls, name, type);
416 42 : if (!node_info) {
417 4 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
418 : "Failed to parse configuration file, cannot add new node information.");
419 : }
420 :
421 38 : if (json_object_has_member (object, "info")) {
422 36 : JsonNode *info_node = json_object_get_member (object, "info");
423 :
424 36 : status = _ml_service_conf_parse_tensors_info (info_node,
425 : &node_info->info);
426 36 : if (status != ML_ERROR_NONE) {
427 2 : _ml_error_report_return (status,
428 : "Failed to parse configuration file, cannot parse the information.");
429 : }
430 : } else {
431 2 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
432 : "Failed to parse configuration file, cannot find node information.");
433 : }
434 :
435 34 : switch (type) {
436 21 : case ML_SERVICE_NODE_TYPE_INPUT:
437 42 : status = ml_pipeline_src_get_handle (ext->pipeline, name,
438 21 : &node_info->handle);
439 21 : break;
440 13 : case ML_SERVICE_NODE_TYPE_OUTPUT:
441 26 : status = ml_pipeline_sink_register (ext->pipeline, name,
442 13 : _ml_service_pipeline_sink_cb, node_info, &node_info->handle);
443 13 : break;
444 0 : default:
445 0 : status = ML_ERROR_INVALID_PARAMETER;
446 0 : break;
447 : }
448 :
449 34 : if (status != ML_ERROR_NONE) {
450 0 : _ml_error_report_return (status,
451 : "Failed to parse configuration file, cannot get the handle for pipeline node.");
452 : }
453 : }
454 :
455 34 : return ML_ERROR_NONE;
456 : }
457 :
458 : /**
459 : * @brief Internal function to parse pipeline info from json.
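 *
 * A "pipeline" configuration might look like the following sketch (the
 * description and element names are illustrative only):
 *
 *   {
 *     "pipeline" : {
 *       "description" : "appsrc name=input_src ! ... ! tensor_sink name=result",
 *       "input_node" : [ { "name" : "input_src", "info" : { ... } } ],
 *       "output_node" : [ { "name" : "result", "info" : { ... } } ]
 *     }
 *   }
 *
 * Alternatively, "key" may be given instead of "description" to load a
 * pipeline description registered in the ml-service agent.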
460 : */
461 : static int
462 21 : _ml_extension_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe)
463 : {
464 21 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
465 21 : g_autofree gchar *desc = NULL;
466 : int status;
467 :
468 : /**
469 : * 1. "key" : load pipeline from ml-service agent.
470 : * 2. "description" : configuration file includes pipeline description.
471 : */
472 21 : if (json_object_has_member (pipe, "key")) {
473 1 : const gchar *key = json_object_get_string_member (pipe, "key");
474 :
475 1 : if (STR_IS_VALID (key)) {
476 1 : status = ml_service_pipeline_get (key, &desc);
477 1 : if (status != ML_ERROR_NONE) {
478 0 : _ml_error_report_return (status,
479 : "Failed to parse configuration file, cannot get the pipeline of '%s'.",
480 : key);
481 : }
482 : }
483 20 : } else if (json_object_has_member (pipe, "description")) {
484 40 : desc = g_strdup (json_object_get_string_member (pipe, "description"));
485 : } else {
486 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
487 : "Failed to parse configuration file, cannot get the pipeline description.");
488 : }
489 :
490 21 : status = ml_pipeline_construct (desc, NULL, NULL, &ext->pipeline);
491 21 : if (status != ML_ERROR_NONE) {
492 0 : _ml_error_report_return (status,
493 : "Failed to parse configuration file, cannot construct the pipeline.");
494 : }
495 :
496 21 : if (json_object_has_member (pipe, "input_node")) {
497 21 : JsonNode *node = json_object_get_member (pipe, "input_node");
498 :
499 21 : status = _ml_extension_conf_parse_pipeline_node (mls, node,
500 : ML_SERVICE_NODE_TYPE_INPUT);
501 21 : if (status != ML_ERROR_NONE) {
502 0 : _ml_error_report_return (status,
503 : "Failed to parse configuration file, cannot get the input node.");
504 : }
505 : } else {
506 0 : _ml_logw
507 : ("No input node is defined in the pipeline. Perhaps a non-appsrc source element is used?");
508 : }
509 :
510 21 : if (json_object_has_member (pipe, "output_node")) {
511 21 : JsonNode *node = json_object_get_member (pipe, "output_node");
512 :
513 21 : status = _ml_extension_conf_parse_pipeline_node (mls, node,
514 : ML_SERVICE_NODE_TYPE_OUTPUT);
515 21 : if (status != ML_ERROR_NONE) {
516 8 : _ml_error_report_return (status,
517 : "Failed to parse configuration file, cannot get the output node.");
518 : }
519 : } else {
520 0 : _ml_logw ("No output node is defined in the pipeline.");
521 : }
522 :
523 : /* Start the pipeline when creating the ml-service handle to validate the pipeline description. */
524 13 : status = ml_pipeline_start (ext->pipeline);
525 13 : if (status != ML_ERROR_NONE) {
526 0 : _ml_error_report_return (status,
527 : "Failed to parse configuration file, cannot start the pipeline.");
528 : }
529 :
530 13 : return ML_ERROR_NONE;
531 : }
532 :
533 : /**
534 : * @brief Internal function to parse configuration file.
535 : */
536 : static int
537 60 : _ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object)
538 : {
539 60 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
540 : int status;
541 :
542 60 : if (json_object_has_member (object, "single")) {
543 39 : JsonObject *single = json_object_get_object_member (object, "single");
544 :
545 39 : status = _ml_extension_conf_parse_single (mls, single);
546 39 : if (status != ML_ERROR_NONE)
547 4 : return status;
548 :
549 35 : ext->type = ML_EXTENSION_TYPE_SINGLE;
550 21 : } else if (json_object_has_member (object, "pipeline")) {
551 21 : JsonObject *pipe = json_object_get_object_member (object, "pipeline");
552 :
553 21 : status = _ml_extension_conf_parse_pipeline (mls, pipe);
554 21 : if (status != ML_ERROR_NONE)
555 8 : return status;
556 :
557 13 : ext->type = ML_EXTENSION_TYPE_PIPELINE;
558 : } else {
559 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
560 : "Failed to parse configuration file, cannot get the valid type from configuration.");
561 : }
562 :
563 48 : return ML_ERROR_NONE;
564 : }
565 :
566 : /**
567 : * @brief Internal function to create ml-service extension.
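 *
 * Parses the given configuration (a JSON object), opens the single-shot
 * handle or constructs the pipeline accordingly, then starts the message
 * thread and waits until it is initialized.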
568 : */
569 : int
570 60 : _ml_service_extension_create (ml_service_s * mls, JsonObject * object)
571 : {
572 : ml_extension_s *ext;
573 60 : g_autofree gchar *thread_name = g_strdup_printf ("ml-ext-msg-%d", getpid ());
574 : int status;
575 :
576 60 : mls->priv = ext = g_try_new0 (ml_extension_s, 1);
577 60 : if (ext == NULL) {
578 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
579 : "Failed to allocate memory for ml-service extension. Out of memory?");
580 : }
581 :
582 60 : ext->type = ML_EXTENSION_TYPE_UNKNOWN;
583 60 : ext->running = FALSE;
584 60 : ext->timeout = DEFAULT_TIMEOUT;
585 60 : ext->max_input = DEFAULT_MAX_INPUT;
586 60 : ext->node_table = g_hash_table_new_full (g_str_hash, g_str_equal, g_free,
587 : _ml_extension_node_info_free);
588 :
589 60 : status = _ml_extension_conf_parse_json (mls, object);
590 60 : if (status != ML_ERROR_NONE) {
591 12 : _ml_error_report_return (status,
592 : "Failed to parse the ml-service extension configuration.");
593 : }
594 :
595 48 : g_mutex_lock (&mls->lock);
596 :
597 48 : ext->msg_queue = g_async_queue_new_full (_ml_extension_msg_free);
598 48 : ext->msg_thread = g_thread_new (thread_name, _ml_extension_msg_thread, mls);
599 :
600 : /* Wait until the message thread has been initialized. */
601 48 : g_cond_wait (&mls->cond, &mls->lock);
602 48 : g_mutex_unlock (&mls->lock);
603 :
604 48 : return ML_ERROR_NONE;
605 : }
606 :
607 : /**
608 : * @brief Internal function to release ml-service extension.
609 : */
610 : int
611 60 : _ml_service_extension_destroy (ml_service_s * mls)
612 : {
613 60 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
614 :
615 : /* This is expected to be an internal call to release the handle, so a NULL extension is not an error. */
616 60 : if (!ext)
617 0 : return ML_ERROR_NONE;
618 :
619 : /**
620 : * Close the message thread.
621 : * If model inference is running, the message thread may be waiting for the result.
622 : * This can take time, so do not hold the extension lock while joining.
623 : */
624 60 : ext->running = FALSE;
625 60 : if (ext->msg_thread) {
626 48 : g_thread_join (ext->msg_thread);
627 48 : ext->msg_thread = NULL;
628 : }
629 :
630 60 : if (ext->msg_queue) {
631 48 : g_async_queue_unref (ext->msg_queue);
632 48 : ext->msg_queue = NULL;
633 : }
634 :
635 60 : if (ext->single) {
636 35 : ml_single_close (ext->single);
637 35 : ext->single = NULL;
638 : }
639 :
640 60 : if (ext->pipeline) {
641 21 : ml_pipeline_stop (ext->pipeline);
642 21 : ml_pipeline_destroy (ext->pipeline);
643 21 : ext->pipeline = NULL;
644 : }
645 :
646 60 : if (ext->node_table) {
647 60 : g_hash_table_destroy (ext->node_table);
648 60 : ext->node_table = NULL;
649 : }
650 :
651 60 : g_free (ext);
652 60 : mls->priv = NULL;
653 :
654 60 : return ML_ERROR_NONE;
655 : }
656 :
657 : /**
658 : * @brief Internal function to start ml-service extension.
659 : */
660 : int
661 2 : _ml_service_extension_start (ml_service_s * mls)
662 : {
663 2 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
664 2 : int status = ML_ERROR_NONE;
665 :
666 2 : switch (ext->type) {
667 2 : case ML_EXTENSION_TYPE_PIPELINE:
668 2 : status = ml_pipeline_start (ext->pipeline);
669 2 : break;
670 0 : case ML_EXTENSION_TYPE_SINGLE:
671 : /* Do nothing. */
672 0 : break;
673 0 : default:
674 0 : status = ML_ERROR_NOT_SUPPORTED;
675 0 : break;
676 : }
677 :
678 2 : return status;
679 : }
680 :
681 : /**
682 : * @brief Internal function to stop ml-service extension.
683 : */
684 : int
685 2 : _ml_service_extension_stop (ml_service_s * mls)
686 : {
687 2 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
688 2 : int status = ML_ERROR_NONE;
689 :
690 2 : switch (ext->type) {
691 2 : case ML_EXTENSION_TYPE_PIPELINE:
692 2 : status = ml_pipeline_stop (ext->pipeline);
693 2 : break;
694 0 : case ML_EXTENSION_TYPE_SINGLE:
695 : /* Do nothing. */
696 0 : break;
697 0 : default:
698 0 : status = ML_ERROR_NOT_SUPPORTED;
699 0 : break;
700 : }
701 :
702 2 : return status;
703 : }
704 :
705 : /**
706 : * @brief Internal function to get the information of required input data.
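 *
 * For the single type, @a name is ignored. For the pipeline type, @a name
 * should be the name of a registered input node.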
707 : */
708 : int
709 26 : _ml_service_extension_get_input_information (ml_service_s * mls,
710 : const char *name, ml_tensors_info_h * info)
711 : {
712 26 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
713 : int status;
714 :
715 26 : switch (ext->type) {
716 13 : case ML_EXTENSION_TYPE_SINGLE:
717 13 : status = ml_single_get_input_info (ext->single, info);
718 13 : break;
719 13 : case ML_EXTENSION_TYPE_PIPELINE:
720 : {
721 : ml_service_node_info_s *node_info;
722 :
723 13 : node_info = _ml_extension_node_info_get (ext, name);
724 :
725 13 : if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
726 5 : status = _ml_tensors_info_create_from (node_info->info, info);
727 : } else {
728 8 : status = ML_ERROR_INVALID_PARAMETER;
729 : }
730 13 : break;
731 : }
732 0 : default:
733 0 : status = ML_ERROR_NOT_SUPPORTED;
734 0 : break;
735 : }
736 :
737 26 : return status;
738 : }
739 :
740 : /**
741 : * @brief Internal function to get the information of output data.
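 *
 * For the single type, @a name is ignored. For the pipeline type, @a name
 * should be the name of a registered output node.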
742 : */
743 : int
744 18 : _ml_service_extension_get_output_information (ml_service_s * mls,
745 : const char *name, ml_tensors_info_h * info)
746 : {
747 18 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
748 : int status;
749 :
750 18 : switch (ext->type) {
751 7 : case ML_EXTENSION_TYPE_SINGLE:
752 7 : status = ml_single_get_output_info (ext->single, info);
753 7 : break;
754 11 : case ML_EXTENSION_TYPE_PIPELINE:
755 : {
756 : ml_service_node_info_s *node_info;
757 :
758 11 : node_info = _ml_extension_node_info_get (ext, name);
759 :
760 11 : if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_OUTPUT) {
761 3 : status = _ml_tensors_info_create_from (node_info->info, info);
762 : } else {
763 8 : status = ML_ERROR_INVALID_PARAMETER;
764 : }
765 11 : break;
766 : }
767 0 : default:
768 0 : status = ML_ERROR_NOT_SUPPORTED;
769 0 : break;
770 : }
771 :
772 18 : if (status != ML_ERROR_NONE) {
773 8 : if (*info) {
774 0 : ml_tensors_info_destroy (*info);
775 0 : *info = NULL;
776 : }
777 : }
778 :
779 18 : return status;
780 : }
781 :
782 : /**
783 : * @brief Internal function to set the information for ml-service extension.
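 *
 * Recognized keys are "input_queue_size" (or "max_input"), the maximum number
 * of queued input messages, and "timeout", the wait time of the message
 * thread in milliseconds. Other keys are silently ignored here.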
784 : */
785 : int
786 34 : _ml_service_extension_set_information (ml_service_s * mls, const char *name,
787 : const char *value)
788 : {
789 34 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
790 :
791 : /* Update the message-queue limit and other options. */
792 34 : if (g_ascii_strcasecmp (name, "input_queue_size") == 0 ||
793 34 : g_ascii_strcasecmp (name, "max_input") == 0) {
794 2 : ext->max_input = (guint) g_ascii_strtoull (value, NULL, 10);
795 32 : } else if (g_ascii_strcasecmp (name, "timeout") == 0) {
796 0 : ext->timeout = (guint) g_ascii_strtoull (value, NULL, 10);
797 : }
798 :
799 34 : return ML_ERROR_NONE;
800 : }
801 :
802 : /**
803 : * @brief Internal function to add input data to be processed by the model in the ml-service extension handle.
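 *
 * The input data is cloned and pushed into the message queue. The request is
 * rejected with ML_ERROR_STREAMS_PIPE when the queue already holds 'max_input'
 * messages. For the pipeline type, @a name should be the name of a registered
 * input node.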
804 : */
805 : int
806 72 : _ml_service_extension_request (ml_service_s * mls, const char *name,
807 : const ml_tensors_data_h data)
808 : {
809 72 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
810 : ml_extension_msg_s *msg;
811 : int status, len;
812 :
813 72 : if (ext->type == ML_EXTENSION_TYPE_PIPELINE) {
814 : ml_service_node_info_s *node_info;
815 :
816 23 : if (!STR_IS_VALID (name)) {
817 4 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
818 : "The parameter, name '%s', is invalid.", name);
819 : }
820 :
821 19 : node_info = _ml_extension_node_info_get (ext, name);
822 :
823 19 : if (!node_info || node_info->type != ML_SERVICE_NODE_TYPE_INPUT) {
824 4 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
825 : "The parameter, name '%s', is invalid, cannot find the input node from pipeline.",
826 : name);
827 : }
828 : }
829 :
830 64 : len = g_async_queue_length (ext->msg_queue);
831 :
832 64 : if (ext->max_input > 0 && len > 0 && ext->max_input <= len) {
833 2 : _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
834 : "Failed to push input data into the queue, the max number of input is %u.",
835 : ext->max_input);
836 : }
837 :
838 62 : msg = g_try_new0 (ml_extension_msg_s, 1);
839 62 : if (!msg) {
840 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
841 : "Failed to allocate the ml-service extension message. Out of memory?");
842 : }
843 :
844 62 : msg->name = g_strdup (name);
845 62 : status = ml_tensors_data_clone (data, &msg->input);
846 :
847 62 : if (status != ML_ERROR_NONE) {
848 0 : _ml_extension_msg_free (msg);
849 0 : _ml_error_report_return (status, "Failed to clone input data.");
850 : }
851 :
852 62 : g_async_queue_push (ext->msg_queue, msg);
853 :
854 62 : return ML_ERROR_NONE;
855 : }
|