Line data Source code
1 : /* SPDX-License-Identifier: Apache-2.0 */
2 : /**
3 : * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved.
4 : *
5 : * @file ml-api-service-extension.c
6 : * @date 1 September 2023
7 : * @brief ML service extension C-API.
8 : * @see https://github.com/nnstreamer/api
9 : * @author Jaeyun Jung <jy1210.jung@samsung.com>
10 : * @bug No known bugs except for NYI items
11 : */
12 :
13 : #include "ml-api-service-extension.h"
14 :
15 : /**
16 : * @brief The time to wait for new input data in message thread, in millisecond.
17 : */
18 : #define DEFAULT_TIMEOUT 200
19 :
20 : /**
21 : * @brief The max number of input data in message queue (0 for no limit).
22 : */
23 : #define DEFAULT_MAX_INPUT 5
24 :
/**
 * @brief Internal enumeration for ml-service extension types.
 */
typedef enum
{
  ML_EXTENSION_TYPE_UNKNOWN = 0, /**< Type not determined yet (configuration not parsed). */
  ML_EXTENSION_TYPE_SINGLE = 1, /**< Single-shot inference with an opened model (ml_single_h). */
  ML_EXTENSION_TYPE_PIPELINE = 2, /**< Constructed pipeline with named input/output nodes. */

  ML_EXTENSION_TYPE_MAX
} ml_extension_type_e;
36 :
/**
 * @brief Internal structure of the message in ml-service extension handle.
 */
typedef struct
{
  gchar *name; /**< Target input node name (pipeline type); may be NULL for single type. */
  ml_tensors_data_h input; /**< Cloned input data; owned by the message until handed to the pipeline. */
  ml_tensors_data_h output; /**< Inference result filled by ml_single_invoke (single type). */
} ml_extension_msg_s;
46 :
/**
 * @brief Internal structure for ml-service extension handle.
 */
typedef struct
{
  ml_extension_type_e type; /**< Configured service type (single or pipeline). */
  gboolean running; /**< Message-thread loop flag; set by the thread, cleared on destroy. */
  guint timeout; /**< The time to wait for new input data in message thread, in millisecond (see DEFAULT_TIMEOUT). */
  guint max_input; /**< The max number of input data in message queue (see DEFAULT_MAX_INPUT). */
  GThread *msg_thread; /**< Worker thread that pops and processes queued messages. */
  GAsyncQueue *msg_queue; /**< Queue of ml_extension_msg_s, released with _ml_extension_msg_free. */

  /**
   * Handles for each ml-service extension type.
   * - single : Default. Open model file and prepare invoke. The configuration should include model information.
   * - pipeline : Construct a pipeline from configuration. The configuration should include pipeline description.
   */
  ml_single_h single;

  ml_pipeline_h pipeline;
  GHashTable *node_table; /**< Maps node name to ml_service_node_info_s; owns both keys and values. */
} ml_extension_s;
69 :
70 : /**
71 : * @brief Internal function to handle the asynchronous invoke.
72 : */
73 : static int
74 0 : _ml_extension_async_cb (const ml_tensors_data_h data, void *user_data)
75 : {
76 0 : ml_service_s *mls = (ml_service_s *) user_data;
77 :
78 0 : return _ml_service_invoke_event_new_data (mls, NULL, data);
79 : }
80 :
81 : /**
82 : * @brief Internal function to create node info in pipeline.
83 : */
84 : static ml_service_node_info_s *
85 0 : _ml_extension_node_info_new (ml_service_s * mls, const gchar * name,
86 : ml_service_node_type_e type)
87 : {
88 0 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
89 : ml_service_node_info_s *node_info;
90 :
91 0 : if (!STR_IS_VALID (name)) {
92 0 : _ml_error_report_return (NULL,
93 : "Cannot add new node info, invalid node name '%s'.", name);
94 : }
95 :
96 0 : if (g_hash_table_lookup (ext->node_table, name)) {
97 0 : _ml_error_report_return (NULL,
98 : "Cannot add duplicated node '%s' in ml-service pipeline.", name);
99 : }
100 :
101 0 : node_info = g_try_new0 (ml_service_node_info_s, 1);
102 0 : if (!node_info) {
103 0 : _ml_error_report_return (NULL,
104 : "Failed to allocate new memory for node info in ml-service pipeline. Out of memory?");
105 : }
106 :
107 0 : node_info->name = g_strdup (name);
108 0 : node_info->type = type;
109 0 : node_info->mls = mls;
110 :
111 0 : g_hash_table_insert (ext->node_table, g_strdup (name), node_info);
112 :
113 0 : return node_info;
114 : }
115 :
116 : /**
117 : * @brief Internal function to release pipeline node info.
118 : */
119 : static void
120 0 : _ml_extension_node_info_free (gpointer data)
121 : {
122 0 : ml_service_node_info_s *node_info = (ml_service_node_info_s *) data;
123 :
124 0 : if (!node_info)
125 0 : return;
126 :
127 0 : if (node_info->info)
128 0 : ml_tensors_info_destroy (node_info->info);
129 :
130 0 : g_clear_pointer (&node_info->name, g_free);
131 0 : g_free (node_info);
132 : }
133 :
134 : /**
135 : * @brief Internal function to get the node info in ml-service extension.
136 : */
137 : static ml_service_node_info_s *
138 0 : _ml_extension_node_info_get (ml_extension_s * ext, const gchar * name)
139 : {
140 0 : if (!STR_IS_VALID (name))
141 0 : return NULL;
142 :
143 0 : return g_hash_table_lookup (ext->node_table, name);
144 : }
145 :
146 : /**
147 : * @brief Internal function to release ml-service extension message.
148 : */
149 : static void
150 0 : _ml_extension_msg_free (gpointer data)
151 : {
152 0 : ml_extension_msg_s *msg = (ml_extension_msg_s *) data;
153 :
154 0 : if (!msg)
155 0 : return;
156 :
157 0 : if (msg->input)
158 0 : ml_tensors_data_destroy (msg->input);
159 0 : if (msg->output)
160 0 : ml_tensors_data_destroy (msg->output);
161 0 : g_clear_pointer (&msg->name, g_free);
162 :
163 0 : g_free (msg);
164 : }
165 :
/**
 * @brief Internal function to process ml-service extension message.
 * Worker loop: pops queued messages and either invokes the single-shot model
 * or pushes the data into the pipeline's input node, until 'running' is cleared.
 */
static gpointer
_ml_extension_msg_thread (gpointer data)
{
  ml_service_s *mls = (ml_service_s *) data;
  ml_extension_s *ext = (ml_extension_s *) mls->priv;
  int status;

  /* Signal _ml_service_extension_create() that the thread is initialized. */
  g_mutex_lock (&mls->lock);
  ext->running = TRUE;
  g_cond_signal (&mls->cond);
  g_mutex_unlock (&mls->lock);

  /* NOTE(review): ext->running is read here and cleared from another thread
   * without atomics or a lock; consider an atomic flag to rule out a data race. */
  while (ext->running) {
    ml_extension_msg_s *msg;

    /* Bounded wait so a cleared 'running' flag is noticed within the timeout. */
    msg = g_async_queue_timeout_pop (ext->msg_queue,
        ext->timeout * G_TIME_SPAN_MILLISECOND);

    if (msg) {
      switch (ext->type) {
        case ML_EXTENSION_TYPE_SINGLE:
        {
          status = ml_single_invoke (ext->single, msg->input, &msg->output);

          if (status == ML_ERROR_NONE) {
            /* Forward the inference result to registered event callbacks. */
            _ml_service_invoke_event_new_data (mls, NULL, msg->output);
          } else {
            _ml_error_report
                ("Failed to invoke the model in ml-service extension thread.");
          }
          break;
        }
        case ML_EXTENSION_TYPE_PIPELINE:
        {
          ml_service_node_info_s *node_info;

          node_info = _ml_extension_node_info_get (ext, msg->name);

          if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
            /* The input data will be released in the pipeline. */
            status = ml_pipeline_src_input_data (node_info->handle, msg->input,
                ML_PIPELINE_BUF_POLICY_AUTO_FREE);
            /* Ownership moved to the pipeline; prevent double free in _ml_extension_msg_free. */
            msg->input = NULL;

            if (status != ML_ERROR_NONE) {
              _ml_error_report
                  ("Failed to push input data into the pipeline in ml-service extension thread.");
            }
          } else {
            _ml_error_report
                ("Failed to push input data into the pipeline, cannot find input node '%s'.",
                msg->name);
          }
          break;
        }
        default:
          /* Unknown ml-service extension type, skip this. */
          break;
      }

      _ml_extension_msg_free (msg);
    }
  }

  return NULL;
}
235 :
236 : /**
237 : * @brief Wrapper to release tensors-info handle.
238 : */
239 : static void
240 0 : _ml_extension_destroy_tensors_info (void *data)
241 : {
242 0 : ml_tensors_info_h info = (ml_tensors_info_h) data;
243 :
244 0 : if (info)
245 0 : ml_tensors_info_destroy (info);
246 0 : }
247 :
/**
 * @brief Internal function to parse single-shot info from json.
 * Builds an ml-option from the "single" configuration object and opens the
 * single-shot handle (ext->single). Returns an ml-api error code.
 */
static int
_ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single)
{
  ml_extension_s *ext = (ml_extension_s *) mls->priv;
  ml_option_h option;
  int status;

  status = ml_option_create (&option);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_return (status,
        "Failed to parse configuration file, cannot create ml-option handle.");
  }

  /**
   * 1. "key" : load model info from ml-service agent.
   * 2. "model" : configuration file includes model path.
   */
  if (json_object_has_member (single, "key")) {
    const gchar *key = json_object_get_string_member (single, "key");

    /* NOTE(review): if "key" exists but is empty, this falls through with
     * status ML_ERROR_NONE and no model path set — confirm that
     * ml_single_open_with_option rejects the missing "models" option. */
    if (STR_IS_VALID (key)) {
      ml_information_h model_info;

      status = ml_service_model_get_activated (key, &model_info);
      if (status == ML_ERROR_NONE) {
        gchar *paths = NULL;

        /** @todo parse desc and other information if necessary. */
        /* NOTE(review): return value of ml_information_get is ignored;
         * 'paths' may remain NULL if the "path" field is absent. */
        ml_information_get (model_info, "path", (void **) (&paths));
        ml_option_set (option, "models", g_strdup (paths), g_free);

        ml_information_destroy (model_info);
      } else {
        _ml_error_report
            ("Failed to parse configuration file, cannot get the model of '%s'.",
            key);
        goto error;
      }
    }
  } else if (json_object_has_member (single, "model")) {
    JsonNode *file_node = json_object_get_member (single, "model");
    gchar *paths = NULL;

    /* Multiple model files are joined into one comma-separated string. */
    status = _ml_service_conf_parse_string (file_node, ",", &paths);
    if (status != ML_ERROR_NONE) {
      _ml_error_report
          ("Failed to parse configuration file, it should have valid model path.");
      goto error;
    }

    ml_option_set (option, "models", paths, g_free);
  } else {
    status = ML_ERROR_INVALID_PARAMETER;
    _ml_error_report
        ("Failed to parse configuration file, cannot get the model path.");
    goto error;
  }

  if (json_object_has_member (single, "framework")) {
    const gchar *fw = json_object_get_string_member (single, "framework");

    if (STR_IS_VALID (fw))
      ml_option_set (option, "framework_name", g_strdup (fw), g_free);
  }

  if (json_object_has_member (single, "input_info")) {
    JsonNode *info_node = json_object_get_member (single, "input_info");
    ml_tensors_info_h in_info;

    status = _ml_service_conf_parse_tensors_info (info_node, &in_info);
    if (status != ML_ERROR_NONE) {
      _ml_error_report
          ("Failed to parse configuration file, cannot parse input information.");
      goto error;
    }

    /* Ownership of in_info transfers to the option (freed by destroy-notify). */
    ml_option_set (option, "input_info", in_info,
        _ml_extension_destroy_tensors_info);
  }

  if (json_object_has_member (single, "output_info")) {
    JsonNode *info_node = json_object_get_member (single, "output_info");
    ml_tensors_info_h out_info;

    status = _ml_service_conf_parse_tensors_info (info_node, &out_info);
    if (status != ML_ERROR_NONE) {
      _ml_error_report
          ("Failed to parse configuration file, cannot parse output information.");
      goto error;
    }

    ml_option_set (option, "output_info", out_info,
        _ml_extension_destroy_tensors_info);
  }

  /* parse latency profiling option - "profile": "true" or "1" */
  if (json_object_has_member (single, "profile")) {
    const gchar *profile = json_object_get_string_member (single, "profile");

    if (STR_IS_VALID (profile))
      ml_option_set (option, "profile", g_strdup (profile), g_free);
  }

  /* parse latency profiling option - "latency": "true" or "1" */
  if (json_object_has_member (single, "latency")) {
    const gchar *latency = json_object_get_string_member (single, "latency");

    /* NOTE(review): the "latency" value is stored under the "profile" option
     * key, overwriting any value set from "profile" above — confirm whether
     * the key should be "latency" or this aliasing is intentional. */
    if (STR_IS_VALID (latency))
      ml_option_set (option, "profile", g_strdup (latency), g_free);
  }

  if (json_object_has_member (single, "custom")) {
    const gchar *custom = json_object_get_string_member (single, "custom");

    if (STR_IS_VALID (custom))
      ml_option_set (option, "custom", g_strdup (custom), g_free);
  }

  if (json_object_has_member (single, "invoke_dynamic")) {
    const gchar *invoke_dynamic =
        json_object_get_string_member (single, "invoke_dynamic");

    if (STR_IS_VALID (invoke_dynamic)) {
      ml_option_set (option, "invoke_dynamic", g_strdup (invoke_dynamic),
          g_free);
    }
  }

  if (json_object_has_member (single, "invoke_async")) {
    const gchar *invoke_async =
        json_object_get_string_member (single, "invoke_async");

    if (STR_IS_VALID (invoke_async)) {
      ml_option_set (option, "invoke_async", g_strdup (invoke_async), g_free);

      /* Async invoke delivers results via callback instead of the msg thread. */
      if (g_ascii_strcasecmp (invoke_async, "true") == 0) {
        ml_option_set (option, "async_callback", _ml_extension_async_cb, NULL);
        ml_option_set (option, "async_data", mls, NULL);
      }
    }
  }

error:
  /* Open the single-shot handle only when every step above succeeded. */
  if (status == ML_ERROR_NONE)
    status = ml_single_open_with_option (&ext->single, option);

  ml_option_destroy (option);
  return status;
}
400 :
/**
 * @brief Internal function to parse the node info in pipeline.
 * Accepts a single JSON object or an array of objects; registers each node in
 * the node table and acquires its pipeline src/sink handle.
 */
static int
_ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node,
    ml_service_node_type_e type)
{
  ml_extension_s *ext = (ml_extension_s *) mls->priv;
  JsonArray *array = NULL;
  JsonObject *object;
  guint i, n;
  int status;

  /* A bare object is treated as an array of length one. */
  n = 1;
  if (JSON_NODE_HOLDS_ARRAY (node)) {
    array = json_node_get_array (node);
    n = json_array_get_length (array);
  }

  for (i = 0; i < n; i++) {
    const gchar *name = NULL;
    ml_service_node_info_s *node_info;

    if (array)
      object = json_array_get_object_element (array, i);
    else
      object = json_node_get_object (node);

    name = _ml_service_get_json_string_member (object, "name");

    /* The node table takes ownership; on any error below, the entry is
     * released when the table is destroyed. */
    node_info = _ml_extension_node_info_new (mls, name, type);
    if (!node_info) {
      _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
          "Failed to parse configuration file, cannot add new node information.");
    }

    /* Tensors information ("info") is mandatory for each node. */
    if (json_object_has_member (object, "info")) {
      JsonNode *info_node = json_object_get_member (object, "info");

      status = _ml_service_conf_parse_tensors_info (info_node,
          &node_info->info);
      if (status != ML_ERROR_NONE) {
        _ml_error_report_return (status,
            "Failed to parse configuration file, cannot parse the information.");
      }
    } else {
      _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
          "Failed to parse configuration file, cannot find node information.");
    }

    /* Acquire the appsrc handle (input) or register the sink callback (output). */
    switch (type) {
      case ML_SERVICE_NODE_TYPE_INPUT:
        status = ml_pipeline_src_get_handle (ext->pipeline, name,
            &node_info->handle);
        break;
      case ML_SERVICE_NODE_TYPE_OUTPUT:
        status = ml_pipeline_sink_register (ext->pipeline, name,
            _ml_service_pipeline_sink_cb, node_info, &node_info->handle);
        break;
      default:
        status = ML_ERROR_INVALID_PARAMETER;
        break;
    }

    if (status != ML_ERROR_NONE) {
      _ml_error_report_return (status,
          "Failed to parse configuration file, cannot get the handle for pipeline node.");
    }
  }

  return ML_ERROR_NONE;
}
473 :
/**
 * @brief Internal function to parse pipeline info from json.
 * Obtains the pipeline description (from the agent by "key", or inline from
 * "description"), constructs and starts the pipeline, and registers the
 * configured input/output nodes.
 */
static int
_ml_extension_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe)
{
  ml_extension_s *ext = (ml_extension_s *) mls->priv;
  g_autofree gchar *desc = NULL;
  int status;

  /**
   * 1. "key" : load pipeline from ml-service agent.
   * 2. "description" : configuration file includes pipeline description.
   */
  if (json_object_has_member (pipe, "key")) {
    const gchar *key = json_object_get_string_member (pipe, "key");

    /* NOTE(review): an empty "key" leaves desc NULL and proceeds to
     * ml_pipeline_construct — confirm that construct rejects a NULL description. */
    if (STR_IS_VALID (key)) {
      status = ml_service_pipeline_get (key, &desc);
      if (status != ML_ERROR_NONE) {
        _ml_error_report_return (status,
            "Failed to parse configuration file, cannot get the pipeline of '%s'.",
            key);
      }
    }
  } else if (json_object_has_member (pipe, "description")) {
    desc = g_strdup (json_object_get_string_member (pipe, "description"));
  } else {
    _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
        "Failed to parse configuration file, cannot get the pipeline description.");
  }

  status = ml_pipeline_construct (desc, NULL, NULL, &ext->pipeline);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_return (status,
        "Failed to parse configuration file, cannot construct the pipeline.");
  }

  /* Input nodes are optional; the pipeline may use non-appsrc sources. */
  if (json_object_has_member (pipe, "input_node")) {
    JsonNode *node = json_object_get_member (pipe, "input_node");

    status = _ml_extension_conf_parse_pipeline_node (mls, node,
        ML_SERVICE_NODE_TYPE_INPUT);
    if (status != ML_ERROR_NONE) {
      _ml_error_report_return (status,
          "Failed to parse configuration file, cannot get the input node.");
    }
  } else {
    _ml_logw
        ("No input node is defined in the pipeline. Might Non-appsrc be used?");
  }

  /* Output nodes are optional as well. */
  if (json_object_has_member (pipe, "output_node")) {
    JsonNode *node = json_object_get_member (pipe, "output_node");

    status = _ml_extension_conf_parse_pipeline_node (mls, node,
        ML_SERVICE_NODE_TYPE_OUTPUT);
    if (status != ML_ERROR_NONE) {
      _ml_error_report_return (status,
          "Failed to parse configuration file, cannot get the output node.");
    }
  } else {
    _ml_logw ("No output node is defined in the pipeline.");
  }

  /* Start pipeline when creating ml-service handle to check pipeline description. */
  status = ml_pipeline_start (ext->pipeline);
  if (status != ML_ERROR_NONE) {
    _ml_error_report_return (status,
        "Failed to parse configuration file, cannot start the pipeline.");
  }

  return ML_ERROR_NONE;
}
548 :
549 : /**
550 : * @brief Internal function to parse configuration file.
551 : */
552 : static int
553 0 : _ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object)
554 : {
555 0 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
556 : int status;
557 :
558 0 : if (json_object_has_member (object, "single")) {
559 0 : JsonObject *single = json_object_get_object_member (object, "single");
560 :
561 0 : status = _ml_extension_conf_parse_single (mls, single);
562 0 : if (status != ML_ERROR_NONE)
563 0 : return status;
564 :
565 0 : ext->type = ML_EXTENSION_TYPE_SINGLE;
566 0 : } else if (json_object_has_member (object, "pipeline")) {
567 0 : JsonObject *pipe = json_object_get_object_member (object, "pipeline");
568 :
569 0 : status = _ml_extension_conf_parse_pipeline (mls, pipe);
570 0 : if (status != ML_ERROR_NONE)
571 0 : return status;
572 :
573 0 : ext->type = ML_EXTENSION_TYPE_PIPELINE;
574 : } else {
575 0 : _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
576 : "Failed to parse configuration file, cannot get the valid type from configuration.");
577 : }
578 :
579 0 : return ML_ERROR_NONE;
580 : }
581 :
582 : /**
583 : * @brief Internal function to create ml-service extension.
584 : */
585 : int
586 0 : _ml_service_extension_create (ml_service_s * mls, JsonObject * object)
587 : {
588 : ml_extension_s *ext;
589 0 : g_autofree gchar *thread_name = g_strdup_printf ("ml-ext-msg-%d", getpid ());
590 : int status;
591 :
592 0 : mls->priv = ext = g_try_new0 (ml_extension_s, 1);
593 0 : if (ext == NULL) {
594 0 : _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
595 : "Failed to allocate memory for ml-service extension. Out of memory?");
596 : }
597 :
598 0 : ext->type = ML_EXTENSION_TYPE_UNKNOWN;
599 0 : ext->running = FALSE;
600 0 : ext->timeout = DEFAULT_TIMEOUT;
601 0 : ext->max_input = DEFAULT_MAX_INPUT;
602 0 : ext->node_table = g_hash_table_new_full (g_str_hash, g_str_equal, g_free,
603 : _ml_extension_node_info_free);
604 :
605 0 : status = _ml_extension_conf_parse_json (mls, object);
606 0 : if (status != ML_ERROR_NONE) {
607 0 : _ml_error_report_return (status,
608 : "Failed to parse the ml-service extension configuration.");
609 : }
610 :
611 0 : g_mutex_lock (&mls->lock);
612 :
613 0 : ext->msg_queue = g_async_queue_new_full (_ml_extension_msg_free);
614 0 : ext->msg_thread = g_thread_new (thread_name, _ml_extension_msg_thread, mls);
615 :
616 : /* Wait until the message thread has been initialized. */
617 0 : g_cond_wait (&mls->cond, &mls->lock);
618 0 : g_mutex_unlock (&mls->lock);
619 :
620 0 : return ML_ERROR_NONE;
621 : }
622 :
/**
 * @brief Internal function to release ml-service extension.
 * Stops the message thread first, then drains the queue and releases the
 * single/pipeline handles and the node table. Idempotent for a NULL priv.
 */
int
_ml_service_extension_destroy (ml_service_s * mls)
{
  ml_extension_s *ext = (ml_extension_s *) mls->priv;

  /* Supposed internal function call to release handle. */
  if (!ext)
    return ML_ERROR_NONE;

  /**
   * Close message thread.
   * If model inference is running, it may wait for the result in message thread.
   * This takes time, so do not call join with extension lock.
   */
  ext->running = FALSE;
  if (ext->msg_thread) {
    /* Join may block up to ext->timeout plus one in-flight invoke. */
    g_thread_join (ext->msg_thread);
    ext->msg_thread = NULL;
  }

  if (ext->msg_queue) {
    /* Remaining messages are released by the queue's item-free function. */
    g_async_queue_unref (ext->msg_queue);
    ext->msg_queue = NULL;
  }

  if (ext->single) {
    ml_single_close (ext->single);
    ext->single = NULL;
  }

  if (ext->pipeline) {
    ml_pipeline_stop (ext->pipeline);
    ml_pipeline_destroy (ext->pipeline);
    ext->pipeline = NULL;
  }

  /* Destroy the node table after the pipeline so sink callbacks cannot fire
   * against freed node info. */
  if (ext->node_table) {
    g_hash_table_destroy (ext->node_table);
    ext->node_table = NULL;
  }

  g_free (ext);
  mls->priv = NULL;

  return ML_ERROR_NONE;
}
672 :
673 : /**
674 : * @brief Internal function to start ml-service extension.
675 : */
676 : int
677 0 : _ml_service_extension_start (ml_service_s * mls)
678 : {
679 0 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
680 0 : int status = ML_ERROR_NONE;
681 :
682 0 : switch (ext->type) {
683 0 : case ML_EXTENSION_TYPE_PIPELINE:
684 0 : status = ml_pipeline_start (ext->pipeline);
685 0 : break;
686 0 : case ML_EXTENSION_TYPE_SINGLE:
687 : /* Do nothing. */
688 0 : break;
689 0 : default:
690 0 : status = ML_ERROR_NOT_SUPPORTED;
691 0 : break;
692 : }
693 :
694 0 : return status;
695 : }
696 :
697 : /**
698 : * @brief Internal function to stop ml-service extension.
699 : */
700 : int
701 0 : _ml_service_extension_stop (ml_service_s * mls)
702 : {
703 0 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
704 0 : int status = ML_ERROR_NONE;
705 :
706 0 : switch (ext->type) {
707 0 : case ML_EXTENSION_TYPE_PIPELINE:
708 0 : status = ml_pipeline_stop (ext->pipeline);
709 0 : break;
710 0 : case ML_EXTENSION_TYPE_SINGLE:
711 : /* Do nothing. */
712 0 : break;
713 0 : default:
714 0 : status = ML_ERROR_NOT_SUPPORTED;
715 0 : break;
716 : }
717 :
718 0 : return status;
719 : }
720 :
721 : /**
722 : * @brief Internal function to get the information of required input data.
723 : */
724 : int
725 0 : _ml_service_extension_get_input_information (ml_service_s * mls,
726 : const char *name, ml_tensors_info_h * info)
727 : {
728 0 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
729 : int status;
730 :
731 0 : switch (ext->type) {
732 0 : case ML_EXTENSION_TYPE_SINGLE:
733 0 : status = ml_single_get_input_info (ext->single, info);
734 0 : break;
735 0 : case ML_EXTENSION_TYPE_PIPELINE:
736 : {
737 : ml_service_node_info_s *node_info;
738 :
739 0 : node_info = _ml_extension_node_info_get (ext, name);
740 :
741 0 : if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_INPUT) {
742 0 : status = _ml_tensors_info_create_from (node_info->info, info);
743 : } else {
744 0 : status = ML_ERROR_INVALID_PARAMETER;
745 : }
746 0 : break;
747 : }
748 0 : default:
749 0 : status = ML_ERROR_NOT_SUPPORTED;
750 0 : break;
751 : }
752 :
753 0 : return status;
754 : }
755 :
756 : /**
757 : * @brief Internal function to get the information of output data.
758 : */
759 : int
760 0 : _ml_service_extension_get_output_information (ml_service_s * mls,
761 : const char *name, ml_tensors_info_h * info)
762 : {
763 0 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
764 : int status;
765 :
766 0 : switch (ext->type) {
767 0 : case ML_EXTENSION_TYPE_SINGLE:
768 0 : status = ml_single_get_output_info (ext->single, info);
769 0 : break;
770 0 : case ML_EXTENSION_TYPE_PIPELINE:
771 : {
772 : ml_service_node_info_s *node_info;
773 :
774 0 : node_info = _ml_extension_node_info_get (ext, name);
775 :
776 0 : if (node_info && node_info->type == ML_SERVICE_NODE_TYPE_OUTPUT) {
777 0 : status = _ml_tensors_info_create_from (node_info->info, info);
778 : } else {
779 0 : status = ML_ERROR_INVALID_PARAMETER;
780 : }
781 0 : break;
782 : }
783 0 : default:
784 0 : status = ML_ERROR_NOT_SUPPORTED;
785 0 : break;
786 : }
787 :
788 0 : if (status != ML_ERROR_NONE) {
789 0 : if (*info) {
790 0 : ml_tensors_info_destroy (*info);
791 0 : *info = NULL;
792 : }
793 : }
794 :
795 0 : return status;
796 : }
797 :
798 : /**
799 : * @brief Internal function to set the information for ml-service extension.
800 : */
801 : int
802 0 : _ml_service_extension_set_information (ml_service_s * mls, const char *name,
803 : const char *value)
804 : {
805 0 : ml_extension_s *ext = (ml_extension_s *) mls->priv;
806 :
807 : /* Check limitation of message queue and other options. */
808 0 : if (g_ascii_strcasecmp (name, "input_queue_size") == 0 ||
809 0 : g_ascii_strcasecmp (name, "max_input") == 0) {
810 0 : ext->max_input = (guint) g_ascii_strtoull (value, NULL, 10);
811 0 : } else if (g_ascii_strcasecmp (name, "timeout") == 0) {
812 0 : ext->timeout = (guint) g_ascii_strtoull (value, NULL, 10);
813 : }
814 :
815 0 : return ML_ERROR_NONE;
816 : }
817 :
/**
 * @brief Internal function to add an input data to process the model in ml-service extension handle.
 * Clones the input data and enqueues it for the message thread; for the
 * pipeline type, 'name' must identify a registered input node.
 */
int
_ml_service_extension_request (ml_service_s * mls, const char *name,
    const ml_tensors_data_h data)
{
  ml_extension_s *ext = (ml_extension_s *) mls->priv;
  ml_extension_msg_s *msg;
  int status, len;

  /* Validate the target node up front for the pipeline type. */
  if (ext->type == ML_EXTENSION_TYPE_PIPELINE) {
    ml_service_node_info_s *node_info;

    if (!STR_IS_VALID (name)) {
      _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
          "The parameter, name '%s', is invalid.", name);
    }

    node_info = _ml_extension_node_info_get (ext, name);

    if (!node_info || node_info->type != ML_SERVICE_NODE_TYPE_INPUT) {
      _ml_error_report_return (ML_ERROR_INVALID_PARAMETER,
          "The parameter, name '%s', is invalid, cannot find the input node from pipeline.",
          name);
    }
  }

  /* NOTE(review): the length check and the push below are not atomic; under
   * concurrent callers the queue may briefly exceed max_input — presumably
   * acceptable as a soft limit, confirm. */
  len = g_async_queue_length (ext->msg_queue);

  if (ext->max_input > 0 && len > 0 && ext->max_input <= len) {
    _ml_error_report_return (ML_ERROR_STREAMS_PIPE,
        "Failed to push input data into the queue, the max number of input is %u.",
        ext->max_input);
  }

  msg = g_try_new0 (ml_extension_msg_s, 1);
  if (!msg) {
    _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY,
        "Failed to allocate the ml-service extension message. Out of memory?");
  }

  /* The message owns a deep copy of the input; the caller keeps 'data'. */
  msg->name = g_strdup (name);
  status = ml_tensors_data_clone (data, &msg->input);

  if (status != ML_ERROR_NONE) {
    _ml_extension_msg_free (msg);
    _ml_error_report_return (status, "Failed to clone input data.");
  }

  g_async_queue_push (ext->msg_queue, msg);

  return ML_ERROR_NONE;
}
|