Skip to content

Commit 7b5acc2

Browse files
committed
[API] Add ML LXM Service API (internal) for large model interactions
This commit introduces the ML LXM Service API, a new C API designed to facilitate interactions with large-scale models such as Large Language Models (LLMs). Signed-off-by: hyunil park <hyunil46.park@samsung.com>
1 parent e4d25a8 commit 7b5acc2

3 files changed

Lines changed: 412 additions & 1 deletion

File tree

Lines changed: 135 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,135 @@
1+
/* SPDX-License-Identifier: Apache-2.0 */
2+
/**
3+
* @file ml-lxm-service-internal.h
4+
* @date 23 JULY 2025
5+
* @brief Machine Learning LXM(LLM, LVM, etc.) Service API
6+
* @see https://github.com/nnstreamer/api
7+
* @author Hyunil Park <hyunil46.park@samsung.com>
8+
* @bug No known bugs except for NYI items
9+
*/
10+
11+
#ifndef __ML_LXM_SERVICE_INTERNAL_H__
12+
#define __ML_LXM_SERVICE_INTERNAL_H__
13+
14+
#include <stdlib.h>
15+
#include <ml-api-service.h>
16+
#ifdef __cplusplus
17+
extern "C"
18+
{
19+
#endif
20+
21+
/**
 * @brief Enumeration for LXM service availability status.
 */
typedef enum
{
  ML_LXM_AVAILABILITY_AVAILABLE = 0, /**< The LXM service is available for use. */
  ML_LXM_AVAILABILITY_DEVICE_NOT_ELIGIBLE, /**< The device is not eligible to run the service. */
  ML_LXM_AVAILABILITY_SERVICE_DISABLED, /**< The service is disabled on this device. */
  ML_LXM_AVAILABILITY_MODEL_NOT_READY, /**< The model is not ready to serve requests. */
  ML_LXM_AVAILABILITY_UNKNOWN /**< The availability status cannot be determined. */
} ml_lxm_availability_e;
32+
33+
/**
34+
* @brief Checks LXM service availability.
35+
* @param[out] status Current availability status.
36+
* @return ML_ERROR_NONE on success, error code otherwise.
37+
*/
38+
int ml_lxm_check_availability (ml_lxm_availability_e * status);
39+
40+
/**
 * @brief A handle for an LXM session.
 */
typedef void *ml_lxm_session_h;
44+
45+
/**
46+
* @brief Creates an LXM session.
47+
* @param[out] session Session handle.
48+
* @param[in] config_path Path to configuration file.
49+
* @param[in] instructions Initial instructions (optional).
50+
* @return ML_ERROR_NONE on success.
51+
*/
52+
int ml_lxm_session_create (ml_lxm_session_h * session, const char *config_path, const char *instructions);
53+
54+
/**
55+
* @brief Destroys an LXM session.
56+
* @param[in] session Session handle.
57+
* @return ML_ERROR_NONE on success.
58+
*/
59+
int ml_lxm_session_destroy (ml_lxm_session_h session);
60+
61+
/**
 * @brief A handle for an LXM prompt.
 */
typedef void *ml_lxm_prompt_h;
65+
66+
/**
67+
* @brief Creates a prompt object.
68+
* @param[out] prompt Prompt handle.
69+
* @return ML_ERROR_NONE on success.
70+
*/
71+
int ml_lxm_prompt_create (ml_lxm_prompt_h * prompt);
72+
73+
/**
74+
* @brief Appends text to a prompt.
75+
* @param[in] prompt Prompt handle.
76+
* @param[in] text Text to append.
77+
* @return ML_ERROR_NONE on success.
78+
*/
79+
int ml_lxm_prompt_append_text (ml_lxm_prompt_h prompt, const char *text);
80+
81+
/**
82+
* @brief Appends an instruction to a prompt.
83+
* @param[in] prompt Prompt handle.
84+
* @param[in] instruction Instruction to append.
85+
* @return ML_ERROR_NONE on success.
86+
*/
87+
int ml_lxm_prompt_append_instruction (ml_lxm_prompt_h prompt, const char *instruction);
88+
89+
/**
90+
* @brief Destroys a prompt object.
91+
* @param[in] prompt Prompt handle.
92+
* @return ML_ERROR_NONE on success.
93+
*/
94+
int ml_lxm_prompt_destroy (ml_lxm_prompt_h prompt);
95+
96+
/**
97+
* @brief Sets runtime instructions for a session.
98+
* @param[in] session Session handle.
99+
* @param[in] instructions New instructions.
100+
* @return ML_ERROR_NONE on success.
101+
*/
102+
int ml_lxm_session_set_instructions (ml_lxm_session_h session, const char *instructions);
103+
104+
/**
 * @brief Generation options for LXM responses.
 */
typedef struct
{
  double temperature; /**< Creativity control (0.0~2.0) */
  size_t max_tokens; /**< Maximum tokens to generate */
} ml_lxm_generation_options_s;
112+
113+
/**
114+
* @brief Token streaming callback type.
115+
* @param token Generated token string.
116+
* @param user_data User-defined context.
117+
*/
118+
typedef void (*ml_lxm_token_cb) (ml_service_event_e event, ml_information_h event_data, void *user_data);
119+
120+
/**
121+
* @brief Generates an token-streamed response.
122+
* @param[in] session Session handle.
123+
* @param[in] prompt Prompt handle.
124+
* @param[in] options Generation parameters.
125+
* @param[in] token_callback Callback for each generated token.
126+
* @param[in] user_data User context passed to callback.
127+
* @return ML_ERROR_NONE on success.
128+
*/
129+
int ml_lxm_session_respond (ml_lxm_session_h session, ml_lxm_prompt_h prompt, const ml_lxm_generation_options_s * options, ml_lxm_token_cb token_callback, void *user_data);
130+
131+
#ifdef __cplusplus
132+
}
133+
#endif
134+
#endif
135+
/* __ML_LXM_SERVICE_INTERNAL_H__ */

c/src/meson.build

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
nns_capi_common_srcs = files('ml-api-common.c', 'ml-api-inference-internal.c')
22
nns_capi_single_srcs = files('ml-api-inference-single.c')
33
nns_capi_pipeline_srcs = files('ml-api-inference-pipeline.c')
4-
nns_capi_service_srcs = files('ml-api-service.c', 'ml-api-service-extension.c', 'ml-api-service-agent-client.c')
4+
nns_capi_service_srcs = files('ml-api-service.c', 'ml-api-service-extension.c', 'ml-api-service-agent-client.c', 'ml-lxm-service.c')
55

66
if support_nnstreamer_edge
77
nns_capi_service_srcs += files('ml-api-service-query.c')

0 commit comments

Comments
 (0)