-
Notifications
You must be signed in to change notification settings - Fork 752
Expand file tree
/
Copy pathproduct_models.py
More file actions
1336 lines (1038 loc) · 48 KB
/
product_models.py
File metadata and controls
1336 lines (1038 loc) · 48 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
"""Pydantic request/response models for the MemOS product API."""

import uuid
from typing import Any, Generic, Literal, TypeVar

from pydantic import BaseModel, Field, model_validator

# Import message types from core types module
from memos.log import get_logger
from memos.types import MessageList, MessagesType, PermissionDict, SearchMode

# Module-level logger used by the deprecation-warning validators below.
logger = get_logger(__name__)

# Generic payload type parameter for BaseResponse[T].
T = TypeVar("T")
class BaseRequest(BaseModel):
    """Base model for all requests."""

    # Marker base class: declares no fields of its own; every request model
    # in this module subclasses it.
class BaseResponse(BaseModel, Generic[T]):
    """Base model for all responses."""

    # HTTP-style status code; defaults to 200 (success).
    code: int = Field(default=200, description="Response status code")
    # Human-readable outcome message; always required.
    message: str = Field(default=..., description="Response message")
    # Typed payload; None when the operation carries no data.
    data: T | None = Field(default=None, description="Response data")
# Product API Models
class UserRegisterRequest(BaseRequest):
    """Request model for user registration."""

    # A fresh UUID4 string is generated when the caller supplies no ID.
    user_id: str = Field(
        description="User ID for registration",
        default_factory=lambda: str(uuid.uuid4()),
    )
    mem_cube_id: str | None = Field(default=None, description="Cube ID for registration")
    user_name: str | None = Field(default=None, description="User name for registration")
    interests: str | None = Field(default=None, description="User interests")
class GetMemoryPlaygroundRequest(BaseRequest):
    """Request model for getting memories."""

    user_id: str = Field(default=..., description="User ID")
    # Which memory store to read from.
    memory_type: Literal["text_mem", "act_mem", "param_mem", "para_mem"] = Field(
        default=...,
        description="Memory type",
    )
    mem_cube_ids: list[str] | None = Field(default=None, description="Cube IDs")
    search_query: str | None = Field(default=None, description="Search query")
    # Full-text matching is the default lookup strategy.
    search_type: Literal["embedding", "fulltext"] = Field(
        default="fulltext", description="Search type"
    )
# Start API Models
class Message(BaseModel):
    # A single chat turn exchanged between the user and the assistant.
    role: str = Field(default=..., description="Role of the message (user or assistant).")
    content: str = Field(default=..., description="Message content.")
class MemoryCreate(BaseRequest):
    # Developer-facing payload for creating memories from messages,
    # raw text content, or a document path.
    user_id: str = Field(default=..., description="User ID")
    messages: MessageList | None = Field(default=None, description="List of messages to store.")
    memory_content: str | None = Field(default=None, description="Content to store as memory")
    doc_path: str | None = Field(default=None, description="Path to document to store")
    mem_cube_id: str | None = Field(default=None, description="ID of the memory cube")
class MemCubeRegister(BaseRequest):
    # Payload identifying the MemCube to load/register.
    mem_cube_name_or_path: str = Field(
        default=...,
        description="Name or path of the MemCube to register.",
    )
    mem_cube_id: str | None = Field(default=None, description="ID for the MemCube")
class ChatRequest(BaseRequest):
    """Request model for chat operations.

    This model is used as the algorithm-facing chat interface, while also
    remaining backward compatible with older developer-facing APIs.
    """

    # ==== Basic identifiers ====
    user_id: str = Field(..., description="User ID")
    query: str = Field(..., description="Chat query message")
    # Cube scoping: which cubes may be read from / written to during chat.
    readable_cube_ids: list[str] | None = Field(
        None, description="List of cube IDs user can read for multi-cube chat"
    )
    writable_cube_ids: list[str] | None = Field(
        None, description="List of cube IDs user can write for multi-cube chat"
    )
    history: MessageList | None = Field(None, description="Chat history")
    mode: SearchMode = Field(SearchMode.FAST, description="search mode: fast, fine, or mixture")
    system_prompt: str | None = Field(None, description="Base system prompt to use for chat")
    top_k: int = Field(10, description="Number of results to return")
    session_id: str | None = Field(None, description="Session ID for soft-filtering memories")
    include_preference: bool = Field(True, description="Whether to handle preference memory")
    pref_top_k: int = Field(6, description="Number of preference results to return")
    # LLM generation controls; None falls back to server-side defaults.
    model_name_or_path: str | None = Field(None, description="Model name to use for chat")
    max_tokens: int | None = Field(None, description="Max tokens to generate")
    temperature: float | None = Field(None, description="Temperature for sampling")
    top_p: float | None = Field(None, description="Top-p (nucleus) sampling parameter")
    add_message_on_answer: bool = Field(True, description="Add dialogs to memory after chat")
    manager_user_id: str | None = Field(None, description="Manager User ID")
    project_id: str | None = Field(None, description="Project ID")
    relativity: float = Field(
        0.45,
        ge=0,
        description=(
            "Relevance threshold for recalled memories. "
            "Only memories with metadata.relativity >= relativity will be returned. "
            "Use 0 to disable threshold filtering. Default: 0.45."
        ),
    )
    # ==== Filter conditions ====
    filter: dict[str, Any] | None = Field(
        None,
        description="""
    Filter for the memory, example:
    {
        "`and` or `or`": [
            {"id": "uuid-xxx"},
            {"created_at": {"gt": "2024-01-01"}},
        ]
    }
    """,
    )
    # ==== Extended capabilities ====
    internet_search: bool = Field(False, description="Whether to use internet search")
    threshold: float = Field(0.5, description="Threshold for filtering references")
    # ==== Backward compatibility ====
    moscube: bool = Field(
        False,
        description="(Deprecated) Whether to use legacy MemOSCube pipeline.",
    )
    mem_cube_id: str | None = Field(
        None,
        description=(
            "(Deprecated) Single cube ID to use for chat. "
            "Prefer `readable_cube_ids` / `writable_cube_ids` for multi-cube chat."
        ),
    )

    @model_validator(mode="after")
    def _convert_deprecated_fields(self):
        """
        Normalize fields for algorithm interface while preserving backward compatibility.

        Rules:
        - mem_cube_id → readable_cube_ids / writable_cube_ids if they are missing
        - moscube: log warning when True (deprecated)
        """
        # ---- mem_cube_id backward compatibility ----
        if self.mem_cube_id is not None:
            logger.warning(
                "ChatRequest.mem_cube_id is deprecated and will be removed in a future version. "
                "Please migrate to `readable_cube_ids` / `writable_cube_ids`."
            )
            # Only fill the new fields when the caller did not set them explicitly.
            if not self.readable_cube_ids:
                self.readable_cube_ids = [self.mem_cube_id]
            if not self.writable_cube_ids:
                self.writable_cube_ids = [self.mem_cube_id]
        # ---- Deprecated moscube flag ----
        if self.moscube:
            logger.warning(
                "ChatRequest.moscube is deprecated. Legacy MemOSCube pipeline "
                "will be removed in a future version."
            )
        return self
class ChatPlaygroundRequest(ChatRequest):
    """Request model for chat operations in playground."""

    # Optional onboarding step marker; the description lists "first" / "second".
    beginner_guide_step: str | None = Field(
        default=None,
        description="Whether to use beginner guide, option: [first, second]",
    )
class ChatBusinessRequest(ChatRequest):
    """Request model for chat operations for business user."""

    # Key identifying the business account; required.
    business_key: str = Field(default=..., description="Business User Key")
    need_search: bool = Field(default=False, description="Whether to need search before chat")
class ChatCompleteRequest(BaseRequest):
    """Request model for chat operations. (Deprecated) Use APIChatCompleteRequest instead."""

    user_id: str = Field(..., description="User ID")
    query: str = Field(..., description="Chat query message")
    mem_cube_id: str | None = Field(None, description="Cube ID to use for chat")
    history: MessageList | None = Field(None, description="Chat history")
    internet_search: bool = Field(False, description="Whether to use internet search")
    system_prompt: str | None = Field(None, description="Base prompt to use for chat")
    top_k: int = Field(10, description="Number of results to return")
    threshold: float = Field(0.5, description="Threshold for filtering references")
    session_id: str | None = Field(None, description="Session ID for soft-filtering memories")
    include_preference: bool = Field(True, description="Whether to handle preference memory")
    pref_top_k: int = Field(6, description="Number of preference results to return")
    filter: dict[str, Any] | None = Field(None, description="Filter for the memory")
    # LLM generation controls; None falls back to server-side defaults.
    model_name_or_path: str | None = Field(None, description="Model name to use for chat")
    max_tokens: int | None = Field(None, description="Max tokens to generate")
    temperature: float | None = Field(None, description="Temperature for sampling")
    top_p: float | None = Field(None, description="Top-p (nucleus) sampling parameter")
    add_message_on_answer: bool = Field(True, description="Add dialogs to memory after chat")
    # Legacy aliases kept for older callers.
    base_prompt: str | None = Field(None, description="(Deprecated) Base prompt alias")
    moscube: bool = Field(
        False, description="(Deprecated) Whether to use legacy MemOSCube pipeline"
    )
class UserCreate(BaseRequest):
    # Payload for creating a user record; role defaults to plain "USER".
    user_name: str | None = Field(default=None, description="Name of the user")
    role: str = Field(default="USER", description="Role of the user")
    user_id: str = Field(default=..., description="User ID")
class CubeShare(BaseRequest):
    # Identifies the user a cube is shared with.
    target_user_id: str = Field(default=..., description="Target user ID to share with")
# Response Models
class SimpleResponse(BaseResponse[None]):
    """Simple response model for operations without data return."""

    # Inherits code/message/data from BaseResponse; data is typed as None.
class UserRegisterResponse(BaseResponse[dict]):
    """Response model for user registration."""

    # data carries the registration result as a dict.
class MemoryResponse(BaseResponse[list]):
    """Response model for memory operations."""

    # data carries a list payload of memory items.
class SuggestionResponse(BaseResponse[list]):
    """Response model for suggestion operations."""

    # NOTE(review): the base generic argument is `list`, but `data` is overridden
    # below as dict[str, list[str]] — the override wins at runtime; confirm the
    # base was not meant to be BaseResponse[dict].
    data: dict[str, list[str]] | None = Field(None, description="Response data")
class AddStatusResponse(BaseResponse[dict]):
    """Response model for add status operations."""

    # data carries add-task status information as a dict.
class ConfigResponse(BaseResponse[None]):
    """Response model for configuration endpoint."""

    # No payload; only code/message are meaningful.
class SearchResponse(BaseResponse[dict]):
    """Response model for search operations."""

    # data carries search results as a dict.
class ChatResponse(BaseResponse[str]):
    """Response model for chat operations."""

    # data is the chat answer as a plain string.
class GetMemoryResponse(BaseResponse[dict]):
    """Response model for getting memories."""

    # data carries the retrieved memories as a dict.
class DeleteMemoryResponse(BaseResponse[dict]):
    """Response model for deleting memories."""

    # data carries the deletion result as a dict.
class UserResponse(BaseResponse[dict]):
    """Response model for user operations."""

    # data carries a single user's info as a dict.
class UserListResponse(BaseResponse[list]):
    """Response model for user list operations."""

    # data carries a list of users.
class MemoryCreateRequest(BaseRequest):
    """Request model for creating memories."""

    user_id: str = Field(default=..., description="User ID")
    # Accepts either a raw string or a structured message payload.
    messages: str | MessagesType | None = Field(
        default=None, description="List of messages to store."
    )
    memory_content: str | None = Field(default=None, description="Memory content to store")
    doc_path: str | None = Field(default=None, description="Path to document to store")
    mem_cube_id: str | None = Field(default=None, description="Cube ID")
    source: str | None = Field(default=None, description="Source of the memory")
    user_profile: bool = Field(default=False, description="User profile memory")
    session_id: str | None = Field(default=None, description="Session id")
    task_id: str | None = Field(default=None, description="Task ID for monitoring async tasks")
class SearchRequest(BaseRequest):
    """Request model for searching memories."""

    user_id: str = Field(default=..., description="User ID")
    query: str = Field(default=..., description="Search query")
    mem_cube_id: str | None = Field(default=None, description="Cube ID to search in")
    top_k: int = Field(default=10, description="Number of results to return")
    session_id: str | None = Field(
        default=None, description="Session ID for soft-filtering memories"
    )
class APISearchRequest(BaseRequest):
    """Request model for searching memories."""

    # ==== Basic inputs ====
    query: str = Field(
        ...,
        description="User search query",
    )
    user_id: str = Field(..., description="User ID")
    # ==== Cube scoping ====
    readable_cube_ids: list[str] | None = Field(
        None,
        description=(
            "List of cube IDs that are readable for this request. "
            "Required for algorithm-facing API; optional for developer-facing API."
        ),
    )
    # ==== Search mode ====
    mode: SearchMode = Field(
        SearchMode.FAST,
        description="Search mode: fast, fine, or mixture.",
    )
    session_id: str | None = Field(
        None,
        description=(
            "Session ID used as a soft signal to prioritize more relevant memories. "
            "Only used for weighting, not as a hard filter."
        ),
    )
    # ==== Result control ====
    top_k: int = Field(
        10,
        ge=1,
        description="Number of textual memories to retrieve (top-K). Default: 10.",
    )
    relativity: float = Field(
        0.45,
        ge=0,
        description=(
            "Relevance threshold for recalled memories. "
            "Only memories with metadata.relativity >= relativity will be returned. "
            "Use 0 to disable threshold filtering. Default: 0.45."
        ),
    )
    dedup: Literal["no", "sim", "mmr"] | None = Field(
        "mmr",
        description=(
            "Optional dedup option for textual memories. "
            "Use 'no' for no dedup, 'sim' for similarity dedup, 'mmr' for MMR-based dedup. "
            "If None, default exact-text dedup is applied."
        ),
    )
    pref_top_k: int = Field(
        6,
        ge=0,
        description="Number of preference memories to retrieve (top-K). Default: 6.",
    )
    include_preference: bool = Field(
        True,
        description=(
            "Whether to retrieve preference memories along with general memories. "
            "If enabled, the system will automatically recall user preferences "
            "relevant to the query. Default: True."
        ),
    )
    search_tool_memory: bool = Field(
        True,
        description=(
            "Whether to retrieve tool memories along with general memories. "
            "If enabled, the system will automatically recall tool memories "
            "relevant to the query. Default: True."
        ),
    )
    tool_mem_top_k: int = Field(
        6,
        ge=0,
        description="Number of tool memories to retrieve (top-K). Default: 6.",
    )
    include_skill_memory: bool = Field(
        True,
        description="Whether to retrieve skill memories along with general memories. "
        "If enabled, the system will automatically recall skill memories "
        "relevant to the query. Default: True.",
    )
    skill_mem_top_k: int = Field(
        3,
        ge=0,
        description="Number of skill memories to retrieve (top-K). Default: 3.",
    )
    # ==== Filter conditions ====
    # TODO: maybe add detailed description later
    filter: dict[str, Any] | None = Field(
        None,
        description="""
    Filter for the memory, example:
    {
        "`and` or `or`": [
            {"id": "uuid-xxx"},
            {"created_at": {"gt": "2024-01-01"}},
        ]
    }
    """,
    )
    # ==== Extended capabilities ====
    internet_search: bool = Field(
        False,
        description=(
            "Whether to enable internet search in addition to memory search. "
            "Primarily used by internal algorithms. Default: False."
        ),
    )
    # Internal use, not supported in API yet
    threshold: float | None = Field(
        None,
        description=(
            "Internal similarity threshold for searching plaintext memories. "
            "If None, default thresholds will be applied."
        ),
    )
    # Internal field for search memory type
    search_memory_type: str = Field(
        "All",
        description="Type of memory to search: All, WorkingMemory, LongTermMemory, UserMemory, OuterMemory, ToolSchemaMemory, ToolTrajectoryMemory, RawFileMemory, AllSummaryMemory, SkillMemory, PreferenceMemory",
    )
    # ==== Context ====
    chat_history: MessageList | None = Field(
        None,
        description=(
            "Historical chat messages used internally by algorithms. "
            "If None, internal stored history may be used; "
            "if provided (even an empty list), this value will be used as-is."
        ),
    )
    # ==== Backward compatibility ====
    # FIX: the previous description was copy-pasted from the feedback model
    # ("write feedback into" / `writable_cube_ids`); the validator below maps
    # this field onto `readable_cube_ids`, so the text now matches behavior.
    mem_cube_id: str | None = Field(
        None,
        description=(
            "(Deprecated) Single cube ID to search in. "
            "Prefer `readable_cube_ids` for multi-cube search."
        ),
    )
    moscube: bool = Field(
        False,
        description="(Deprecated / internal) Whether to use legacy MemOSCube path.",
    )
    operation: list[PermissionDict] | None = Field(
        None,
        description="(Internal) Operation definitions for multi-cube read permissions.",
    )
    # ==== Source for plugin ====
    source: str | None = Field(
        None,
        description="Source of the search query [plugin will router diff search]",
    )
    neighbor_discovery: bool = Field(
        False,
        description="Whether to enable neighbor discovery. "
        "If enabled, the system will automatically recall neighbor chunks "
        "relevant to the query. Default: False.",
    )

    @model_validator(mode="after")
    def _convert_deprecated_fields(self) -> "APISearchRequest":
        """
        Convert deprecated fields to new fields for backward compatibility.

        Ensures full backward compatibility:
        - mem_cube_id → readable_cube_ids
        - moscube is ignored with warning
        - operation ignored
        """
        # Convert mem_cube_id to readable_cube_ids (new field takes priority)
        if self.mem_cube_id is not None:
            if not self.readable_cube_ids:
                self.readable_cube_ids = [self.mem_cube_id]
            logger.warning(
                "Deprecated field `mem_cube_id` is used in APISearchRequest. "
                "It will be removed in a future version. "
                "Please migrate to `readable_cube_ids`."
            )
        # Reject moscube if set to True (no longer supported)
        if self.moscube:
            logger.warning(
                "Deprecated field `moscube` is used in APISearchRequest. "
                "Legacy MemOSCube pipeline will be removed soon."
            )
        # Warn about operation (internal)
        if self.operation:
            logger.warning(
                "Internal field `operation` is provided in APISearchRequest. "
                "This field is deprecated and ignored."
            )
        return self
class APIADDRequest(BaseRequest):
    """Request model for creating memories."""

    # ==== Basic identifiers ====
    # FIX: the annotation was `str` with a default of None, so an omitted
    # user_id silently became None while the type claimed otherwise. The
    # annotation now reflects the actual (optional) contract; the default is
    # unchanged, so existing callers are unaffected.
    user_id: str | None = Field(None, description="User ID")
    session_id: str | None = Field(
        None,
        description="Session ID. If not provided, a default session will be used.",
    )
    task_id: str | None = Field(None, description="Task ID for monitoring async tasks")
    manager_user_id: str | None = Field(None, description="Manager User ID")
    project_id: str | None = Field(None, description="Project ID")
    # ==== Multi-cube writing ====
    writable_cube_ids: list[str] | None = Field(
        None, description="List of cube IDs user can write for multi-cube add"
    )
    # ==== Async control ====
    async_mode: Literal["async", "sync"] = Field(
        "async",
        description=(
            "Whether to add memory in async mode. "
            "Use 'async' to enqueue background add (non-blocking), "
            "or 'sync' to add memories in the current call. "
            "Default: 'async'."
        ),
    )
    mode: Literal["fast", "fine"] | None = Field(
        None,
        description=(
            "(Internal) Add mode used only when async_mode='sync'. "
            "If set to 'fast', the handler will use a fast add pipeline. "
            "Ignored when async_mode='async'."
        ),
    )
    # ==== Business tags & info ====
    custom_tags: list[str] | None = Field(
        None,
        description=(
            "Custom tags for this add request, e.g. ['Travel', 'family']. "
            "These tags can be used as filters in search."
        ),
    )
    info: dict[str, Any] | None = Field(
        None,
        description=(
            "Additional metadata for the add request. "
            "All keys can be used as filters in search. "
            "Example: "
            "{'agent_id': 'xxxxxx', "
            "'app_id': 'xxxx', "
            "'source_type': 'web', "
            "'source_url': 'https://www.baidu.com', "
            "'source_content': '西湖是杭州最著名的景点'}."
        ),
    )
    # ==== Input content ====
    messages: MessagesType | None = Field(
        None,
        description=(
            "List of messages to store. Supports: "
            "- system / user / assistant messages with 'content' and 'chat_time'; "
            "- tool messages including: "
            " * tool_description (name, description, parameters), "
            " * tool_input (call_id, name, argument), "
            " * raw tool messages where content is str or list[str], "
            " * tool_output with structured output items "
            " (input_text / input_image / input_file, etc.). "
            "Also supports pure input items when there is no dialog."
        ),
    )
    # ==== Chat history ====
    chat_history: MessageList | None = Field(
        None,
        description=(
            "Historical chat messages used internally by algorithms. "
            "If None, internal stored history will be used; "
            "if provided (even an empty list), this value will be used as-is."
        ),
    )
    # ==== Feedback flag ====
    is_feedback: bool = Field(
        False,
        description=("Whether this request represents user feedback. Default: False."),
    )
    # ==== Backward compatibility fields (will delete later) ====
    mem_cube_id: str | None = Field(
        None,
        description="(Deprecated) Target cube ID for this add request (optional for developer API).",
    )
    memory_content: str | None = Field(
        None,
        description="(Deprecated) Plain memory content to store. Prefer using `messages`.",
    )
    doc_path: str | None = Field(
        None,
        description="(Deprecated / internal) Path to document to store.",
    )
    source: str | None = Field(
        None,
        description=(
            "(Deprecated) Simple source tag of the memory. "
            "Prefer using `info.source_type` / `info.source_url`."
        ),
    )
    operation: list[PermissionDict] | None = Field(
        None,
        description="(Internal) Operation definitions for multi-cube write permissions.",
    )

    @model_validator(mode="after")
    def _convert_deprecated_fields(self) -> "APIADDRequest":
        """
        Convert deprecated fields to new fields for backward compatibility.

        This keeps the API fully backward-compatible while allowing
        internal logic to use only the new fields.

        Rules:
        - mem_cube_id → writable_cube_ids
        - memory_content → messages
        - doc_path → messages (input_file)
        - source → info["source"]
        - operation → merged into writable_cube_ids (ignored otherwise)
        """
        # ---- async_mode / mode relationship ----
        if self.async_mode == "async" and self.mode is not None:
            logger.warning(
                "APIADDRequest.mode is ignored when async_mode='async'. "
                "Fast add pipeline is only available in sync mode."
            )
            self.mode = None
        # Convert mem_cube_id to writable_cube_ids (new field takes priority)
        if self.mem_cube_id:
            logger.warning(
                "APIADDRequest.mem_cube_id is deprecated and will be removed in a future version. "
                "Please use `writable_cube_ids` instead."
            )
            if not self.writable_cube_ids:
                self.writable_cube_ids = [self.mem_cube_id]
        # Handle deprecated operation field
        if self.operation:
            logger.warning(
                "APIADDRequest.operation is deprecated and will be removed. "
                "Use `writable_cube_ids` for multi-cube writes."
            )
        # Convert memory_content to messages (new field takes priority)
        if self.memory_content:
            logger.warning(
                "APIADDRequest.memory_content is deprecated. "
                "Use `messages` with a structured message instead."
            )
            if self.messages is None:
                self.messages = []
            self.messages.append(
                {
                    "type": "text",
                    "text": self.memory_content,
                }
            )
        # Handle deprecated doc_path
        if self.doc_path:
            logger.warning(
                "APIADDRequest.doc_path is deprecated. "
                "Use `messages` with an input_file item instead."
            )
            if self.messages is None:
                self.messages = []
            self.messages.append(
                {
                    "type": "file",
                    "file": {"path": self.doc_path},
                }
            )
        # Convert source to info.source_type (new field takes priority)
        if self.source:
            logger.warning(
                "APIADDRequest.source is deprecated. "
                "Use `info['source_type']` / `info['source_url']` instead."
            )
            if self.info is None:
                self.info = {}
            self.info.setdefault("source", self.source)
        return self
class APIFeedbackRequest(BaseRequest):
    """Request model for processing feedback info."""

    user_id: str = Field(..., description="User ID")
    session_id: str | None = Field(
        "default_session", description="Session ID for soft-filtering memories"
    )
    task_id: str | None = Field(None, description="Task ID for monitoring async tasks")
    history: MessageList | None = Field(..., description="Chat history")
    retrieved_memory_ids: list[str] | None = Field(
        None, description="Retrieved memory ids at last turn"
    )
    feedback_content: str | None = Field(..., description="Feedback content to process")
    feedback_time: str | None = Field(None, description="Feedback time")
    writable_cube_ids: list[str] | None = Field(
        None, description="List of cube IDs user can write for multi-cube add"
    )
    async_mode: Literal["sync", "async"] = Field(
        "async", description="feedback mode: sync or async"
    )
    corrected_answer: bool = Field(False, description="Whether need return corrected answer")
    info: dict[str, Any] | None = Field(
        None,
        description=(
            "Additional metadata for the add request. "
            "All keys can be used as filters in search. "
            "Example: "
            "{'agent_id': 'xxxxxx', "
            "'app_id': 'xxxx', "
            "'source_type': 'web', "
            "'source_url': 'https://www.baidu.com', "
            "'source_content': 'West Lake is the most famous scenic spot in Hangzhou'}."
        ),
    )
    # ==== mem_cube_id is NOT enabled====
    # FIX: the previous description was copy-pasted from the search model
    # ("search in" / `readable_cube_ids`); the validator below maps this field
    # onto `writable_cube_ids`, so the text now matches behavior.
    mem_cube_id: str | None = Field(
        None,
        description=(
            "(Deprecated) Single cube ID to write feedback into. "
            "Prefer `writable_cube_ids` for multi-cube feedback."
        ),
    )

    @model_validator(mode="after")
    def _convert_deprecated_fields(self) -> "APIFeedbackRequest":
        """Map deprecated `mem_cube_id` onto `writable_cube_ids` when the latter is unset."""
        if self.mem_cube_id and not self.writable_cube_ids:
            logger.warning(
                "APIFeedbackRequest.mem_cube_id is deprecated and will be removed in a future "
                "version. Please use `writable_cube_ids` instead."
            )
            self.writable_cube_ids = [self.mem_cube_id]
        return self
class APIChatCompleteRequest(BaseRequest):
    """Request model for chat operations."""

    user_id: str = Field(..., description="User ID")
    query: str = Field(..., description="Chat query message")
    # Cube scoping for multi-cube chat.
    readable_cube_ids: list[str] | None = Field(
        None, description="List of cube IDs user can read for multi-cube chat"
    )
    writable_cube_ids: list[str] | None = Field(
        None, description="List of cube IDs user can write for multi-cube chat"
    )
    history: MessageList | None = Field(None, description="Chat history")
    mode: SearchMode = Field(SearchMode.FAST, description="search mode: fast, fine, or mixture")
    system_prompt: str | None = Field(None, description="Base system prompt to use for chat")
    top_k: int = Field(10, description="Number of results to return")
    session_id: str | None = Field(None, description="Session ID for soft-filtering memories")
    include_preference: bool = Field(True, description="Whether to handle preference memory")
    pref_top_k: int = Field(6, description="Number of preference results to return")
    # LLM generation controls; None falls back to server-side defaults.
    model_name_or_path: str | None = Field(None, description="Model name to use for chat")
    max_tokens: int | None = Field(None, description="Max tokens to generate")
    temperature: float | None = Field(None, description="Temperature for sampling")
    top_p: float | None = Field(None, description="Top-p (nucleus) sampling parameter")
    add_message_on_answer: bool = Field(True, description="Add dialogs to memory after chat")
    manager_user_id: str | None = Field(None, description="Manager User ID")
    project_id: str | None = Field(None, description="Project ID")
    relativity: float = Field(
        0.45,
        ge=0,
        description=(
            "Relevance threshold for recalled memories. "
            "Only memories with metadata.relativity >= relativity will be returned. "
            "Use 0 to disable threshold filtering. Default: 0.45."
        ),
    )
    # ==== Filter conditions ====
    filter: dict[str, Any] | None = Field(
        None,
        description="""
    Filter for the memory, example:
    {
        "`and` or `or`": [
            {"id": "uuid-xxx"},
            {"created_at": {"gt": "2024-01-01"}},
        ]
    }
    """,
    )
    # ==== Extended capabilities ====
    internet_search: bool = Field(False, description="Whether to use internet search")
    threshold: float = Field(0.5, description="Threshold for filtering references")
    # ==== Backward compatibility ====
    mem_cube_id: str | None = Field(None, description="Cube ID to use for chat")
    moscube: bool = Field(
        False, description="(Deprecated) Whether to use legacy MemOSCube pipeline"
    )
class AddStatusRequest(BaseRequest):
    """Request model for checking add status."""

    # The cube is the only required identifier; user/session narrow the query.
    mem_cube_id: str = Field(default=..., description="Cube ID")
    user_id: str | None = Field(default=None, description="User ID")
    session_id: str | None = Field(default=None, description="Session ID")
class GetMemoryRequest(BaseRequest):
    """Request model for getting memories."""

    mem_cube_id: str = Field(default=..., description="Cube ID")
    user_id: str | None = Field(default=None, description="User ID")
    # Toggles for the auxiliary memory kinds returned alongside the main ones.
    include_preference: bool = Field(
        default=True, description="Whether to return preference memory"
    )
    include_tool_memory: bool = Field(default=True, description="Whether to return tool memory")
    include_skill_memory: bool = Field(default=True, description="Whether to return skill memory")
    filter: dict[str, Any] | None = Field(default=None, description="Filter for the memory")
    # Pagination is optional: leaving both fields None exports everything.
    page: int | None = Field(
        default=None,
        description="Page number (starts from 1). If None, exports all data without pagination.",
    )
    page_size: int | None = Field(
        default=None,
        description="Number of items per page. If None, exports all data without pagination.",
    )
class GetMemoryDashboardRequest(GetMemoryRequest):
    """Request model for getting memories for dashboard."""

    # Relaxes the parent contract: the cube ID becomes optional for dashboards.
    mem_cube_id: str | None = Field(default=None, description="Cube ID")
class DeleteMemoryRequest(BaseRequest):
    """Request model for deleting memories."""

    writable_cube_ids: list[str] | None = Field(None, description="Writable cube IDs")
    memory_ids: list[str] | None = Field(None, description="Memory IDs")
    file_ids: list[str] | None = Field(None, description="File IDs")
    filter: dict[str, Any] | None = Field(None, description="Filter for the memory")
    user_id: str | None = Field(
        None,
        description="Quick delete condition: remove memories for this user_id.",
    )
    session_id: str | None = Field(
        None,
        description="Quick delete condition: remove memories for this session_id.",
    )
    conversation_id: str | None = Field(
        None,
        description="Alias of session_id for backward compatibility.",
    )
    auto_cleanup_working: bool | None = Field(
        False,
        description=(
            "(Internal) Whether to automatically delete related WorkingMemory nodes "
            "based on working_binding metadata when deleting by memory_ids."
        ),
    )

    @model_validator(mode="after")
    def normalize_session_alias(self) -> "DeleteMemoryRequest":
        """Normalize conversation_id to session_id."""
        # Supplying both aliases with different values is ambiguous — reject it.
        if self.conversation_id and self.session_id and self.conversation_id != self.session_id:
            raise ValueError("conversation_id and session_id must be the same when both are set")
        # Copy the legacy alias into session_id when only the alias was given.
        if self.session_id is None and self.conversation_id is not None:
            self.session_id = self.conversation_id
        return self
class SuggestionRequest(BaseRequest):
    """Request model for getting suggestion queries."""

    user_id: str = Field(default=..., description="User ID")
    mem_cube_id: str = Field(default=..., description="Cube ID")
    # Suggestions default to Chinese ("zh").
    language: Literal["zh", "en"] = Field(default="zh", description="Language for suggestions")
    message: MessagesType | None = Field(default=None, description="List of messages to store.")
# ─── MemOS Client Response Models ──────────────────────────────────────────────
class MessageDetail(BaseModel):
    """Individual message detail model based on actual API response."""

    # Schema-less container: `extra="allow"` keeps whatever fields the API returns.
    model_config = {"extra": "allow"}
class MemoryDetail(BaseModel):
    """Individual memory detail model based on actual API response."""

    # Schema-less container: `extra="allow"` keeps whatever fields the API returns.
    model_config = {"extra": "allow"}
class FileDetail(BaseModel):
    """Individual file detail model based on actual API response."""

    # Schema-less container: `extra="allow"` keeps whatever fields the API returns.
    model_config = {"extra": "allow"}
class GetMessagesData(BaseModel):
    """Data model for get messages response based on actual API."""

    # NOTE: the alias equals the field name, so it is effectively a no-op.
    message_detail_list: list[MessageDetail] = Field(
        default_factory=list,
        alias="message_detail_list",
        description="List of message details",
    )
class GetCreateKnowledgebaseData(BaseModel):
    """Data model for create knowledgebase response based on actual API."""

    # Identifier of the newly created knowledgebase.
    id: str = Field(default=..., description="Knowledgebase id")
class SearchMemoryData(BaseModel):
    """Data model for search memory response based on actual API."""

    memory_detail_list: list[MemoryDetail] = Field(
        default_factory=list, alias="memory_detail_list", description="List of memory details"
    )
    message_detail_list: list[MessageDetail] | None = Field(
        None, alias="message_detail_list", description="List of message details (usually None)"
    )
    preference_detail_list: list[MessageDetail] | None = Field(
        None,
        alias="preference_detail_list",
        description="List of preference details (usually None)",
    )
    tool_memory_detail_list: list[MessageDetail] | None = Field(
        None,
        alias="tool_memory_detail_list",
        # FIX: description typo "tool_memor" → "tool_memory".
        description="List of tool_memory details (usually None)",
    )
    # FIX: annotation was plain `str` with a None default, so the declared type
    # contradicted the actual value when the note is absent; made it optional.
    preference_note: str | None = Field(
        None, alias="preference_note", description="String of preference_note"
    )
class GetKnowledgebaseFileData(BaseModel):
    """Data model for knowledgebase file listing response based on actual API."""

    # NOTE: the alias equals the field name, so it is effectively a no-op.
    file_detail_list: list[FileDetail] = Field(
        default_factory=list, alias="file_detail_list", description="List of files details"
    )
class GetMemoryData(BaseModel):
    """Data model for get memory response based on actual API."""

    memory_detail_list: list[MemoryDetail] = Field(
        default_factory=list, alias="memory_detail_list", description="List of memory details"
    )
    preference_detail_list: list[MessageDetail] | None = Field(
        None, alias="preference_detail_list", description="List of preference detail"
    )
class AddMessageData(BaseModel):
    """Data model for add message response based on actual API."""

    # Outcome of the enqueue plus the handle needed to poll the async task.
    success: bool = Field(default=..., description="Operation success status")
    task_id: str = Field(default=..., description="Operation task_id")
    status: str = Field(default=..., description="Operation task status")
class DeleteMessageData(BaseModel):
"""Data model for delete Message based on actual API."""
success: bool = Field(..., description="Operation success status")