9 changes: 7 additions & 2 deletions apps/memos-local-openclaw/src/viewer/html.ts
@@ -1,4 +1,6 @@
-export const viewerHTML = `<!DOCTYPE html>
+export function viewerHTML(pluginVersion?: string): string {
+  const vBadge = pluginVersion ? `<span class="version-badge">v${pluginVersion}</span>` : '';
+  return `<!DOCTYPE html>
<html lang="zh-CN">
<head>
<meta charset="UTF-8">
@@ -110,6 +112,8 @@ input,textarea,select{font-family:inherit;font-size:inherit}
.topbar .brand{display:flex;align-items:center;gap:10px;font-weight:700;font-size:15px;color:var(--text);letter-spacing:-.02em;flex-shrink:0}
.topbar .brand .icon{width:32px;height:32px;display:flex;align-items:center;justify-content:center;font-size:22px;background:none;border-radius:0}
.topbar .brand .sub{font-weight:400;color:var(--text-muted);font-size:11px}
+.version-badge{font-size:10px;font-weight:600;color:var(--text-muted);background:rgba(255,255,255,.08);border:1px solid rgba(255,255,255,.1);padding:1px 7px;border-radius:6px;margin-left:6px;letter-spacing:.02em;user-select:all}
+[data-theme="light"] .version-badge{background:rgba(0,0,0,.05);border-color:rgba(0,0,0,.08);color:var(--text-sec)}
.topbar-center{flex:1;display:flex;justify-content:center}
.topbar .actions{display:flex;align-items:center;gap:6px;flex-shrink:0}

@@ -711,7 +715,7 @@ input,textarea,select{font-family:inherit;font-size:inherit}
<div class="topbar">
<div class="brand">
<div class="icon"><svg width="24" height="24" viewBox="0 0 120 120" fill="none" xmlns="http://www.w3.org/2000/svg" style="filter:drop-shadow(0 0 8px rgba(255,77,77,.3))"><defs><linearGradient id="tLG" x1="0%" y1="0%" x2="100%" y2="100%"><stop offset="0%" stop-color="#ff4d4d"/><stop offset="100%" stop-color="#991b1b"/></linearGradient></defs><path d="M60 10C30 10 15 35 15 55C15 75 30 95 45 100L45 110L55 110L55 100C55 100 60 102 65 100L65 110L75 110L75 100C90 95 105 75 105 55C105 35 90 10 60 10Z" fill="url(#tLG)"/><path d="M20 45C5 40 0 50 5 60C10 70 20 65 25 55C28 48 25 45 20 45Z" fill="url(#tLG)"/><path d="M100 45C115 40 120 50 115 60C110 70 100 65 95 55C92 48 95 45 100 45Z" fill="url(#tLG)"/><path d="M45 15Q35 5 30 8" stroke="#ff4d4d" stroke-width="2" stroke-linecap="round"/><path d="M75 15Q85 5 90 8" stroke="#ff4d4d" stroke-width="2" stroke-linecap="round"/><circle cx="45" cy="35" r="6" fill="#050810"/><circle cx="75" cy="35" r="6" fill="#050810"/><circle cx="46" cy="34" r="2" fill="#00e5cc"/><circle cx="76" cy="34" r="2" fill="#00e5cc"/></svg></div>
-<span data-i18n="title">OpenClaw Memory</span>
+<span data-i18n="title">OpenClaw Memory</span>${vBadge}
</div>
<div class="topbar-center">
<nav class="nav-tabs">
@@ -4103,3 +4107,4 @@ checkAuth();

</body>
</html>`;
+}
10 changes: 9 additions & 1 deletion apps/memos-local-openclaw/src/viewer/server.ts
@@ -43,6 +43,14 @@ export class ViewerServer {
private readonly ctx?: PluginContext;

private static readonly SESSION_TTL = 24 * 60 * 60 * 1000;
+  private static readonly PLUGIN_VERSION: string = (() => {
+    try {
+      const pkgPath = path.resolve(__dirname, "../../package.json");
+      return JSON.parse(fs.readFileSync(pkgPath, "utf-8")).version ?? "unknown";
+    } catch {
+      return "unknown";
+    }
+  })();
private resetToken: string;
private migrationRunning = false;
private migrationAbort = false;
@@ -336,7 +344,7 @@ export class ViewerServer {

private serveViewer(res: http.ServerResponse): void {
res.writeHead(200, { "Content-Type": "text/html; charset=utf-8", "Cache-Control": "no-store, no-cache, must-revalidate, max-age=0", "Pragma": "no-cache", "Expires": "0" });
-res.end(viewerHTML);
+res.end(viewerHTML(ViewerServer.PLUGIN_VERSION));
}

// ─── Data APIs ───
113 changes: 111 additions & 2 deletions src/memos/api/config.py
@@ -321,7 +321,7 @@ def get_activation_config() -> dict[str, Any]:

@staticmethod
def get_memreader_config() -> dict[str, Any]:
"""Get MemReader configuration."""
"""Get MemReader configuration for chat/doc extraction (fine-tuned 0.6B model)."""
return {
"backend": "openai",
"config": {
@@ -338,6 +338,107 @@ def get_memreader_config() -> dict[str, Any]:
},
}

+    @staticmethod
+    def get_memreader_general_llm_config() -> dict[str, Any]:
+        """Get general LLM configuration for non-chat/doc tasks.
+
+        Used for: hallucination filter, memory rewrite, memory merge,
+        tool trajectory extraction, skill memory extraction.
+
+        This is the fallback for image_parser_llm and preference_extractor_llm.
+        Fallback chain: MEMREADER_GENERAL_MODEL -> MEMREADER_MODEL (memreader config)
+
+        Note: If you have fine-tuned a custom model for chat/doc extraction only,
+        you should configure MEMREADER_GENERAL_MODEL to use a general-purpose LLM
+        for other tasks. Otherwise, all tasks will use the same MEMREADER_MODEL.
+        """
+        # Check if specific general model is configured
+        general_model = os.getenv("MEMREADER_GENERAL_MODEL")
+        if general_model:
+            return {
+                "backend": os.getenv("MEMREADER_GENERAL_BACKEND", "openai"),
+                "config": {
+                    "model_name_or_path": general_model,
+                    "temperature": 0.6,
+                    "max_tokens": int(os.getenv("MEMREADER_GENERAL_MAX_TOKENS", "8000")),
+                    "top_p": 0.95,
+                    "top_k": 20,
+                    "api_key": os.getenv(
+                        "MEMREADER_GENERAL_API_KEY", os.getenv("OPENAI_API_KEY", "EMPTY")
+                    ),
+                    "api_base": os.getenv(
+                        "MEMREADER_GENERAL_API_BASE",
+                        os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
+                    ),
+                    "remove_think_prefix": True,
+                },
+            }
+        # Fallback to memreader config (same behavior as before for users who don't customize)
+        return APIConfig.get_memreader_config()
+
+    @staticmethod
+    def get_image_parser_llm_config() -> dict[str, Any]:
+        """Get LLM configuration for image parsing (requires vision model).
+
+        Used for: image content extraction and analysis.
+        Requires a vision-capable model like GPT-4V, GPT-4o, etc.
+
+        Fallback chain: IMAGE_PARSER_MODEL -> general_llm -> OpenAI config
+        """
+        image_model = os.getenv("IMAGE_PARSER_MODEL")
+        if image_model:
+            return {
+                "backend": os.getenv("IMAGE_PARSER_BACKEND", "openai"),
+                "config": {
+                    "model_name_or_path": image_model,
+                    "temperature": 0.6,
+                    "max_tokens": int(os.getenv("IMAGE_PARSER_MAX_TOKENS", "4096")),
+                    "top_p": 0.95,
+                    "top_k": 20,
+                    "api_key": os.getenv(
+                        "IMAGE_PARSER_API_KEY", os.getenv("OPENAI_API_KEY", "EMPTY")
+                    ),
+                    "api_base": os.getenv(
+                        "IMAGE_PARSER_API_BASE",
+                        os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
+                    ),
+                    "remove_think_prefix": True,
+                },
+            }
+        # Fallback to general_llm config (which itself falls back to OpenAI)
+        return APIConfig.get_memreader_general_llm_config()
+
+    @staticmethod
+    def get_preference_extractor_llm_config() -> dict[str, Any]:
+        """Get LLM configuration for preference extraction.
+
+        Used for: extracting user preferences from conversations.
+
+        Fallback chain: PREFERENCE_EXTRACTOR_MODEL -> general_llm -> OpenAI config
+        """
+        pref_model = os.getenv("PREFERENCE_EXTRACTOR_MODEL")
+        if pref_model:
+            return {
+                "backend": os.getenv("PREFERENCE_EXTRACTOR_BACKEND", "openai"),
+                "config": {
+                    "model_name_or_path": pref_model,
+                    "temperature": 0.6,
+                    "max_tokens": int(os.getenv("PREFERENCE_EXTRACTOR_MAX_TOKENS", "8000")),
+                    "top_p": 0.95,
+                    "top_k": 20,
+                    "api_key": os.getenv(
+                        "PREFERENCE_EXTRACTOR_API_KEY", os.getenv("OPENAI_API_KEY", "EMPTY")
+                    ),
+                    "api_base": os.getenv(
+                        "PREFERENCE_EXTRACTOR_API_BASE",
+                        os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
+                    ),
+                    "remove_think_prefix": True,
+                },
+            }
+        # Fallback to general_llm config (which itself falls back to OpenAI)
+        return APIConfig.get_memreader_general_llm_config()

@staticmethod
def get_activation_vllm_config() -> dict[str, Any]:
"""Get Ollama configuration."""
@@ -358,7 +459,7 @@ def get_preference_memory_config() -> dict[str, Any]:
return {
"backend": "pref_text",
"config": {
"extractor_llm": APIConfig.get_memreader_config(),
"extractor_llm": APIConfig.get_preference_extractor_llm_config(),
"vector_db": {
"backend": "milvus",
"config": APIConfig.get_milvus_config(),
@@ -812,6 +913,10 @@ def get_product_default_config() -> dict[str, Any]:
"backend": reader_config["backend"],
"config": {
"llm": APIConfig.get_memreader_config(),
+# General LLM for non-chat/doc tasks (hallucination filter, rewrite, merge, etc.)
+"general_llm": APIConfig.get_memreader_general_llm_config(),
+# Image parser LLM (requires vision model)
+"image_parser_llm": APIConfig.get_image_parser_llm_config(),
"embedder": APIConfig.get_embedder_config(),
"chunker": {
"backend": "sentence",
@@ -934,6 +1039,10 @@ def create_user_config(user_name: str, user_id: str) -> tuple["MOSConfig", "Gene
"backend": reader_config["backend"],
"config": {
"llm": APIConfig.get_memreader_config(),
+# General LLM for non-chat/doc tasks (hallucination filter, rewrite, merge, etc.)
+"general_llm": APIConfig.get_memreader_general_llm_config(),
+# Image parser LLM (requires vision model)
+"image_parser_llm": APIConfig.get_image_parser_llm_config(),
"embedder": APIConfig.get_embedder_config(),
"chunker": {
"backend": "sentence",
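Taken together, these getters give each MemReader task an independently overridable endpoint. A minimal sketch of the resolution order, assuming `memos.api.config.APIConfig` is importable and no other `MEMREADER_*`/`IMAGE_PARSER_*`/`PREFERENCE_EXTRACTOR_*` variables are set in the environment:

```python
import os
from unittest import mock

from memos.api.config import APIConfig

# With only MEMREADER_GENERAL_MODEL set, the image parser and the preference
# extractor both fall through to the general LLM config.
with mock.patch.dict(os.environ, {"MEMREADER_GENERAL_MODEL": "gpt-4o-mini"}):
    general = APIConfig.get_memreader_general_llm_config()
    assert general["config"]["model_name_or_path"] == "gpt-4o-mini"
    assert APIConfig.get_image_parser_llm_config() == general
    assert APIConfig.get_preference_extractor_llm_config() == general
```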
23 changes: 23 additions & 0 deletions src/memos/api/handlers/config_builders.py
@@ -201,3 +201,26 @@ def build_nli_client_config() -> dict[str, Any]:
NLI client configuration dictionary
"""
return APIConfig.get_nli_config()


+def build_general_llm_config() -> dict[str, Any]:
+    """
+    Build general LLM configuration for non-chat/doc tasks.
+
+    Used for: hallucination filter, memory rewrite, memory merge,
+    tool trajectory extraction, skill memory extraction.
+
+    Returns:
+        Validated general LLM configuration dictionary
+    """
+    return LLMConfigFactory.model_validate(APIConfig.get_memreader_general_llm_config())
+
+
+def build_image_parser_llm_config() -> dict[str, Any]:
+    """
+    Build image parser LLM configuration (requires vision model).
+
+    Returns:
+        Validated image parser LLM configuration dictionary
+    """
+    return LLMConfigFactory.model_validate(APIConfig.get_image_parser_llm_config())
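The builders wrap the `APIConfig` getters in `LLMConfigFactory` validation, so a malformed env-derived config fails at startup rather than mid-request. A sketch of the intended call site (import path per this PR):

```python
from memos.api.handlers.config_builders import (
    build_general_llm_config,
    build_image_parser_llm_config,
)

# Each call raises pydantic.ValidationError immediately if the environment
# yields an invalid config (e.g. an unrecognized backend name).
general_llm = build_general_llm_config()
image_llm = build_image_parser_llm_config()
```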
41 changes: 40 additions & 1 deletion src/memos/api/handlers/memory_handler.py
@@ -320,7 +320,10 @@ def handle_delete_memories(delete_mem_req: DeleteMemoryRequest, naive_mem_cube:
Now unified to delete from text_mem only (includes preferences).
"""
logger.info(
f"[Delete memory request] writable_cube_ids: {delete_mem_req.writable_cube_ids}, memory_ids: {delete_mem_req.memory_ids}"
"[Delete memory request] writable_cube_ids: %s, memory_ids: %s, auto_cleanup_working: %s",
delete_mem_req.writable_cube_ids,
delete_mem_req.memory_ids,
getattr(delete_mem_req, "auto_cleanup_working", False),
)
# Validate that only one of memory_ids, file_ids, or filter is provided
provided_params = [
@@ -335,6 +338,31 @@
)

try:
+        working_ids_to_delete: set[str] = set()
+        # When deleting by explicit memory_ids and auto_cleanup_working is enabled,
+        # collect related WorkingMemory ids from working_binding
+        if delete_mem_req.memory_ids is not None and getattr(
+            delete_mem_req, "auto_cleanup_working", False
+        ):
+            try:
+                memories = naive_mem_cube.text_mem.get_by_ids(memory_ids=delete_mem_req.memory_ids)
+            except Exception as e:
+                logger.warning("Failed to fetch memories before delete for working cleanup: %s", e)
+                memories = []
+
+            if memories:
+                import re
+
+                pattern = re.compile(r"\[working_binding:([0-9a-fA-F-]{36})\]")
+                for mem in memories:
+                    metadata = mem.get("metadata") or {}
+                    bg = metadata.get("background") or ""
+                    if not isinstance(bg, str):
+                        continue
+                    match = pattern.search(bg)
+                    if match:
+                        working_ids_to_delete.add(match.group(1))

if delete_mem_req.memory_ids is not None:
# Unified deletion from text_mem (includes preferences)
naive_mem_cube.text_mem.delete_by_memory_ids(delete_mem_req.memory_ids)
@@ -344,6 +372,17 @@ def handle_delete_memories(delete_mem_req: DeleteMemoryRequest, naive_mem_cube:
)
elif delete_mem_req.filter is not None:
naive_mem_cube.text_mem.delete_by_filter(filter=delete_mem_req.filter)

+        # After main deletion, optionally clean up related WorkingMemory nodes.
+        if working_ids_to_delete:
+            try:
+                logger.info(
+                    "Auto-cleanup WorkingMemory nodes after delete, count=%d",
+                    len(working_ids_to_delete),
+                )
+                naive_mem_cube.text_mem.delete_by_memory_ids(list(working_ids_to_delete))
+            except Exception as e:
+                logger.warning("Failed to auto-cleanup WorkingMemory nodes: %s; skipping", e)
except Exception as e:
logger.error(f"Failed to delete memories: {e}", exc_info=True)
return DeleteMemoryResponse(
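The cleanup path hinges on the `[working_binding:<uuid>]` marker embedded in a memory's `background` metadata. A self-contained sketch of the extraction (the background string is illustrative):

```python
import re

# Same pattern as in handle_delete_memories: exactly 36 hex-or-dash characters.
pattern = re.compile(r"\[working_binding:([0-9a-fA-F-]{36})\]")

background = "Imported from session [working_binding:123e4567-e89b-12d3-a456-426614174000]"
match = pattern.search(background)
assert match and match.group(1) == "123e4567-e89b-12d3-a456-426614174000"
```

Note the character class accepts any arrangement of hex digits and dashes, so it tolerates uppercase UUIDs but does not strictly validate the 8-4-4-4-12 grouping.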
19 changes: 14 additions & 5 deletions src/memos/api/handlers/suggestion_handler.py
@@ -17,28 +17,37 @@
SUGGESTION_QUERY_PROMPT_EN,
SUGGESTION_QUERY_PROMPT_ZH,
)
-from memos.types import MessageList
+from memos.types import MessageList, MessagesType


logger = get_logger(__name__)


def _get_further_suggestion(
llm: Any,
-message: MessageList,
+message: MessageList | str,
) -> list[str]:
"""
Get further suggestion based on recent dialogue.

Args:
llm: LLM instance for generating suggestions
-message: Recent chat messages
+message: Recent chat messages (can be a list of message dicts or a plain string)

Returns:
List of suggestion queries
"""
try:
-dialogue_info = "\n".join([f"{msg['role']}: {msg['content']}" for msg in message[-2:]])
+        if isinstance(message, str):
+            dialogue_info = message
+        else:
+            dialogue_info = "\n".join(
+                [
+                    f"{msg['role']}: {msg['content']}"
+                    for msg in message[-2:]
+                    if isinstance(msg, dict)
+                ]
+            )
further_suggestion_prompt = FURTHER_SUGGESTION_PROMPT.format(dialogue=dialogue_info)
message_list = [{"role": "system", "content": further_suggestion_prompt}]
response = llm.generate(message_list)
@@ -53,7 +62,7 @@ def _get_further_suggestion(
def handle_get_suggestion_queries(
user_id: str,
language: str,
-message: MessageList | None,
+message: MessagesType | None,
llm: Any,
naive_mem_cube: Any,
) -> SuggestionResponse:
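With the widened signature, `_get_further_suggestion` accepts either a pre-formatted dialogue string or a message list, and non-dict entries are skipped rather than raising. A sketch of both call shapes against a stub LLM (the stub and its reply format are assumptions, not part of this PR; the handler's try/except returns an empty list if parsing fails):

```python
from memos.api.handlers.suggestion_handler import _get_further_suggestion

class StubLLM:
    def generate(self, messages):
        # Assumes the handler parses the reply as a JSON list of queries.
        return '["What formats are supported?", "Can I schedule exports?"]'

llm = StubLLM()

# Shape 1: a plain string is used as the dialogue verbatim.
_get_further_suggestion(llm, "user: how do I export my memos?")

# Shape 2: only the last two dict entries are rendered into the prompt.
_get_further_suggestion(llm, [
    {"role": "user", "content": "how do I export my memos?"},
    {"role": "assistant", "content": "Use the export endpoint."},
])
```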
7 changes: 7 additions & 0 deletions src/memos/api/product_models.py
@@ -858,6 +858,13 @@ class DeleteMemoryRequest(BaseRequest):
memory_ids: list[str] | None = Field(None, description="Memory IDs")
file_ids: list[str] | None = Field(None, description="File IDs")
filter: dict[str, Any] | None = Field(None, description="Filter for the memory")
+    auto_cleanup_working: bool | None = Field(
+        False,
+        description=(
+            "(Internal) Whether to automatically delete related WorkingMemory nodes "
+            "based on working_binding metadata when deleting by memory_ids."
+        ),
+    )


class SuggestionRequest(BaseRequest):
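A hypothetical request payload exercising the new flag; only deletions by `memory_ids` consult it, and any fields required by `BaseRequest` (user/cube identifiers) are omitted here for brevity:

```python
payload = {
    "memory_ids": ["123e4567-e89b-12d3-a456-426614174000"],
    # Also drop WorkingMemory nodes bound via working_binding metadata.
    "auto_cleanup_working": True,
}
```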
13 changes: 12 additions & 1 deletion src/memos/configs/mem_reader.py
@@ -24,7 +24,18 @@ def parse_datetime(cls, value):
return datetime.fromisoformat(value.replace("Z", "+00:00"))
return value

-llm: LLMConfigFactory = Field(..., description="LLM configuration for the MemReader")
+    llm: LLMConfigFactory = Field(
+        ..., description="LLM configuration for chat/doc memory extraction (fine-tuned model)"
+    )
+    general_llm: LLMConfigFactory | None = Field(
+        default=None,
+        description="General LLM for non-chat/doc tasks: hallucination filter, memory rewrite, "
+        "memory merge, tool trajectory, skill memory. Falls back to main llm if not set.",
+    )
+    image_parser_llm: LLMConfigFactory | None = Field(
+        default=None,
+        description="Vision LLM for image parsing. Falls back to general_llm if not set.",
+    )
embedder: EmbedderConfigFactory = Field(
..., description="Embedder configuration for the MemReader"
)
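A sketch of a reader config exercising the new optional fields. Model names and endpoints are placeholders, `LLMConfigFactory` validation may require more fields than shown, and both new keys can simply be omitted to keep the old single-LLM behavior:

```python
reader_config = {
    "llm": {  # fine-tuned extractor for chat/doc memories
        "backend": "openai",
        "config": {"model_name_or_path": "memreader-ft", "api_key": "EMPTY",
                   "api_base": "http://localhost:8088/v1"},
    },
    "general_llm": {  # rewrite/merge/filter tasks on a general-purpose model
        "backend": "openai",
        "config": {"model_name_or_path": "gpt-4o-mini", "api_key": "sk-...",
                   "api_base": "https://api.openai.com/v1"},
    },
    # "image_parser_llm" omitted -> falls back to general_llm
}
```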