diff --git a/.gitignore b/.gitignore index 03906ea9..b18cdf44 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,9 @@ # Python bytecode files __pycache__/ *.pyc + +# Project-local easy-memory logs +/easy-memory/ + +# Runtime-generated logs inside the experimental easy-memory skill +skills/.experimental/easy-memory/logs/ diff --git a/skills/.experimental/easy-memory/LICENSE.txt b/skills/.experimental/easy-memory/LICENSE.txt new file mode 100644 index 00000000..900a0039 --- /dev/null +++ b/skills/.experimental/easy-memory/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 memeda230606@gmail.com + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/skills/.experimental/easy-memory/SKILL.md b/skills/.experimental/easy-memory/SKILL.md new file mode 100644 index 00000000..d107edfd --- /dev/null +++ b/skills/.experimental/easy-memory/SKILL.md @@ -0,0 +1,250 @@ +--- +name: easy-memory +description: Project-local memory logging for history/notes stored as YYYY-MM-DD.log files in ./easy-memory relative to the current working directory. Use when you must read today's memory, search memory by keywords (English preferred), append structured log entries with IDs (English preferred; UTF-8 accepted), or update/delete entries by ID. Logs always go to the working directory even if the skill is installed globally. +--- + +# Easy Memory + +## Overview + +Maintain project-local memory logs inside the ./easy-memory directory under the current working directory. Each day uses one log file named `YYYY-MM-DD.log`. + +## Installation And Environment Adapters + +The canonical source package for this skill must stay compatible with the upstream `openai/skills` repository layout: +- `SKILL.md` +- `agents/openai.yaml` +- `scripts/` +- `references/` +- `assets/` + +Do not make Codex-specific, Claude Code-specific, or other host-specific directories a required part of the canonical source tree for this skill. + +If an automated installer detects a host environment such as Codex or Claude Code, it may generate or supplement local adapter files and directories during installation so long as: +- the canonical source package remains upstream-compatible, +- generated host-specific files are treated as installer-managed local artifacts rather than source-of-truth skill content, +- secrets and user-specific runtime settings remain outside the tracked skill package. + +Read `references/installer-environments.md` before adding installer-specific behavior or host-specific adapter files. 
+ +## Initialization (must follow) + +Run initialization once per project before any other scripts when: +- `./easy-memory` does not exist, or +- `./easy-memory/init.log` is missing. + +Run: + +``` +python3 scripts/init_memory.py +``` + +Initialization behavior: +- Create `./easy-memory` if needed. +- Locate `AGENTS.MD` in the project root (current working directory). If missing, create it. +- If `AGENTS.MD` exists, append a rules block to the end: + ``` + ## easy-memory rules + - At the start of the current session (before the first task), use the `easy-memory` skill and follow all rules and constraints in its `SKILL.md`. + - Only re-run memory read/search when necessary for the task. + ``` +- Create `./easy-memory/init.log` containing the same rules block and a `Date: YYYY-MM-DD` line at the end. +- If `init.log` already exists, initialization is skipped and `AGENTS.MD` is not modified. + +If you need the absolute usage path for easy-memory scripts, use the directory +containing this `SKILL.md` (the `scripts/` folder sits alongside it). Avoid +persisting absolute paths in project `AGENTS.MD` because different environments +may maintain the same project. + +All other scripts require `init.log` to exist and will exit if initialization has not been run. + +## Mandatory workflow (must follow) + +1. At the start of the current session (before the first task), run `scripts/read_today_log.py` to load the full log for today. +2. At the start of the current session (before the first task), run `scripts/search_memory.py` with English-preferred keywords for the session/task. Only repeat steps 1-2 when necessary for the task. Choose `--max-results` based on task complexity (this is the memory search depth). +3. Before finishing or submitting any task, append a new entry with `scripts/write_memory.py` following the log rules below. 
When the task is tied to concrete files, pages, or documents, also store project-relative local paths when the targets are inside the current working directory, and store absolute local paths only when the targets are outside the project; highly related URLs/document addresses may also be stored so they can be reopened quickly. +4. Log entries should be written in English when possible; UTF-8 is accepted. +5. `read_today_log.py` and `search_memory.py` must receive a required `--task-context` argument containing the user's question and problem context. Unless a dedicated memory-management agent is enabled, this argument is reserved for future preprocessing and must not alter the default read/search results. + +## Remote repository alignment gate (must follow) + +Condition precedent. Prior to executing `scripts/write_memory.py`, the agent shall determine, with reasonable diligence, whether any Remote Alignment Operation is expected to occur after the time of writing within the same task or session. + +For purposes of this skill, a "Remote Alignment Operation" means any action that publishes, synchronizes, or otherwise updates a remote code repository or mirror, including but not limited to `git push`, `svn commit`, `hg push`, and any API- or UI-based upload of code changes to Git-, SVN-, or similar systems. + +If a Remote Alignment Operation is expected or cannot be reasonably ruled out, the agent shall, before initiating any Repository-Change Operation, write memory in accordance with this skill and shall not perform any Repository-Change Operation until the memory entry has been written. + +For purposes of this skill, a "Repository-Change Operation" includes any action that modifies tracked files or repository history, including edits to tracked files, staging, commits, amend/merge/rebase/cherry-pick, and related history-rewriting actions. 
+ +For avoidance of doubt, if no Remote Alignment Operation is expected after the time of writing and this can be reasonably confirmed, the agent may proceed with the ordinary timing for memory writing, subject to the Mandatory workflow above. + +## Log entry format + +Each entry is a single line and must end with a timestamp: + +``` +[ID:<unique-id>] [REF:<level>] [FACT:<true|false>] [PATHS:<json-array>] [TIME:YYYY-MM-DD:HH:MM] +``` + +Rules: +- Log file name must be `YYYY-MM-DD.log` and use the current date only. +- If today's log file does not exist, create it; otherwise append to the end. +- Entries should be written in English when possible; UTF-8 is accepted. +- The timestamp must be the final token of the line and must be accurate to minutes. +- Each entry must include a unique ID, a reference level, and a factual flag. +- `PATHS` is optional. When present, it must be a JSON array of objects like `{"id":"","path":"","directory":"","resource_type":""}`. +- Every stored related reference must use a unique related resource ID. +- For `resource_type:"local_path"` inside the current working directory, `path` must be stored as a project-relative path, `directory` must be the corresponding project-relative parent directory (or `.` when the parent is the project root), and `path_format` must be `project_relative`. +- For `resource_type:"local_path"` outside the current working directory, `path` must be an absolute local path, `directory` must be the absolute parent directory (or the directory itself if the stored target is a directory), `path_format` must be `absolute`, and `system_hint` should record a brief host hint such as OS, CPU architecture, and short hostname. +- For `resource_type:"url"`, `path` must be a URL/document address and `directory` must be the derived parent/container URL so the agent can reopen related locations quickly. + +Compatibility naming note: +- `PATHS` remains the historical field name in the on-disk log format for backward compatibility. 
+- `path_id` remains the historical field name inside each stored object, but it should now be interpreted as the unique ID of a related resource, not only a local filesystem path. +- `--related-path`, `--path-update`, and `--path-clear` also retain their historical names for CLI compatibility, even though the stored target may now be either a local path or a URL/document address. +- See `references/compatibility-notes.md` for the consolidated compatibility explanation. + +## Scripts + +### Initialize memory + +``` +python3 scripts/init_memory.py +``` + +Runs one-time initialization to create `AGENTS.MD` rules and `./easy-memory/init.log`. + +### Read today's log + +``` +python3 scripts/read_today_log.py --task-context "User question and problem context" +``` + +Reads the full log for the current date. +When an entry includes `PATHS` metadata, the output must also return the related resource IDs, related references, resource types, and container values in a readable form. +Older entries without `PATHS` metadata must remain readable without errors. +`--task-context` is required. +- When the memory-management agent is not enabled, the script should only validate that it is non-empty and then ignore it. +- When the memory-management agent is enabled and returns a valid response, the script may return only the retained task-relevant memory blocks in their original format, followed by a final `[SUMMARY]` line. +- If the agent fails or returns unusable output, the script must fall back to the raw log output. + +### Search memory + +``` +python3 scripts/search_memory.py --max-results 5 --task-context "User question and problem context" +``` + +Searches all `.log` files in the ./easy-memory directory under the current working directory. Keywords should be English; UTF-8 is accepted. Default `--max-results` is 5. 
+Results are prioritized in this order: +- Factual entries (`FACT:true`) first +- Higher reference level first (`REF:critical` > `high` > `medium` > `low`, or higher numeric values) +- Newer timestamps first +When a matched entry includes `PATHS` metadata, the output must also return the related resource IDs, related references, resource types, and container values in a readable form. +Older entries without `PATHS` metadata must remain searchable without errors. +`--task-context` is required. +- When the memory-management agent is not enabled, the script should only validate that it is non-empty and then ignore it. +- When the memory-management agent is enabled and returns a valid response, the script may return only the retained task-relevant search blocks in their original format, followed by a final `[SUMMARY]` line. +- If the agent fails or returns unusable output, the script must fall back to the raw search output. + +### Write memory + +``` +python3 scripts/write_memory.py --content "..." --factual true --ref-level medium --related-path skills/.experimental/easy-memory/scripts/write_memory.py --related-path /opt/shared/specs/memory-agent.md --related-path https://example.com/docs/memory-agent +``` + +Appends a new entry to today's log. Content should be English and single-line; UTF-8 is accepted. The script generates the unique ID and timestamp. + +Write-memory instructions: +- Use `--related-path` for the current file, related directory, or any highly related URL/document address that should be reopened quickly later. Pass the option multiple times for multiple references. +- Every `--related-path` value must be either a project-local path, an external absolute local path, or a supported URL/document address. +- The script stores project-local targets as project-relative paths. It stores external local targets as absolute paths with `path_format:"absolute"` plus a brief `system_hint`. 
+- The script stores each related reference with its own unique related resource ID, resource type, derived container string, and any needed path-format metadata. +- If no file, page, or document is materially related to the memory entry, you may omit `--related-path`. + +### Update memory + +``` +python3 scripts/update_memory.py --id <entry-id> --content "..." --ref-level high --factual false +``` + +Updates the entry matching the ID across all logs. The timestamp is refreshed to the current time. + +Use update when: +- New factual findings contradict older memory entries (especially results from recent searches). +- The latest task outcomes refine or correct existing memory. + +Update-memory instructions: +- If the related files/pages/documents changed substantially, replace the full set with repeated `--related-path`. +- If all stored related references are stale, clear them with `--clear-related-paths`. +- If one stored path or URL is no longer valid or its relevance has dropped, overwrite that specific related reference by ID with `--path-update <path-id>=project/relative/path`, `--path-update <path-id>=/new/absolute/path`, or `--path-update <path-id>=https://new.example/doc`, or clear it with `--path-clear <path-id>`. +- When updating related reference metadata, keep only files, pages, or documents that remain highly relevant to the updated memory content. + +### Delete memory + +``` +python3 scripts/delete_memory.py --id <entry-id> +``` + +Deletes the entry matching the ID across all logs. + +Use delete when: +- Older memory entries are no longer valuable or are misleading. +- A memory entry conflicts with verified facts and should be removed instead of updated. + +### Smoke test memory-agent + +``` +python3 scripts/smoke_test_memory_agent.py --task-context "Smoke test for the current memory-agent configuration" +``` + +Runs a small end-to-end verification for the current project-local memory-agent setup. 
+This script is stricter than a connectivity check: +- it requires the current project-local `./easy-memory/agent-config.json` to exist, +- it runs both `search_memory.py` and `read_today_log.py` from the same working directory, +- it expects both commands to return agent-filtered plain-text output ending in a `[SUMMARY]` line rather than raw fallback output. + +The default search keywords are `easy-memory`, `memory-agent`, and `codex`. +Override them with repeated `--search-keyword` arguments when validating a different project or provider. +If you want the smoke test to fail whenever the shared installation-directory failure log grows during the run, add `--strict-no-new-failures`. +If you want the final smoke-test report written to disk as well as printed to stdout, add `--json-output-file <report-path>`. +If you want successful runs to stay silent on stdout and rely only on the report file, add `--quiet` together with `--json-output-file`. + +## Log location rule + +Logs are always stored under `./easy-memory` relative to the directory where you run the scripts. The skill can be installed globally; logs never go to the install directory. + +## Installer Notes + +- Installer-facing environment adaptation rules live in `references/installer-environments.md`. +- Future memory-management agent integrations must keep the upstream OpenAI skill layout as the canonical package shape. +- Environment-specific adapter files may be generated by installers after install, but the tracked skill package must remain portable without them. +- The recommended project-local config file for future memory-agent runtime settings is `./easy-memory/agent-config.json`. +- Environment variables should override the local config file so machine-specific or temporary values do not require rewriting project-local state. +- A canonical local config example is available at `assets/examples/agent-config.example.json`. 
+- In Codex environments, the preferred default provider is `codex_exec`, which uses the installed `codex` CLI instead of direct HTTP model calls. +- The default Codex provider model should be `gpt-5.3-codex-spark`. +- `codex_service_tier` should default to `fast`, and `codex_reasoning_effort` should default to `medium`, while both remain locally configurable. +- `codex_profile` and `codex_binary` may also be provided for host-specific setups. +- For local OpenAI-compatible runtimes, `api_key` may be omitted when the endpoint does not require authentication. +- `api_style` may be used to select `codex_exec`, `openai_chat_completions`, or `ollama_native_chat`. +- `disable_thinking` may be used to request `think:false` when `api_style` is `ollama_native_chat` and the selected Ollama model supports thinking. +- If agent mode is enabled but any agent-side error occurs, including network errors, timeouts, protocol/schema problems, or unexpected runtime exceptions, the scripts must fall back to the same raw output they would produce with agent mode disabled. +- When such an agent-side fallback happens, the scripts should also append a diagnostic error record containing the full available response content to a runtime-generated error log in the installed skill directory, so cross-project model compatibility issues can be audited and corrected later. +- Provider-specific compatibility notes and dated benchmark snapshots may be stored in `references/` as informational documents so long as they do not redefine the canonical protocol. The current OpenRouter evaluation snapshot is in `references/openrouter-tested-models.md`. 
+ +## Future Memory-Agent Contract + +The canonical source files for future memory-management agent support are: +- `agents/openai.yaml` +- `references/openai-compatible-api.md` +- `references/response-schema.md` +- `references/memory-agent-system-prompt.md` +- `references/script-output-schema.md` + +These files define UI metadata, configuration boundaries, the lightweight plain-text filtering contract, and the canonical preprocessing prompt. Runtime implementations in `scripts/` should be added only after these canonical files are stable enough to implement against. + +## Reminder to repeat each session + +- Log entries should be written in English when possible; UTF-8 is accepted. +- At the start of the current session (before the first task), run `scripts/read_today_log.py` and `scripts/search_memory.py` with English-preferred keywords; adjust `--max-results` based on task complexity. Only repeat when necessary. +- Before finishing or submitting any task, write a log entry using `scripts/write_memory.py` following the rules above. diff --git a/skills/.experimental/easy-memory/agents/openai.yaml b/skills/.experimental/easy-memory/agents/openai.yaml new file mode 100644 index 00000000..18541097 --- /dev/null +++ b/skills/.experimental/easy-memory/agents/openai.yaml @@ -0,0 +1,7 @@ +interface: + display_name: "Easy Memory" + short_description: "Project-local memory logs with optional agent preprocessing" + default_prompt: "Use $easy-memory to read, search, and maintain project-local memory logs for the current project." 
+ +policy: + allow_implicit_invocation: true diff --git a/skills/.experimental/easy-memory/assets/examples/agent-config.example.json b/skills/.experimental/easy-memory/assets/examples/agent-config.example.json new file mode 100644 index 00000000..6dab3b12 --- /dev/null +++ b/skills/.experimental/easy-memory/assets/examples/agent-config.example.json @@ -0,0 +1,9 @@ +{ + "enabled": true, + "api_style": "codex_exec", + "model": "gpt-5.3-codex-spark", + "codex_service_tier": "fast", + "codex_reasoning_effort": "medium", + "timeout_seconds": 120, + "system_prompt_file": "./easy-memory/custom-memory-agent-prompt.md" +} diff --git a/skills/.experimental/easy-memory/assets/examples/request.example.json b/skills/.experimental/easy-memory/assets/examples/request.example.json new file mode 100644 index 00000000..570a22fe --- /dev/null +++ b/skills/.experimental/easy-memory/assets/examples/request.example.json @@ -0,0 +1,57 @@ +{ + "schema_version": "easy_memory_agent_request_v2", + "mode": "search_memory", + "task_context": "用户正在排查当前项目中的记忆管理 agent 配置问题,需要找出与配置加载和路径返回相关的历史记忆。", + "cwd": "/Users/example/workspace/project-a", + "log_dir": "/Users/example/workspace/project-a/easy-memory", + "keywords": [ + "memory-agent", + "config", + "path" + ], + "max_results": 5, + "entries": [ + { + "entry_id": "entry-001", + "log_file": "2026-03-13.log", + "ref_level": "high", + "factual": true, + "content": "Added project-local related-resource metadata support and required task-context for read/search flows.", + "timestamp": "2026-03-13:16:00", + "paths": [ + { + "path_id": "path-001", + "resource_type": "local_path", + "path": "src/memory_agent_config.py", + "directory": "src", + "path_format": "project_relative" + }, + { + "path_id": "path-002", + "resource_type": "local_path", + "path": "/opt/shared/docs/memory-agent.md", + "directory": "/opt/shared/docs", + "path_format": "absolute", + "system_hint": "Darwin arm64 @example-host" + }, + { + "path_id": "path-003", + "resource_type": 
"url", + "path": "https://example.com/docs/easy-memory", + "directory": "https://example.com/docs" + } + ], + "rendered_block": "2026-03-13.log: [ID:entry-001] [REF:high] [FACT:true] Added project-local related-resource metadata support and required task-context for read/search flows. [PATHS:[{\"id\":\"path-001\",\"path\":\"src/memory_agent_config.py\",\"directory\":\"src\",\"resource_type\":\"local_path\",\"path_format\":\"project_relative\"},{\"id\":\"path-002\",\"path\":\"/opt/shared/docs/memory-agent.md\",\"directory\":\"/opt/shared/docs\",\"resource_type\":\"local_path\",\"path_format\":\"absolute\",\"system_hint\":\"Darwin arm64 @example-host\"},{\"id\":\"path-003\",\"path\":\"https://example.com/docs/easy-memory\",\"directory\":\"https://example.com/docs\",\"resource_type\":\"url\"}]] [TIME:2026-03-13:16:00]\n Related resource ID path-001 [local_path, project_relative]: src/memory_agent_config.py (container: src)\n Related resource ID path-002 [local_path, absolute]: /opt/shared/docs/memory-agent.md (container: /opt/shared/docs; system: Darwin arm64 @example-host)\n Related resource ID path-003 [url]: https://example.com/docs/easy-memory (container: https://example.com/docs)" + }, + { + "entry_id": "entry-002", + "log_file": "2026-03-12.log", + "ref_level": "medium", + "factual": true, + "content": "Legacy entry without stored related-resource metadata remains compatible with read/search output.", + "timestamp": "2026-03-12:11:20", + "paths": [], + "rendered_block": "2026-03-12.log: [ID:entry-002] [REF:medium] [FACT:true] Legacy entry without stored related-resource metadata remains compatible with read/search output. 
[TIME:2026-03-12:11:20]" + } + ] +} diff --git a/skills/.experimental/easy-memory/assets/examples/response.example.txt b/skills/.experimental/easy-memory/assets/examples/response.example.txt new file mode 100644 index 00000000..b707c189 --- /dev/null +++ b/skills/.experimental/easy-memory/assets/examples/response.example.txt @@ -0,0 +1,6 @@ +2026-03-13.log: [ID:entry-001] [REF:high] [FACT:true] Added project-local related-resource metadata support and required task-context for read/search flows. [PATHS:[{"id":"path-001","path":"src/memory_agent_config.py","directory":"src","resource_type":"local_path","path_format":"project_relative"},{"id":"path-002","path":"/opt/shared/docs/memory-agent.md","directory":"/opt/shared/docs","resource_type":"local_path","path_format":"absolute","system_hint":"Darwin arm64 @example-host"},{"id":"path-003","path":"https://example.com/docs/easy-memory","directory":"https://example.com/docs","resource_type":"url"}]] [TIME:2026-03-13:16:00] + Related resource ID path-001 [local_path, project_relative]: src/memory_agent_config.py (container: src) + Related resource ID path-002 [local_path, absolute]: /opt/shared/docs/memory-agent.md (container: /opt/shared/docs; system: Darwin arm64 @example-host) + Related resource ID path-003 [url]: https://example.com/docs/easy-memory (container: https://example.com/docs) + +[SUMMARY] 过滤无关记忆后,仅保留与 memory-agent 配置和相关资源返回有关的结果。 diff --git a/skills/.experimental/easy-memory/assets/examples/script-output.example.txt b/skills/.experimental/easy-memory/assets/examples/script-output.example.txt new file mode 100644 index 00000000..b3cfb2f8 --- /dev/null +++ b/skills/.experimental/easy-memory/assets/examples/script-output.example.txt @@ -0,0 +1,6 @@ +2026-03-13.log: [ID:entry-001] [REF:high] [FACT:true] Added stable plain-text filtering output for memory-agent responses. 
[PATHS:[{"id":"path-001","path":"skills/.experimental/easy-memory/scripts/memory_agent_client.py","directory":"skills/.experimental/easy-memory/scripts","resource_type":"local_path","path_format":"project_relative"},{"id":"path-002","path":"/opt/shared/docs/memory-agent.md","directory":"/opt/shared/docs","resource_type":"local_path","path_format":"absolute","system_hint":"Darwin arm64 @example-host"},{"id":"path-003","path":"https://example.com/docs/memory-agent","directory":"https://example.com/docs","resource_type":"url"}]] [TIME:2026-03-13:16:40] + Related resource ID path-001 [local_path, project_relative]: skills/.experimental/easy-memory/scripts/memory_agent_client.py (container: skills/.experimental/easy-memory/scripts) + Related resource ID path-002 [local_path, absolute]: /opt/shared/docs/memory-agent.md (container: /opt/shared/docs; system: Darwin arm64 @example-host) + Related resource ID path-003 [url]: https://example.com/docs/memory-agent (container: https://example.com/docs) + +[SUMMARY] 已移除无关内容,仅保留与当前记忆 agent 输出行为有关的完整记忆。 diff --git a/skills/.experimental/easy-memory/references/compatibility-notes.md b/skills/.experimental/easy-memory/references/compatibility-notes.md new file mode 100644 index 00000000..addc059c --- /dev/null +++ b/skills/.experimental/easy-memory/references/compatibility-notes.md @@ -0,0 +1,89 @@ +# Compatibility Notes + +This document consolidates the naming-compatibility rules for `easy-memory` +related-resource metadata. + +## Purpose + +`easy-memory` originally stored only local filesystem paths. +Later revisions expanded the same metadata channel so it can also store: +- project-relative local filesystem paths for resources inside the current + working directory +- absolute local filesystem paths for resources outside the current working + directory +- URLs +- document addresses + +For backward compatibility, the historical field names and CLI option names were +kept stable even though their meaning is now broader. 
+ +## Historical Names And Current Meaning + +- `PATHS` + - Historical on-disk log field name + - Current meaning: array of related resource objects +- `path_id` + - Historical object field name + - Current meaning: unique related resource ID +- `path_ids` + - Historical agent-response field name from the older structured-response contract + - Current meaning when encountered: list of related resource IDs +- `--related-path` + - Historical CLI option name + - Current meaning: project-local path, external absolute path, or + URL/document address +- `--path-update` + - Historical CLI option name + - Current meaning: replace one related resource by ID +- `--path-clear` + - Historical CLI option name + - Current meaning: clear one related resource by ID while preserving the ID + +## Stable Compatibility Rules + +- Existing logs that use `PATHS` remain valid. +- Existing payloads that use `path_id` and older structured responses that use + `path_ids` remain valid. +- Existing automation or tooling that calls `--related-path`, `--path-update`, or + `--path-clear` does not need renaming. +- New implementations should interpret these historical names using the broader + related-resource meaning. +- Older local-path entries that stored absolute paths remain valid and should be + interpreted as `path_format:"absolute"` when no explicit `path_format` is + present. 
+ +## Resource Interpretation + +Each related resource object should be interpreted using `resource_type`: +- `local_path` + - `path_format:"project_relative"` means `path` is relative to the current + working directory and `directory` is the corresponding relative container + - `path_format:"absolute"` means `path` is an absolute local filesystem path + and `directory` is the absolute parent directory, or the directory itself + if the stored target is already a directory + - `system_hint` may be present for `path_format:"absolute"` entries to record + a brief host hint for cross-machine work +- `url` + - `path` is the URL or document address + - `directory` is the derived parent or container URL + +If older metadata does not include `resource_type`, the runtime should infer it. +If older local-path metadata does not include `path_format`, the runtime should +infer `absolute` for absolute-looking local paths and `project_relative` for +relative-looking local paths. + +## Reading Strategy + +When reading `easy-memory` metadata: +- do not assume `path` means a local filesystem path +- treat `path_id` as a stable related resource identifier +- use `resource_type` to decide whether the target is local or remote + +## Source Of Truth + +This note explains naming compatibility only. 
+The canonical structural contracts remain: +- `SKILL.md` +- `references/openai-compatible-api.md` +- `references/response-schema.md` +- `references/script-output-schema.md` diff --git a/skills/.experimental/easy-memory/references/installer-environments.md b/skills/.experimental/easy-memory/references/installer-environments.md new file mode 100644 index 00000000..8d2c2884 --- /dev/null +++ b/skills/.experimental/easy-memory/references/installer-environments.md @@ -0,0 +1,88 @@ +# Installer Environment Adapters + +This reference defines how automated installers may adapt `easy-memory` to different host environments while keeping the canonical skill package compatible with the upstream `openai/skills` repository. + +## Canonical Source Of Truth + +The tracked skill package must keep the upstream OpenAI skill structure as its source of truth: + +```text +easy-memory/ +├── SKILL.md +├── agents/ +│ └── openai.yaml +├── scripts/ +├── references/ +└── assets/ +``` + +The canonical source tree must not require host-specific directories such as Codex-only or Claude Code-only layout additions in order to be considered valid. + +## What Installers May Do + +An automated installer may inspect the target environment and generate local adapter artifacts for that environment. Examples include: +- creating host-specific wrapper files that point back to the canonical skill contents, +- translating the canonical system prompt or metadata into a host-specific agent definition, +- wiring command entry points or local registration files required by the host, +- prompting the user to provide local runtime configuration such as API base URL, model ID, API key, and agent enablement flags. +- creating or updating a local project-scoped config file such as `./easy-memory/agent-config.json`. + +These installer-generated artifacts must be treated as local derived files, not as canonical source files for the upstream skill package. 
+
+## What Installers Must Not Do
+
+Installers must not:
+- require the upstream package to commit host-specific adapter directories as part of the canonical source tree,
+- store user secrets or machine-specific credentials in tracked repository files,
+- rewrite core memory semantics in `scripts/` without the user explicitly choosing a different runtime mode,
+- make Codex, Claude Code, or any other environment-specific adapter the only supported execution path.
+
+## Configuration Placement
+
+For future memory-management agent support, use this split:
+- Canonical source package:
+  - `SKILL.md` for workflow rules,
+  - `agents/openai.yaml` for Codex/OpenAI UI metadata,
+  - `references/` for API contracts, prompt sources, and response schema documentation,
+  - `assets/` for sample request/response fixtures and templates,
+  - `scripts/` for deterministic execution logic.
+- Local installer or user environment:
+  - API key,
+  - local config file path,
+  - base URL,
+  - selected model ID,
+  - environment-specific enablement toggles,
+  - generated host adapter files.
+
+## Host Mapping Guidance
+
+If the installer targets Codex:
+- keep `agents/openai.yaml` as the canonical metadata file inside the skill package,
+- avoid inventing additional required Codex-only directories beyond the upstream package layout unless the host explicitly requires local generated artifacts,
+- prefer a generated local config that uses `api_style: "codex_exec"` instead of adding an HTTP provider by default,
+- when generating a default Codex local config, prefer:
+  - `model: "gpt-5.3-codex-spark"`
+  - `codex_service_tier: "fast"`
+  - `codex_reasoning_effort: "medium"`
+  - no API key or base URL fields unless the user explicitly selects an external provider.
+ +If the installer targets Claude Code: +- prefer generating local adapter artifacts from the canonical prompt and metadata sources rather than making Claude-specific files the source of truth, +- keep generated Claude-oriented files installer-managed and replaceable. + +If the installer targets another host: +- map from the canonical package into that host's local registration mechanism, +- keep the mapping reversible and avoid embedding secrets into the installed skill package. + +## Recommended Local Config File + +For project-scoped installation flows, prefer a local config file at: +- `./easy-memory/agent-config.json` + +The installer may populate this file with non-secret defaults and prompt the user for secret values or defer those to environment variables. + +Environment variables should override the local config file so that host-specific launchers can supply temporary or machine-specific values without rewriting project-local state. + +## Packaging Rule + +If a skill is intended for one-click installation from GitHub or another installer-supported source, the repository copy should remain self-contained and portable without requiring post-clone source edits. Environment-specific supplementation should happen as a local install step, not as a divergence from the upstream-compatible source package. diff --git a/skills/.experimental/easy-memory/references/memory-agent-system-prompt.md b/skills/.experimental/easy-memory/references/memory-agent-system-prompt.md new file mode 100644 index 00000000..f62cb0c1 --- /dev/null +++ b/skills/.experimental/easy-memory/references/memory-agent-system-prompt.md @@ -0,0 +1,40 @@ +# Memory-Agent System Prompt + +Use this file as the canonical prompt source for the `easy-memory` +memory-management preprocessing agent. + +## Canonical Prompt + +```text +You are the easy-memory preprocessing agent. 
+ +Your only job is to remove memory content that is unrelated to the current +task and keep only the memories that remain relevant. + +Rules: +1. Treat the provided memory logs as source material, not as something you can + rewrite or reinterpret. +2. Use the provided task_context to decide relevance. +3. Delete or ignore every memory block that is not relevant to the current + task. +4. For every memory block that remains relevant, return it in its complete + original format exactly as provided. +5. Do not rewrite IDs, timestamps, related-resource lines, URLs, file paths, or + log-file prefixes. +6. Do not return JSON, Markdown lists, explanations, or extra commentary. +7. After the retained memory blocks, append exactly one final summary line that + starts with `[SUMMARY]`. +8. Keep the summary concise and no longer than 500 characters. +9. If no memory remains relevant after filtering, return only the `[SUMMARY]` + line. +10. The summary must reflect only the retained task-relevant memories. +``` + +## Usage Notes + +- Installers may translate this canonical prompt into host-specific adapter + files. +- The canonical source of truth remains this file, not any generated host + adapter. +- Local overrides may exist in user environment configuration, but + repository-tracked defaults should derive from this prompt. diff --git a/skills/.experimental/easy-memory/references/openai-compatible-api.md b/skills/.experimental/easy-memory/references/openai-compatible-api.md new file mode 100644 index 00000000..2a926c2c --- /dev/null +++ b/skills/.experimental/easy-memory/references/openai-compatible-api.md @@ -0,0 +1,274 @@ +# Memory-Agent Runtime Contract + +This document defines the canonical runtime contract for the optional +`easy-memory` memory-management agent. 
+ +Despite the historical file name, this reference covers: +- OpenAI-compatible HTTP providers +- Ollama native chat providers +- Codex CLI exec as the preferred host-local provider inside Codex environments + +## Scope + +This contract applies to optional preprocessing during: +- `scripts/read_today_log.py` +- `scripts/search_memory.py` + +When the memory-management agent is disabled, both scripts must preserve their +raw behavior. + +## Runtime Boundary + +`easy-memory` has two storage scopes: +- Shared skill implementation: + - installed skill files such as `scripts/`, `references/`, `assets/`, and + `agents/` +- Project-local memory data: + - `./easy-memory` under the current working directory + +The agent may read project-local memory content gathered by the scripts, but it +must not reinterpret the installation directory as the log storage directory. + +## Enablement + +The memory-management agent is optional. + +Required script interface: +- `--task-context` is always required for `read_today_log.py` and + `search_memory.py` +- when the agent is disabled, scripts must validate that `--task-context` is + non-empty and then ignore it +- when the agent is enabled, scripts may pass `--task-context` and gathered + memory blocks to the agent for filtering + +## Configuration Placement + +Canonical tracked files: +- `references/memory-agent-system-prompt.md` +- `references/openai-compatible-api.md` +- `references/response-schema.md` +- `references/script-output-schema.md` + +Local runtime configuration only: +- local config file path +- API key when the provider requires authentication +- base URL +- model ID +- enablement toggle +- timeout policy +- installer-generated host adapter files + +Secrets must not be stored in tracked repository files. 
+ +## Recommended Local Configuration Keys + +Implementations should support: +- `EASY_MEMORY_AGENT_CONFIG_FILE` +- `EASY_MEMORY_AGENT_ENABLED` +- `EASY_MEMORY_AGENT_BASE_URL` +- `EASY_MEMORY_AGENT_API_KEY` +- `EASY_MEMORY_AGENT_MODEL` +- `EASY_MEMORY_AGENT_API_STYLE` +- `EASY_MEMORY_AGENT_DISABLE_THINKING` +- `EASY_MEMORY_AGENT_TIMEOUT_SECONDS` +- `EASY_MEMORY_AGENT_SYSTEM_PROMPT_FILE` +- `EASY_MEMORY_AGENT_CODEX_BINARY` +- `EASY_MEMORY_AGENT_CODEX_PROFILE` +- `EASY_MEMORY_AGENT_CODEX_SERVICE_TIER` +- `EASY_MEMORY_AGENT_CODEX_REASONING_EFFORT` + +The default local config file path should be: +- `./easy-memory/agent-config.json` + +Recommended precedence: +1. environment variables +2. local config file +3. built-in defaults + +## Supported Provider Styles + +`api_style` should describe the transport contract used by the runtime +implementation. + +Supported values: +- `codex_exec` +- `openai_chat_completions` +- `ollama_native_chat` + +### Preferred Codex CLI Exec Provider + +Inside Codex environments, `codex_exec` is the preferred default. + +Recommended local config: + +```json +{ + "enabled": true, + "api_style": "codex_exec", + "model": "gpt-5.3-codex-spark", + "codex_service_tier": "fast", + "codex_reasoning_effort": "medium", + "timeout_seconds": 120 +} +``` + +Recommended defaults: +- model: `gpt-5.3-codex-spark` +- service tier: `fast` +- reasoning effort: `medium` +- timeout: `120` seconds + +The runtime should invoke `codex exec` in a safe, non-interactive mode: +- read-only sandbox +- ephemeral session +- output written through `--output-last-message` +- no dependency on project-local HTTP credentials + +### OpenAI-Compatible Chat Completions + +Minimum HTTP compatibility target: +- method: `POST` +- path: `/chat/completions` +- authorization: optional `Bearer ` when the provider requires it +- content type: `application/json` + +`base_url` should point to the API root, typically ending in `/v1`. 
+If the provider does not require authentication, `api_key` may be omitted or
+set to an empty string.
+
+### Optional Ollama Native Chat Extension
+
+Ollama native mode uses:
+- method: `POST`
+- path: `/api/chat`
+- authorization: optional `Bearer <token>` when a reverse proxy requires it
+- content type: `application/json`
+
+Recommended local config:
+
+```json
+{
+  "enabled": true,
+  "api_style": "ollama_native_chat",
+  "base_url": "http://127.0.0.1:11434",
+  "model": "qwen3.5:9b",
+  "disable_thinking": true,
+  "timeout_seconds": 20
+}
+```
+
+When `disable_thinking` is `true`, the runtime should send Ollama native
+`think: false`.
+
+## Request Construction
+
+The request should contain:
+- one system prompt derived from
+  `references/memory-agent-system-prompt.md` or a local override
+- one user payload carrying the preprocessing payload as JSON text
+
+The request payload should contain:
+- `schema_version`
+- `mode`
+- `task_context`
+- `cwd`
+- `log_dir`
+- `entries`
+
+For `search_memory.py`, the payload should also contain:
+- `keywords`
+- `max_results`
+
+The canonical request schema version for agent calls should be:
+- `easy_memory_agent_request_v2`
+
+Each entry object should include:
+- `entry_id`
+- `log_file` when available
+- `ref_level`
+- `factual`
+- `content`
+- `timestamp`
+- `paths`
+- `rendered_block`
+
+Each `paths` item should include:
+- `path_id`
+- `resource_type`
+- `path`
+- `directory`
+
+Each `paths` item may additionally include:
+- `path_format`
+- `system_hint`
+
+Compatibility naming note:
+- `paths` and `path_id` remain the canonical field names in request payloads
+  for backward compatibility.
+- Each item should nevertheless be interpreted as a related resource that may be
+  either a local path or a URL/document address.
+- For `resource_type:"local_path"`, `path_format:"project_relative"` means the
+  `path` and `directory` values are relative to `cwd`, while
+  `path_format:"absolute"` means they are absolute local filesystem values.
+- `system_hint` is optional and is intended for external absolute local paths so
+  cross-machine logs can be disambiguated quickly.
+
+`rendered_block` is the exact block the agent should copy back if that entry
+remains relevant.
+
+## Agent Response Contract
+
+The agent response is plain text, not strict JSON.
+
+The canonical response contract is defined by:
+- `references/response-schema.md`
+
+In practice the response should be:
+
+```text
+<retained memory blocks in their original format>
+
+[SUMMARY] <concise summary of the retained memories>
+```
+
+Runtime implementations should keep validation lightweight:
+- strip one surrounding code fence when the entire response is fenced
+- reject empty responses
+- normalize the final summary line to `[SUMMARY] ...`
+- avoid strict field-level or schema-level rejection
+
+## Successful Script Output
+
+When the memory-management agent succeeds, the scripts should print the filtered
+plain-text result directly.
+ +The canonical source of truth for that final script output contract is: +- `references/script-output-schema.md` + +The canonical example file for this final script output should live at: +- `assets/examples/script-output.example.txt` + +## Failure Behavior + +If agent configuration is missing, invalid, or the agent-side runtime fails: +- the scripts must remain usable in non-agent mode +- raw log reading and raw search behavior must still be available +- the failure must not silently rewrite or delete memory content + +This fallback rule applies to: +- network errors +- request timeouts +- provider protocol mismatches +- empty responses +- unexpected runtime exceptions in the agent-processing path + +When fallback happens because of an agent-side failure or invalid response, the +implementation should append a diagnostic record containing the full available +response content to a runtime-generated error log under the installed skill +directory, not under the project-local `./easy-memory` directory. + +## Source Of Truth Rule + +The raw memory logs remain the source of truth. +The memory-management agent is only a filtering layer and must not become the +only way to access stored memory. diff --git a/skills/.experimental/easy-memory/references/openrouter-tested-models.md b/skills/.experimental/easy-memory/references/openrouter-tested-models.md new file mode 100644 index 00000000..ba32712c --- /dev/null +++ b/skills/.experimental/easy-memory/references/openrouter-tested-models.md @@ -0,0 +1,205 @@ +# OpenRouter Tested Models + +This file is informational only. + +It records provider-specific compatibility notes and benchmark snapshots collected while testing the experimental `easy-memory` memory-agent integration. + +This file is not a protocol source of truth. 
+The canonical contract remains: +- `references/openai-compatible-api.md` +- `references/response-schema.md` +- `references/memory-agent-system-prompt.md` +- `references/script-output-schema.md` + +## Snapshot Date + +- Test date: `2026-03-13` +- Provider endpoint: `https://openrouter.ai/api/v1/chat/completions` +- Test scope: experimental `easy-memory` only +- Primary test path: `scripts/read_today_log.py` +- Historical success criterion at that time: the script returned the older + structured success block used before the lightweight plain-text filtering + contract was introduced on `2026-03-18` + +## Test Method + +All primary comparisons below used the same basic harness: +- one temporary initialized `easy-memory` project +- one factual memory entry in today's log +- agent mode enabled through `./easy-memory/agent-config.json` +- OpenRouter API key authentication +- a model-specific `--task-context` +- timeout set to `70` seconds + +The goal was not to measure model intelligence in general. +The goal was to measure practical suitability for the `easy-memory` preprocessing path, especially: +- agent-output stability for the then-current contract +- stable fallback behavior +- cost efficiency +- response latency + +## Recommended Cost-Effective Models + +The following five models were the most practical candidates in this round. + +### 1. `mistralai/mistral-nemo` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.02 / 1M tokens` + - completion: `$0.04 / 1M tokens` +- Context length: `131072` +- Structured-output support: + - `response_format` + - `structured_outputs` +- Test result: + - `read_today_log.py` succeeded + - status: `ok` + - elapsed time: about `7.15s` +- Assessment: + - Best overall value in this test set + - Strong default candidate for routine memory-agent preprocessing + +### 2. 
`meta-llama/llama-3.1-8b-instruct` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.02 / 1M tokens` + - completion: `$0.05 / 1M tokens` +- Context length: `16384` +- Structured-output support: + - `response_format` + - `structured_outputs` +- Test result: + - `read_today_log.py` succeeded + - status: `ok` + - elapsed time: about `4.02s` +- Assessment: + - Fastest successful low-cost candidate in this sweep + - Good fallback model when shorter context is acceptable + +### 3. `google/gemma-3-27b-it` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.03 / 1M tokens` + - completion: `$0.11 / 1M tokens` +- Context length: `128000` +- Structured-output support: + - `response_format` + - `structured_outputs` +- Test result: + - `read_today_log.py` succeeded + - status: `ok` + - elapsed time: about `7.26s` +- Assessment: + - Strong balance between quality, context, and cost + - Good choice when more instruction stability is desired than the smallest models typically provide + +### 4. `qwen/qwen3.5-9b` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.10 / 1M tokens` + - completion: `$0.15 / 1M tokens` +- Context length: `262144` +- Structured-output support: + - `response_format` + - `structured_outputs` +- Test result: + - `read_today_log.py` succeeded + - status: `ok` + - elapsed time: about `12.17s` +- Assessment: + - Most context headroom among the recommended candidates + - A practical choice when staying inside the Qwen family matters more than absolute cost + +### 5. 
`cohere/command-r7b-12-2024` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.0375 / 1M tokens` + - completion: `$0.15 / 1M tokens` +- Context length: `128000` +- Structured-output support: + - `response_format` + - `structured_outputs` +- Test result: + - `read_today_log.py` succeeded + - status: `ok` + - elapsed time: about `29.70s` +- Assessment: + - Functionally successful, but clearly slower than the other recommended models in this sweep + - More suitable as a compatibility backup than as the first default + +## Additional Tested Models + +The models below were tested during the same evaluation cycle but were not included in the recommended top five. + +### `qwen/qwen-turbo` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.0325 / 1M tokens` + - completion: `$0.13 / 1M tokens` +- Context length: `131072` +- Test result: + - `read_today_log.py` fallback + - observed error: OpenRouter `HTTP 500` +- Note: + - This looked like provider-side instability in this run, not a confirmed schema-compliance failure + +### `google/gemma-3-12b-it` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.04 / 1M tokens` + - completion: `$0.13 / 1M tokens` +- Context length: `131072` +- Test result: + - `read_today_log.py` fallback + - observed error: OpenRouter `HTTP 429` +- Note: + - This run was blocked by upstream rate limiting rather than a confirmed protocol mismatch + +### `openai/gpt-oss-20b` + +- OpenRouter list price on `2026-03-13`: + - prompt: `$0.03 / 1M tokens` + - completion: `$0.14 / 1M tokens` +- Context length: `131072` +- Test result: + - `read_today_log.py` fallback + - observed error: connection reset by peer +- Note: + - This run did not provide enough signal to recommend it for this workflow + +### `qwen/qwen3.5-397b-a17b` + +- This model was tested separately before the broader sweep. 
+- Earlier results: + - direct OpenRouter `chat/completions` call succeeded + - `search_memory.py` succeeded + - `read_today_log.py` initially failed because the model omitted the required `summary` field + - after narrowing the protocol and adding an explicit JSON skeleton, `read_today_log.py` also succeeded +- Note: + - It is viable for this workflow, but it was not selected as a top cost-efficient option for routine use + +## Operational Guidance + +If you need a default OpenRouter model for the experimental `easy-memory` agent path, prefer this order: + +1. `mistralai/mistral-nemo` +2. `google/gemma-3-27b-it` +3. `qwen/qwen3.5-9b` +4. `meta-llama/llama-3.1-8b-instruct` +5. `cohere/command-r7b-12-2024` + +When cost matters more than context length, start with: +- `mistralai/mistral-nemo` +- `meta-llama/llama-3.1-8b-instruct` + +When larger context matters more than lowest cost, start with: +- `qwen/qwen3.5-9b` +- `google/gemma-3-27b-it` + +## Notes + +- OpenRouter pricing and provider availability can change at any time. +- The prices above were copied from the official OpenRouter model list on `2026-03-13`. +- These results describe the experimental `easy-memory` integration path only. +- Future regressions should be checked against the shared installed-skill failure log: + - `logs/agent-failures.jsonl` diff --git a/skills/.experimental/easy-memory/references/response-schema.md b/skills/.experimental/easy-memory/references/response-schema.md new file mode 100644 index 00000000..43571fe8 --- /dev/null +++ b/skills/.experimental/easy-memory/references/response-schema.md @@ -0,0 +1,45 @@ +# Memory-Agent Response Template + +The historical file name `response-schema.md` is retained for compatibility. +The current canonical memory-agent response is no longer a strict JSON object. + +## Goal + +The memory-management agent should do only two things: +- remove memory blocks that are unrelated to the current task context, +- append a short summary at the end. 
+
+## Canonical Response Shape
+
+The response should be plain text in this form:
+
+```text
+<retained memory blocks in their original format>
+
+[SUMMARY] <concise summary of the retained memories>
+```
+
+## Response Rules
+
+- The retained memory blocks must stay in their complete original format.
+- The agent must not rewrite memory lines, log-file prefixes, IDs, timestamps,
+  related-resource lines, URLs, or file paths.
+- The agent must not return JSON, field labels, bullet lists, or explanatory
+  prose.
+- The final non-empty line should start with `[SUMMARY]`.
+- The summary must describe only the retained task-relevant memories.
+- If no memory remains relevant after filtering, the response should contain
+  only the summary line.
+
+## Validation Guidance
+
+Runtime implementations should keep validation lightweight.
+
+Recommended behavior:
+- accept any non-empty plain-text response,
+- strip one surrounding code fence if the entire response is fenced,
+- normalize the final summary line to `[SUMMARY] ...`,
+- avoid strict field-level or schema-level rejection.
+
+The scripts should still fall back to raw output when the provider fails, the
+response is empty, or the transport/runtime path errors out.
diff --git a/skills/.experimental/easy-memory/references/script-output-schema.md b/skills/.experimental/easy-memory/references/script-output-schema.md
new file mode 100644
index 00000000..985aa991
--- /dev/null
+++ b/skills/.experimental/easy-memory/references/script-output-schema.md
@@ -0,0 +1,54 @@
+# Script Output Template
+
+This file defines the canonical final output contract for `easy-memory`
+scripts when the optional memory-management agent succeeds.
+
+## Scope
+
+This contract applies to:
+- `scripts/read_today_log.py`
+- `scripts/search_memory.py`
+
+It does not replace raw fallback output. If the scripts do not return the
+plain-text success shape described here, callers must treat the result as raw
+fallback output.
+ +## Canonical Success Shape + +When the memory-management agent succeeds, the scripts should print the agent's +filtered plain-text result directly: + +```text + + +[SUMMARY] +``` + +## Output Rules + +- Each retained memory block should remain in its original display format. +- For `read_today_log.py`, that means the raw memory line plus any readable + related-resource lines. +- For `search_memory.py`, that means the `log_file: raw_line` form plus any + readable related-resource lines. +- The final non-empty line should start with `[SUMMARY]`. +- The summary should describe only the retained task-relevant memories. +- If the agent kept no memory blocks, the script may still return only the + summary line. + +## Fallback Rule + +If any of the following is true, callers must not assume the success shape is +present: +- the memory-management agent is disabled, +- the local config is missing or invalid, +- the provider call fails, +- the agent returns empty output, +- the runtime raises an unexpected error. + +In those cases, callers must parse the script output as raw fallback output. + +## Canonical Example + +The canonical example file for this output shape should live at: +- `assets/examples/script-output.example.txt` diff --git a/skills/.experimental/easy-memory/scripts/delete_memory.py b/skills/.experimental/easy-memory/scripts/delete_memory.py new file mode 100755 index 00000000..f9616e90 --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/delete_memory.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse + +from memory_utils import ( + list_log_files, + log_base_dir, + parse_entry_line, + require_initialized, +) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Delete a memory entry by ID across all logs." 
+ ) + parser.add_argument("--id", required=True, help="Entry ID to delete.") + return parser.parse_args() + + +def main() -> int: + args = parse_args() + base_dir = log_base_dir(create=True) + require_initialized(base_dir) + + matches: list[tuple] = [] + for log_path in list_log_files(base_dir): + text = log_path.read_text(encoding="utf-8") + lines = text.splitlines() + for idx, line in enumerate(lines): + entry = parse_entry_line(line) + if entry and entry["id"] == args.id: + matches.append((log_path, lines, idx)) + + if not matches: + raise SystemExit("Entry ID not found.") + if len(matches) > 1: + raise SystemExit("Entry ID appears multiple times. Refine the logs manually.") + + log_path, lines, idx = matches[0] + del lines[idx] + + output = "\n".join(lines) + if output: + output += "\n" + log_path.write_text(output, encoding="utf-8") + + print(f"Deleted entry ID: {args.id}") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/skills/.experimental/easy-memory/scripts/init_memory.py b/skills/.experimental/easy-memory/scripts/init_memory.py new file mode 100644 index 00000000..c2ed184d --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/init_memory.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +from memory_utils import ensure_initialized, init_log_path, log_base_dir + + +def main() -> int: + base_dir = log_base_dir(create=True) + init_log = init_log_path(base_dir) + if init_log.exists(): + print("Initialization already completed.") + return 0 + + ensure_initialized(base_dir) + print(f"Initialized easy-memory in {base_dir}.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/skills/.experimental/easy-memory/scripts/memory_agent_client.py b/skills/.experimental/easy-memory/scripts/memory_agent_client.py new file mode 100644 index 00000000..a21dfc1c --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/memory_agent_client.py @@ -0,0 +1,753 @@ +from 
__future__ import annotations
+
+import json
+import re
+import socket
+import subprocess
+import tempfile
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Any, Mapping
+from urllib import error, parse, request
+
+from memory_agent_config import (
+    DEFAULT_CODEX_MODEL,
+    MemoryAgentConfig,
+    load_system_prompt_text,
+)
+
+ALLOWED_MODES = {"read_today_log", "search_memory"}
+SUMMARY_PREFIX = "[SUMMARY]"
+SUMMARY_MAX_CHARS = 500
+_FENCED_BLOCK_RE = re.compile(
+    r"^```(?:[^\n`]*)\s*\n(?P<body>.*)\n```$",
+    re.DOTALL,
+)
+_SUMMARY_LINE_RE = re.compile(
+    r"^(?:\[SUMMARY\]|SUMMARY:|Summary:)\s*(?P<text>.*)$"
+)
+_ENTRY_ID_RE = re.compile(r"\[ID:(?P<id>[^\]]+)\]")
+_UNSET = object()
+
+
+class MemoryAgentClientError(RuntimeError):
+    def __init__(
+        self,
+        message: str,
+        *,
+        raw_api_response: Mapping[str, Any] | None = None,
+        content_text: str | None = None,
+        response_body: str | None = None,
+    ) -> None:
+        super().__init__(message)
+        self.raw_api_response = (
+            dict(raw_api_response) if raw_api_response is not None else None
+        )
+        self.content_text = content_text
+        self.response_body = response_body
+
+    def attach_context(
+        self,
+        *,
+        raw_api_response: Mapping[str, Any] | object = _UNSET,
+        content_text: str | object = _UNSET,
+        response_body: str | object = _UNSET,
+    ) -> "MemoryAgentClientError":
+        if raw_api_response is not _UNSET and self.raw_api_response is None:
+            self.raw_api_response = (
+                dict(raw_api_response)
+                if isinstance(raw_api_response, Mapping)
+                else None
+            )
+        if content_text is not _UNSET and self.content_text is None:
+            self.content_text = (
+                content_text if isinstance(content_text, str) else None
+            )
+        if response_body is not _UNSET and self.response_body is None:
+            self.response_body = (
+                response_body if isinstance(response_body, str) else None
+            )
+        return self
+
+
+class MemoryAgentTransportError(MemoryAgentClientError):
+    pass
+
+
+class MemoryAgentProtocolError(MemoryAgentClientError):
+    pass
+
+
+class
MemoryAgentSchemaError(MemoryAgentClientError): + pass + + +@dataclass(frozen=True) +class MemoryAgentResponse: + raw_api_response: dict[str, Any] + content_text: str + rendered_output: str + + +def call_memory_agent( + config: MemoryAgentConfig, + request_payload: Mapping[str, Any], +) -> MemoryAgentResponse: + config.require_runtime_ready() + + request_mode = _require_request_mode(request_payload) + system_prompt = build_runtime_system_prompt( + canonical_prompt=load_system_prompt_text(config), + request_mode=request_mode, + request_payload=request_payload, + ) + + if config.api_style == "codex_exec": + response_json, content_text = _run_codex_exec( + config=config, + system_prompt=system_prompt, + request_payload=request_payload, + ) + elif config.api_style == "ollama_native_chat": + api_payload = build_ollama_chat_payload( + model=config.model or "", + system_prompt=system_prompt, + request_payload=request_payload, + disable_thinking=config.disable_thinking, + ) + response_json = _post_ollama_chat( + base_url=config.base_url or "", + api_key=config.api_key, + timeout_seconds=config.timeout_seconds, + payload=api_payload, + ) + try: + content_text = _extract_ollama_message_text(response_json) + except MemoryAgentClientError as exc: + raise exc.attach_context(raw_api_response=response_json) + else: + api_payload = build_chat_completions_payload( + model=config.model or "", + system_prompt=system_prompt, + request_payload=request_payload, + ) + response_json = _post_chat_completions( + base_url=config.base_url or "", + api_key=config.api_key, + timeout_seconds=config.timeout_seconds, + payload=api_payload, + ) + try: + content_text = _extract_message_text(response_json) + except MemoryAgentClientError as exc: + raise exc.attach_context(raw_api_response=response_json) + + try: + rendered_output = normalize_agent_response_text( + content_text=content_text, + request_payload=request_payload, + ) + except MemoryAgentClientError as exc: + raise exc.attach_context( + 
raw_api_response=response_json, + content_text=content_text, + ) + + return MemoryAgentResponse( + raw_api_response=response_json, + content_text=content_text, + rendered_output=rendered_output, + ) + + +def build_codex_exec_prompt( + *, + system_prompt: str, + request_payload: Mapping[str, Any], +) -> str: + return "\n\n".join( + [ + "You are running inside Codex CLI exec as a plain-text preprocessing step for easy-memory.", + ( + "Do not use shell commands, do not inspect the workspace, " + "do not call MCP tools, and do not modify any files. " + "Use only the provided request payload." + ), + ( + "Keep only task-relevant memory blocks. Copy each retained " + "rendered_block exactly as provided." + ), + ( + f"End the reply with exactly one summary line that starts with " + f"{SUMMARY_PREFIX} and keep that summary within {SUMMARY_MAX_CHARS} characters." + ), + ( + f"If no memory remains relevant after filtering, return only the {SUMMARY_PREFIX} line." + ), + "Canonical prompt:", + system_prompt, + "Input payload JSON:", + json.dumps( + dict(request_payload), + ensure_ascii=False, + separators=(",", ":"), + ), + ] + ) + + +def _run_codex_exec( + *, + config: MemoryAgentConfig, + system_prompt: str, + request_payload: Mapping[str, Any], +) -> tuple[dict[str, Any], str]: + prompt_text = build_codex_exec_prompt( + system_prompt=system_prompt, + request_payload=request_payload, + ) + with tempfile.TemporaryDirectory(prefix="easy-memory-codex-exec-") as tmp_dir: + tmp_path = Path(tmp_dir) + output_path = tmp_path / "response.txt" + command = build_codex_exec_command( + config=config, + output_path=output_path, + prompt_text=prompt_text, + ) + try: + completed = subprocess.run( + command, + cwd=Path.cwd(), + capture_output=True, + text=True, + timeout=config.timeout_seconds, + check=False, + ) + except FileNotFoundError as exc: + raise MemoryAgentTransportError( + f"Codex CLI executable not found: {config.codex_binary}" + ) from exc + except subprocess.TimeoutExpired as 
exc: + raise MemoryAgentTransportError( + "Codex exec request timed out.", + raw_api_response={ + "command": command, + "stdout": exc.stdout, + "stderr": exc.stderr, + }, + response_body=_format_codex_exec_output( + stdout_text=exc.stdout, + stderr_text=exc.stderr, + ), + ) from exc + + output_text = ( + output_path.read_text(encoding="utf-8").strip() + if output_path.exists() + else "" + ) + raw_response = { + "command": command, + "returncode": completed.returncode, + "stdout": completed.stdout, + "stderr": completed.stderr, + "output_file": output_text, + } + if completed.returncode != 0: + raise MemoryAgentTransportError( + f"Codex exec failed with exit code {completed.returncode}.", + raw_api_response=raw_response, + response_body=_format_codex_exec_output( + stdout_text=completed.stdout, + stderr_text=completed.stderr, + ), + ) + + content_text = output_text or _extract_codex_exec_content(completed.stdout) + if not content_text: + raise MemoryAgentProtocolError( + "Codex exec did not produce a structured response.", + raw_api_response=raw_response, + response_body=_format_codex_exec_output( + stdout_text=completed.stdout, + stderr_text=completed.stderr, + ), + ) + return raw_response, content_text + + +def build_codex_exec_command( + *, + config: MemoryAgentConfig, + output_path: Path, + prompt_text: str, +) -> list[str]: + command = [ + config.codex_binary, + "exec", + "--ephemeral", + "--color", + "never", + "-s", + "read-only", + "--skip-git-repo-check", + "-m", + config.model or DEFAULT_CODEX_MODEL, + "-o", + str(output_path), + ] + if config.codex_profile: + command.extend(["-p", config.codex_profile]) + if config.codex_service_tier: + command.extend( + [ + "-c", + f"service_tier={_toml_string_literal(config.codex_service_tier)}", + ] + ) + if config.codex_reasoning_effort: + command.extend( + [ + "-c", + "model_reasoning_effort=" + f"{_toml_string_literal(config.codex_reasoning_effort)}", + ] + ) + command.append(prompt_text) + return command + + +def 
_toml_string_literal(value: str) -> str: + escaped = value.replace("\\", "\\\\").replace('"', '\\"') + return f'"{escaped}"' + + +def _extract_codex_exec_content(stdout_text: str) -> str: + stripped = stdout_text.strip() + if not stripped: + return "" + lines = [line.rstrip() for line in stripped.splitlines() if line.strip()] + if not lines: + return "" + return "\n".join(lines) + + +def _format_codex_exec_output( + *, + stdout_text: str | None, + stderr_text: str | None, +) -> str: + sections = [] + if stdout_text: + sections.append(f"stdout:\n{stdout_text}") + if stderr_text: + sections.append(f"stderr:\n{stderr_text}") + return "\n\n".join(sections) if sections else "" + + +def build_chat_completions_payload( + model: str, + system_prompt: str, + request_payload: Mapping[str, Any], +) -> dict[str, Any]: + return { + "model": model, + "messages": [ + {"role": "system", "content": system_prompt}, + { + "role": "user", + "content": json.dumps( + dict(request_payload), + ensure_ascii=False, + separators=(",", ":"), + ), + }, + ], + "temperature": 0, + } + + +def build_ollama_chat_payload( + model: str, + system_prompt: str, + request_payload: Mapping[str, Any], + disable_thinking: bool = False, +) -> dict[str, Any]: + payload = { + "model": model, + "messages": [ + {"role": "system", "content": system_prompt}, + { + "role": "user", + "content": json.dumps( + dict(request_payload), + ensure_ascii=False, + separators=(",", ":"), + ), + }, + ], + "stream": False, + "options": { + "temperature": 0, + }, + } + if disable_thinking: + payload["think"] = False + return payload + + +def build_runtime_system_prompt( + canonical_prompt: str, + request_mode: str, + request_payload: Mapping[str, Any], +) -> str: + template_lines = [ + "", + "", + f"{SUMMARY_PREFIX} ", + ] + protocol_lines = [ + "Runtime protocol requirements:", + f'1. mode is "{request_mode}". Use task_context to judge relevance.', + "2. Remove all unrelated memory blocks completely.", + "3. 
For each retained memory, copy the full rendered_block exactly as given.", + "4. Do not rewrite IDs, timestamps, related resource lines, URLs, or file paths.", + "5. Do not return JSON, bullets, explanations, or code fences.", + "6. End the response with exactly one summary line.", + f"7. That summary line must start with {SUMMARY_PREFIX}.", + f"8. Keep the summary within {SUMMARY_MAX_CHARS} characters.", + f"9. If no memory is relevant, return only the {SUMMARY_PREFIX} line.", + "Reply template:", + "\n".join(template_lines), + "Rendered entry index:", + build_rendered_entry_index(request_payload), + ] + return "\n\n".join([canonical_prompt, "\n".join(protocol_lines)]) + + +def build_rendered_entry_index( + request_payload: Mapping[str, Any], +) -> str: + entries = request_payload.get("entries") + if not isinstance(entries, list) or not entries: + return "No entries available." + lines = [] + for item in entries: + if not isinstance(item, Mapping): + continue + entry_id = item.get("entry_id") + if not isinstance(entry_id, str): + continue + log_file = item.get("log_file") + rendered_block = item.get("rendered_block") + header = f"- {entry_id}" + if isinstance(log_file, str) and log_file: + header += f" ({log_file})" + if isinstance(rendered_block, str) and rendered_block: + header += f": {rendered_block.splitlines()[0][:160]}" + lines.append(header) + return "\n".join(lines) if lines else "No entries available." 
+ + +def normalize_agent_response_text( + *, + content_text: str, + request_payload: Mapping[str, Any], +) -> str: + stripped = extract_agent_text(content_text) + if not stripped: + raise MemoryAgentProtocolError("Agent returned empty content.") + + lines = [line.rstrip() for line in stripped.splitlines()] + while lines and not lines[0].strip(): + lines.pop(0) + while lines and not lines[-1].strip(): + lines.pop() + if not lines: + raise MemoryAgentProtocolError("Agent returned empty content.") + + summary_index = last_nonempty_index(lines) + if summary_index is None: + raise MemoryAgentProtocolError("Agent returned empty content.") + + summary_match = _SUMMARY_LINE_RE.match(lines[summary_index].strip()) + if summary_match: + body_lines = lines[:summary_index] + summary_text = summary_match.group("body").strip() + else: + body_lines = lines + summary_text = "" + + summary_text = truncate_summary_text(summary_text) + if not summary_text: + summary_text = ( + "Agent filtering completed. Review the retained memories above." 
+ ) + + body_text = "\n".join(body_lines).strip() + body_text = canonicalize_body_text( + body_text=body_text, + request_payload=request_payload, + ) + summary_line = f"{SUMMARY_PREFIX} {summary_text}" + if body_text: + return f"{body_text}\n\n{summary_line}" + return summary_line + + +def extract_agent_text(content_text: str) -> str: + stripped = content_text.strip() + if not stripped: + return "" + fenced_match = _FENCED_BLOCK_RE.fullmatch(stripped) + if fenced_match: + return fenced_match.group("body").strip() + return stripped + + +def last_nonempty_index(lines: list[str]) -> int | None: + for index in range(len(lines) - 1, -1, -1): + if lines[index].strip(): + return index + return None + + +def truncate_summary_text(summary_text: str) -> str: + normalized = " ".join(summary_text.split()) + if len(normalized) <= SUMMARY_MAX_CHARS: + return normalized + return normalized[:SUMMARY_MAX_CHARS].rstrip() + + +def canonicalize_body_text( + *, + body_text: str, + request_payload: Mapping[str, Any], +) -> str: + if not body_text: + return "" + + rendered_blocks = collect_rendered_blocks_by_entry_id(request_payload) + if not rendered_blocks: + return body_text + + selected_ids: list[str] = [] + seen_ids: set[str] = set() + for match in _ENTRY_ID_RE.finditer(body_text): + entry_id = match.group("id") + if entry_id in rendered_blocks and entry_id not in seen_ids: + seen_ids.add(entry_id) + selected_ids.append(entry_id) + if not selected_ids: + return body_text + return "\n\n".join(rendered_blocks[entry_id] for entry_id in selected_ids) + + +def collect_rendered_blocks_by_entry_id( + request_payload: Mapping[str, Any], +) -> dict[str, str]: + entries = request_payload.get("entries") + if not isinstance(entries, list): + return {} + blocks: dict[str, str] = {} + for item in entries: + if not isinstance(item, Mapping): + continue + entry_id = item.get("entry_id") + rendered_block = item.get("rendered_block") + if isinstance(entry_id, str) and isinstance(rendered_block, str): 
+ blocks[entry_id] = rendered_block.strip() + return blocks + + +def _require_request_mode(request_payload: Mapping[str, Any]) -> str: + mode = request_payload.get("mode") + if not isinstance(mode, str): + raise MemoryAgentSchemaError("Request payload must include a string mode.") + if mode not in ALLOWED_MODES: + raise MemoryAgentSchemaError(f"Invalid request mode: {mode}") + return mode + + +def _post_chat_completions( + base_url: str, + api_key: str | None, + timeout_seconds: float, + payload: Mapping[str, Any], +) -> dict[str, Any]: + url = _build_chat_completions_url(base_url) + headers = { + "Content-Type": "application/json", + } + if api_key and api_key.strip(): + headers["Authorization"] = f"Bearer {api_key.strip()}" + return _post_json_request( + url=url, + headers=headers, + timeout_seconds=timeout_seconds, + payload=payload, + ) + + +def _post_ollama_chat( + base_url: str, + api_key: str | None, + timeout_seconds: float, + payload: Mapping[str, Any], +) -> dict[str, Any]: + url = _build_ollama_chat_url(base_url) + headers = { + "Content-Type": "application/json", + } + if api_key and api_key.strip(): + headers["Authorization"] = f"Bearer {api_key.strip()}" + return _post_json_request( + url=url, + headers=headers, + timeout_seconds=timeout_seconds, + payload=payload, + ) + + +def _post_json_request( + *, + url: str, + headers: Mapping[str, str], + timeout_seconds: float, + payload: Mapping[str, Any], +) -> dict[str, Any]: + raw_body = json.dumps(payload, ensure_ascii=False).encode("utf-8") + http_request = request.Request( + url, + data=raw_body, + headers=dict(headers), + method="POST", + ) + try: + with request.urlopen(http_request, timeout=timeout_seconds) as response: + body = response.read().decode("utf-8") + except error.HTTPError as exc: + body = exc.read().decode("utf-8", errors="replace") + raise MemoryAgentTransportError( + f"Memory agent HTTP error {exc.code}: {body}", + response_body=body, + ) from exc + except socket.timeout as exc: + raise 
MemoryAgentTransportError( + "Memory agent request timed out." + ) from exc + except TimeoutError as exc: + raise MemoryAgentTransportError( + "Memory agent request timed out." + ) from exc + except error.URLError as exc: + raise MemoryAgentTransportError( + f"Memory agent connection failed: {exc.reason}" + ) from exc + + try: + parsed_body = json.loads(body) + except json.JSONDecodeError as exc: + raise MemoryAgentProtocolError( + "Memory agent API did not return valid JSON.", + response_body=body, + ) from exc + if not isinstance(parsed_body, dict): + raise MemoryAgentProtocolError( + "Memory agent API response must be a JSON object.", + response_body=body, + ) + return parsed_body + + +def _build_chat_completions_url(base_url: str) -> str: + normalized = base_url.rstrip("/") + if not normalized: + raise MemoryAgentTransportError("base_url must not be empty.") + parsed_base = parse.urlsplit(normalized) + if not parsed_base.scheme or not parsed_base.netloc: + raise MemoryAgentTransportError( + f"Invalid base_url for memory agent: {base_url}" + ) + if parsed_base.path.endswith("/chat/completions"): + return normalized + if parsed_base.path.endswith("/v1"): + return f"{normalized}/chat/completions" + return f"{normalized}/chat/completions" + + +def _build_ollama_chat_url(base_url: str) -> str: + normalized = base_url.rstrip("/") + if not normalized: + raise MemoryAgentTransportError("base_url must not be empty.") + parsed_base = parse.urlsplit(normalized) + if not parsed_base.scheme or not parsed_base.netloc: + raise MemoryAgentTransportError( + f"Invalid base_url for memory agent: {base_url}" + ) + base_without_suffix = normalized + suffixes = ( + "/api/chat", + "/v1/chat/completions", + "/chat/completions", + "/v1", + ) + for suffix in suffixes: + if base_without_suffix.endswith(suffix): + base_without_suffix = base_without_suffix[: -len(suffix)] + break + if base_without_suffix.endswith("/api"): + return f"{base_without_suffix}/chat" + return 
f"{base_without_suffix}/api/chat" + + +def _extract_message_text(api_response: Mapping[str, Any]) -> str: + choices = api_response.get("choices") + if not isinstance(choices, list) or not choices: + raise MemoryAgentProtocolError( + "Memory agent API response is missing choices." + ) + first_choice = choices[0] + if not isinstance(first_choice, dict): + raise MemoryAgentProtocolError( + "Memory agent API response choice must be an object." + ) + message = first_choice.get("message") + if not isinstance(message, dict): + raise MemoryAgentProtocolError( + "Memory agent API response is missing message." + ) + content = message.get("content") + if isinstance(content, str): + return content + if isinstance(content, list): + text_parts = [] + for index, item in enumerate(content): + if not isinstance(item, dict): + raise MemoryAgentProtocolError( + f"message.content[{index}] must be an object." + ) + if item.get("type") == "text" and isinstance(item.get("text"), str): + text_parts.append(item["text"]) + joined = "".join(text_parts).strip() + if joined: + return joined + raise MemoryAgentProtocolError( + "Memory agent API response does not contain supported message content." + ) + + +def _extract_ollama_message_text(api_response: Mapping[str, Any]) -> str: + message = api_response.get("message") + if not isinstance(message, dict): + raise MemoryAgentProtocolError( + "Ollama native response is missing message." + ) + content = message.get("content") + if isinstance(content, str): + return content + if isinstance(content, (dict, list)): + return json.dumps(content, ensure_ascii=False) + raise MemoryAgentProtocolError( + "Ollama native response does not contain supported message content." 
+ ) diff --git a/skills/.experimental/easy-memory/scripts/memory_agent_config.py b/skills/.experimental/easy-memory/scripts/memory_agent_config.py new file mode 100644 index 00000000..64cddc6e --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/memory_agent_config.py @@ -0,0 +1,404 @@ +from __future__ import annotations + +import json +import os +import re +from dataclasses import dataclass +from pathlib import Path +from typing import Any, Mapping + +ENV_AGENT_CONFIG_FILE = "EASY_MEMORY_AGENT_CONFIG_FILE" +ENV_AGENT_ENABLED = "EASY_MEMORY_AGENT_ENABLED" +ENV_AGENT_BASE_URL = "EASY_MEMORY_AGENT_BASE_URL" +ENV_AGENT_API_KEY = "EASY_MEMORY_AGENT_API_KEY" +ENV_AGENT_MODEL = "EASY_MEMORY_AGENT_MODEL" +ENV_AGENT_API_STYLE = "EASY_MEMORY_AGENT_API_STYLE" +ENV_AGENT_DISABLE_THINKING = "EASY_MEMORY_AGENT_DISABLE_THINKING" +ENV_AGENT_TIMEOUT_SECONDS = "EASY_MEMORY_AGENT_TIMEOUT_SECONDS" +ENV_AGENT_SYSTEM_PROMPT_FILE = "EASY_MEMORY_AGENT_SYSTEM_PROMPT_FILE" +ENV_AGENT_CODEX_BINARY = "EASY_MEMORY_AGENT_CODEX_BINARY" +ENV_AGENT_CODEX_PROFILE = "EASY_MEMORY_AGENT_CODEX_PROFILE" +ENV_AGENT_CODEX_SERVICE_TIER = "EASY_MEMORY_AGENT_CODEX_SERVICE_TIER" +ENV_AGENT_CODEX_REASONING_EFFORT = "EASY_MEMORY_AGENT_CODEX_REASONING_EFFORT" +DEFAULT_TIMEOUT_SECONDS = 20.0 +DEFAULT_CODEX_EXEC_TIMEOUT_SECONDS = 120.0 +DEFAULT_CONFIG_FILE_NAME = "agent-config.json" +DEFAULT_API_STYLE = "openai_chat_completions" +DEFAULT_CODEX_BINARY = "codex" +DEFAULT_CODEX_MODEL = "gpt-5.3-codex-spark" +DEFAULT_CODEX_SERVICE_TIER = "fast" +DEFAULT_CODEX_REASONING_EFFORT = "medium" +ALLOWED_API_STYLES = { + "codex_exec", + "openai_chat_completions", + "ollama_native_chat", +} + +_PROMPT_BLOCK_RE = re.compile(r"```(?:text)?\n(?P.*?)```", re.DOTALL) + + +class MemoryAgentConfigError(RuntimeError): + pass + + +@dataclass(frozen=True) +class MemoryAgentConfig: + enabled: bool + base_url: str | None + api_key: str | None + model: str | None + api_style: str + disable_thinking: bool + codex_binary: str + 
codex_profile: str | None + codex_service_tier: str | None + codex_reasoning_effort: str | None + timeout_seconds: float + config_file: Path + system_prompt_file: Path + skill_dir: Path + + def require_runtime_ready(self) -> None: + if not self.enabled: + raise MemoryAgentConfigError("Memory agent is disabled.") + missing = [] + if self.api_style == "codex_exec": + if not self.codex_binary: + missing.append(ENV_AGENT_CODEX_BINARY) + elif not self.base_url: + missing.append(ENV_AGENT_BASE_URL) + if not self.model: + missing.append(ENV_AGENT_MODEL) + if missing: + missing_text = ", ".join(missing) + raise MemoryAgentConfigError( + f"Memory agent is enabled but missing required configuration: {missing_text}" + ) + if not self.system_prompt_file.exists(): + raise MemoryAgentConfigError( + f"System prompt file not found: {self.system_prompt_file}" + ) + + +def installed_skill_dir() -> Path: + return Path(__file__).resolve().parent.parent + + +def default_system_prompt_file(skill_dir: Path | None = None) -> Path: + resolved_skill_dir = skill_dir or installed_skill_dir() + return resolved_skill_dir / "references" / "memory-agent-system-prompt.md" + + +def default_local_config_file() -> Path: + return Path.cwd() / "easy-memory" / DEFAULT_CONFIG_FILE_NAME + + +def parse_enabled_flag(raw_value: Any) -> bool: + return parse_bool_flag( + raw_value, + label=ENV_AGENT_ENABLED, + default=False, + ) + + +def parse_bool_flag(raw_value: Any, *, label: str, default: bool) -> bool: + if raw_value is None: + return default + if isinstance(raw_value, bool): + return raw_value + if not isinstance(raw_value, str): + raise MemoryAgentConfigError(f"{label} must be a boolean or string.") + normalized = raw_value.strip().lower() + if normalized in {"1", "true", "yes", "on"}: + return True + if normalized in {"0", "false", "no", "off", ""}: + return False + raise MemoryAgentConfigError( + f"{label} must be one of true/false/1/0/yes/no/on/off." 
+ ) + + +def parse_timeout_seconds(raw_value: Any, *, default: float) -> float: + if raw_value is None: + return default + if isinstance(raw_value, (int, float)) and not isinstance(raw_value, bool): + timeout_seconds = float(raw_value) + if timeout_seconds <= 0: + raise MemoryAgentConfigError( + f"{ENV_AGENT_TIMEOUT_SECONDS} must be a positive number." + ) + return timeout_seconds + if not isinstance(raw_value, str): + raise MemoryAgentConfigError( + f"{ENV_AGENT_TIMEOUT_SECONDS} must be a positive number." + ) + if not raw_value.strip(): + return default + try: + timeout_seconds = float(raw_value.strip()) + except ValueError as exc: + raise MemoryAgentConfigError( + f"{ENV_AGENT_TIMEOUT_SECONDS} must be a positive number." + ) from exc + if timeout_seconds <= 0: + raise MemoryAgentConfigError( + f"{ENV_AGENT_TIMEOUT_SECONDS} must be a positive number." + ) + return timeout_seconds + + +def parse_api_style(raw_value: Any) -> str: + if raw_value is None: + return DEFAULT_API_STYLE + if not isinstance(raw_value, str): + raise MemoryAgentConfigError( + f"{ENV_AGENT_API_STYLE} must be a string." + ) + normalized = raw_value.strip() + if not normalized: + return DEFAULT_API_STYLE + if normalized not in ALLOWED_API_STYLES: + allowed = ", ".join(sorted(ALLOWED_API_STYLES)) + raise MemoryAgentConfigError( + f"{ENV_AGENT_API_STYLE} must be one of: {allowed}" + ) + return normalized + + +def resolve_system_prompt_file( + raw_value: Any, + skill_dir: Path, + config_dir: Path | None = None, +) -> Path: + if raw_value is None: + return default_system_prompt_file(skill_dir) + if not isinstance(raw_value, str): + raise MemoryAgentConfigError( + f"{ENV_AGENT_SYSTEM_PROMPT_FILE} must be a string path." 
+ ) + if not raw_value.strip(): + return default_system_prompt_file(skill_dir) + candidate = Path(raw_value.strip()).expanduser() + if not candidate.is_absolute(): + base_dir = config_dir or Path.cwd() + candidate = (base_dir / candidate).resolve() + return candidate + + +def resolve_config_file_path(raw_value: str | None) -> tuple[Path, bool]: + if raw_value is None or not raw_value.strip(): + return default_local_config_file(), False + candidate = Path(raw_value.strip()).expanduser() + if not candidate.is_absolute(): + candidate = (Path.cwd() / candidate).resolve() + return candidate, True + + +def load_local_config_payload( + config_file: Path, + explicit: bool, +) -> dict[str, Any]: + if not config_file.exists(): + if explicit: + raise MemoryAgentConfigError( + f"Memory agent config file not found: {config_file}" + ) + return {} + try: + raw_text = config_file.read_text(encoding="utf-8") + except OSError as exc: + raise MemoryAgentConfigError( + f"Failed to read memory agent config file: {config_file}" + ) from exc + if not raw_text.strip(): + return {} + try: + payload = json.loads(raw_text) + except json.JSONDecodeError as exc: + raise MemoryAgentConfigError( + f"Memory agent config file is not valid JSON: {config_file}" + ) from exc + if not isinstance(payload, dict): + raise MemoryAgentConfigError( + f"Memory agent config file must contain a JSON object: {config_file}" + ) + return payload + + +def load_memory_agent_config( + env: Mapping[str, str] | None = None, + skill_dir: Path | None = None, +) -> MemoryAgentConfig: + env_map = env or os.environ + resolved_skill_dir = skill_dir or installed_skill_dir() + config_file, config_file_explicit = resolve_config_file_path( + env_map.get(ENV_AGENT_CONFIG_FILE) + ) + config_payload = load_local_config_payload( + config_file=config_file, + explicit=config_file_explicit, + ) + api_style = parse_api_style( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_API_STYLE, + "api_style", + ) + ) + model = 
_normalize_optional_string( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_MODEL, + "model", + ), + ENV_AGENT_MODEL, + ) + if api_style == "codex_exec" and model is None: + model = DEFAULT_CODEX_MODEL + timeout_default = ( + DEFAULT_CODEX_EXEC_TIMEOUT_SECONDS + if api_style == "codex_exec" + else DEFAULT_TIMEOUT_SECONDS + ) + codex_binary = _normalize_optional_string( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_CODEX_BINARY, + "codex_binary", + ), + ENV_AGENT_CODEX_BINARY, + ) or DEFAULT_CODEX_BINARY + codex_service_tier = _normalize_optional_string( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_CODEX_SERVICE_TIER, + "codex_service_tier", + ), + ENV_AGENT_CODEX_SERVICE_TIER, + ) + if api_style == "codex_exec" and codex_service_tier is None: + codex_service_tier = DEFAULT_CODEX_SERVICE_TIER + codex_reasoning_effort = _normalize_optional_string( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_CODEX_REASONING_EFFORT, + "codex_reasoning_effort", + ), + ENV_AGENT_CODEX_REASONING_EFFORT, + ) + if api_style == "codex_exec" and codex_reasoning_effort is None: + codex_reasoning_effort = DEFAULT_CODEX_REASONING_EFFORT + + return MemoryAgentConfig( + enabled=parse_enabled_flag( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_ENABLED, + "enabled", + ) + ), + base_url=_normalize_optional_string( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_BASE_URL, + "base_url", + ), + ENV_AGENT_BASE_URL, + ), + api_key=_normalize_optional_string( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_API_KEY, + "api_key", + ), + ENV_AGENT_API_KEY, + ), + model=model, + api_style=api_style, + disable_thinking=parse_bool_flag( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_DISABLE_THINKING, + "disable_thinking", + ), + label=ENV_AGENT_DISABLE_THINKING, + default=False, + ), + codex_binary=codex_binary, + codex_profile=_normalize_optional_string( + _select_raw_value( + env_map, + 
config_payload, + ENV_AGENT_CODEX_PROFILE, + "codex_profile", + ), + ENV_AGENT_CODEX_PROFILE, + ), + codex_service_tier=codex_service_tier, + codex_reasoning_effort=codex_reasoning_effort, + timeout_seconds=parse_timeout_seconds( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_TIMEOUT_SECONDS, + "timeout_seconds", + ), + default=timeout_default, + ), + config_file=config_file, + system_prompt_file=resolve_system_prompt_file( + _select_raw_value( + env_map, + config_payload, + ENV_AGENT_SYSTEM_PROMPT_FILE, + "system_prompt_file", + ), + resolved_skill_dir, + config_dir=config_file.parent, + ), + skill_dir=resolved_skill_dir, + ) + + +def load_system_prompt_text(config: MemoryAgentConfig) -> str: + config.require_runtime_ready() + raw_text = config.system_prompt_file.read_text(encoding="utf-8") + match = _PROMPT_BLOCK_RE.search(raw_text) + if match: + prompt_text = match.group("body").strip() + else: + prompt_text = raw_text.strip() + if not prompt_text: + raise MemoryAgentConfigError( + f"System prompt file is empty: {config.system_prompt_file}" + ) + return prompt_text + + +def _normalize_optional_string(value: Any, label: str) -> str | None: + if value is None: + return None + if not isinstance(value, str): + raise MemoryAgentConfigError(f"{label} must be a string.") + normalized = value.strip() + return normalized or None + + +def _select_raw_value( + env_map: Mapping[str, str], + config_payload: Mapping[str, Any], + env_key: str, + config_key: str, +) -> Any: + if env_key in env_map: + return env_map.get(env_key) + return config_payload.get(config_key) diff --git a/skills/.experimental/easy-memory/scripts/memory_agent_failure_log.py b/skills/.experimental/easy-memory/scripts/memory_agent_failure_log.py new file mode 100644 index 00000000..0a1084aa --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/memory_agent_failure_log.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import json +import traceback +from datetime import 
datetime +from pathlib import Path +from typing import Any, Mapping + +from memory_agent_client import MemoryAgentClientError, MemoryAgentResponse +from memory_agent_config import MemoryAgentConfig + +AGENT_FAILURE_LOG_NAME = "agent-failures.jsonl" + + +def agent_failure_log_path(skill_dir: Path) -> Path: + return skill_dir / "logs" / AGENT_FAILURE_LOG_NAME + + +def append_agent_failure_log( + *, + config: MemoryAgentConfig, + request_payload: Mapping[str, Any], + fallback_reason: str, + error: BaseException | None = None, + response: MemoryAgentResponse | None = None, +) -> Path | None: + log_path = agent_failure_log_path(config.skill_dir) + record = { + "timestamp": datetime.now().isoformat(timespec="seconds"), + "fallback_reason": fallback_reason, + "mode": request_payload.get("mode"), + "model": config.model, + "base_url": config.base_url, + "api_style": config.api_style, + "codex_binary": config.codex_binary, + "codex_profile": config.codex_profile, + "codex_service_tier": config.codex_service_tier, + "codex_reasoning_effort": config.codex_reasoning_effort, + "disable_thinking": config.disable_thinking, + "timeout_seconds": config.timeout_seconds, + "request_payload": dict(request_payload), + "error_type": error.__class__.__name__ if error else None, + "error_message": str(error) if error else None, + "raw_api_response": _extract_raw_api_response(error=error, response=response), + "content_text": _extract_content_text(error=error, response=response), + "response_body": _extract_response_body(error=error), + "rendered_output": response.rendered_output if response else None, + "traceback": _format_traceback(error), + } + try: + log_path.parent.mkdir(parents=True, exist_ok=True) + with log_path.open("a", encoding="utf-8") as handle: + handle.write(json.dumps(record, ensure_ascii=False)) + handle.write("\n") + except OSError: + return None + return log_path + + +def _extract_raw_api_response( + *, + error: BaseException | None, + response: MemoryAgentResponse | 
None, +) -> dict[str, Any] | None: + if response is not None: + return dict(response.raw_api_response) + if isinstance(error, MemoryAgentClientError) and error.raw_api_response is not None: + return dict(error.raw_api_response) + return None + + +def _extract_content_text( + *, + error: BaseException | None, + response: MemoryAgentResponse | None, +) -> str | None: + if response is not None: + return response.content_text + if isinstance(error, MemoryAgentClientError): + return error.content_text + return None + + +def _extract_response_body( + *, + error: BaseException | None, +) -> str | None: + if isinstance(error, MemoryAgentClientError): + return error.response_body + return None + + +def _format_traceback(error: BaseException | None) -> str | None: + if error is None or error.__traceback__ is None: + return None + return "".join( + traceback.format_exception( + type(error), + error, + error.__traceback__, + ) + ) diff --git a/skills/.experimental/easy-memory/scripts/memory_utils.py b/skills/.experimental/easy-memory/scripts/memory_utils.py new file mode 100755 index 00000000..ce221130 --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/memory_utils.py @@ -0,0 +1,529 @@ +from __future__ import annotations + +import json +import posixpath +import platform +import re +import socket +from datetime import date, datetime +from pathlib import Path +from typing import Optional +from urllib import parse +from uuid import uuid4 + +ENTRY_PREFIX_RE = re.compile( + r"^\[ID:(?P[^\]]+)\] " + r"\[REF:(?P[^\]]+)\] " + r"\[FACT:(?Ptrue|false)\] " + r"(?P.*)$" +) +TIME_SUFFIX_RE = re.compile( + r"^(?P.*) " + r"\[TIME:(?P\d{4}-\d{2}-\d{2}:\d{2}:\d{2})\]$" +) + +_REF_LEVEL_RE = re.compile(r"^[A-Za-z0-9._-]+$") +INIT_LOG_NAME = "init.log" +AGENTS_FILE_NAME = "AGENTS.MD" +PATHS_TOKEN_PREFIX = " [PATHS:" +ALLOWED_RESOURCE_TYPES = {"local_path", "url"} +ALLOWED_LOCAL_PATH_FORMATS = {"project_relative", "absolute"} +SUPPORTED_URL_SCHEMES = {"http", "https"} 
+WINDOWS_ABSOLUTE_PATH_RE = re.compile(r"^(?:[A-Za-z]:[\\/]|\\\\)") + + +def _json_error() -> SystemExit: + return SystemExit("related reference metadata must be valid JSON.") + + +def log_base_dir(create: bool = False) -> Path: + base_dir = Path.cwd() / "easy-memory" + if create: + base_dir.mkdir(parents=True, exist_ok=True) + return base_dir + + +def log_path_for_date(log_date: date, base_dir: Path) -> Path: + return base_dir / f"{log_date.strftime('%Y-%m-%d')}.log" + + +def list_log_files(base_dir: Path) -> list[Path]: + if not base_dir.exists(): + return [] + return sorted(base_dir.glob("*.log"), reverse=True) + + +def init_log_path(base_dir: Path) -> Path: + return base_dir / INIT_LOG_NAME + + +def init_rules_block() -> str: + return "\n".join( + [ + "## easy-memory rules", + "- At the start of the current session (before the first task), use the " + "`easy-memory` skill and follow all rules and constraints in its " + "`SKILL.md`.", + "- Only re-run memory read/search when necessary for the task.", + ] + ) + + +def ensure_initialized(base_dir: Path) -> None: + init_log = init_log_path(base_dir) + if init_log.exists(): + return + + base_dir.mkdir(parents=True, exist_ok=True) + + rules_block = init_rules_block() + agents_path = Path.cwd() / AGENTS_FILE_NAME + if agents_path.exists(): + existing = agents_path.read_text(encoding="utf-8") + if existing and not existing.endswith("\n"): + existing += "\n" + if existing.strip(): + existing += "\n" + existing += f"{rules_block}\n" + agents_path.write_text(existing, encoding="utf-8") + else: + agents_path.write_text(f"{rules_block}\n", encoding="utf-8") + + date_stamp = date.today().isoformat() + init_log_content = f"{rules_block}\nDate: {date_stamp}\n" + init_log.write_text(init_log_content, encoding="utf-8") + + +def require_initialized(base_dir: Path) -> None: + init_log = init_log_path(base_dir) + if not base_dir.exists() or not init_log.exists(): + raise SystemExit( + "Initialization required. 
Run `python3 scripts/init_memory.py` " + "from the project root." + ) + + +def ensure_single_line(text: str, label: str) -> None: + if "\n" in text or "\r" in text: + raise SystemExit(f"{label} must be a single line.") + + +def normalize_task_context(value: str) -> str: + context = value.strip() + if not context: + raise SystemExit("task-context must not be empty.") + ensure_single_line(context, "task-context") + return context + + +def normalize_bool(value: str) -> bool: + normalized = value.strip().lower() + if normalized == "true": + return True + if normalized == "false": + return False + raise SystemExit("factual must be 'true' or 'false'.") + + +def validate_ref_level(value: str) -> str: + if not value: + raise SystemExit("ref-level must be a non-empty string.") + if not _REF_LEVEL_RE.match(value): + raise SystemExit("ref-level must match [A-Za-z0-9._-]+.") + return value + + +def format_timestamp(dt: datetime) -> str: + return dt.strftime("%Y-%m-%d:%H:%M") + + +def normalize_related_paths(raw_paths: list[str]) -> list[dict[str, str]]: + entries: list[dict[str, str]] = [] + seen_paths: set[tuple[str, str]] = set() + for raw_path in raw_paths: + normalized_entry = normalize_single_related_path(raw_path) + dedupe_key = ( + normalized_entry["resource_type"], + normalized_entry["path"], + ) + if dedupe_key in seen_paths: + continue + seen_paths.add(dedupe_key) + entries.append( + make_related_path_entry( + normalized_entry["path"], + resource_type=normalized_entry["resource_type"], + directory=normalized_entry["directory"], + path_format=normalized_entry.get("path_format"), + system_hint=normalized_entry.get("system_hint"), + ) + ) + return entries + + +def normalize_single_related_path(raw_path: str) -> dict[str, str]: + normalized_raw_path = raw_path.strip() + if not normalized_raw_path: + raise SystemExit("related-path values must not be empty.") + ensure_single_line(normalized_raw_path, "related-path") + + normalized_url = 
_normalize_related_url(normalized_raw_path) + if normalized_url is not None: + return normalized_url + + workspace_root = current_workspace_root() + candidate = Path(normalized_raw_path).expanduser() + if not candidate.is_absolute(): + candidate = workspace_root / candidate + if not candidate.exists(): + raise SystemExit( + "related-path local paths must exist. Use update_memory.py to clear or replace stale paths." + ) + resolved = candidate.resolve() + + try: + relative_path = resolved.relative_to(workspace_root) + normalized_path = path_to_storage_string(relative_path) + relative_directory = relative_path if resolved.is_dir() else relative_path.parent + directory = path_to_storage_string(relative_directory) + return { + "path": normalized_path, + "directory": directory, + "resource_type": "local_path", + "path_format": "project_relative", + } + except ValueError: + normalized_path = str(resolved) + directory = normalized_path if resolved.is_dir() else str(resolved.parent) + + return { + "path": normalized_path, + "directory": directory, + "resource_type": "local_path", + "path_format": "absolute", + "system_hint": current_system_hint(), + } + + +def _normalize_related_url(raw_value: str) -> dict[str, str] | None: + parsed = parse.urlsplit(raw_value) + scheme = parsed.scheme.lower() + if scheme not in SUPPORTED_URL_SCHEMES or not parsed.netloc: + return None + + normalized_path = parsed.path or "" + normalized_url = parse.urlunsplit( + ( + scheme, + parsed.netloc, + normalized_path, + parsed.query, + parsed.fragment, + ) + ) + directory = _derive_url_directory( + scheme=scheme, + netloc=parsed.netloc, + path=normalized_path, + ) + return { + "path": normalized_url, + "directory": directory, + "resource_type": "url", + } + + +def _derive_url_directory(*, scheme: str, netloc: str, path: str) -> str: + normalized_path = path or "/" + if normalized_path == "/": + container_path = "/" + elif normalized_path.endswith("/"): + container_path = normalized_path.rstrip("/") 
or "/" + else: + parent = posixpath.dirname(normalized_path) + container_path = parent or "/" + return parse.urlunsplit((scheme, netloc, container_path, "", "")) + + +def infer_related_resource_type( + path_value: str, + *, + directory: str | None = None, +) -> str: + parsed = parse.urlsplit(path_value) + if parsed.scheme.lower() in SUPPORTED_URL_SCHEMES and parsed.netloc: + return "url" + parsed_directory = parse.urlsplit(directory or "") + if ( + parsed_directory.scheme.lower() in SUPPORTED_URL_SCHEMES + and parsed_directory.netloc + ): + return "url" + return "local_path" + + +def infer_local_path_format( + path_value: str, + *, + directory: str | None = None, +) -> str: + if looks_like_absolute_local_path(path_value): + return "absolute" + if looks_like_absolute_local_path(directory or ""): + return "absolute" + return "project_relative" + + +def current_workspace_root() -> Path: + return Path.cwd().resolve() + + +def path_to_storage_string(path_value: Path) -> str: + rendered = path_value.as_posix() + return rendered if rendered else "." 
+ + +def current_system_hint() -> str: + system_name = platform.system() or "UnknownOS" + machine = platform.machine() or "unknown-arch" + hostname = socket.gethostname().split(".")[0].strip() or "unknown-host" + return f"{system_name} {machine} @{hostname}" + + +def looks_like_absolute_local_path(path_value: str) -> bool: + if not path_value: + return False + if WINDOWS_ABSOLUTE_PATH_RE.match(path_value): + return True + return Path(path_value).is_absolute() + + +def make_related_path_entry( + path_value: str, + path_id: Optional[str] = None, + *, + resource_type: str | None = None, + directory: str | None = None, + path_format: str | None = None, + system_hint: str | None = None, +) -> dict[str, str]: + normalized_entry = normalize_single_related_path(path_value) + normalized_path = normalized_entry["path"] + normalized_directory = directory or normalized_entry["directory"] + normalized_type = resource_type or normalized_entry["resource_type"] + if normalized_type not in ALLOWED_RESOURCE_TYPES: + raise SystemExit("related reference resource_type must be local_path or url.") + result = { + "id": path_id or uuid4().hex, + "path": normalized_path, + "directory": normalized_directory, + "resource_type": normalized_type, + } + if normalized_type == "local_path": + normalized_path_format = ( + path_format + or normalized_entry.get("path_format") + or infer_local_path_format( + normalized_path, + directory=normalized_directory, + ) + ) + if normalized_path_format not in ALLOWED_LOCAL_PATH_FORMATS: + raise SystemExit( + "related reference path_format must be project_relative or absolute." 
+ ) + result["path_format"] = normalized_path_format + normalized_system_hint = ( + system_hint + or normalized_entry.get("system_hint") + or "" + ) + if normalized_path_format == "absolute" and normalized_system_hint: + result["system_hint"] = normalized_system_hint + return result + + +def serialize_related_paths(path_entries: list[dict[str, str]]) -> str: + return json.dumps(path_entries, ensure_ascii=False, separators=(",", ":")) + + +def deserialize_related_paths(raw_value: str) -> list[dict[str, str]]: + try: + payload = json.loads(raw_value) + except json.JSONDecodeError as exc: + raise _json_error() from exc + + if not isinstance(payload, list): + raise _json_error() + + parsed_entries: list[dict[str, str]] = [] + seen_ids: set[str] = set() + for item in payload: + if not isinstance(item, dict): + raise _json_error() + path_id = item.get("id") + path_value = item.get("path") + directory = item.get("directory") + resource_type = item.get("resource_type") + path_format = item.get("path_format") + system_hint = item.get("system_hint") + if not isinstance(path_id, str) or not path_id: + raise _json_error() + if path_id in seen_ids: + raise SystemExit("related resource IDs must be unique within one entry.") + seen_ids.add(path_id) + if not isinstance(path_value, str): + raise _json_error() + if not isinstance(directory, str): + raise _json_error() + if resource_type is None: + resource_type = infer_related_resource_type( + path_value, + directory=directory, + ) + if not isinstance(resource_type, str): + raise _json_error() + if resource_type not in ALLOWED_RESOURCE_TYPES: + raise _json_error() + parsed_entry = { + "id": path_id, + "path": path_value, + "directory": directory, + "resource_type": resource_type, + } + if resource_type == "local_path": + if path_format is None: + path_format = infer_local_path_format( + path_value, + directory=directory, + ) + if not isinstance(path_format, str): + raise _json_error() + if path_format not in 
ALLOWED_LOCAL_PATH_FORMATS: + raise _json_error() + parsed_entry["path_format"] = path_format + if system_hint is not None: + if not isinstance(system_hint, str): + raise _json_error() + if system_hint: + parsed_entry["system_hint"] = system_hint + elif system_hint is not None and not isinstance(system_hint, str): + raise _json_error() + parsed_entries.append(parsed_entry) + return parsed_entries + + +def clone_related_paths(path_entries: list[dict[str, str]]) -> list[dict[str, str]]: + return [dict(item) for item in path_entries] + + +def clear_related_path_entry(path_entries: list[dict[str, str]], path_id: str) -> None: + for item in path_entries: + if item["id"] == path_id: + item["path"] = "" + item["directory"] = "" + item.pop("system_hint", None) + item.pop("path_format", None) + return + raise SystemExit(f"Related resource ID not found: {path_id}") + + +def replace_related_path_entry( + path_entries: list[dict[str, str]], + path_id: str, + path_value: str, +) -> None: + replacement = make_related_path_entry(path_value, path_id=path_id) + for item in path_entries: + if item["id"] == path_id: + item.update(replacement) + return + raise SystemExit(f"Related resource ID not found: {path_id}") + + +def format_related_path_lines(path_entries: Optional[list[dict[str, str]]]) -> list[str]: + if path_entries is None: + return [] + lines = [] + for item in path_entries: + rendered_path = item["path"] or "" + rendered_directory = item["directory"] or "" + rendered_type = item.get("resource_type") or infer_related_resource_type( + item["path"], + directory=item["directory"], + ) + detail_parts = [rendered_type] + rendered_path_format = item.get("path_format") + if ( + not rendered_path_format + and rendered_type == "local_path" + and (item["path"] or item["directory"]) + ): + rendered_path_format = infer_local_path_format( + item["path"], + directory=item["directory"], + ) + if rendered_path_format: + detail_parts.append(rendered_path_format) + detail_text = ", 
".join(detail_parts) + extras = [] + system_hint = item.get("system_hint") + if system_hint: + extras.append(f"system: {system_hint}") + extra_text = "" + if extras: + extra_text = "; " + "; ".join(extras) + lines.append( + f"Related resource ID {item['id']} [{detail_text}]: {rendered_path} " + f"(container: {rendered_directory}{extra_text})" + ) + return lines + + +def format_entry_line( + entry_id: str, + ref_level: str, + factual: bool, + content: str, + timestamp: str, + path_entries: Optional[list[dict[str, str]]] = None, +) -> str: + fact_value = "true" if factual else "false" + paths_segment = "" + if path_entries is not None: + paths_segment = f" [PATHS:{serialize_related_paths(path_entries)}]" + return ( + f"[ID:{entry_id}] [REF:{ref_level}] [FACT:{fact_value}] {content}" + f"{paths_segment} [TIME:{timestamp}]" + ) + + +def parse_entry_line(line: str) -> Optional[dict]: + prefix_match = ENTRY_PREFIX_RE.match(line.strip()) + if not prefix_match: + return None + time_match = TIME_SUFFIX_RE.match(prefix_match.group("body")) + if not time_match: + return None + + middle = time_match.group("middle") + content = middle + path_entries: list[dict[str, str]] = [] + has_paths_metadata = False + + paths_idx = middle.rfind(PATHS_TOKEN_PREFIX) + if paths_idx != -1 and middle.endswith("]"): + has_paths_metadata = True + content = middle[:paths_idx] + raw_paths = middle[paths_idx + len(PATHS_TOKEN_PREFIX) : -1] + path_entries = deserialize_related_paths(raw_paths) + + return { + "id": prefix_match.group("id"), + "ref": prefix_match.group("ref"), + "factual": prefix_match.group("factual") == "true", + "content": content, + "timestamp": time_match.group("ts"), + "path_entries": path_entries, + "has_paths_metadata": has_paths_metadata, + } diff --git a/skills/.experimental/easy-memory/scripts/read_today_log.py b/skills/.experimental/easy-memory/scripts/read_today_log.py new file mode 100755 index 00000000..81d7ee24 --- /dev/null +++ 
b/skills/.experimental/easy-memory/scripts/read_today_log.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import sys +from datetime import date + +from memory_agent_client import ( + MemoryAgentClientError, + call_memory_agent, +) +from memory_agent_config import MemoryAgentConfigError, load_memory_agent_config +from memory_agent_failure_log import append_agent_failure_log +from memory_utils import ( + format_related_path_lines, + log_base_dir, + log_path_for_date, + normalize_task_context, + parse_entry_line, + require_initialized, +) + +EMPTY_LOG_MESSAGE = ( + "No log entries for today. Created an empty log file; " + "please continue with the remaining task steps." +) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Read today's easy-memory log." + ) + parser.add_argument( + "--task-context", + required=True, + help=( + "Required user question/problem context. Reserved for future " + "memory-agent preprocessing and ignored unless that agent is enabled." 
+ ), + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + task_context = normalize_task_context(args.task_context) + + base_dir = log_base_dir(create=True) + require_initialized(base_dir) + + log_path = log_path_for_date(date.today(), base_dir) + if not log_path.exists(): + log_path.touch() + print(EMPTY_LOG_MESSAGE) + return 0 + + content = log_path.read_text(encoding="utf-8") + if not content.strip(): + print(EMPTY_LOG_MESSAGE) + return 0 + + parsed_items = [] + for line in content.splitlines(): + entry = parse_entry_line(line) + if entry: + parsed_items.append( + { + "line": line, + "entry": entry, + } + ) + + agent_output = maybe_render_agent_output( + task_context=task_context, + base_dir=base_dir, + parsed_items=parsed_items, + ) + if agent_output is not None: + print(agent_output) + return 0 + + rendered_lines = render_raw_output(content.splitlines()) + output = "\n".join(rendered_lines) + if output: + print(output) + return 0 + + +def maybe_render_agent_output( + task_context: str, + base_dir, + parsed_items: list[dict], +) -> str | None: + if not parsed_items: + return None + + try: + config = load_memory_agent_config() + except MemoryAgentConfigError as exc: + print(f"Memory-agent fallback: {exc}", file=sys.stderr) + return None + + if not config.enabled: + return None + + request_payload = { + "schema_version": "easy_memory_agent_request_v2", + "mode": "read_today_log", + "task_context": task_context, + "cwd": str(base_dir.parent.resolve()), + "log_dir": str(base_dir.resolve()), + "entries": [ + build_request_entry(item) + for item in parsed_items + ], + } + + try: + response = call_memory_agent(config, request_payload) + return response.rendered_output + except (MemoryAgentConfigError, MemoryAgentClientError) as exc: + append_agent_failure_log( + config=config, + request_payload=request_payload, + fallback_reason="agent error fallback", + error=exc, + ) + print(f"Memory-agent fallback: {exc}", file=sys.stderr) + return None + 
except Exception as exc: + append_agent_failure_log( + config=config, + request_payload=request_payload, + fallback_reason="unexpected agent error fallback", + error=exc, + ) + print( + f"Memory-agent fallback: unexpected agent error ({exc.__class__.__name__}): {exc}", + file=sys.stderr, + ) + return None + + +def build_request_entry(item: dict) -> dict: + entry = item["entry"] + return { + "entry_id": entry["id"], + "ref_level": entry["ref"], + "factual": entry["factual"], + "content": entry["content"], + "timestamp": entry["timestamp"], + "paths": [ + build_request_path(path_item) + for path_item in entry["path_entries"] + ], + "rendered_block": render_entry_block(item["line"], entry["path_entries"]), + } + + +def render_raw_output(lines: list[str]) -> list[str]: + rendered_lines: list[str] = [] + for line in lines: + rendered_lines.append(line) + entry = parse_entry_line(line) + if entry: + for related_line in format_related_path_lines(entry["path_entries"]): + rendered_lines.append(f" {related_line}") + return rendered_lines + + +def render_entry_block(line: str, path_entries: list[dict]) -> str: + rendered_lines = [line] + for related_line in format_related_path_lines(path_entries): + rendered_lines.append(f" {related_line}") + return "\n".join(rendered_lines) + + +def build_request_path(path_item: dict) -> dict: + payload = { + "path_id": path_item["id"], + "path": path_item["path"], + "directory": path_item["directory"], + "resource_type": path_item["resource_type"], + } + if path_item.get("path_format"): + payload["path_format"] = path_item["path_format"] + if path_item.get("system_hint"): + payload["system_hint"] = path_item["system_hint"] + return payload + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/skills/.experimental/easy-memory/scripts/search_memory.py b/skills/.experimental/easy-memory/scripts/search_memory.py new file mode 100755 index 00000000..f2a56a81 --- /dev/null +++ 
b/skills/.experimental/easy-memory/scripts/search_memory.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +import sys +from datetime import date, datetime + +from memory_agent_client import ( + MemoryAgentClientError, + call_memory_agent, +) +from memory_agent_config import MemoryAgentConfigError, load_memory_agent_config +from memory_agent_failure_log import append_agent_failure_log +from memory_utils import ( + format_related_path_lines, + log_base_dir, + log_path_for_date, + list_log_files, + normalize_task_context, + parse_entry_line, + require_initialized, +) + +_REF_LEVEL_SCORES = { + "low": 1, + "medium": 2, + "high": 3, + "critical": 4, +} + +_TIME_FORMAT = "%Y-%m-%d:%H:%M" + +EMPTY_LOG_MESSAGE = ( + "No log entries for today. Created an empty log file; " + "please continue with the remaining task steps." +) +NO_MATCH_MESSAGE = "No matching entries found for the provided keywords." +IMPORTANT_REMINDER = ( + "IMPORTANT NOTICE: The foregoing search history may be used as material reference " + "for this task; however, should any subsequent work disclose new information " + "inconsistent with, superseding, or rendering any entry outdated, you are hereby " + "required, prior to writing new logs or submitting this task, to correct or update " + "the relevant entries using the appropriate tool scripts, or to delete them." +) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Search memory logs in the easy-memory directory." + ) + parser.add_argument( + "keywords", + nargs="+", + help="Keywords (English preferred; space-separated).", + ) + parser.add_argument( + "--max-results", + type=int, + default=5, + help="Maximum number of entries to return (default: 5).", + ) + parser.add_argument( + "--task-context", + required=True, + help=( + "Required user question/problem context. Reserved for future " + "memory-agent preprocessing and ignored unless that agent is enabled." 
+ ), + ) + return parser.parse_args() + + +def ref_level_score(value: str) -> int: + normalized = value.strip().lower() + if normalized in _REF_LEVEL_SCORES: + return _REF_LEVEL_SCORES[normalized] + try: + return int(normalized) + except ValueError: + return 0 + + +def parse_timestamp(value: str) -> datetime: + try: + return datetime.strptime(value, _TIME_FORMAT) + except ValueError: + return datetime.min + + +def main() -> int: + args = parse_args() + base_dir = log_base_dir(create=True) + require_initialized(base_dir) + + keywords = [k.lower() for k in args.keywords] + task_context = normalize_task_context(args.task_context) + max_results = args.max_results + if max_results <= 0: + raise SystemExit("max-results must be a positive integer.") + + log_paths = list_log_files(base_dir) + if not log_paths: + log_path_for_date(date.today(), base_dir).touch() + print(EMPTY_LOG_MESSAGE) + return 0 + + matches = [] + order = 0 + has_any_entries = False + for log_path in log_paths: + lines = log_path.read_text(encoding="utf-8").splitlines() + if lines: + has_any_entries = True + for line in lines: + entry = parse_entry_line(line) + haystack = line + if entry: + related_text = " ".join( + ( + f"{item['path']} {item['directory']} " + f"{item.get('resource_type', '')} " + f"{item.get('path_format', '')} " + f"{item.get('system_hint', '')}" + ) + for item in entry["path_entries"] + ) + haystack = f"{entry['content']} {related_text}".strip() + if any(k in haystack.lower() for k in keywords): + factual_score = 0 + ref_score = 0 + timestamp = datetime.min + if entry: + factual_score = 1 if entry["factual"] else 0 + ref_score = ref_level_score(entry["ref"]) + timestamp = parse_timestamp(entry["timestamp"]) + matches.append( + { + "log": log_path.name, + "line": line, + "entry": entry, + "path_entries": entry["path_entries"] if entry else [], + "factual": factual_score, + "ref": ref_score, + "timestamp": timestamp, + "order": order, + } + ) + order += 1 + + if not has_any_entries: + 
log_path = log_path_for_date(date.today(), base_dir) + if not log_path.exists(): + log_path.touch() + print(EMPTY_LOG_MESSAGE) + return 0 + + if not matches: + print(NO_MATCH_MESSAGE) + return 0 + + matches.sort( + key=lambda item: (item["factual"], item["ref"], item["timestamp"]), + reverse=True, + ) + selected_matches = matches[:max_results] + + agent_output = maybe_render_agent_output( + task_context=task_context, + base_dir=base_dir, + keywords=args.keywords, + max_results=max_results, + selected_matches=selected_matches, + ) + if agent_output is not None: + print(agent_output) + return 0 + + results: list[str] = [] + for item in selected_matches: + results.append(f"{item['log']}: {item['line']}") + for related_line in format_related_path_lines(item["path_entries"]): + results.append(f" {related_line}") + results.append(IMPORTANT_REMINDER) + print("\n".join(results)) + return 0 + + +def maybe_render_agent_output( + task_context: str, + base_dir, + keywords: list[str], + max_results: int, + selected_matches: list[dict], +) -> str | None: + if not selected_matches: + return None + if any(item["entry"] is None for item in selected_matches): + return None + + try: + config = load_memory_agent_config() + except MemoryAgentConfigError as exc: + print(f"Memory-agent fallback: {exc}", file=sys.stderr) + return None + + if not config.enabled: + return None + + request_payload = { + "schema_version": "easy_memory_agent_request_v2", + "mode": "search_memory", + "task_context": task_context, + "cwd": str(base_dir.parent.resolve()), + "log_dir": str(base_dir.resolve()), + "keywords": keywords, + "max_results": max_results, + "entries": [ + build_request_entry(item) + for item in selected_matches + ], + } + + try: + response = call_memory_agent(config, request_payload) + return response.rendered_output + except (MemoryAgentConfigError, MemoryAgentClientError) as exc: + append_agent_failure_log( + config=config, + request_payload=request_payload, + fallback_reason="agent 
error fallback", + error=exc, + ) + print(f"Memory-agent fallback: {exc}", file=sys.stderr) + return None + except Exception as exc: + append_agent_failure_log( + config=config, + request_payload=request_payload, + fallback_reason="unexpected agent error fallback", + error=exc, + ) + print( + f"Memory-agent fallback: unexpected agent error ({exc.__class__.__name__}): {exc}", + file=sys.stderr, + ) + return None + + +def build_request_entry(item: dict) -> dict: + return { + "entry_id": item["entry"]["id"], + "log_file": item["log"], + "ref_level": item["entry"]["ref"], + "factual": item["entry"]["factual"], + "content": item["entry"]["content"], + "timestamp": item["entry"]["timestamp"], + "paths": [ + build_request_path(path_item) + for path_item in item["path_entries"] + ], + "rendered_block": render_entry_block( + log_file=item["log"], + line=item["line"], + path_entries=item["path_entries"], + ), + } + + +def render_entry_block( + *, + log_file: str, + line: str, + path_entries: list[dict], +) -> str: + rendered_lines = [f"{log_file}: {line}"] + for related_line in format_related_path_lines(path_entries): + rendered_lines.append(f" {related_line}") + return "\n".join(rendered_lines) + + +def build_request_path(path_item: dict) -> dict: + payload = { + "path_id": path_item["id"], + "path": path_item["path"], + "directory": path_item["directory"], + "resource_type": path_item["resource_type"], + } + if path_item.get("path_format"): + payload["path_format"] = path_item["path_format"] + if path_item.get("system_hint"): + payload["system_hint"] = path_item["system_hint"] + return payload + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/skills/.experimental/easy-memory/scripts/smoke_test_memory_agent.py b/skills/.experimental/easy-memory/scripts/smoke_test_memory_agent.py new file mode 100644 index 00000000..fce7cf15 --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/smoke_test_memory_agent.py @@ -0,0 +1,301 @@ +#!/usr/bin/env python3 
+from __future__ import annotations + +import argparse +import json +import subprocess +import sys +import time +from pathlib import Path + +from memory_agent_client import SUMMARY_PREFIX +from memory_agent_config import ( + default_local_config_file, + installed_skill_dir, + load_memory_agent_config, +) +from memory_agent_failure_log import agent_failure_log_path +from memory_utils import log_base_dir, require_initialized + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description=( + "Run a small end-to-end smoke test for the easy-memory " + "memory-agent integration in the current project." + ) + ) + parser.add_argument( + "--task-context", + default=( + "Smoke test for the current easy-memory memory-agent " + "configuration in this project." + ), + help="Task context passed to both read_today_log.py and search_memory.py.", + ) + parser.add_argument( + "--search-keyword", + action="append", + dest="search_keywords", + default=[], + help=( + "Keyword passed to search_memory.py. Repeat to provide multiple " + "keywords. Defaults to easy-memory, memory-agent, and codex." + ), + ) + parser.add_argument( + "--max-results", + type=int, + default=3, + help="Maximum results passed to search_memory.py (default: 3).", + ) + parser.add_argument( + "--command-timeout", + type=float, + default=90.0, + help="Per-command timeout in seconds (default: 90).", + ) + parser.add_argument( + "--strict-no-new-failures", + action="store_true", + help=( + "Fail the smoke test if the shared installation-directory " + "agent failure log grows during this test run." + ), + ) + parser.add_argument( + "--json-output-file", + help=( + "Optional path to write the final smoke-test JSON report. " + "Relative paths are resolved from the current working directory." + ), + ) + parser.add_argument( + "--quiet", + action="store_true", + help=( + "Suppress successful stdout output. Errors still surface normally. 
" + "Use with --json-output-file when another tool will read the report." + ), + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + if args.max_results <= 0: + raise SystemExit("--max-results must be a positive integer.") + if args.command_timeout <= 0: + raise SystemExit("--command-timeout must be a positive number.") + + base_dir = log_base_dir(create=True) + require_initialized(base_dir) + output_file = resolve_optional_output_file(args.json_output_file) + + config_file = default_local_config_file() + if not config_file.exists(): + raise SystemExit( + f"Project-local memory-agent config file not found: {config_file}" + ) + + keywords = args.search_keywords or [ + "easy-memory", + "memory-agent", + "codex", + ] + config = load_memory_agent_config() + failure_log = agent_failure_log_path(installed_skill_dir()) + failure_log_before = count_log_lines(failure_log) + + tests = [ + run_cli_test( + script_name="search_memory.py", + mode="search_memory", + script_args=[ + *keywords, + "--max-results", + str(args.max_results), + "--task-context", + args.task_context, + ], + timeout_seconds=args.command_timeout, + ), + run_cli_test( + script_name="read_today_log.py", + mode="read_today_log", + script_args=[ + "--task-context", + args.task_context, + ], + timeout_seconds=args.command_timeout, + ), + ] + failure_log_after = count_log_lines(failure_log) + + if args.strict_no_new_failures and failure_log_after > failure_log_before: + last_record_preview = read_last_log_line(failure_log) + raise SystemExit( + "Smoke test detected new agent failure log entries during the test run.\n" + f"failure_log: {failure_log}\n" + f"before: {failure_log_before}\n" + f"after: {failure_log_after}\n" + f"last_record: {last_record_preview}" + ) + + report = { + "status": "ok", + "cwd": str(Path.cwd()), + "config_file": str(config_file), + "json_output_file": str(output_file) if output_file else None, + "agent_config": { + "enabled": config.enabled, + "api_style": 
config.api_style, + "model": config.model, + "base_url": config.base_url, + "codex_binary": config.codex_binary, + "codex_profile": config.codex_profile, + "codex_service_tier": config.codex_service_tier, + "codex_reasoning_effort": config.codex_reasoning_effort, + "disable_thinking": config.disable_thinking, + "timeout_seconds": config.timeout_seconds, + }, + "failure_log": { + "path": str(failure_log), + "before_lines": failure_log_before, + "after_lines": failure_log_after, + "strict_no_new_failures": args.strict_no_new_failures, + }, + "tests": tests, + } + rendered_report = json.dumps(report, ensure_ascii=False, indent=2) + if output_file is not None: + write_report_file(output_file, rendered_report) + if not args.quiet: + print(rendered_report) + return 0 + + +def run_cli_test( + *, + script_name: str, + mode: str, + script_args: list[str], + timeout_seconds: float, +) -> dict[str, object]: + script_path = Path(__file__).resolve().parent / script_name + started_at = time.monotonic() + try: + completed = subprocess.run( + [sys.executable, str(script_path), *script_args], + cwd=Path.cwd(), + capture_output=True, + text=True, + timeout=timeout_seconds, + check=False, + ) + except subprocess.TimeoutExpired as exc: + raise SystemExit( + f"{script_name} timed out after {timeout_seconds:.1f}s." 
+ ) from exc + elapsed_seconds = round(time.monotonic() - started_at, 2) + + if completed.returncode != 0: + stderr_text = completed.stderr.strip() + stdout_text = completed.stdout.strip() + raise SystemExit( + f"{script_name} exited with code {completed.returncode}.\n" + f"stdout:\n{stdout_text}\n" + f"stderr:\n{stderr_text}" + ) + + stderr_text = completed.stderr.strip() + if "Memory-agent fallback:" in stderr_text: + raise SystemExit( + f"{script_name} fell back to raw output.\n" + f"stdout:\n{completed.stdout.strip()}\n" + f"stderr:\n{stderr_text}" + ) + + stdout_text = completed.stdout.strip() + if not stdout_text: + raise SystemExit( + f"{script_name} returned empty stdout." + ) + summary_line = extract_summary_line( + stdout_text=stdout_text, + script_name=script_name, + ) + + return { + "script": script_name, + "mode": mode, + "elapsed_seconds": elapsed_seconds, + "status": "ok", + "summary": summary_line.removeprefix(SUMMARY_PREFIX).strip(), + } + + +def extract_summary_line( + *, + stdout_text: str, + script_name: str, +) -> str: + nonempty_lines = [ + line.strip() + for line in stdout_text.splitlines() + if line.strip() + ] + if not nonempty_lines: + raise SystemExit( + f"{script_name} did not return any non-empty output.\n" + f"stdout:\n{stdout_text}\n" + ) + summary_line = nonempty_lines[-1] + if not summary_line.startswith(SUMMARY_PREFIX): + raise SystemExit( + f"{script_name} did not end with the expected summary line.\n" + f"stdout:\n{stdout_text}\n" + ) + return summary_line + + +def count_log_lines(log_path: Path) -> int: + if not log_path.exists(): + return 0 + with log_path.open("r", encoding="utf-8") as handle: + return sum(1 for _ in handle) + + +def read_last_log_line(log_path: Path) -> str: + if not log_path.exists(): + return "" + last_line = "" + with log_path.open("r", encoding="utf-8") as handle: + for line in handle: + stripped = line.strip() + if stripped: + last_line = stripped + return last_line + + +def 
resolve_optional_output_file(raw_value: str | None) -> Path | None: + if raw_value is None: + return None + candidate = Path(raw_value).expanduser() + if not candidate.is_absolute(): + candidate = (Path.cwd() / candidate).resolve() + return candidate + + +def write_report_file(output_file: Path, rendered_report: str) -> None: + try: + output_file.parent.mkdir(parents=True, exist_ok=True) + output_file.write_text(f"{rendered_report}\n", encoding="utf-8") + except OSError as exc: + raise SystemExit( + f"Failed to write smoke-test report file: {output_file}" + ) from exc + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/skills/.experimental/easy-memory/scripts/update_memory.py b/skills/.experimental/easy-memory/scripts/update_memory.py new file mode 100755 index 00000000..53bea22b --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/update_memory.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +from datetime import datetime + +from memory_utils import ( + clear_related_path_entry, + clone_related_paths, + ensure_single_line, + format_related_path_lines, + format_entry_line, + format_timestamp, + list_log_files, + log_base_dir, + normalize_related_paths, + normalize_bool, + parse_entry_line, + replace_related_path_entry, + require_initialized, + validate_ref_level, +) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Update a memory entry by ID across all logs." 
+ ) + parser.add_argument("--id", required=True, help="Entry ID to update.") + parser.add_argument( + "--content", + help="New content (English preferred; UTF-8 accepted).", + ) + parser.add_argument( + "--factual", + help="Whether the entry is factual: true or false.", + ) + parser.add_argument( + "--ref-level", + help="Reference level (e.g., low, medium, high, critical).", + ) + parser.add_argument( + "--related-path", + action="append", + default=None, + help=( + "Replace the stored related-path list with project-local paths, " + "external absolute local paths, or URL/document addresses. " + "Repeat to store multiple references." + ), + ) + parser.add_argument( + "--clear-related-paths", + action="store_true", + help="Replace all stored related reference metadata with an empty list.", + ) + parser.add_argument( + "--path-update", + action="append", + default=None, + help=( + "Replace one stored reference in the form " + "PATH_ID=. The existing " + "path ID is preserved." + ), + ) + parser.add_argument( + "--path-clear", + action="append", + default=None, + help=( + "Clear one stored related reference by PATH_ID while preserving the path ID " + "for future updates." + ), + ) + return parser.parse_args() + + +def parse_path_update(raw_value: str) -> tuple[str, str]: + path_id, separator, path_value = raw_value.partition("=") + if not separator or not path_id or not path_value: + raise SystemExit( + "path-update must use the form PATH_ID=." + ) + return path_id, path_value + + +def main() -> int: + args = parse_args() + base_dir = log_base_dir(create=True) + require_initialized(base_dir) + + if args.related_path is not None and args.clear_related_paths: + raise SystemExit("Use either --related-path or --clear-related-paths, not both.") + if args.related_path is not None and (args.path_update or args.path_clear): + raise SystemExit( + "Use --related-path for whole-list replacement, or --path-update/--path-clear " + "for per-reference changes, but not both together." 
+ ) + + has_updates = any( + [ + args.content is not None, + args.factual is not None, + args.ref_level is not None, + args.related_path is not None, + args.clear_related_paths, + bool(args.path_update), + bool(args.path_clear), + ] + ) + if not has_updates: + raise SystemExit("Provide at least one field to update.") + + matches: list[tuple] = [] + for log_path in list_log_files(base_dir): + text = log_path.read_text(encoding="utf-8") + lines = text.splitlines() + for idx, line in enumerate(lines): + entry = parse_entry_line(line) + if entry and entry["id"] == args.id: + matches.append((log_path, lines, idx, entry)) + + if not matches: + raise SystemExit("Entry ID not found.") + if len(matches) > 1: + raise SystemExit("Entry ID appears multiple times. Refine the logs manually.") + + log_path, lines, idx, entry = matches[0] + + content = entry["content"] + if args.content is not None: + content = args.content.strip() + if not content: + raise SystemExit("content must not be empty.") + ensure_single_line(content, "content") + + factual = entry["factual"] + if args.factual is not None: + factual = normalize_bool(args.factual) + + ref_level = entry["ref"] + if args.ref_level is not None: + ref_level = validate_ref_level(args.ref_level) + + has_paths_metadata = entry["has_paths_metadata"] + path_entries = clone_related_paths(entry["path_entries"]) + if args.related_path is not None: + path_entries = normalize_related_paths(args.related_path) + has_paths_metadata = True + elif args.clear_related_paths: + path_entries = [] + has_paths_metadata = True + elif args.path_update or args.path_clear: + if not entry["has_paths_metadata"]: + raise SystemExit("Entry does not contain related reference metadata.") + update_pairs = [parse_path_update(item) for item in (args.path_update or [])] + update_ids = {path_id for path_id, _ in update_pairs} + clear_ids = set(args.path_clear or []) + overlap = update_ids & clear_ids + if overlap: + overlap_text = ", ".join(sorted(overlap)) + 
raise SystemExit( + f"path-update and path-clear cannot target the same related resource IDs: {overlap_text}" + ) + for path_id, new_path in update_pairs: + replace_related_path_entry(path_entries, path_id, new_path) + for path_id in args.path_clear or []: + clear_related_path_entry(path_entries, path_id) + + timestamp = format_timestamp(datetime.now()) + lines[idx] = format_entry_line( + args.id, + ref_level, + factual, + content, + timestamp, + path_entries=path_entries if has_paths_metadata else None, + ) + + output = "\n".join(lines) + "\n" + log_path.write_text(output, encoding="utf-8") + + print(f"Updated entry ID: {args.id}") + for line in format_related_path_lines(path_entries if has_paths_metadata else None): + print(line) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/skills/.experimental/easy-memory/scripts/write_memory.py b/skills/.experimental/easy-memory/scripts/write_memory.py new file mode 100755 index 00000000..4c08f0ca --- /dev/null +++ b/skills/.experimental/easy-memory/scripts/write_memory.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +from __future__ import annotations + +import argparse +from datetime import date, datetime +from uuid import uuid4 + +from memory_utils import ( + ensure_single_line, + format_related_path_lines, + format_entry_line, + format_timestamp, + log_base_dir, + log_path_for_date, + normalize_related_paths, + normalize_bool, + require_initialized, + validate_ref_level, +) + + +def parse_args() -> argparse.Namespace: + parser = argparse.ArgumentParser( + description="Append a memory entry to today's log." 
+ ) + parser.add_argument( + "--content", + required=True, + help="Log content (English preferred; UTF-8 accepted).", + ) + parser.add_argument( + "--factual", + required=True, + help="Whether the entry is factual: true or false.", + ) + parser.add_argument( + "--ref-level", + required=True, + help="Reference level (e.g., low, medium, high, critical).", + ) + parser.add_argument( + "--related-path", + action="append", + default=None, + help=( + "Project-local path, external absolute local path, or " + "URL/document address for the current or highly related file, " + "directory, page, or document. Repeat this option to store " + "multiple references." + ), + ) + return parser.parse_args() + + +def main() -> int: + args = parse_args() + base_dir = log_base_dir(create=True) + require_initialized(base_dir) + + content = args.content.strip() + if not content: + raise SystemExit("content must not be empty.") + + ensure_single_line(content, "content") + + factual = normalize_bool(args.factual) + ref_level = validate_ref_level(args.ref_level) + path_entries = None + if args.related_path is not None: + path_entries = normalize_related_paths(args.related_path) + + entry_id = uuid4().hex + timestamp = format_timestamp(datetime.now()) + + entry_line = format_entry_line( + entry_id, + ref_level, + factual, + content, + timestamp, + path_entries=path_entries, + ) + + log_path = log_path_for_date(date.today(), base_dir) + with log_path.open("a", encoding="utf-8") as handle: + handle.write(entry_line) + handle.write("\n") + + print(f"Appended entry ID: {entry_id}") + for line in format_related_path_lines(path_entries): + print(line) + return 0 + + +if __name__ == "__main__": + raise SystemExit(main())