Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 47 additions & 2 deletions crates/icm-store/src/schema.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use rusqlite::Connection;

use icm_core::IcmError;
use icm_core::{IcmError, IcmResult};

use crate::store::db_err;

Expand Down Expand Up @@ -131,7 +131,7 @@ pub fn init_db_with_dims(conn: &Connection, embedding_dims: usize) -> Result<(),
VALUES('delete', old.rowid, old.id, old.topic, old.summary, old.keywords);
END;

CREATE TRIGGER memories_au AFTER UPDATE ON memories BEGIN
CREATE TRIGGER memories_au AFTER UPDATE OF topic, summary, keywords ON memories BEGIN
INSERT INTO memories_fts(memories_fts, rowid, id, topic, summary, keywords)
VALUES('delete', old.rowid, old.id, old.topic, old.summary, old.keywords);
INSERT INTO memories_fts(rowid, id, topic, summary, keywords)
Expand Down Expand Up @@ -259,6 +259,11 @@ pub fn init_db_with_dims(conn: &Connection, embedding_dims: usize) -> Result<(),
.map_err(db_err)?;
}

// Migration: scope FTS UPDATE trigger to indexed columns only (fixes #44).
// The old trigger fired on ANY update (including update_access, apply_decay)
// which churned the FTS index and could create ghost entries.
migrate_fts_update_trigger(conn)?;

// sqlite-vec virtual table for vector search (dimension-aware)
let vec_exists: bool = conn
.query_row(
Expand Down Expand Up @@ -293,6 +298,46 @@ pub fn init_db_with_dims(conn: &Connection, embedding_dims: usize) -> Result<(),
Ok(())
}

/// Migrate existing DBs: replace the broad `memories_au` trigger with one
/// scoped to `UPDATE OF topic, summary, keywords` so that `update_access` /
/// `apply_decay` no longer churn the FTS index. Also rebuilds the FTS index
/// to purge any ghost entries accumulated before this fix.
fn migrate_fts_update_trigger(conn: &Connection) -> IcmResult<()> {
// Check if the trigger already has the scoped form by inspecting its SQL.
let trigger_sql: Option<String> = conn
.query_row(
"SELECT sql FROM sqlite_master WHERE type='trigger' AND name='memories_au'",
[],
|row| row.get(0),
)
.ok();

let needs_migration = match &trigger_sql {
// Trigger exists but uses the old broad form (no OF clause).
Some(sql) => !sql.contains("UPDATE OF"),
// Trigger doesn't exist — FTS table was just created with the new form.
None => false,
};

if needs_migration {
conn.execute_batch(
"
DROP TRIGGER IF EXISTS memories_au;
CREATE TRIGGER memories_au AFTER UPDATE OF topic, summary, keywords ON memories BEGIN
INSERT INTO memories_fts(memories_fts, rowid, id, topic, summary, keywords)
VALUES('delete', old.rowid, old.id, old.topic, old.summary, old.keywords);
INSERT INTO memories_fts(rowid, id, topic, summary, keywords)
VALUES (new.rowid, new.id, new.topic, new.summary, new.keywords);
END;
INSERT INTO memories_fts(memories_fts) VALUES('rebuild');
",
)
.map_err(db_err)?;
}

Ok(())
}

#[cfg(test)]
mod tests {
use super::*;
Expand Down
70 changes: 70 additions & 0 deletions crates/icm-store/src/store.rs
Original file line number Diff line number Diff line change
Expand Up @@ -733,6 +733,17 @@ impl MemoryStore for SqliteStore {
return Err(e);
}

// Rebuild FTS index to eliminate any ghost entries from the external
// content table. This guarantees search results stay consistent after
// bulk deletes (fixes #44).
if let Err(e) = self
.conn
.execute_batch("INSERT INTO memories_fts(memories_fts) VALUES('rebuild');")
{
let _ = self.conn.execute_batch("ROLLBACK;");
return Err(IcmError::Database(e.to_string()));
}

self.conn.execute_batch("COMMIT;").map_err(db_err)?;
Ok(())
}
Expand Down Expand Up @@ -2157,6 +2168,65 @@ mod tests {
assert_eq!(store.get_by_topic("topic-b").unwrap().len(), 1);
}

/// Reproduces issue #44: after consolidation, recall should only return the
/// consolidated memory — not stale fragments from the originals.
#[test]
fn test_consolidate_no_stale_fts_results() {
    let store = test_store();

    // Seed three related memories under a single topic.
    let fragments = [
        "fix: null pointer in parser",
        "fix: timeout in HTTP client",
        "fix: race condition in cache",
    ];
    for &summary in fragments.iter() {
        store.store(make_memory("errors-resolved", summary)).unwrap();
    }

    // Sanity check: all three fragments are searchable pre-consolidation.
    assert_eq!(store.search_fts("fix", 10).unwrap().len(), 3);

    // Collapse the topic into one consolidated memory.
    let merged = make_memory(
        "errors-resolved",
        "All errors resolved: parser, HTTP, cache",
    );
    store.consolidate_topic("errors-resolved", merged).unwrap();

    // Recall must no longer surface the stale fragments.
    let post = store.search_fts("fix", 10).unwrap();
    assert!(
        post.len() <= 1,
        "expected at most 1 result after consolidation, got {}",
        post.len()
    );

    // The consolidated memory itself remains findable…
    let hits = store.search_fts("errors resolved parser", 10).unwrap();
    assert_eq!(hits.len(), 1);
    assert!(hits[0].summary.contains("All errors resolved"));

    // …and it is now the topic's only memory.
    assert_eq!(store.get_by_topic("errors-resolved").unwrap().len(), 1);
}

// === MemoirStore tests ===

#[test]
Expand Down
Loading