"""
agentmem.core — The heart of agent memory.
One SQLite file. Dual search (FTS5 keywords + sqlite-vec semantics).
Hybrid ranking. Tiered storage. ~12MB total install.
This is what I wish I had when I wake up in a new session with no context.
"""
from __future__ import annotations
import contextlib
import re
import sqlite3
import struct
import time
import json
import hashlib
import math
from pathlib import Path
from typing import Any, Callable, TypedDict
# ---------------------------------------------------------------------------
# Exception hierarchy — actionable errors for users
# ---------------------------------------------------------------------------
class AgentMemError(Exception):
"""Base exception for all agentmem errors."""
pass
class MemoryNotFoundError(AgentMemError):
"""Raised when an operation targets a memory ID that does not exist."""
pass
class InvalidTierError(AgentMemError):
"""Raised when an invalid tier name is used."""
pass
class EmbeddingError(AgentMemError):
"""Raised when embedding computation fails."""
pass
# ---------------------------------------------------------------------------
# Typed dicts for public return types
# ---------------------------------------------------------------------------
class RememberResult(TypedDict):
"""Return type of MemoryStore.remember()."""
id: int
tier: str
embedded: bool
deduplicated: bool
class BatchResult(TypedDict):
"""Return type of MemoryStore.remember_batch()."""
imported: int
deduplicated: int
embedded: int
class RecallResult(TypedDict, total=False):
"""Single item in the list returned by MemoryStore.recall()."""
id: int
content: str
tier: str
source: str
score: float
method: str
importance: float
class SaveStateResult(TypedDict):
"""Return type of MemoryStore.save_state()."""
saved: bool
id: int
class TodayResult(TypedDict):
"""Single item in the list returned by MemoryStore.today()."""
id: int
content: str
tier: str
source: str
created_at: float
class ForgetResult(TypedDict, total=False):
"""Return type of MemoryStore.forget()."""
forgotten: bool
id: int
reason: str
class UnarchiveResult(TypedDict, total=False):
"""Return type of MemoryStore.unarchive()."""
unarchived: bool
id: int
reason: str
class StatsResult(TypedDict):
"""Return type of MemoryStore.stats()."""
total_memories: int
by_tier: dict[str, int]
archived: int
db_size_bytes: int
db_size_human: str
has_vectors: bool
vec_mode: str
embedding_dim: int
quantize: bool
bytes_per_vector: int
latest_memory: float | None
avg_importance: float
class CompactResult(TypedDict):
"""Return type of MemoryStore.compact()."""
archived: int
dry_run: bool
class ConsolidateResult(TypedDict, total=False):
"""Return type of MemoryStore.consolidate()."""
groups: int
archived: int
dry_run: bool
details: list[dict[str, Any]]
error: str
class UpdateResult(TypedDict):
"""Return type of MemoryStore.update_memory()."""
id: int
supersedes: int
class HistoryItem(TypedDict):
"""Single item in the list returned by MemoryStore.history()."""
id: int
content: str
created_at: float
archived: bool
class RelatedResult(TypedDict):
"""Single item in the list returned by MemoryStore.related()."""
id: int
content: str
tier: str
source: str
entity_name: str
entity_type: str
class EntityResult(TypedDict):
"""Single item in the list returned by MemoryStore.entities()."""
name: str
type: str
memory_count: int
class ImportResult(TypedDict, total=False):
"""Return type of MemoryStore.import_markdown()."""
file: str
chunks: int
imported: int
deduplicated: int
error: str
class ProcessConversationResult(TypedDict):
"""Return type of MemoryStore.process_conversation()."""
extracted: int
by_type: dict[str, int]
memories: list[int]
# Tiers: how important/permanent is this memory?
TIERS = ("core", "learned", "episodic", "working", "procedural")
# Working memories auto-expire after this many seconds
WORKING_TTL = 86400 # 24 hours
# Schema version — increment when adding migrations
SCHEMA_VERSION = 3
# Compact tier encoding: TEXT → INTEGER for storage (saves 5-6 bytes per row)
_TIER_TO_INT = {"core": 0, "learned": 1, "episodic": 2, "working": 3, "procedural": 4}
_INT_TO_TIER = {v: k for k, v in _TIER_TO_INT.items()}
def _escape_like(s: str) -> str:
"""Escape LIKE wildcard characters (% and _) so they match literally."""
return s.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
def _serialize_f32(vec: list[float]) -> bytes:
"""Pack float list into binary format (float32 little-endian)."""
return struct.pack(f"{len(vec)}f", *vec)
def _deserialize_f32(blob: bytes, dim: int) -> list[float]:
"""Unpack binary blob to float list."""
return list(struct.unpack(f"{dim}f", blob))
def _quantize_f32_to_i8(vec: list[float]) -> bytes:
"""
Quantize float32 vector to int8. 4x storage reduction.
Encoding layout (8 bytes header + dim bytes body):
[0:4] scale (float32) — range of original values (vmax - vmin)
[4:8] vmin (float32) — minimum value of original vector
[8:] quantized int8 values, one per dimension
Reconstruction: v_orig = (q + 128) / 255 * scale + vmin
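Illustrative round-trip (a sketch; small errors are expected from int8 rounding):
>>> blob = _quantize_f32_to_i8([0.1, -0.2, 0.3, 0.0])
>>> approx = _dequantize_i8_to_f32(blob, 4)
>>> all(abs(a - b) < 0.01 for a, b in zip([0.1, -0.2, 0.3, 0.0], approx))
True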
"""
vmin = min(vec)
vmax = max(vec)
scale = (vmax - vmin) if vmax != vmin else 1.0
# Map each float to [-128, 127]
quantized = [
max(-128, min(127, int((v - vmin) / scale * 255 - 128)))
for v in vec
]
header = struct.pack('ff', scale, vmin)
body = struct.pack(f'{len(vec)}b', *quantized)
return header + body
def _dequantize_i8_to_f32(blob: bytes, dim: int) -> list[float]:
"""Dequantize int8 blob back to float32 list."""
scale, vmin = struct.unpack_from('ff', blob, 0)
quantized = struct.unpack_from(f'{dim}b', blob, 8)
return [(q + 128) / 255.0 * scale + vmin for q in quantized]
class _VecIndex:
"""
Pure Python brute-force vector index backed by a regular SQLite table.
Used as a fallback when sqlite-vec C extension is not available.
Stores vectors as BLOB (float32 little-endian, stdlib struct).
Performance on modern hardware (Python 3.12):
- Insert: ~0.1ms per vector
- Search 1000 vectors, dim=256: ~3-8ms
- Search 1000 vectors, dim=128: ~2-4ms
Optimizations:
- Single struct.unpack_from call per stored vector (not per dimension)
- Precomputed query norm
- Cache of norms for stored vectors (lazy, in-memory dict)
- Single ascending sort over the scored rows, then a slice to keep the top-k
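Minimal usage sketch (in-memory DB for illustration; distance 0.0 means identical direction):
>>> import sqlite3
>>> idx = _VecIndex(sqlite3.connect(":memory:"), dim=4)
>>> idx.insert(1, [1.0, 0.0, 0.0, 0.0])
>>> idx.search([1.0, 0.0, 0.0, 0.0], k=1)
[(1, 0.0)]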
"""
def __init__(self, db: sqlite3.Connection, dim: int, quantize: bool = False):
self.db = db
self.dim = dim
self.quantize = quantize
self._fmt = f"{dim}f"
self._i8_fmt = f"{dim}b"
self._norm_cache: dict[int, float] = {} # rowid -> precomputed norm
# Create pure-Python vector table in the same SQLite file
db.execute("""
CREATE TABLE IF NOT EXISTS memories_vec_pure (
rowid INTEGER PRIMARY KEY,
embedding BLOB NOT NULL
)
""")
db.commit()
# Pre-warm norm cache from existing rows
self._warm_cache()
def _warm_cache(self):
"""Load norms for all existing vectors into memory."""
rows = self.db.execute(
"SELECT rowid, embedding FROM memories_vec_pure"
).fetchall()
dim = self.dim
if self.quantize:
for rowid, blob in rows:
vec = _dequantize_i8_to_f32(blob, dim)
norm = math.sqrt(sum(v * v for v in vec))
self._norm_cache[rowid] = norm
else:
fmt = self._fmt
for rowid, blob in rows:
vec = struct.unpack(fmt, blob)
norm = math.sqrt(sum(v * v for v in vec))
self._norm_cache[rowid] = norm
def insert(self, rowid: int, vec: list[float]):
"""Serialize and store a vector; update norm cache."""
if self.quantize:
blob = _quantize_f32_to_i8(vec)
else:
blob = struct.pack(self._fmt, *vec)
self.db.execute(
"INSERT OR REPLACE INTO memories_vec_pure(rowid, embedding) VALUES (?, ?)",
(rowid, blob),
)
# Precompute and cache norm (always from original float values)
norm = math.sqrt(sum(v * v for v in vec))
self._norm_cache[rowid] = norm
def delete(self, rowid: int):
"""Remove a vector."""
self.db.execute(
"DELETE FROM memories_vec_pure WHERE rowid = ?", (rowid,)
)
self._norm_cache.pop(rowid, None)
def search(self, query_vec: list[float], k: int) -> list[tuple]:
"""
Brute-force cosine similarity search.
Returns list of (rowid, distance) where distance is cosine distance
(0=identical, 2=opposite), sorted ascending — same convention as sqlite-vec.
When quantize=True, stored vectors are dequantized on-the-fly for distance
computation. The cached norms are always float32 quality (computed from
original values at insert time), so accuracy loss is limited to the
dequantized dot product only.
"""
# Precompute query norm once
query_norm = math.sqrt(sum(v * v for v in query_vec))
if query_norm == 0.0:
return []
dim = self.dim
norm_cache = self._norm_cache
quantize = self.quantize
# Fetch all stored vectors
rows = self.db.execute(
"SELECT rowid, embedding FROM memories_vec_pure"
).fetchall()
if not rows:
return []
scores: list[tuple[float, int]] = [] # (distance, rowid)
if quantize:
for rowid, blob in rows:
# Dequantize int8 → float32 for dot product
stored_vec = _dequantize_i8_to_f32(blob, dim)
dot = sum(a * b for a, b in zip(query_vec, stored_vec))
stored_norm = norm_cache.get(rowid)
if stored_norm is None:
stored_norm = math.sqrt(sum(v * v for v in stored_vec))
norm_cache[rowid] = stored_norm
if stored_norm == 0.0:
cosine_sim = 0.0
else:
cosine_sim = dot / (query_norm * stored_norm)
distance = 1.0 - cosine_sim
scores.append((distance, rowid))
else:
fmt = self._fmt
for rowid, blob in rows:
# Unpack entire blob in one call (fastest stdlib approach)
stored_vec = struct.unpack(fmt, blob)
# Dot product via built-in sum + zip
dot = sum(a * b for a, b in zip(query_vec, stored_vec))
# Use cached norm if available
stored_norm = norm_cache.get(rowid)
if stored_norm is None:
stored_norm = math.sqrt(sum(v * v for v in stored_vec))
norm_cache[rowid] = stored_norm
if stored_norm == 0.0:
cosine_sim = 0.0
else:
cosine_sim = dot / (query_norm * stored_norm)
# Cosine distance (same convention as sqlite-vec: 0=identical, 2=opposite)
distance = 1.0 - cosine_sim
scores.append((distance, rowid))
# Sort ascending by distance and keep the k smallest
scores.sort(key=lambda x: x[0])
top_k = scores[:k]
return [(rowid, distance) for distance, rowid in top_k]
class _LSHIndex:
"""
Locality-Sensitive Hashing for approximate nearest neighbor pre-filtering.
Uses SimHash (random hyperplane) signatures split into bands.
Zero dependencies — uses only stdlib (random, hash builtins).
At threshold=0.85 with default params (128 bits, 16 bands, 8 rows):
- True positive rate: ~100% (virtually never misses real duplicates)
- Candidate reduction: ~45-85% fewer pairs depending on data diversity
This turns consolidate() from O(n^2) full cosine comparisons into
O(n * avg_candidates) where avg_candidates << n for dissimilar memories.
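Usage sketch (vec_a and vec_b are placeholder vectors of length dim):
lsh = _LSHIndex(dim=256)
lsh.add(1, vec_a)
lsh.add(2, vec_b)
near = lsh.candidates(1, vec_a)  # ids sharing at least one band with vec_a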
"""
def __init__(self, dim: int, num_bits: int = 128, bands: int = 16):
# Generate random hyperplanes (deterministic seed for reproducibility)
import random as _random
rng = _random.Random(42)
self._planes = [
[rng.gauss(0, 1) for _ in range(dim)]
for _ in range(num_bits)
]
self._bands = bands
self._rows = num_bits // bands
self._buckets: dict[tuple, set] = {} # (band_idx, hash) -> set of ids
def _signature(self, vec: list[float]) -> list[int]:
"""Compute binary SimHash signature."""
return [
1 if sum(v * p for v, p in zip(vec, plane)) >= 0 else 0
for plane in self._planes
]
def _band_hashes(self, sig: list[int]) -> list[int]:
"""Split signature into bands and hash each."""
hashes = []
for b in range(self._bands):
start = b * self._rows
band_bits = tuple(sig[start:start + self._rows])
hashes.append(hash(band_bits))
return hashes
def add(self, item_id: int, vec: list[float]):
"""Index a vector by its LSH bands."""
sig = self._signature(vec)
for band_idx, h in enumerate(self._band_hashes(sig)):
key = (band_idx, h)
if key not in self._buckets:
self._buckets[key] = set()
self._buckets[key].add(item_id)
def candidates(self, item_id: int, vec: list[float]) -> set[int]:
"""Find candidate near-neighbors (may include false positives, almost no false negatives)."""
sig = self._signature(vec)
result = set()
for band_idx, h in enumerate(self._band_hashes(sig)):
key = (band_idx, h)
if key in self._buckets:
result.update(self._buckets[key])
result.discard(item_id) # don't match self
return result
class MemoryStore:
"""
Persistent agent memory with hybrid search.
Uses a single SQLite file with:
- Regular table for metadata
- FTS5 virtual table for keyword/BM25 search
- vec0 virtual table for semantic vector search (if sqlite-vec available)
- BLOB table for pure Python fallback vector search (if sqlite-vec NOT available)
_vec_mode values:
- "sqlite-vec": C extension loaded, using memories_vec virtual table
- "pure": no C extension, using _VecIndex pure Python fallback
- "none": no embedding function set at all
"""
def __init__(
self,
db_path: str = "memory.db",
embedding_dim: int = 256,
quantize: bool = False,
recency_weight: float = 0.1,
decay_rate: float = 0.01,
checkpoint_interval: int = 1000,
) -> None:
self.db_path: Path = Path(db_path)
self.dim: int = embedding_dim
self.quantize: bool = quantize # int8 vector quantization (4x storage reduction)
self.recency_weight: float = recency_weight # default recency factor in hybrid scoring
self.decay_rate: float = decay_rate # exponential decay rate (per hour)
self.checkpoint_interval: int = checkpoint_interval # auto-checkpoint after N writes (0 = disabled)
self._writes_since_checkpoint: int = 0
self._closed: bool = False
self._embed_fn: Callable[[str], list[float]] | None = None # set via set_embed_fn()
self._embed_batch_fn: Callable[[list[str]], list[list[float]]] | None = None # set via set_embed_fn() when model object passed
self._vec_index: _VecIndex | None = None
self.db: sqlite3.Connection = self._connect()
self._init_schema()
def _connect(self) -> sqlite3.Connection:
db = sqlite3.connect(str(self.db_path))
db.execute("PRAGMA journal_mode=WAL")
db.execute("PRAGMA synchronous=NORMAL")
db.execute("PRAGMA mmap_size=67108864") # 64MB mmap
# Try to load sqlite-vec extension (preferred: faster C implementation)
try:
import sqlite_vec
db.enable_load_extension(True)
sqlite_vec.load(db)
db.enable_load_extension(False)
self._has_vec = True
self._vec_mode = "sqlite-vec"
except Exception:  # ImportError or extension load failure; fall back to pure Python
self._has_vec = False
self._vec_mode = "pure" # will use _VecIndex fallback
return db
def _detect_schema_mode(self) -> str:
"""
Detect whether the memories table uses the old (TEXT tier) or new (INTEGER tier) schema.
Returns:
"compact" — new schema: tier INTEGER, content_hash BLOB, tags comma-separated
"legacy" — old schema: tier TEXT, content_hash TEXT, tags JSON
"none" — table does not exist yet (will be created as compact)
"""
row = self.db.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='memories'"
).fetchone()
if row is None:
return "none" # fresh DB, will create compact
# Inspect column type declared in the CREATE TABLE statement
col_info = self.db.execute("PRAGMA table_info(memories)").fetchall()
for col in col_info:
# col = (cid, name, type, notnull, dflt_value, pk)
if col[1] == "tier":
col_type = (col[2] or "").upper()
if "INT" in col_type:
return "compact"
else:
return "legacy"
# tier column missing — treat as legacy
return "legacy"
def _init_schema(self) -> None:
"""Create tables if they don't exist. Detects legacy vs compact schema."""
self._schema_mode = self._detect_schema_mode()
if self._schema_mode == "none":
# Brand new DB — create the compact optimised schema
self.db.execute("""
CREATE TABLE IF NOT EXISTS memories (
id INTEGER PRIMARY KEY,
content TEXT NOT NULL,
tier INTEGER NOT NULL DEFAULT 1,
source TEXT DEFAULT '',
tags TEXT DEFAULT '',
namespace TEXT DEFAULT '',
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
archived INTEGER DEFAULT 0,
content_hash BLOB,
access_count INTEGER DEFAULT 0,
last_accessed REAL,
supersedes INTEGER DEFAULT NULL,
importance REAL DEFAULT 0.5,
UNIQUE(content_hash, namespace)
)
""")
self._schema_mode = "compact"
else:
# Table already exists — keep it as-is, just ensure it was created
# (CREATE TABLE IF NOT EXISTS with original legacy schema as fallback)
self.db.execute("""
CREATE TABLE IF NOT EXISTS memories (
id INTEGER PRIMARY KEY AUTOINCREMENT,
content TEXT NOT NULL,
tier TEXT NOT NULL DEFAULT 'learned',
source TEXT DEFAULT '',
tags TEXT DEFAULT '[]',
created_at REAL NOT NULL,
updated_at REAL NOT NULL,
archived INTEGER DEFAULT 0,
content_hash TEXT UNIQUE
)
""")
# FTS5 for keyword search (BM25)
self.db.execute("""
CREATE VIRTUAL TABLE IF NOT EXISTS memories_fts
USING fts5(content, source, tags, content='memories', content_rowid='id')
""")
# Triggers to keep FTS5 in sync
self.db.execute("""
CREATE TRIGGER IF NOT EXISTS memories_ai AFTER INSERT ON memories BEGIN
INSERT INTO memories_fts(rowid, content, source, tags)
VALUES (new.id, new.content, new.source, new.tags);
END
""")
self.db.execute("""
CREATE TRIGGER IF NOT EXISTS memories_ad AFTER DELETE ON memories BEGIN
INSERT INTO memories_fts(memories_fts, rowid, content, source, tags)
VALUES ('delete', old.id, old.content, old.source, old.tags);
END
""")
self.db.execute("""
CREATE TRIGGER IF NOT EXISTS memories_au AFTER UPDATE ON memories BEGIN
INSERT INTO memories_fts(memories_fts, rowid, content, source, tags)
VALUES ('delete', old.id, old.content, old.source, old.tags);
INSERT INTO memories_fts(rowid, content, source, tags)
VALUES (new.id, new.content, new.source, new.tags);
END
""")
# Vector table: sqlite-vec (C extension) preferred
if self._vec_mode == "sqlite-vec":
self.db.execute(f"""
CREATE VIRTUAL TABLE IF NOT EXISTS memories_vec
USING vec0(
embedding float[{self.dim}] distance_metric=cosine
)
""")
else:
# Pure Python fallback: regular BLOB table + in-memory index
self._vec_index = _VecIndex(self.db, self.dim, quantize=self.quantize)
# Index for common queries
self.db.execute("CREATE INDEX IF NOT EXISTS idx_tier ON memories(tier)")
self.db.execute("CREATE INDEX IF NOT EXISTS idx_created ON memories(created_at)")
self.db.execute("CREATE INDEX IF NOT EXISTS idx_archived ON memories(archived)")
# Apply versioned migrations (adds columns, creates tables, etc.)
self._migrate()
# Index for namespace queries
self.db.execute("CREATE INDEX IF NOT EXISTS idx_namespace ON memories(namespace)")
# Index for supersedes lookups (temporal versioning)
self.db.execute("CREATE INDEX IF NOT EXISTS idx_supersedes ON memories(supersedes)")
# Entities table for lightweight entity extraction
self.db.execute("""
CREATE TABLE IF NOT EXISTS entities (
id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type TEXT NOT NULL,
memory_id INTEGER NOT NULL,
FOREIGN KEY (memory_id) REFERENCES memories(id)
)
""")
self.db.execute("CREATE INDEX IF NOT EXISTS idx_entity_name ON entities(name)")
self.db.execute("CREATE INDEX IF NOT EXISTS idx_entity_memory ON entities(memory_id)")
self.db.commit()
# ------------------------------------------------------------------
# Versioned migration system
# ------------------------------------------------------------------
# Each migration is a callable(db: sqlite3.Connection).
# Key = target version (applied when upgrading FROM key-1 TO key).
_MIGRATIONS = {
2: lambda db: [
db.execute(
f"ALTER TABLE memories ADD COLUMN {col}"
)
for col in (
"access_count INTEGER DEFAULT 0",
"last_accessed REAL",
"namespace TEXT DEFAULT ''",
"supersedes INTEGER DEFAULT NULL",
"importance REAL DEFAULT 0.5",
)
],
3: lambda db: [
# Drop global UNIQUE on content_hash, create composite UNIQUE on (content_hash, namespace).
# SQLite cannot DROP constraints, so we create an index instead.
# The old UNIQUE constraint may still exist on legacy DBs — that's OK,
# the Python-level check in remember() handles namespace isolation regardless.
db.execute(
"CREATE UNIQUE INDEX IF NOT EXISTS idx_hash_namespace ON memories(content_hash, namespace)"
),
],
}
def _migrate(self):
"""
Apply pending schema migrations using PRAGMA user_version.
- Reads current version from user_version (0 for new/old DBs).
- For new DBs (compact schema with all columns), just stamps the
latest version — no ALTER TABLE needed.
- For old DBs, applies each migration in order, wrapped in a
transaction, then sets user_version after each step.
"""
current = self.db.execute("PRAGMA user_version").fetchone()[0]
if current >= SCHEMA_VERSION:
return # Already up to date
# Check if this is a fresh DB that already has all columns
# (compact schema created in _init_schema includes everything).
if current == 0:
cols = {
c[1] for c in
self.db.execute("PRAGMA table_info(memories)").fetchall()
}
# All v2 columns present means compact schema — just stamp
v2_cols = {"access_count", "last_accessed", "namespace",
"supersedes", "importance"}
if v2_cols.issubset(cols):
self.db.execute(f"PRAGMA user_version = {SCHEMA_VERSION}")
self.db.commit()
return
# Apply migrations sequentially, each step in its own transaction
for version in range(current + 1, SCHEMA_VERSION + 1):
migration_fn = self._MIGRATIONS.get(version)
if migration_fn is None:
continue # No migration defined for this step
try:
with self.transaction() as conn:
migration_fn(conn)
conn.execute(f"PRAGMA user_version = {version}")
except sqlite3.OperationalError:
# Column already exists (partially migrated DB) — mark done
self.db.execute(f"PRAGMA user_version = {version}")
self.db.commit()
def set_embed_fn(self, fn: Callable[[str], list[float]] | Any) -> None:
"""
Set the embedding function. fn(text) -> list[float]
Can also accept any object with .embed(text) and .embed_batch(texts)
methods (e.g. LazyEmbedding, Model2VecEmbedding). When an object is
passed, both single-embed and batch paths are available.
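Example (illustrative; `model` stands for any object with those methods):
store.set_embed_fn(lambda text: [0.0] * store.dim)  # plain callable
store.set_embed_fn(model)  # model object; enables the batch path too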
"""
if hasattr(fn, "embed") and hasattr(fn, "embed_batch"):
# Full embedding model object (LazyEmbedding, Model2VecEmbedding, etc.)
# Use .embed() for single texts, .embed_batch() for bulk inserts
self._embed_fn = fn.embed
self._embed_batch_fn = fn.embed_batch
else:
# Plain callable fn(text) -> list[float] — backward compatible
self._embed_fn = fn
self._embed_batch_fn = None
# ------------------------------------------------------------------
# WAL checkpoint management
# ------------------------------------------------------------------
def checkpoint(self, mode: str = "PASSIVE") -> tuple[int, int, int]:
"""
Run a WAL checkpoint.
Modes:
PASSIVE — checkpoint as much as possible without blocking (default)
FULL — blocks writers until checkpoint completes
TRUNCATE — blocks writers + truncates WAL file to zero bytes
Returns:
(busy, log, checkpointed) — busy flag, total WAL frames, frames checkpointed
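Example (sketch):
store.checkpoint("TRUNCATE")  # block writers briefly and shrink the WAL file to zero bytes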
"""
mode = mode.upper()
if mode not in ("PASSIVE", "FULL", "TRUNCATE"):
raise ValueError(f"Invalid checkpoint mode '{mode}'. Use PASSIVE, FULL, or TRUNCATE.")
row = self.db.execute(f"PRAGMA wal_checkpoint({mode})").fetchone()
self._writes_since_checkpoint = 0
return tuple(row) if row else (0, 0, 0)
def _track_write(self, count: int = 1) -> None:
"""Increment write counter and auto-checkpoint if threshold hit."""
self._writes_since_checkpoint += count
if (
self.checkpoint_interval > 0
and self._writes_since_checkpoint >= self.checkpoint_interval
):
self.checkpoint("PASSIVE")
# ------------------------------------------------------------------
# Transaction safety
# ------------------------------------------------------------------
@contextlib.contextmanager
def transaction(self):
"""
Context manager for atomic transactions.
Usage:
with store.transaction() as conn:
conn.execute("INSERT ...")
conn.execute("UPDATE ...")
# auto-commits on success, rolls back on exception
Yields the db connection. BEGIN is issued on entry;
COMMIT on clean exit, ROLLBACK on exception.
"""
self.db.execute("BEGIN")
try:
yield self.db
self.db.execute("COMMIT")
except Exception:
self.db.execute("ROLLBACK")
raise
# ------------------------------------------------------------------
# Context manager protocol
# ------------------------------------------------------------------
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def _embed(self, text: str) -> list[float] | None:
"""Embed text. Returns None if no embedding function set."""
if self._embed_fn is None:
return None
try:
return self._embed_fn(text)
except EmbeddingError:
raise
except Exception as e:
preview = text[:50] + "..." if len(text) > 50 else text
raise EmbeddingError(
f"Embedding failed for text: {preview} — {e}"
) from e
def _embed_batch(self, texts: list[str]) -> list[list[float] | None]:
"""
Embed a list of texts in one model call.
Uses embed_batch() if available (faster for model2vec: single numpy call).
Falls back to calling _embed() per-item otherwise.
Returns list of vectors (or Nones if no embed fn set).
"""
if self._embed_fn is None:
return [None] * len(texts)
if self._embed_batch_fn is not None:
return self._embed_batch_fn(texts)
# Fallback: individual calls
return [self._embed_fn(t) for t in texts]
def _content_hash(self, content: str) -> bytes | str:
"""
Return a deduplication hash for content.
Compact schema (new DBs): 8-byte BLOB (sha256[:8])
Legacy schema (old DBs): 16-char hex (sha256.hexdigest()[:16])
Switching between the two based on self._schema_mode keeps the
UNIQUE constraint working correctly for each schema format.
"""
digest = hashlib.sha256(content.encode())
if self._schema_mode == "compact":
return digest.digest()[:8] # 8 bytes BLOB — saves 8 bytes vs hex str
return digest.hexdigest()[:16] # legacy: 16-char hex string
def _encode_tier(self, tier: str) -> int | str:
"""
Encode tier string to storage value.
Compact schema: returns int (0-4).
Legacy schema: returns str unchanged.
"""
if self._schema_mode == "compact":
return _TIER_TO_INT.get(tier, 1) # default 1 = "learned"
return tier
def _decode_tier(self, raw: int | str) -> str:
"""
Decode tier value from storage to public string API.
Compact schema: int → string.
Legacy schema: string unchanged.
"""
if self._schema_mode == "compact":
return _INT_TO_TIER.get(raw, "learned")
return raw if isinstance(raw, str) else "learned"
def _encode_tags(self, tags: list[str]) -> str:
"""
Encode tags list to storage string.
Compact schema: comma-separated (no brackets/quotes overhead).
Legacy schema: JSON array string.
"""
if self._schema_mode == "compact":
return ",".join(str(t) for t in tags) if tags else ""
return json.dumps(tags)
def _decode_tags(self, raw: str) -> list[str]:
"""
Decode tags from storage string to list.
Handles both comma-separated (compact) and JSON (legacy) formats.
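Both formats decode to the same list (illustrative):
'["a", "b"]'  ->  ["a", "b"]   # legacy JSON
'a,b'         ->  ["a", "b"]   # compact comma-separated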
"""
if not raw:
return []
# Try JSON first (handles legacy "[]" and '["tag1"]' etc.)
if raw.startswith("["):
try:
return json.loads(raw)
except (json.JSONDecodeError, ValueError):
pass
# Comma-separated (compact format or fallback)
return [t.strip() for t in raw.split(",") if t.strip()]
# ================================================================
# Entity extraction (regex-based, no LLM)
# ================================================================
@staticmethod
def _extract_entities(text: str) -> list[tuple[str, str]]:
"""
Extract (name, type) entity pairs from text using regex patterns.
Entity types detected:
- mention: @username patterns
- url: http/https URLs
- email: email addresses
- hashtag: #tag patterns
- ip: IPv4 addresses
- port: :PORT numbers
- path: /unix/file/paths and ~/paths
- money: $50, $1,000 etc.
- number_unit: 100MB, 8080ms, 2.5GB etc.
- env_var: ENV_VAR_NAMES (all-caps with underscores, 3+ chars)
Returns list of (entity_name, entity_type) tuples, deduplicated.
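Example (illustrative):
>>> MemoryStore._extract_entities("API docs at https://api.example.com cost $20")
[('https://api.example.com', 'url'), ('$20', 'money')]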
"""
results: list[tuple[str, str]] = []
seen: set[tuple[str, str]] = set()
def _add(name: str, etype: str):
key = (name.lower(), etype)
if key not in seen:
seen.add(key)
results.append((name, etype))
# Order matters: more specific patterns first to avoid overlapping matches
# email (before url and mention to avoid partial matches)
for m in re.finditer(r'\S+@\S+\.\S+', text):
_add(m.group(), 'email')
# url
for m in re.finditer(r'https?://\S+', text):
_add(m.group().rstrip('.,;)'), 'url')
# mention (skip if part of email)
for m in re.finditer(r'@\w+', text):
# Skip if this @word is part of an email (char before @ is not space/start)
if m.start() > 0 and text[m.start()-1] not in (' ', '\t', '\n', ',', ';', '(', '['):
continue
_add(m.group(), 'mention')
# hashtag
for m in re.finditer(r'#\w+', text):
_add(m.group(), 'hashtag')
# ip (before port to avoid overlap)
for m in re.finditer(r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b', text):
_add(m.group(), 'ip')
# port
for m in re.finditer(r':(\d{4,5})\b', text):
_add(':' + m.group(1), 'port')
# path (unix paths starting with / or ~/)
for m in re.finditer(r'(?:^|\s)([~/]\S+)', text, re.MULTILINE):
path_str = m.group(1)
# Must have at least one / separator to be a real path
if '/' in path_str and len(path_str) > 2:
_add(path_str, 'path')
# money
for m in re.finditer(r'\$\d[\d,]*', text):
_add(m.group(), 'money')
# number_unit
for m in re.finditer(r'\b\d+(?:\.\d+)?(?:MB|GB|KB|TB|ms|s|min|hr|h)\b', text):
_add(m.group(), 'number_unit')
# env_var (3+ chars, all caps with underscores, must contain at least one underscore or be 4+ chars)
for m in re.finditer(r'\b[A-Z][A-Z0-9_]{2,}\b', text):
word = m.group()
# Filter out common English words that happen to be all caps
if '_' in word or len(word) >= 4:
_add(word, 'env_var')
return results