-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlore.sh
More file actions
executable file
·2854 lines (2504 loc) · 99.6 KB
/
lore.sh
File metadata and controls
executable file
·2854 lines (2504 loc) · 99.6 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/usr/bin/env bash
# lore.sh - Memory that compounds
#
# A system for AI agents to build persistent, searchable memory across sessions.

# Fail fast: abort on command errors, unset variables, and failed pipe stages.
set -euo pipefail

# Resolve the directory containing this script, following a symlink when
# lore.sh is invoked through one. NOTE(review): readlink -f is GNU coreutils;
# presumably unavailable on stock macOS/BSD — confirm portability requirements.
_lore_source="${BASH_SOURCE[0]}"
[[ -L "$_lore_source" ]] && _lore_source="$(readlink -f "$_lore_source")"
# LORE_DIR may be pre-set by the environment; otherwise derived from this file.
LORE_DIR="${LORE_DIR:-$(cd "$(dirname "$_lore_source")" && pwd)}"
unset _lore_source

# Shared path definitions (LORE_DECISIONS_FILE, LORE_PATTERNS_FILE,
# LORE_SEARCH_DB, ... — per usage throughout this file).
source "${LORE_DIR}/lib/paths.sh"

# ANSI color codes for terminal output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
BOLD='\033[1m'
DIM='\033[2m'
NC='\033[0m'
# --- Bridge helpers (lazy-sourced, fail-silent) ---
# Each helper sources bridge.sh on demand so it's not loaded on every lore invocation.
# Sync the most recent decision record to Engram via the bridge.
# Fail-silent: returns 0 whether the bridge is missing, the decisions file
# is absent/empty, or the sync itself fails upstream.
_bridge_sync_last_decision() {
  source "$LORE_DIR/lib/bridge.sh" || return 0
  local record
  # 2>/dev/null keeps a missing decisions file truly silent (helper contract).
  record=$(tail -1 "$LORE_DECISIONS_FILE" 2>/dev/null) || return 0
  # Guard: an empty file yields an empty record — don't sync nothing.
  [[ -n "$record" ]] || return 0
  sync_single_decision "$record"
}
# Sync the newest pattern (last entry of the patterns YAML) to Engram.
# Fail-silent: any missing prerequisite (bridge lib, yq, readable file,
# empty pattern list) results in a quiet return 0.
_bridge_sync_last_pattern() {
  source "$LORE_DIR/lib/bridge.sh" || return 0
  command -v yq &>/dev/null || return 0
  local last_idx
  last_idx=$(yq '.patterns | length - 1' "$LORE_PATTERNS_FILE" 2>/dev/null) || return 0
  [[ "$last_idx" -lt 0 ]] && return 0
  # Pull the four fields sync_single_pattern expects, in order.
  local field value
  local -a fields=()
  for field in id name problem solution; do
    value=$(yq -r ".patterns[$last_idx].${field} // \"\"" "$LORE_PATTERNS_FILE" 2>/dev/null) || return 0
    fields+=("$value")
  done
  sync_single_pattern "${fields[@]}"
}
# Retract the Engram shadow for a single record id ($1).
# No-op (returns 0) when the bridge library cannot be sourced.
_bridge_retract_shadow() {
  source "$LORE_DIR/lib/bridge.sh" || return 0
  local shadow_id="$1"
  retract_shadow "$shadow_id"
}
# Advisory health check of the Lore→Engram shadow sync.
# Silently skipped (returns 0) when the bridge library is unavailable.
_bridge_health_check() {
  if ! source "$LORE_DIR/lib/bridge.sh"; then
    return 0
  fi
  shadow_health_check
}
# Infer the capture type from command-line flags.
# Echoes one of: "decision", "pattern", "failure", "evidence", "concept",
# "signal". An explicit type flag (--decision, --pattern, ...) always wins
# (last one seen); otherwise the type is inferred from which value-bearing
# flags are present, with priority failure > pattern > decision > signal.
infer_capture_type() {
  local has_decision_flags=false
  local has_pattern_flags=false
  local has_failure_flags=false
  local explicit_type=""
  while [[ $# -gt 0 ]]; do
    case "$1" in
      # Explicit type overrides
      --decision) explicit_type="decision"; shift ;;
      --pattern) explicit_type="pattern"; shift ;;
      --failure) explicit_type="failure"; shift ;;
      --observation|--signal) explicit_type="signal"; shift ;;
      --evidence) explicit_type="evidence"; shift ;;
      --concept) explicit_type="concept"; shift ;;
      # Decision-specific flags (value-bearing)
      --rationale|-r|--alternatives|-a|--outcome|--type|-f|--files)
        has_decision_flags=true
        # Tolerate a dangling flag with no value: a bare `shift 2` with one
        # argument left fails WITHOUT shifting, which previously looped forever
        # (or aborted under set -e).
        if [[ $# -ge 2 ]]; then shift 2; else shift; fi ;;
      # Pattern-specific flags (value-bearing)
      --solution|--problem|--context|--category|--confidence|--origin)
        has_pattern_flags=true
        if [[ $# -ge 2 ]]; then shift 2; else shift; fi ;;
      # Failure-specific flags (value-bearing)
      --error-type|--tool|--step)
        has_failure_flags=true
        if [[ $# -ge 2 ]]; then shift 2; else shift; fi ;;
      # Concept flags (value-bearing, skip arg)
      --definition)
        if [[ $# -ge 2 ]]; then shift 2; else shift; fi ;;
      # Skip other args
      *) shift ;;
    esac
  done
  # Explicit type wins
  [[ -n "$explicit_type" ]] && { echo "$explicit_type"; return; }
  # Infer from flags (failure > pattern > decision > signal default)
  if [[ "$has_failure_flags" == true ]]; then
    echo "failure"
  elif [[ "$has_pattern_flags" == true ]]; then
    echo "pattern"
  elif [[ "$has_decision_flags" == true ]]; then
    echo "decision"
  else
    echo "signal"
  fi
}
# Minimal help - fits on one screen
show_help() {
  # One-screen quick reference; the exhaustive listing is show_help_full.
  # Quoted 'EOF' delimiter: the body is emitted verbatim, no expansion.
  cat << 'EOF'
Lore - Memory That Compounds
Usage: lore <command> [options]
Session:
resume Load context from previous session
capture <text> Record knowledge (type inferred from flags)
handoff <message> Capture context for next session
status Show current session state
Capture flags:
(none) → signal (inbox)
--rationale "why" → decision (journal)
--solution "what" → pattern (patterns)
--error-type Type → failure (failures)
--evidence → evidence (with --confidence level)
Query:
recall <query> Read from memory (mode inferred from flags)
search <query> Search all components
review Review pending decisions
brief <topic> Assemble topic-specific context
Run 'lore help' for all commands.
Run 'lore help <topic>' for: capture, recall, search, intent, registry, components
EOF
}
# Full help - all commands
show_help_full() {
  # Complete command reference, printed for `lore help` with no topic.
  cat << 'EOF'
Lore - Memory That Compounds
Usage: lore <command> [options]
SESSION LIFECYCLE
resume [session] Load context from previous session (forks new session)
handoff <message> Capture context for next session
status Show current session state
entire-resume <branch> Resume Entire branch with Lore context injection
CAPTURE
capture <text> Universal write — type inferred from flags:
(no flags) → signal (inbox)
--rationale, -r → decision (journal)
--solution → pattern (patterns)
--error-type → failure (failures)
--decision Explicit decision override
--pattern Explicit pattern override
--failure Explicit failure override
--signal Explicit signal override
--evidence Explicit evidence (with --confidence)
--concept Explicit concept (with --definition)
--confidence Evidence confidence (preliminary|confirmed|contested|superseded)
--definition Concept definition (used with --concept)
--tags, -t Tags for categorization (all types)
remember <text> Record a decision (shortcut for capture --decision)
fail <type> <message> Log a failure
observe <text> Capture signal (shortcut for capture --signal)
EVIDENCE
evidence list [--confidence X] List evidence, optional confidence filter
evidence get <id> Get single evidence record
evidence stats Count evidence by confidence level
RECALL
recall <query> Universal read — mode inferred from flags:
(no flags) → search across all components
--routed → smart routing across Lore and Engram
--project <name> → full project context assembly
--patterns [context] → suggest relevant patterns
--failures [--type T] → list failures
--triggers [N] → recurring failure types (Rule of Three)
--concepts [query] → search or list concepts
--brief <topic> → topic-specific context briefing
--graph-depth N → follow graph edges during search (0-3)
--compact → machine-readable output
search <query> Search (shortcut for recall)
overlay <query> Generate compact context bundle for agent priming
--query, -q Search query (required)
--project, -p Boost results from this project
--limit, -l Max results (default 10)
--json Structured JSON output for automation
context <project> Project context (shortcut for recall --project)
suggest <context> Pattern suggestions (shortcut for recall --patterns)
failures [--type T] List failures (shortcut for recall --failures)
triggers [N] Recurring failures (shortcut for recall --triggers)
review List pending decisions older than 3 days
--auto Compact output for programmatic use
--resolve <id> Resolve a decision's outcome
--outcome <value> successful|revised|abandoned
--lesson "text" Optional lesson learned
brief <topic> Topic context (shortcut for recall --brief)
promote-failure [type] Promote recurring failures to anti-patterns
INTENT (Goals)
goal create <name> Create a goal
goal list [--status S] List goals
goal show <id> Show goal details
REGISTRY
registry show <proj> Show project details
registry list List all projects
registry validate Check consistency
MAINTENANCE
learn <text> Capture a pattern (shortcut, prefer: capture --solution)
index Build/rebuild search index
validate Run comprehensive checks
ingest <p> <t> <file> Bulk import from external formats
consolidate Group similar decisions and create summaries
--write Actually create summaries (default: dry-run)
--promote Also create concepts from clusters
--threshold N Jaccard similarity % (default: 50)
sync Project Lore shadows into Engram
--since T Time window (2h, 8h, 7d, 2024-01-01; default: 8h)
--type T Limit to: decisions, patterns, failures, sessions
--dry-run Print what would be synced without writing
promote Promote high-value Engram memories to Lore
--limit N Max candidates to review (default: 10)
--auto Auto-classify and skip prompt (dry-run)
JOURNAL
journal add Add structured journal entry (--type, --project, --title, --body)
COMPONENTS (direct access)
journal <cmd> Decision journal
patterns <cmd> Patterns and anti-patterns
transfer <cmd> Session management
inbox <cmd> Signal staging
intent <cmd> Goals and specs
Run 'lore help <topic>' for detailed help on:
capture, recall, search, intent, registry, components
EOF
}
# Topic-specific help
show_help_capture() {
  # Topic page for `lore help capture` (also remember/learn/fail/observe).
  cat << 'EOF'
CAPTURE COMMANDS
Record knowledge with a single verb. Importance is earned via flags, not assumed.
lore capture "X" → signal (inbox)
lore capture "X" --rationale "why" → decision (journal)
lore capture "X" --solution "how" → pattern (patterns)
lore capture "X" --error-type ToolError → failure (failures)
Bare capture creates a signal — a low-friction note that can be promoted
to evidence or a decision later. Add flags to indicate importance.
DECISIONS (remember)
lore remember "Use PostgreSQL" --rationale "Need ACID, team knows it"
lore remember "REST over GraphQL" -r "Simpler" -a "GraphQL, gRPC"
Options:
--rationale, -r <why> Why this decision was made
--alternatives, -a <list> Comma-separated alternatives considered
--tags, -t <list> Comma-separated tags
--type <type> architecture, implementation, naming, etc.
--files, -f <list> Files affected
--force Skip duplicate check
PATTERNS (learn)
lore learn "Retry with backoff" --context "External APIs" --solution "100ms * 2^n"
Options:
--context <when> When this pattern applies
--solution <what> The approach or technique
--problem <what> What problem this solves
--category <cat> Category (or "anti-pattern")
--confidence <0-1> How confident in this pattern
FAILURES (fail)
lore fail ToolError "Permission denied on /etc/hosts"
lore fail Timeout "API call exceeded 30s"
Error types:
Timeout Operation exceeded time limit
NonZeroExit Command returned non-zero
UserDeny User rejected proposed action
ToolError Tool execution failed
LogicError Logical/validation error
Options:
--tool <name> Tool that failed
--step <desc> Step in workflow
SIGNALS (observe)
lore observe "Users frequently ask about retry logic"
Raw signals go to inbox for later triage.
CONCEPTS (capture --concept)
lore capture "Fail-silent wrappers" --concept --definition "Library calls that catch errors"
Options:
--definition <text> What this concept means
EOF
}
show_help_recall() {
  # Topic page for `lore help recall`: read modes and their flags.
  cat << 'EOF'
RECALL COMMANDS
Read from memory with a single verb. Flags select the read mode.
lore recall "authentication" → search (default)
lore recall --routed "authentication" → smart Lore + Engram routing
lore recall --project council → full project context
lore recall --patterns "API design" → relevant patterns
lore recall --failures --type Timeout → filtered failure list
lore recall --triggers → recurring failure analysis
lore recall --concepts "fail-silent" → concept search
lore recall --brief "graph" → topic-scoped briefing
The default (bare recall with a query) performs a ranked search across all
components. Add flags to request specific read modes.
SEARCH (default)
lore recall "query"
lore recall "query" --graph-depth 2 Follow graph edges
lore recall "query" --compact Machine-readable output
PROJECT CONTEXT
lore recall --project <name> Registry, decisions, patterns, graph
PATTERNS
lore recall --patterns All patterns
lore recall --patterns "deployment" Ranked search via FTS5
FAILURES
lore recall --failures All failures
lore recall --failures "permission" Ranked search via FTS5
lore recall --failures --type Timeout Filter by error type
CONCEPTS
lore recall --concepts List all concepts
lore recall --concepts "deployment" Ranked search via FTS5
TRIGGERS
lore recall --triggers Types with 3+ occurrences
lore recall --triggers 5 Custom threshold
BRIEFING
lore recall --brief "topic" Cross-component topic summary
EOF
}
show_help_search() {
  # Topic page for `lore help search` (also query/find).
  cat << 'EOF'
SEARCH COMMANDS
Find knowledge across all Lore components.
BASIC SEARCH
lore search "authentication"
lore search "retry logic"
Searches: journal, patterns, sessions, graph, inbox
Uses FTS5 ranked search with 5-factor scoring:
BM25 * recency * frequency * importance * project_boost
Falls back to grep when no search index exists.
GRAPH TRAVERSAL
--graph-depth N Follow knowledge graph edges (0-3, default 0)
lore search "auth" --graph-depth 2
Depth 0: Direct matches only
Depth 1: Include directly connected concepts
Depth 2: Two hops from matches
Depth 3: Maximum traversal
OTHER QUERIES
lore context <project> Assemble full context for a project
lore suggest <text> Get pattern suggestions for context
lore failures List recorded failures
lore triggers Show recurring failure patterns (3+ occurrences)
lore promote-failure T Promote error type T to anti-pattern (--fix, --threshold)
BUILDING THE INDEX
lore index Build/rebuild FTS5 search index
Run after bulk imports or if search seems stale.
EOF
}
show_help_intent() {
  # Topic page for `lore help intent` (also goal/goals/spec).
  cat << 'EOF'
INTENT COMMANDS
Goals define what you're trying to achieve.
GOALS
lore goal create "Implement user authentication"
lore goal list
lore goal list --status active
lore goal show <goal-id>
Status values: draft, active, blocked, completed, cancelled
SPEC MANAGEMENT (SDD Integration)
lore spec list List specs by status
lore spec context <goal-id> Full context for a spec
lore spec assign <goal-id> Assign spec to current session
lore spec progress <goal-id> Update phase (specify/plan/tasks/implement)
lore spec complete <goal-id> Mark spec complete with outcome
Specs track work through the specify → plan → tasks → implement lifecycle.
Lore captures the durable knowledge; specs are ephemeral in feature branches.
EOF
}
show_help_registry() {
  # Topic page for `lore help registry` (also project/projects).
  cat << 'EOF'
REGISTRY COMMANDS
Project metadata and cross-project relationships.
QUERY
lore registry list List all registered projects
lore registry show <project> Show project details with context
lore registry context <project> Alias for 'show' (deprecated)
VALIDATION
lore registry validate Check registry consistency
lore validate Comprehensive validation (all components)
DIRECT ACCESS
lore registry <subcommand> Pass through to registry.sh
Registry data lives in:
registry/data/metadata.yaml Project metadata
registry/data/clusters.yaml Project groupings
registry/data/relationships.yaml Cross-project dependencies
registry/data/contracts.yaml Interface contracts
EOF
}
show_help_components() {
  # Topic page for `lore help components` and the component names themselves.
  cat << 'EOF'
COMPONENTS
Lore has eight components. Each answers one question.
journal/ "Why did we choose this?" Decision capture with rationale
patterns/ "What did we learn?" Patterns and anti-patterns
transfer/ "What's next?" Session handoff and resume
graph/ "What relates to this?" Knowledge graph
inbox/ "What did we notice?" Raw signal staging
intent/ "What are we trying to do?" Goals and specs
failures/ "What went wrong?" Failure reports
registry/ "What exists?" Project metadata
DIRECT ACCESS
lore journal <command> Decision journal commands
lore patterns <command> Pattern commands
lore transfer <command> Session management
lore graph <command> Knowledge graph
lore inbox <command> Inbox management
lore intent <command> Goals and specs
Run 'lore <component> --help' for component-specific help.
DATA FORMATS
JSONL journal, inbox, failures (append-only logs)
JSON graph, sessions (structured documents)
YAML patterns, goals, registry (human-editable)
EOF
}
# Route `lore help [topic]` to the matching topic page.
# An empty topic prints the full reference; an unrecognized topic lists the
# available topics and returns 1.
cmd_help() {
  local topic="${1:-}"
  case "$topic" in
    "")
      show_help_full
      ;;
    capture|remember|learn|fail|observe)
      show_help_capture
      ;;
    recall)
      show_help_recall
      ;;
    search|query|find)
      show_help_search
      ;;
    intent|goal|goals|spec)
      show_help_intent
      ;;
    registry|project|projects)
      show_help_registry
      ;;
    components|component|journal|patterns|graph|transfer|inbox|failures)
      show_help_components
      ;;
    *)
      cat <<EOF
Unknown help topic: $topic

Available topics:
 capture Decisions, patterns, failures, signals
 recall Read modes and flags
 search Search modes and options
 intent Goals, specs
 registry Project metadata
 components Direct component access
EOF
      return 1
      ;;
  esac
}
# Quick commands that span components
# Record a decision via the journal component, then fan out write-throughs.
# All arguments pass straight to `journal.sh record`. Post-write propagation
# (graph sync, search index, Engram bridge) is backgrounded and fail-silent
# so a successful capture never blocks on — or dies because of — them.
cmd_remember() {
  # Dedup check now lives in journal/journal.sh cmd_record (--force passes through)
  "$LORE_DIR/journal/journal.sh" record "$@"
  # Sync decision to graph (background, fail-silent)
  "$LORE_DIR/graph/sync.sh" &>/dev/null &
  # Write-through to search index (background, fail-silent).
  # Guard the command substitution: under `set -euo pipefail` an unreadable
  # decisions file would otherwise abort the script AFTER a successful record.
  local last_json
  last_json=$(tail -1 "$LORE_DECISIONS_FILE" 2>/dev/null) || true
  [[ -n "$last_json" ]] && bash "$LORE_DIR/lib/search-index.sh" index-one decision "$last_json" &>/dev/null &
  # Write-through to Engram (background, fail-silent)
  _bridge_sync_last_decision &>/dev/null &
}
# Capture a pattern via the patterns component, then fan out write-throughs.
# All arguments pass straight to `patterns.sh capture`. Index and bridge
# propagation are backgrounded and fail-silent.
cmd_learn() {
  # Dedup check now lives in patterns/patterns.sh cmd_capture (--force passes through)
  "$LORE_DIR/patterns/patterns.sh" capture "$@"
  # Write-through to search index (background, fail-silent).
  # Patterns are YAML — get the last entry as JSON. Guard against a missing
  # yq binary: under `set -e` an unguarded failing command substitution would
  # kill the script after the pattern was already captured.
  local last_pattern=""
  if command -v yq &>/dev/null; then
    last_pattern=$(yq -o=json '.patterns[-1]' "$LORE_PATTERNS_FILE" 2>/dev/null) || true
  fi
  # Skip indexing when there is nothing to index ("" or YAML null).
  [[ -n "$last_pattern" && "$last_pattern" != "null" ]] && bash "$LORE_DIR/lib/search-index.sh" index-one pattern "$last_pattern" &>/dev/null &
  # Write-through to Engram (background, fail-silent)
  _bridge_sync_last_pattern &>/dev/null &
}
# Unified capture command — routes to remember/learn/fail based on flags.
#
# Input modes:
#   - Traditional: positional text + CLI flags; the type comes from
#     infer_capture_type, and piped stdin becomes the text when no
#     positional argument is present.
#   - JSON (--json / --json-in / --json-out): reads a JSON object from
#     stdin, maps its keys onto the equivalent CLI flags, and (with
#     --json-out) emits a machine-readable response instead of colored text.
# Evidence and concept captures are handled inline below; the other types
# delegate to cmd_observe / cmd_remember / cmd_learn / cmd_fail.
cmd_capture() {
  # --- JSON flag detection ---
  local _json_in=false _json_out=false
  for arg in "$@"; do
    case "$arg" in
      --json) _json_in=true; _json_out=true ;;
      --json-in) _json_in=true ;;
      --json-out) _json_out=true ;;
    esac
  done
  # json-io.sh supplies _json_error, _json_response, _jq_str, _jq_csv and
  # _infer_type_from_json (per their use below).
  if [[ "$_json_in" == true || "$_json_out" == true ]]; then
    source "$LORE_DIR/lib/json-io.sh"
  fi
  # --- Input handling: JSON or traditional ---
  local stdin_text=""
  local capture_type=""
  local args=()
  local has_force=false
  if [[ "$_json_in" == true ]]; then
    # JSON input mode: read stdin, extract fields with jq, build args
    local _json_raw
    _json_raw=$(cat)
    if [[ -z "$_json_raw" ]]; then
      if [[ "$_json_out" == true ]]; then
        _json_error "Empty JSON input"
      else
        echo -e "${RED}Error: Empty JSON input${NC}" >&2
      fi
      return 1
    fi
    if ! echo "$_json_raw" | jq empty 2>/dev/null; then
      if [[ "$_json_out" == true ]]; then
        _json_error "Invalid JSON"
      else
        echo -e "${RED}Error: Invalid JSON${NC}" >&2
      fi
      return 1
    fi
    # Infer type from JSON keys
    capture_type=$(_infer_type_from_json "$_json_raw")
    # Extract fields and build args array (mirrors CLI flag format)
    local v
    case "$capture_type" in
      decision)
        v=$(_jq_str "$_json_raw" "decision"); [[ -n "$v" ]] && args+=("$v")
        v=$(_jq_str "$_json_raw" "rationale"); [[ -n "$v" ]] && args+=(--rationale "$v")
        v=$(_jq_csv "$_json_raw" "alternatives"); [[ -n "$v" ]] && args+=(--alternatives "$v")
        v=$(_jq_csv "$_json_raw" "tags"); [[ -n "$v" ]] && args+=(--tags "$v")
        v=$(_jq_str "$_json_raw" "decision_type"); [[ -n "$v" ]] && args+=(--type "$v")
        v=$(_jq_csv "$_json_raw" "files"); [[ -n "$v" ]] && args+=(--files "$v")
        v=$(_jq_str "$_json_raw" "valid_at"); [[ -n "$v" ]] && args+=(--valid-at "$v")
        ;;
      pattern)
        # "name" is preferred; "pattern" is accepted as a fallback key.
        v=$(_jq_str "$_json_raw" "name")
        [[ -z "$v" ]] && v=$(_jq_str "$_json_raw" "pattern")
        [[ -n "$v" ]] && args+=("$v")
        v=$(_jq_str "$_json_raw" "context"); [[ -n "$v" ]] && args+=(--context "$v")
        v=$(_jq_str "$_json_raw" "solution"); [[ -n "$v" ]] && args+=(--solution "$v")
        v=$(_jq_str "$_json_raw" "problem"); [[ -n "$v" ]] && args+=(--problem "$v")
        v=$(_jq_str "$_json_raw" "category"); [[ -n "$v" ]] && args+=(--category "$v")
        v=$(_jq_str "$_json_raw" "origin"); [[ -n "$v" ]] && args+=(--origin "$v")
        v=$(_jq_str "$_json_raw" "example_bad"); [[ -n "$v" ]] && args+=(--example-bad "$v")
        v=$(_jq_str "$_json_raw" "example_good"); [[ -n "$v" ]] && args+=(--example-good "$v")
        ;;
      failure)
        v=$(_jq_str "$_json_raw" "error_type"); [[ -n "$v" ]] && args+=(--error-type "$v")
        v=$(_jq_str "$_json_raw" "message"); [[ -n "$v" ]] && args+=("$v")
        v=$(_jq_str "$_json_raw" "tool"); [[ -n "$v" ]] && args+=(--tool "$v")
        v=$(_jq_str "$_json_raw" "step"); [[ -n "$v" ]] && args+=(--step "$v")
        ;;
      signal)
        # "content" preferred; "text" accepted as fallback key.
        v=$(_jq_str "$_json_raw" "content")
        [[ -z "$v" ]] && v=$(_jq_str "$_json_raw" "text")
        [[ -n "$v" ]] && args+=("$v")
        v=$(_jq_str "$_json_raw" "source"); [[ -n "$v" ]] && args+=(--source "$v")
        v=$(_jq_csv "$_json_raw" "tags"); [[ -n "$v" ]] && args+=(--tags "$v")
        ;;
      evidence)
        v=$(_jq_str "$_json_raw" "content")
        [[ -z "$v" ]] && v=$(_jq_str "$_json_raw" "text")
        [[ -n "$v" ]] && args+=("$v")
        v=$(_jq_str "$_json_raw" "confidence"); [[ -n "$v" ]] && args+=(--confidence "$v")
        v=$(_jq_csv "$_json_raw" "tags"); [[ -n "$v" ]] && args+=(--tags "$v")
        v=$(_jq_str "$_json_raw" "source"); [[ -n "$v" ]] && args+=(--source "$v")
        v=$(_jq_str "$_json_raw" "provenance"); [[ -n "$v" ]] && args+=(--provenance "$v")
        ;;
      concept)
        v=$(_jq_str "$_json_raw" "name"); [[ -n "$v" ]] && args+=("$v")
        v=$(_jq_str "$_json_raw" "definition"); [[ -n "$v" ]] && args+=(--definition "$v")
        ;;
    esac
    # Check for force flag in JSON
    v=$(echo "$_json_raw" | jq -r '.force // empty' 2>/dev/null) || true
    [[ "$v" == "true" ]] && { has_force=true; args+=(--force); }
  else
    # Traditional input mode: read from stdin if piped
    if [[ ! -t 0 ]]; then
      stdin_text=$(cat)
    fi
    capture_type=$(infer_capture_type "$@")
    # Strip explicit type flags, --force, and JSON flags
    local end_of_opts=false
    for arg in "$@"; do
      # Everything after a literal `--` is passed through untouched.
      if [[ "$end_of_opts" == true ]]; then
        args+=("$arg")
        continue
      fi
      case "$arg" in
        --) end_of_opts=true ;;
        --decision|--pattern|--failure|--observation|--signal|--evidence|--concept) continue ;;
        --json|--json-in|--json-out) continue ;;
        --force) has_force=true; args+=("$arg") ;;
        *) args+=("$arg") ;;
      esac
    done
    # Prepend stdin text if no positional text was given
    if [[ -n "$stdin_text" ]]; then
      local has_positional=false
      local skip_next=false
      for arg in "${args[@]}"; do
        if [[ "$skip_next" == true ]]; then
          skip_next=false
          continue
        fi
        case "$arg" in
          # Value-bearing flags: the following word is a value, not a positional.
          --rationale|--solution|--error-type|--tags|-t|--source|-s|--confidence|--provenance|--context|--tool|--step|--definition)
            skip_next=true ;;
          -*) ;;
          *) has_positional=true; break ;;
        esac
      done
      if [[ "$has_positional" == false ]]; then
        args=("$stdin_text" "${args[@]}")
      fi
    fi
  fi
  # --- JSON-out dedup: run at this level, force downstream ---
  local _dup_id=""
  if [[ "$_json_out" == true && "$has_force" == false ]]; then
    if [[ "$capture_type" == "decision" || "$capture_type" == "pattern" ]]; then
      # conflict.sh provides lore_check_duplicate; fail-silent if missing.
      source "$LORE_DIR/lib/conflict.sh" 2>/dev/null || true
      # Extract the text to check from args (first non-flag argument)
      local _dedup_text=""
      local _skip_next=false
      for arg in "${args[@]}"; do
        if [[ "$_skip_next" == true ]]; then
          _skip_next=false
          continue
        fi
        case "$arg" in
          --*) _skip_next=true ;;
          *) _dedup_text="$arg"; break ;;
        esac
      done
      if [[ -n "$_dedup_text" ]]; then
        _dup_id=$(lore_check_duplicate "$capture_type" "$_dedup_text" 2>/dev/null) || true
        if [[ -n "$_dup_id" ]]; then
          _json_error "Duplicate detected" "$_dup_id"
          return 1
        fi
      fi
      # Force downstream to skip redundant dedup
      args+=(--force)
    fi
  fi
  # --- Route to component handlers ---
  # In JSON-out mode handler stdout is discarded (the JSON response below is
  # the only output) and failures are collected into _rc instead of returned.
  local _rc=0
  case "$capture_type" in
    signal)
      # Strip --force before passing to cmd_observe (it has no dedup check)
      local sig_args=()
      for arg in "${args[@]}"; do
        [[ "$arg" == "--force" ]] && continue
        sig_args+=("$arg")
      done
      if [[ "$_json_out" == true ]]; then
        cmd_observe "${sig_args[@]}" >/dev/null || _rc=$?
      else
        cmd_observe "${sig_args[@]}"
        return
      fi
      ;;
    decision)
      if [[ "$_json_out" == true ]]; then
        cmd_remember "${args[@]}" >/dev/null || _rc=$?
      else
        cmd_remember "${args[@]}"
      fi
      ;;
    pattern)
      if [[ "$_json_out" == true ]]; then
        cmd_learn "${args[@]}" >/dev/null || _rc=$?
      else
        cmd_learn "${args[@]}"
      fi
      ;;
    failure)
      # cmd_fail expects: <error_type> <message> [--tool T] [--step S]
      # capture uses --error-type <type> as a named flag, so convert it to positional
      # Strip --force (failures have no dedup)
      local fail_args=()
      local error_type=""
      local skip_next=false
      for arg in "${args[@]}"; do
        if [[ "$skip_next" == true ]]; then
          skip_next=false
          error_type="$arg"
          continue
        fi
        if [[ "$arg" == "--error-type" ]]; then
          skip_next=true
          continue
        fi
        [[ "$arg" == "--force" ]] && continue
        fail_args+=("$arg")
      done
      if [[ "$_json_out" == true ]]; then
        if [[ -n "$error_type" ]]; then
          cmd_fail "$error_type" "${fail_args[@]}" >/dev/null || _rc=$?
        else
          cmd_fail "${fail_args[@]}" >/dev/null || _rc=$?
        fi
      else
        if [[ -n "$error_type" ]]; then
          cmd_fail "$error_type" "${fail_args[@]}"
        else
          cmd_fail "${fail_args[@]}"
        fi
      fi
      ;;
    evidence)
      # Inline handling: parse evidence flags, then append via the evidence lib.
      source "$LORE_DIR/evidence/lib/evidence.sh"
      local evi_text="" evi_confidence="preliminary" evi_tags="" evi_source="manual" evi_provenance=""
      local skip_next=false
      for ((i=0; i<${#args[@]}; i++)); do
        if [[ "$skip_next" == true ]]; then
          skip_next=false
          continue
        fi
        case "${args[$i]}" in
          --confidence)
            evi_confidence="${args[$((i+1))]}"
            skip_next=true ;;
          --tags|-t)
            evi_tags="${args[$((i+1))]}"
            skip_next=true ;;
          --source)
            evi_source="${args[$((i+1))]}"
            skip_next=true ;;
          --provenance)
            evi_provenance="${args[$((i+1))]}"
            skip_next=true ;;
          --force) ;;
          # Unknown long flags are assumed value-bearing and skipped with
          # their value.
          --*) skip_next=true ;;
          *)
            [[ -z "$evi_text" ]] && evi_text="${args[$i]}" ;;
        esac
      done
      if [[ -z "$evi_text" ]]; then
        if [[ "$_json_out" == true ]]; then
          _json_error "Evidence content required"
        else
          echo -e "${RED}Error: Evidence content required${NC}" >&2
          echo "Usage: lore capture \"text\" --evidence [--confidence preliminary|confirmed|contested|superseded]" >&2
        fi
        return 1
      fi
      local evi_id
      evi_id=$(evidence_append "$evi_text" "$evi_source" "$evi_tags" "$evi_confidence" "$evi_provenance")
      if [[ "$_json_out" != true ]]; then
        echo -e "${GREEN}Recorded evidence:${NC} ${BOLD}${evi_id}${NC}"
        echo -e " ${CYAN}Confidence:${NC} $evi_confidence"
        echo -e " ${CYAN}Content:${NC} ${evi_text:0:70}$([ ${#evi_text} -gt 70 ] && echo '...')"
      fi
      ;;
    concept)
      # Inline handling: concept name plus optional --definition.
      local definition=""
      local text=""
      local skip_next=false
      for ((i=0; i<${#args[@]}; i++)); do
        if [[ "$skip_next" == true ]]; then
          skip_next=false
          continue
        fi
        if [[ "${args[$i]}" == "--definition" && $((i+1)) -lt ${#args[@]} ]]; then
          definition="${args[$((i+1))]}"
          skip_next=true
        elif [[ "${args[$i]}" != --* ]]; then
          text="${args[$i]}"
        fi
      done
      if [[ -z "$text" ]]; then
        if [[ "$_json_out" == true ]]; then
          _json_error "Concept name required"
        else
          echo -e "${RED}Error: Concept name required${NC}" >&2
          echo "Usage: lore capture \"name\" --concept [--definition \"what it means\"]" >&2
        fi
        return 1
      fi
      local concept_id
      # NOTE(review): write_concept / generate_concept_id are presumably
      # provided by a lib sourced elsewhere — confirm they are in scope here.
      concept_id=$(write_concept "$(generate_concept_id)" "$text" "$definition" "manual")
      if [[ "$_json_out" != true ]]; then
        echo -e "${GREEN}Created concept:${NC} ${BOLD}${concept_id}${NC}"
        echo -e " ${CYAN}Name:${NC} $text"
        [[ -n "$definition" ]] && echo -e " ${CYAN}Definition:${NC} $definition"
      fi
      ;;
    *)
      if [[ "$_json_out" == true ]]; then
        _json_error "Unknown capture type: $capture_type"
      else
        echo -e "${RED}Error: Unknown capture type: $capture_type${NC}" >&2
      fi
      return 1
      ;;
  esac
  # --- JSON-out response: extract ID from storage and emit ---
  # The new record's id/timestamp are read back from the tail of the
  # component's own storage file, so the response reflects what was written.
  if [[ "$_json_out" == true ]]; then
    if [[ $_rc -ne 0 ]]; then
      _json_error "Capture failed"
      return $_rc
    fi
    local _out_id="" _out_ts=""
    case "$capture_type" in
      decision)
        _out_id=$(tail -1 "$LORE_DECISIONS_FILE" 2>/dev/null | jq -r '.id // empty' 2>/dev/null) || true
        _out_ts=$(tail -1 "$LORE_DECISIONS_FILE" 2>/dev/null | jq -r '.timestamp // empty' 2>/dev/null) || true
        ;;
      pattern)
        _out_id=$(yq -r '.patterns[-1].id // ""' "$LORE_PATTERNS_FILE" 2>/dev/null) || true
        _out_ts=$(yq -r '.patterns[-1].created_at // ""' "$LORE_PATTERNS_FILE" 2>/dev/null) || true
        ;;
      failure)
        _out_id=$(tail -1 "${LORE_FAILURES_DATA}/failures.jsonl" 2>/dev/null | jq -r '.id // empty' 2>/dev/null) || true
        _out_ts=$(tail -1 "${LORE_FAILURES_DATA}/failures.jsonl" 2>/dev/null | jq -r '.timestamp // empty' 2>/dev/null) || true
        ;;
      signal)
        _out_id=$(tail -1 "$LORE_SIGNALS_FILE" 2>/dev/null | jq -r '.id // empty' 2>/dev/null) || true
        _out_ts=$(tail -1 "$LORE_SIGNALS_FILE" 2>/dev/null | jq -r '.timestamp // empty' 2>/dev/null) || true
        ;;
      evidence)
        _out_id="${evi_id:-}"
        _out_ts=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
        ;;
      concept)
        _out_id="${concept_id:-}"
        _out_ts=$(date -u +"%Y-%m-%dT%H:%M:%SZ")
        ;;
    esac
    if [[ -n "$_out_id" ]]; then
      _json_response "$_out_id" "$capture_type" "${_out_ts:-$(date -u +"%Y-%m-%dT%H:%M:%SZ")}"
    else
      _json_error "Write succeeded but ID extraction failed"
    fi
  fi
}
# Delegate session handoff to the transfer component, forwarding all args.
cmd_handoff() {
  local transfer_script="$LORE_DIR/transfer/transfer.sh"
  "$transfer_script" handoff "$@"
}
# Restore prior session context via the transfer component, then run an
# advisory bridge health check that must never block or fail the resume.
cmd_resume() {
  local transfer_script="$LORE_DIR/transfer/transfer.sh"
  "$transfer_script" resume "$@"
  if ! _bridge_health_check 2>/dev/null; then
    : # advisory only — ignore failures
  fi
}
SEARCH_DB="${LORE_SEARCH_DB}"
# Derive current project from cwd
# Infer the current project name from the working directory: the first path
# component under the workspace root (the parent of LORE_DIR).
# Prints "" when the cwd is not inside the workspace root.
_derive_project() {
  local root here rel
  root="$(dirname "$LORE_DIR")"
  here="$(pwd)"
  rel="${here#"$root/"}"
  if [[ "$rel" == "$here" ]]; then
    # Prefix strip changed nothing → cwd is outside the workspace.
    echo ""
    return
  fi
  echo "${rel%%/*}"
}
# Log access for reinforcement scoring
# Record a read of a memory record for reinforcement scoring.
# Best-effort: any sqlite3 failure (or missing binary) is swallowed.
# Single quotes in values are doubled for safe SQL string embedding.
_log_access() {
  local db="$1" record_type="$2" record_id="$3"
  local sq="'"
  local esc_type="${record_type//${sq}/${sq}${sq}}"
  local esc_id="${record_id//${sq}/${sq}${sq}}"
  sqlite3 "$db" \
    "INSERT OR IGNORE INTO access_log (record_type, record_id, accessed_at) VALUES ('${esc_type}', '${esc_id}', datetime('now'));" \
    2>/dev/null || true
}
# FTS5-based ranked search
_search_fts5() {
local query="$1"
local project="$2"
local limit="${3:-10}"
local graph_depth="${4:-0}"
local compact="${5:-false}"
local type_filter="${6:-}"
# Escape for FTS5: quote each term to prevent operator interpretation,
# then escape single quotes for SQL embedding
local safe_query
safe_query=$(echo "$query" | sed 's/"/""/g; s/[^ ][^ ]*/"&"/g')
safe_query="${safe_query//\'/\'\'}"
local safe_project="${project//\'/\'\'}"
# Build SQL dynamically based on type filter
local decision_sql="SELECT 'decision' as type, id, decision as content, project, timestamp, importance, rank * -1 as bm25_score FROM decisions WHERE decisions MATCH '${safe_query}'"
local pattern_sql="SELECT 'pattern' as type, id, name || ': ' || solution as content, 'lore' as project, timestamp, CAST(confidence * 5 AS INT) as importance, rank * -1 as bm25_score FROM patterns WHERE patterns MATCH '${safe_query}'"
local transfer_sql="SELECT 'transfer' as type, session_id as id, handoff as content, project, timestamp, 3 as importance, rank * -1 as bm25_score FROM transfers WHERE transfers MATCH '${safe_query}'"
local failure_sql="SELECT 'failure' as type, id, error_type || ': ' || error_message as content, '' as project, timestamp, 3 as importance, rank * -1 as bm25_score FROM failures WHERE failures MATCH '${safe_query}'"
local signal_sql="SELECT 'signal' as type, id, content as content, '' as project, timestamp, 2 as importance, rank * -1 as bm25_score FROM observations WHERE observations MATCH '${safe_query}'"
local concept_sql="SELECT 'concept' as type, id, name || ': ' || definition as content, '' as project, timestamp, 4 as importance, rank * -1 as bm25_score FROM concepts WHERE concepts MATCH '${safe_query}'"
local sql_parts=()
case "$type_filter" in
decision) sql_parts=("$decision_sql") ;;
pattern) sql_parts=("$pattern_sql") ;;
transfer) sql_parts=("$transfer_sql") ;;
failure) sql_parts=("$failure_sql") ;;