- Add URL fetching with HTML-to-text extraction
- Add basic PDF text extraction
- Add smart content chunking with overlap
- Add deduplication via content checksums
- Add auto-linking to semantically related nodes
- Add CLI commands: ingest, clip
- Add MCP tools: memory_ingest, memory_clip
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { z } from 'zod/v3';
import { query, listNodes, getNode, findNodeByPrefix, addNode, addEdge, removeNode, removeEdge, updateNode, getNodeHistory, getNodeAtTime, diffVersions, restoreVersion } from '../core/store';
import { getConnections, getEdgesByNode } from '../core/graph';
import { cosineSimilarity } from '../core/search/vector';
import { getDb } from '../core/db';
import { NodeKind, EdgeType } from '../types';

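// Serialize tool output as pretty-printed JSON, dropping embedding vectors
// (large float arrays) so responses stay compact in the model's context.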
function serialize(data: any): string {
  return JSON.stringify(data, (key, val) => key === 'embedding' ? undefined : val, 2);
}

const server = new McpServer({
  name: 'memory',
  version: '1.0.0',
});

server.tool(
  'memory_query',
  'Search memory nodes by text (hybrid BM25 + vector search)',
  {
    text: z.string().describe('Search query text'),
    kind: z.enum(['memory', 'component', 'task', 'decision']).optional().describe('Filter by node kind'),
    limit: z.number().optional().describe('Max results (default 10)'),
  },
  async ({ text, kind, limit }) => {
    const results = await query(text, { kind: kind as NodeKind, limit });
    return { content: [{ type: 'text' as const, text: serialize(results) }] };
  }
);

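// Lookup convention used by all node-addressed tools below:
// getNode(id) ?? findNodeByPrefix(id) resolves either a full node ID or a unique ID prefix.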
server.tool(
  'memory_show',
  'Show a memory node by ID or ID prefix',
  {
    id: z.string().describe('Full node ID or prefix'),
  },
  async ({ id }) => {
    const node = getNode(id) ?? findNodeByPrefix(id);
    if (!node) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };
    }
    const connections = getConnections(node.id);
    return { content: [{ type: 'text' as const, text: serialize({ node, connections }) }] };
  }
);

server.tool(
  'memory_list',
  'List memory nodes with optional filters',
  {
    kind: z.enum(['memory', 'component', 'task', 'decision']).optional().describe('Filter by node kind'),
    status: z.string().optional().describe('Filter by status'),
    tags: z.array(z.string()).optional().describe('Filter by tags'),
    limit: z.number().optional().describe('Max results'),
  },
  async ({ kind, status, tags, limit }) => {
    const nodes = listNodes({ kind: kind as NodeKind, status, tags, limit });
    return { content: [{ type: 'text' as const, text: serialize(nodes) }] };
  }
);

server.tool(
  'memory_children',
  'List children of a memory node (outgoing "contains" edges)',
  {
    id: z.string().describe('Parent node ID or prefix'),
    kind: z.enum(['memory', 'component', 'task', 'decision']).optional().describe('Filter children by kind'),
  },
  async ({ id, kind }) => {
    const node = getNode(id) ?? findNodeByPrefix(id);
    if (!node) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };
    }
    const { outgoing } = getConnections(node.id);
    let children = outgoing.filter(e => e.type === 'contains').map(e => (e as any).node);
    if (kind) {
      children = children.filter((n: any) => n.kind === kind);
    }
    return { content: [{ type: 'text' as const, text: serialize(children) }] };
  }
);

server.tool(
  'memory_add',
  'Add a new memory node',
  {
    kind: z.enum(['memory', 'component', 'task', 'decision']).describe('Node kind'),
    title: z.string().describe('Node title'),
    content: z.string().optional().describe('Node content/body'),
    tags: z.array(z.string()).optional().describe('Tags'),
    status: z.string().optional().describe('Status (e.g. active, todo, done)'),
    sections: z.array(z.object({ label: z.string(), body: z.string() })).optional().describe('Structured sections'),
  },
  async ({ kind, title, content, tags, status, sections }) => {
    const metadata: Record<string, any> = {};
    if (sections) metadata.sections = sections;
    const node = await addNode({ kind: kind as NodeKind, title, content, tags, status, metadata });
    return { content: [{ type: 'text' as const, text: serialize(node) }] };
  }
);

server.tool(
  'memory_link',
  'Create an edge between two memory nodes',
  {
    fromId: z.string().describe('Source node ID'),
    toId: z.string().describe('Target node ID'),
    type: z.enum(['depends_on', 'contains', 'implements', 'blocked_by', 'subtask_of', 'relates_to', 'supersedes', 'about']).describe('Edge type'),
  },
  async ({ fromId, toId, type }) => {
    const edge = addEdge(fromId, toId, type as EdgeType);
    return { content: [{ type: 'text' as const, text: serialize(edge) }] };
  }
);

// --- memory_split ---
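// Splitting is additive to the hierarchy: each child inherits the parent's
// kind and tags and is attached via a 'contains' edge; the parent body is
// only rewritten when an explicit summary is supplied.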
server.tool(
  'memory_split',
  'Break a large node into smaller children, updating parent with a summary',
  {
    id: z.string().describe('Node ID to split'),
    pieces: z.array(z.object({ title: z.string(), content: z.string() })).describe('Child pieces to create'),
    summary: z.string().optional().describe('Optional summary to replace parent content'),
  },
  async ({ id, pieces, summary }) => {
    const node = getNode(id) ?? findNodeByPrefix(id);
    if (!node) return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };

    if (summary !== undefined) {
      await updateNode(node.id, { content: summary });
    }

    const children = [];
    for (const piece of pieces) {
      const child = await addNode({ kind: node.kind, title: piece.title, content: piece.content, tags: [...node.tags] });
      addEdge(node.id, child.id, 'contains');
      children.push(child);
    }
    return { content: [{ type: 'text' as const, text: serialize({ parent: node.id, children: children.map(c => ({ id: c.id, title: c.title })) }) }] };
  }
);

// --- memory_merge ---
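// Merge strategy: union of tags, shallow-merged metadata (last node wins on
// conflicting keys), and every edge touching an old node is re-pointed at the
// merged node. Self-edges are skipped; duplicate-edge errors are swallowed.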
server.tool(
  'memory_merge',
  'Merge multiple nodes into a single new node, relinking edges and deleting originals',
  {
    nodeIds: z.array(z.string()).describe('Node IDs to merge'),
    title: z.string().describe('Title for merged node'),
    content: z.string().describe('Content for merged node'),
    kind: z.enum(['memory', 'component', 'task', 'decision']).optional().describe('Kind (defaults to first node\'s kind)'),
  },
  async ({ nodeIds, title, content, kind }) => {
    const nodes = nodeIds.map(id => getNode(id) ?? findNodeByPrefix(id));
    if (nodes.some(n => !n)) return { content: [{ type: 'text' as const, text: serialize({ error: 'One or more nodes not found' }) }], isError: true };

    const validNodes = nodes as NonNullable<typeof nodes[number]>[];
    const allTags = [...new Set(validNodes.flatMap(n => n.tags))];
    const mergedKind = (kind as NodeKind) ?? validNodes[0].kind;

    // Merge metadata from all nodes (last wins for conflicting keys)
    const mergedMetadata = validNodes.reduce((acc, n) => ({ ...acc, ...n.metadata }), {} as Record<string, any>);
    const merged = await addNode({ kind: mergedKind, title, content, tags: allTags, metadata: mergedMetadata });
    const oldIds = new Set(validNodes.map(n => n.id));

    // Relink edges from old nodes to merged node
    for (const n of validNodes) {
      const edges = getEdgesByNode(n.id);
      for (const e of edges) {
        const from = oldIds.has(e.fromId) ? merged.id : e.fromId;
        const to = oldIds.has(e.toId) ? merged.id : e.toId;
        if (from !== to) {
          try { addEdge(from, to, e.type, e.metadata); } catch {}
        }
      }
      removeNode(n.id, true);
    }

    return { content: [{ type: 'text' as const, text: serialize({ merged: { id: merged.id, title: merged.title, tags: merged.tags } }) }] };
  }
);

// --- memory_dedupe ---
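// Greedy O(n^2) pairwise scan over nodes that have embeddings: each unvisited
// node seeds a group and absorbs every later node whose cosine similarity meets
// the threshold. The reported similarity is the best pair score in the group.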
server.tool(
  'memory_dedupe',
  'Find similar/duplicate nodes using embedding cosine similarity',
  {
    threshold: z.number().optional().describe('Similarity threshold (default 0.85)'),
    kind: z.enum(['memory', 'component', 'task', 'decision']).optional().describe('Filter by kind'),
    limit: z.number().optional().describe('Max groups to return (default 10)'),
  },
  async ({ threshold, kind, limit }) => {
    const thresh = threshold ?? 0.85;
    const maxGroups = limit ?? 10;
    const nodes = listNodes({ kind: kind as NodeKind, includeStale: false });
    const withEmb = nodes.filter(n => n.embedding);

    const groups: { nodes: { id: string; title: string }[]; similarity: number }[] = [];
    const seen = new Set<string>();

    for (let i = 0; i < withEmb.length && groups.length < maxGroups; i++) {
      if (seen.has(withEmb[i].id)) continue;
      const group = [{ id: withEmb[i].id, title: withEmb[i].title }];
      let maxSim = 0;
      for (let j = i + 1; j < withEmb.length; j++) {
        if (seen.has(withEmb[j].id)) continue;
        const sim = cosineSimilarity(withEmb[i].embedding!, withEmb[j].embedding!);
        if (sim >= thresh) {
          group.push({ id: withEmb[j].id, title: withEmb[j].title });
          seen.add(withEmb[j].id);
          maxSim = Math.max(maxSim, sim);
        }
      }
      if (group.length > 1) {
        seen.add(withEmb[i].id);
        groups.push({ nodes: group, similarity: maxSim });
      }
    }

    return { content: [{ type: 'text' as const, text: serialize({ groups, count: groups.length }) }] };
  }
);

// --- memory_prune ---
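// decay_then_delete is two-phase: nodes untouched for maxAgeDays are first
// marked stale (with updated_at reset to now), and only nodes already stale
// before the cutoff are hard-deleted. A fresh node therefore survives at
// least two prune runs before it is actually removed.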
server.tool(
  'memory_prune',
  'Clean up the graph: delete stale nodes, decay old nodes, or remove orphans',
  {
    mode: z.enum(['hard_delete_stale', 'decay_then_delete', 'delete_orphans']).describe('Prune mode'),
    maxAgeDays: z.number().optional().describe('Max age in days for decay/delete modes'),
  },
  async ({ mode, maxAgeDays }) => {
    const db = getDb();
    let affected = 0;

    if (mode === 'hard_delete_stale') {
      const result = db.prepare('DELETE FROM nodes WHERE is_stale = 1').run();
      affected = result.changes;
    } else if (mode === 'decay_then_delete') {
      const cutoff = Date.now() - (maxAgeDays ?? 90) * 86400000;
      // First mark old non-stale as stale
      const decayed = db.prepare('UPDATE nodes SET is_stale = 1, updated_at = ? WHERE is_stale = 0 AND last_accessed_at < ?').run(Date.now(), cutoff);
      // Then hard-delete already-stale that are also old
      const deleted = db.prepare('DELETE FROM nodes WHERE is_stale = 1 AND updated_at < ?').run(cutoff);
      affected = decayed.changes + deleted.changes;
    } else if (mode === 'delete_orphans') {
      const orphans = db.prepare(`
        SELECT n.id FROM nodes n
        WHERE n.is_stale = 0
        AND NOT EXISTS (SELECT 1 FROM edges e WHERE e.from_id = n.id OR e.to_id = n.id)
      `).all() as any[];
      for (const o of orphans) {
        removeNode(o.id, true);
      }
      affected = orphans.length;
    }

    return { content: [{ type: 'text' as const, text: serialize({ mode, affected }) }] };
  }
);

// --- memory_reorganize ---
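// A node keeps at most one 'contains' parent: all existing incoming contains
// edges are removed before the new parent edge is created.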
server.tool(
  'memory_reorganize',
  'Move a node under a new parent (updates contains edges)',
  {
    nodeId: z.string().describe('Node ID to move'),
    newParentId: z.string().describe('New parent node ID'),
  },
  async ({ nodeId, newParentId }) => {
    const node = getNode(nodeId) ?? findNodeByPrefix(nodeId);
    const parent = getNode(newParentId) ?? findNodeByPrefix(newParentId);
    if (!node) return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };
    if (!parent) return { content: [{ type: 'text' as const, text: serialize({ error: 'Parent not found' }) }], isError: true };

    // Remove existing incoming contains edges
    const db = getDb();
    const incomingContains = db.prepare('SELECT id FROM edges WHERE to_id = ? AND type = ?').all(node.id, 'contains') as any[];
    for (const e of incomingContains) {
      removeEdge(e.id);
    }

    const edge = addEdge(parent.id, node.id, 'contains');
    return { content: [{ type: 'text' as const, text: serialize({ moved: node.id, newParent: parent.id, edge: edge.id }) }] };
  }
);

// --- memory_bulk_tag ---
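// Targets come from either an explicit nodeIds list (unresolvable IDs are
// silently skipped) or a kind/status/tags filter. Nodes whose tag set would
// be unchanged are not rewritten, so `modified` can be lower than `total`.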
server.tool(
  'memory_bulk_tag',
  'Add or remove tags on multiple nodes at once',
  {
    action: z.enum(['add', 'remove']).describe('Whether to add or remove tags'),
    tags: z.array(z.string()).describe('Tags to add/remove'),
    nodeIds: z.array(z.string()).optional().describe('Specific node IDs'),
    filter: z.object({
      kind: z.enum(['memory', 'component', 'task', 'decision']).optional(),
      status: z.string().optional(),
      tags: z.array(z.string()).optional(),
    }).optional().describe('Filter to select nodes'),
  },
  async ({ action, tags, nodeIds, filter }) => {
    let targets: { id: string; tags: string[] }[];

    if (nodeIds?.length) {
      targets = nodeIds.map(id => {
        const n = getNode(id) ?? findNodeByPrefix(id);
        return n ? { id: n.id, tags: n.tags } : null;
      }).filter(Boolean) as { id: string; tags: string[] }[];
    } else if (filter) {
      targets = listNodes({ kind: filter.kind as NodeKind, status: filter.status, tags: filter.tags }).map(n => ({ id: n.id, tags: n.tags }));
    } else {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Provide nodeIds or filter' }) }], isError: true };
    }

    let modified = 0;
    for (const t of targets) {
      let newTags: string[];
      if (action === 'add') {
        newTags = [...new Set([...t.tags, ...tags])];
      } else {
        newTags = t.tags.filter(tag => !tags.includes(tag));
      }
      if (JSON.stringify(newTags) !== JSON.stringify(t.tags)) {
        await updateNode(t.id, { tags: newTags });
        modified++;
      }
    }

    return { content: [{ type: 'text' as const, text: serialize({ action, tags, modified, total: targets.length }) }] };
  }
);

// --- memory_stats ---
server.tool(
  'memory_stats',
  'Get graph statistics: counts by kind, stale/orphan counts, tag distribution',
  {},
  async () => {
    const db = getDb();

    const byKind = db.prepare('SELECT kind, COUNT(*) as count FROM nodes WHERE is_stale = 0 GROUP BY kind').all();
    const totalNodes = db.prepare('SELECT COUNT(*) as count FROM nodes WHERE is_stale = 0').get() as any;
    const staleCount = db.prepare('SELECT COUNT(*) as count FROM nodes WHERE is_stale = 1').get() as any;
    const edgeCount = db.prepare('SELECT COUNT(*) as count FROM edges').get() as any;
    const edgesByType = db.prepare('SELECT type, COUNT(*) as count FROM edges GROUP BY type').all();
    const orphanCount = db.prepare(`
      SELECT COUNT(*) as count FROM nodes n
      WHERE n.is_stale = 0
      AND NOT EXISTS (SELECT 1 FROM edges e WHERE e.from_id = n.id OR e.to_id = n.id)
    `).get() as any;
    const tagDist = db.prepare('SELECT tag, COUNT(*) as count FROM node_tags GROUP BY tag ORDER BY count DESC LIMIT 20').all();

    return {
      content: [{
        type: 'text' as const,
        text: serialize({
          nodes: { total: totalNodes.count, stale: staleCount.count, orphans: orphanCount.count, byKind },
          edges: { total: edgeCount.count, byType: edgesByType },
          topTags: tagDist,
        }),
      }],
    };
  }
);

// --- memory_context ---
import { gatherContext, DEFAULT_CONTEXT_CONFIG } from '../core/context';

server.tool(
  'memory_context',
  'Get relevant context for a Claude session. Gathers recent activity, project-specific nodes, open tasks, and decisions. Use at session start.',
  {
    project: z.string().optional().describe('Project name to filter by (e.g. "cortex")'),
    query: z.string().optional().describe('Optional semantic search query'),
    maxTokens: z.number().optional().describe(`Max tokens in output (default ${DEFAULT_CONTEXT_CONFIG.maxTokens})`),
    maxNodes: z.number().optional().describe(`Max nodes to include (default ${DEFAULT_CONTEXT_CONFIG.maxNodes})`),
  },
  async ({ project, query: semanticQuery, maxTokens, maxNodes }) => {
    const result = await gatherContext({
      project,
      semanticQuery,
      config: {
        maxTokens: maxTokens ?? DEFAULT_CONTEXT_CONFIG.maxTokens,
        maxNodes: maxNodes ?? DEFAULT_CONTEXT_CONFIG.maxNodes,
      },
    });

    return {
      content: [{
        type: 'text' as const,
        text: result.formatted,
      }],
    };
  }
);

// --- memory_summary ---
import { getCachedSummary, generateSummary } from '../core/summary';

server.tool(
  'memory_summary',
  'Get a pre-computed hierarchical summary of the memory graph. Use this instead of memory_list to reduce context usage.',
  {
    refresh: z.boolean().optional().describe('Force regenerate summary (default: use cached)'),
  },
  async ({ refresh }) => {
    let summary = refresh ? null : getCachedSummary();
    if (!summary) {
      summary = await generateSummary();
    }
    return { content: [{ type: 'text' as const, text: serialize(summary) }] };
  }
);

// --- memory_capture ---
import { captureConversation, captureText, getCaptureConfig, setCaptureConfig, CaptureMode } from '../core/capture';

server.tool(
  'memory_capture',
  'Capture a conversation or context as a memory node. Uses AI to summarize and extract key information.',
  {
    conversation: z.string().describe('The conversation or context to capture'),
    sessionId: z.string().optional().describe('Session identifier'),
    filesChanged: z.array(z.string()).optional().describe('List of files that were changed'),
    source: z.string().optional().describe('Source identifier (default: claude-code)'),
  },
  async ({ conversation, sessionId, filesChanged, source }) => {
    const result = await captureConversation({
      conversation,
      sessionId,
      filesChanged,
      source: source || 'claude-code',
    });
    return { content: [{ type: 'text' as const, text: serialize(result) }] };
  }
);

server.tool(
  'memory_remember',
  'Remember a piece of text for later. Simpler than memory_capture - for quick notes and facts.',
  {
    text: z.string().describe('The text to remember'),
    tags: z.array(z.string()).optional().describe('Tags to apply'),
  },
  async ({ text, tags }) => {
    const result = await captureText(text, { tags, source: 'remember' });
    return { content: [{ type: 'text' as const, text: serialize(result) }] };
  }
);

server.tool(
  'memory_capture_config',
  'Get or set auto-capture configuration',
  {
    action: z.enum(['get', 'set']).describe('Action to perform'),
    mode: z.enum(['always', 'manual', 'decisions', 'off']).optional().describe('Capture mode (for set)'),
    minLength: z.number().optional().describe('Minimum conversation length (for set)'),
    autoTag: z.boolean().optional().describe('Auto-generate tags (for set)'),
    linkRelated: z.boolean().optional().describe('Auto-link related nodes (for set)'),
  },
  async ({ action, mode, minLength, autoTag, linkRelated }) => {
    if (action === 'get') {
      const config = getCaptureConfig();
      return { content: [{ type: 'text' as const, text: serialize(config) }] };
    }

    const updates: Partial<{ mode: CaptureMode; minLength: number; autoTag: boolean; linkRelated: boolean }> = {};
    if (mode !== undefined) updates.mode = mode;
    if (minLength !== undefined) updates.minLength = minLength;
    if (autoTag !== undefined) updates.autoTag = autoTag;
    if (linkRelated !== undefined) updates.linkRelated = linkRelated;

    const config = setCaptureConfig(updates);
    return { content: [{ type: 'text' as const, text: serialize({ updated: true, config }) }] };
  }
);

// --- memory_prompt ---
import { interpretAndExecute } from '../core/prompt/interpreter';

server.tool(
  'memory_prompt',
  'Execute a natural language instruction against the memory graph. Uses AI to generate and run an action plan. Returns log of actions performed.',
  {
    prompt: z.string().describe('Natural language instruction'),
  },
  async ({ prompt }) => {
    const result = await interpretAndExecute(prompt);
    return { content: [{ type: 'text' as const, text: serialize(result) }] };
  }
);

// --- memory_history ---
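// Time-travel tools: memory_history lists stored versions, memory_show_at
// resolves the version in effect at a timestamp, memory_diff compares two
// versions, and memory_restore copies an old version forward as a new version
// rather than rewriting history.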
server.tool(
  'memory_history',
  'Get version history for a node',
  {
    id: z.string().describe('Node ID or prefix'),
  },
  async ({ id }) => {
    const node = getNode(id) ?? findNodeByPrefix(id);
    if (!node) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };
    }
    const history = getNodeHistory(node.id);
    return { content: [{ type: 'text' as const, text: serialize({ nodeId: node.id, title: node.title, versions: history }) }] };
  }
);

// --- memory_show_at ---
server.tool(
  'memory_show_at',
  'Show node at a specific point in time',
  {
    id: z.string().describe('Node ID or prefix'),
    timestamp: z.union([z.number(), z.string()]).describe('Unix ms or ISO date string'),
  },
  async ({ id, timestamp }) => {
    const node = getNode(id) ?? findNodeByPrefix(id);
    if (!node) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };
    }

    // Parse timestamp
    let ts: number;
    if (typeof timestamp === 'number') {
      ts = timestamp;
    } else {
      const parsed = Date.parse(timestamp);
      if (isNaN(parsed)) {
        return { content: [{ type: 'text' as const, text: serialize({ error: 'Invalid timestamp format' }) }], isError: true };
      }
      ts = parsed;
    }

    const historical = getNodeAtTime(node.id, ts);
    if (!historical) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'No version found for the specified time' }) }], isError: true };
    }
    return { content: [{ type: 'text' as const, text: serialize(historical) }] };
  }
);

// --- memory_diff ---
server.tool(
  'memory_diff',
  'Compare two versions of a node',
  {
    id: z.string().describe('Node ID or prefix'),
    v1: z.number().describe('First version number'),
    v2: z.number().describe('Second version number'),
  },
  async ({ id, v1, v2 }) => {
    const node = getNode(id) ?? findNodeByPrefix(id);
    if (!node) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };
    }

    const diff = diffVersions(node.id, v1, v2);
    if (!diff) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'One or both versions not found' }) }], isError: true };
    }
    return { content: [{ type: 'text' as const, text: serialize(diff) }] };
  }
);

// --- memory_restore ---
server.tool(
  'memory_restore',
  'Restore a node to a previous version (creates new version)',
  {
    id: z.string().describe('Node ID or prefix'),
    version: z.number().describe('Version number to restore'),
  },
  async ({ id, version }) => {
    const node = getNode(id) ?? findNodeByPrefix(id);
    if (!node) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Node not found' }) }], isError: true };
    }

    const restored = await restoreVersion(node.id, version);
    if (!restored) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Version not found' }) }], isError: true };
    }
    return { content: [{ type: 'text' as const, text: serialize({ message: `Restored to version ${version}`, node: restored }) }] };
  }
);

// --- memory_journal ---
import { getOrCreateJournal, appendToJournal, listJournals, generateJournalSummary, JournalMetadata } from '../core/journal';

server.tool(
  'memory_journal',
  'Get or create today\'s journal, or add an entry to it',
  {
    text: z.string().optional().describe('Text to add to journal (if omitted, returns current journal)'),
    date: z.string().optional().describe('Specific date (YYYY-MM-DD)'),
    tags: z.array(z.string()).optional().describe('Tags for the entry'),
  },
  async ({ text, date, tags }) => {
    if (text) {
      const { journal, entry } = await appendToJournal(text, { tags, date });
      const meta = journal.metadata as JournalMetadata;
      return { content: [{ type: 'text' as const, text: serialize({ added: true, date: meta.date, entry }) }] };
    }
    const journal = await getOrCreateJournal(date);
    return { content: [{ type: 'text' as const, text: serialize(journal) }] };
  }
);

server.tool(
  'memory_journal_list',
  'List recent journals',
  {
    limit: z.number().optional().describe('Max journals to return (default: 10)'),
    month: z.string().optional().describe('Filter by month (YYYY-MM)'),
  },
  async ({ limit, month }) => {
    const journals = listJournals({ limit: limit || 10, month });
    return {
      content: [{
        type: 'text' as const,
        text: serialize(journals.map(j => {
          const meta = j.metadata as JournalMetadata;
          return {
            id: j.id,
            date: meta.date,
            entries: meta.entries?.length || 0,
            hasSummary: !!meta.summary,
          };
        })),
      }],
    };
  }
);

server.tool(
  'memory_journal_summarize',
  'Generate AI summary for a journal',
  {
    date: z.string().optional().describe('Date to summarize (default: today)'),
  },
  async ({ date }) => {
    const summary = await generateJournalSummary(date);
    return { content: [{ type: 'text' as const, text: serialize({ summary }) }] };
  }
);

// --- memory_ingest ---
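// For URLs, ingest() in ../core/ingest handles the pipeline this change adds:
// fetch, HTML-to-text / PDF extraction, chunking with overlap, checksum-based
// deduplication, and auto-linking to related nodes. Raw text short-circuits
// to a plain memory node here.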
import { ingest } from '../core/ingest';

server.tool(
  'memory_ingest',
  'Ingest content from a URL or text into the knowledge graph',
  {
    source: z.string().describe('URL or raw text to ingest'),
    title: z.string().optional().describe('Override title'),
    tags: z.array(z.string()).optional().describe('Tags to apply'),
    isUrl: z.boolean().optional().describe('Treat source as URL (auto-detected if not specified)'),
  },
  async ({ source, title, tags, isUrl }) => {
    // If explicitly not a URL, or doesn't look like a URL, treat as raw text
    const isSourceUrl = isUrl ?? (source.startsWith('http://') || source.startsWith('https://'));

    if (!isSourceUrl) {
      // Treat as raw text - create a simple memory node
      const node = await addNode({
        kind: 'memory',
        title: title || 'Ingested Content',
        content: source,
        tags: ['ingested', 'text', ...(tags || [])],
        metadata: { source: { type: 'text', ingestedAt: Date.now() } },
      });
      return { content: [{ type: 'text' as const, text: serialize({ success: true, nodeId: node.id, title: node.title }) }] };
    }

    const result = await ingest(source, { title, tags });
    return { content: [{ type: 'text' as const, text: serialize(result) }] };
  }
);

server.tool(
  'memory_clip',
  'Quick clip a URL into memory',
  {
    url: z.string().describe('URL to clip'),
    title: z.string().optional().describe('Override title'),
    tags: z.array(z.string()).optional().describe('Tags to apply'),
  },
  async ({ url, title, tags }) => {
    if (!url.startsWith('http://') && !url.startsWith('https://')) {
      return { content: [{ type: 'text' as const, text: serialize({ error: 'Invalid URL' }) }], isError: true };
    }
    const result = await ingest(url, { title, tags });
    return { content: [{ type: 'text' as const, text: serialize(result) }] };
  }
);

// --- memory_index ---
import { indexProject } from '../core/indexer';

server.tool(
  'memory_index',
  'Index a codebase to create component nodes. Scans files, extracts exports/imports, and maps relationships.',
  {
    path: z.string().optional().describe('Path to index (default: current directory)'),
    update: z.boolean().optional().describe('Only update changed files (incremental)'),
    language: z.string().optional().describe('Only index specific language (ts, js, py)'),
    maxDepth: z.number().optional().describe('Maximum directory depth (default: 10)'),
  },
  async ({ path: inputPath, update, language, maxDepth }) => {
    const result = await indexProject(inputPath || '.', {
      update,
      language,
      maxDepth,
    });
    return { content: [{ type: 'text' as const, text: serialize(result) }] };
  }
);

// --- memory_components ---
server.tool(
  'memory_components',
  'List indexed components for a project',
  {
    project: z.string().optional().describe('Project name to filter by'),
    limit: z.number().optional().describe('Max results (default: 50)'),
  },
  async ({ project, limit }) => {
    const tags = project ? [project, 'indexed'] : ['indexed'];
    const components = listNodes({ kind: 'component' as NodeKind, tags, limit: limit || 50 });
    return {
      content: [{
        type: 'text' as const,
        text: serialize({
          count: components.length,
          components: components.map(c => ({
            id: c.id,
            title: c.title,
            filePath: c.metadata?.filePath,
            exports: (c.metadata?.exports as string[])?.length || 0,
            loc: c.metadata?.loc,
          })),
        }),
      }],
    };
  }
);

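// Stdio transport: stdout carries the MCP protocol stream, so all logging
// goes to stderr via console.error to avoid corrupting protocol messages.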
async function main() {
  const transport = new StdioServerTransport();
  await server.connect(transport);
  console.error('Memory MCP server running on stdio');
}

main().catch((err) => {
  console.error('Fatal error:', err);
  process.exit(1);
});