Add AI prompt tool for natural language graph operations

New MCP tool and portal UI for executing natural language instructions
against the memory graph via Ollama (qwen3-coder:30b). Single LLM call
generates a JSON action plan which is executed sequentially.

Supports 8 action types: add_node, update_node, remove_node, add_edge,
remove_edge, bulk_tag, reorganize, query. Actions can reference previous
results via $result[N].field interpolation. Uses /api/chat with few-shot
assistant example, format:json, and temperature:0 for reliable output.
This commit is contained in:
2026-02-03 02:40:01 +01:00
parent f2f9d729da
commit 661325a235
11 changed files with 569 additions and 3 deletions

166
src/core/prompt/executor.ts Normal file
View File

@@ -0,0 +1,166 @@
import { ActionPlan, ActionResult } from './types';
import { query, addNode, updateNode, removeNode, addEdge, removeEdge } from '../store';
import { getNode, findNodeByPrefix, listNodes } from '../store';
import { getDb } from '../db';
import { NodeKind, EdgeType } from '../../types';
/**
 * Recursively substitute `$result[N]` references in a value with data from
 * earlier action results. Strings may carry a path suffix such as
 * `$result[0].id` or `$result[1][0].node.id`; arrays and plain objects are
 * walked element-by-element. Unresolvable references are left verbatim so the
 * failure is visible downstream.
 *
 * @param value   params value (string, array, object, or scalar)
 * @param results ordered results of previously executed actions
 * @returns the value with every resolvable reference replaced
 */
function resolveRefs(value: any, results: any[]): any {
  if (typeof value === 'string') {
    const refPattern = /\$result\[(\d+)\]((?:\.\w+|\[\d+\])*)/g;
    return value.replace(refPattern, (whole, indexStr, pathStr) => {
      let current = results[Number(indexStr)];
      if (current === undefined) return whole;
      // Split the trailing path into `.prop` and `[idx]` segments.
      const segments = pathStr.match(/\.(\w+)|\[(\d+)\]/g) ?? [];
      for (const segment of segments) {
        // Bail out (keep the raw reference) if the path dead-ends.
        if (current == null) return whole;
        current = segment.startsWith('.')
          ? current[segment.slice(1)]
          : current[Number(segment.slice(1, -1))];
      }
      // Non-string leaves are embedded as JSON text.
      return typeof current === 'string' ? current : JSON.stringify(current);
    });
  }
  if (Array.isArray(value)) {
    return value.map(item => resolveRefs(item, results));
  }
  if (value !== null && typeof value === 'object') {
    return Object.fromEntries(
      Object.entries(value).map(([key, inner]) => [key, resolveRefs(inner, results)])
    );
  }
  // Numbers, booleans, null, undefined pass through untouched.
  return value;
}
// Allowed values for add_node.kind and add_edge.type — validated up front
// because these strings come from LLM output, not trusted callers.
const VALID_KINDS = new Set(['memory', 'component', 'task', 'decision']);
const VALID_EDGE_TYPES = new Set(['depends_on', 'contains', 'implements', 'blocked_by', 'subtask_of', 'relates_to', 'supersedes', 'about']);

/**
 * Execute one plan action against the store and return a plain-JSON result.
 * Returned objects feed `$result[N]` interpolation in later actions, so they
 * are flattened to JSON-safe shapes (no store entities leak out).
 *
 * NOTE(review): getNode/findNodeByPrefix/addEdge/removeEdge/removeNode are
 * called without await — they appear to be synchronous store helpers; confirm.
 *
 * @param type   action discriminator (one of the eight known kinds)
 * @param params already-interpolated parameters for this action
 * @returns action-specific plain object (node/edge summary, counters, or { ok })
 * @throws Error on unknown type, invalid kind/edge type, or missing node/edge
 */
async function executeAction(type: string, params: Record<string, any>): Promise<any> {
  switch (type) {
    case 'query': {
      // Semantic search; map each hit to a JSON-safe node summary plus score.
      const results = await query(params.text, {
        kind: params.kind as NodeKind,
        limit: params.limit,
      });
      return results.map(r => ({ node: { id: r.node.id, kind: r.node.kind, title: r.node.title, content: r.node.content, tags: r.node.tags }, score: r.score }));
    }
    case 'add_node': {
      // Reject LLM-invented kinds before touching the store.
      if (!VALID_KINDS.has(params.kind)) throw new Error(`Invalid kind: ${params.kind}`);
      const node = await addNode({
        kind: params.kind as NodeKind,
        title: params.title,
        content: params.content,
        tags: params.tags,
        status: params.status,
      });
      // The id here is what later actions reference via $result[N].id.
      return { id: node.id, kind: node.kind, title: node.title, content: node.content, tags: node.tags, status: node.status };
    }
    case 'update_node': {
      // Partial update: undefined fields are passed through and presumably
      // left unchanged by updateNode — confirm against store semantics.
      const node = await updateNode(params.id, {
        title: params.title,
        content: params.content,
        status: params.status,
        tags: params.tags,
      });
      if (!node) throw new Error(`Node not found: ${params.id}`);
      return { id: node.id, kind: node.kind, title: node.title, content: node.content, tags: node.tags, status: node.status };
    }
    case 'remove_node': {
      // Soft delete only
      // (second argument false presumably disables hard deletion — TODO confirm)
      const ok = removeNode(params.id, false);
      if (!ok) throw new Error(`Node not found: ${params.id}`);
      return { ok: true };
    }
    case 'add_edge': {
      if (!VALID_EDGE_TYPES.has(params.type)) throw new Error(`Invalid edge type: ${params.type}`);
      const edge = addEdge(params.fromId, params.toId, params.type as EdgeType);
      return { id: edge.id, fromId: edge.fromId, toId: edge.toId, type: edge.type };
    }
    case 'remove_edge': {
      const ok = removeEdge(params.id);
      if (!ok) throw new Error(`Edge not found: ${params.id}`);
      return { ok: true };
    }
    case 'bulk_tag': {
      // Resolve targets either from an explicit id list (with short-prefix
      // fallback) or from a kind/status/tags filter.
      let targets: { id: string; tags: string[] }[];
      if (params.nodeIds?.length) {
        targets = params.nodeIds.map((id: string) => {
          const n = getNode(id) ?? findNodeByPrefix(id);
          return n ? { id: n.id, tags: n.tags } : null;
        }).filter(Boolean);
      } else if (params.filter) {
        targets = listNodes({
          kind: params.filter.kind as NodeKind,
          status: params.filter.status,
          tags: params.filter.tags,
        }).map(n => ({ id: n.id, tags: n.tags }));
      } else {
        throw new Error('bulk_tag requires nodeIds or filter');
      }
      let modified = 0;
      for (const t of targets) {
        let newTags: string[];
        if (params.action === 'add') {
          // Set-union keeps existing order and dedupes additions.
          newTags = [...new Set([...t.tags, ...params.tags])];
        } else {
          newTags = t.tags.filter((tag: string) => !params.tags.includes(tag));
        }
        // JSON comparison is order-sensitive; unchanged tag lists are skipped
        // so `modified` only counts real writes.
        if (JSON.stringify(newTags) !== JSON.stringify(t.tags)) {
          await updateNode(t.id, { tags: newTags });
          modified++;
        }
      }
      return { action: params.action, tags: params.tags, modified, total: targets.length };
    }
    case 'reorganize': {
      // Both ends accept short id prefixes, matching what the LLM sees in context.
      const node = getNode(params.nodeId) ?? findNodeByPrefix(params.nodeId);
      const parent = getNode(params.newParentId) ?? findNodeByPrefix(params.newParentId);
      if (!node) throw new Error(`Node not found: ${params.nodeId}`);
      if (!parent) throw new Error(`Parent not found: ${params.newParentId}`);
      const db = getDb();
      // Drop every existing incoming 'contains' edge first — this enforces a
      // single-parent invariant for containment.
      const incomingContains = db.prepare('SELECT id FROM edges WHERE to_id = ? AND type = ?').all(node.id, 'contains') as any[];
      for (const e of incomingContains) {
        removeEdge(e.id);
      }
      const edge = addEdge(parent.id, node.id, 'contains');
      return { moved: node.id, newParent: parent.id, edge: edge.id };
    }
    default:
      throw new Error(`Unknown action type: ${type}`);
  }
}
/**
 * Run every action of a plan in order and build a human-readable summary.
 * A failing action is logged and skipped — later actions still run, and a
 * null placeholder keeps $result[N] indices aligned with action positions.
 *
 * @param plan validated action plan (reasoning + ordered actions)
 * @returns per-action execution log plus a tally string such as
 *          "2 nodes created, 1 reorganizations, 1 failed"
 */
export async function executePlan(plan: ActionPlan): Promise<{ log: ActionResult[]; summary: string }> {
  const log: ActionResult[] = [];
  const priorResults: any[] = [];
  const countsByType: Record<string, number> = {};

  for (const step of plan.actions) {
    try {
      // Interpolate $result[N] references using everything executed so far.
      const concreteParams = resolveRefs(step.params, priorResults);
      const outcome = await executeAction(step.type, concreteParams);
      priorResults.push(outcome);
      countsByType[step.type] = (countsByType[step.type] ?? 0) + 1;
      log.push({ action: step.type, description: step.description, status: 'completed', result: outcome });
    } catch (err: any) {
      // Placeholder keeps result indices stable for subsequent steps.
      priorResults.push(null);
      log.push({ action: step.type, description: step.description, status: 'failed', error: err.message });
    }
  }

  // Friendly labels for the summary line; unknown types fall back to the raw name.
  const labelFor: Record<string, string> = {
    query: 'queries', add_node: 'nodes created', update_node: 'nodes updated',
    remove_node: 'nodes removed', add_edge: 'edges created', remove_edge: 'edges removed',
    bulk_tag: 'bulk tag ops', reorganize: 'reorganizations',
  };
  const pieces = Object.entries(countsByType).map(
    ([actionType, count]) => `${count} ${labelFor[actionType] || actionType}`
  );
  const failures = log.filter(entry => entry.status === 'failed').length;
  if (failures > 0) pieces.push(`${failures} failed`);
  return { log, summary: pieces.join(', ') || 'No actions executed' };
}

View File

@@ -0,0 +1,105 @@
import { isGenAvailable } from '../search/ollamaGen';
import { query } from '../store';
import { buildMessages } from './templates';
import { ActionPlanSchema, PromptResult } from './types';
import { executePlan } from './executor';
const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434';
const GEN_MODEL = process.env.OLLAMA_GEN_MODEL || 'qwen3-coder:30b';
const PROMPT_TIMEOUT = 120000; // 2 minutes for large prompts
/** Build a failed PromptResult carrying only an explanatory summary. */
function fail(summary: string): PromptResult {
  return { summary, success: false, reasoning: '', executionLog: [] };
}
/**
 * Interpret a natural-language instruction as a JSON action plan via Ollama
 * and execute it against the memory graph.
 *
 * Flow: availability check → semantic context query → single /api/chat call
 * (few-shot prompt, format:json, temperature:0) → tolerant JSON extraction
 * (thinking tags, code fences, surrounding text) → lenient validation
 * (unknown action types are skipped, not fatal) → sequential execution.
 *
 * @param prompt free-form user instruction
 * @returns PromptResult; success is false when any action failed or when the
 *          plan could not be obtained/parsed at all
 */
export async function interpretAndExecute(prompt: string): Promise<PromptResult> {
  // Check Ollama availability
  if (!(await isGenAvailable())) {
    // Fix: name the model actually configured (GEN_MODEL) instead of the
    // previously hard-coded `llama3`, so the hint matches the real default.
    return fail(`Ollama is not available. Make sure Ollama is running and the model is pulled (e.g. \`ollama pull ${GEN_MODEL}\`).`);
  }
  // Gather context: top semantic matches give the model real node ids to reference.
  const context = await query(prompt, { limit: 10 });
  // Build and send chat messages
  const messages = buildMessages(prompt, context);
  let raw: string;
  try {
    const res = await fetch(`${OLLAMA_URL}/api/chat`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      // format:'json' + temperature:0 push the model toward deterministic, parseable output.
      body: JSON.stringify({ model: GEN_MODEL, messages, stream: false, format: 'json', options: { num_predict: 4096, temperature: 0 } }),
      signal: AbortSignal.timeout(PROMPT_TIMEOUT),
    });
    if (!res.ok) {
      const body = await res.text().catch(() => '');
      return fail(`Ollama returned HTTP ${res.status}: ${body.slice(0, 200)}`);
    }
    const data = await res.json() as { message?: { content?: string } };
    if (!data.message?.content?.trim()) {
      return fail('Ollama returned an empty response.');
    }
    raw = data.message.content.trim();
  } catch (err: any) {
    // AbortSignal.timeout raises TimeoutError (AbortError on older runtimes).
    if (err.name === 'TimeoutError' || err.name === 'AbortError') {
      return fail(`Ollama timed out after ${PROMPT_TIMEOUT / 1000}s. Try a simpler prompt or check if the model is loaded.`);
    }
    return fail(`Ollama request failed: ${err.message}`);
  }
  // Strip thinking tags (qwen3 models output <think>...</think> before the answer)
  let jsonStr = raw.replace(/<think>[\s\S]*?<\/think>/g, '').trim();
  // Strip markdown fences if present
  const fenceMatch = jsonStr.match(/```(?:json)?\s*([\s\S]*?)```/);
  if (fenceMatch) jsonStr = fenceMatch[1].trim();
  // Try to extract JSON object if there's surrounding text
  if (!jsonStr.startsWith('{')) {
    const objMatch = jsonStr.match(/(\{[\s\S]*\})/);
    if (objMatch) jsonStr = objMatch[1];
  }
  let parsed: any;
  try {
    parsed = JSON.parse(jsonStr);
  } catch {
    // Include the raw response so the caller can see what the model actually said.
    return {
      success: false,
      reasoning: '',
      executionLog: [],
      summary: `Failed to parse LLM response as JSON. Raw response: ${raw.slice(0, 500)}`,
    };
  }
  // Validate — be lenient: filter out unknown action types instead of rejecting the whole plan
  const VALID_TYPES = new Set(['query', 'add_node', 'update_node', 'remove_node', 'add_edge', 'remove_edge', 'bulk_tag', 'reorganize']);
  if (!parsed.actions || !Array.isArray(parsed.actions)) {
    return fail(`Invalid action plan: missing actions array. Raw: ${raw.slice(0, 300)}`);
  }
  const validActions = parsed.actions.filter((a: any) => a && VALID_TYPES.has(a.type));
  const skipped = parsed.actions.length - validActions.length;
  if (validActions.length === 0) {
    return fail(`No valid actions in plan. The model used unsupported action types: ${parsed.actions.map((a: any) => a?.type).join(', ')}`);
  }
  // Normalize each action so downstream code can rely on params/description existing.
  const plan = {
    reasoning: parsed.reasoning || '',
    actions: validActions.map((a: any) => ({ type: a.type, params: a.params || {}, description: a.description || '' })),
  };
  // Execute
  const { log, summary } = await executePlan(plan);
  const anyFailed = log.some(l => l.status === 'failed');
  return {
    success: !anyFailed,
    reasoning: plan.reasoning,
    executionLog: log,
    summary: summary + (skipped > 0 ? ` (${skipped} unsupported actions skipped)` : ''),
  };
}

View File

@@ -0,0 +1,49 @@
import { SearchResult } from '../../types';
/**
 * Assemble the chat transcript sent to Ollama: a system contract describing
 * the action vocabulary, one hard-coded few-shot exchange demonstrating the
 * grouping pattern, and the real user turn listing retrieved context nodes.
 *
 * @param userPrompt raw natural-language instruction
 * @param context    semantic-search hits whose ids the model may reference
 * @returns message array in Ollama /api/chat format (system, user, assistant, user)
 */
export function buildMessages(userPrompt: string, context: SearchResult[]): { role: string; content: string }[] {
  const systemMessage = `You produce JSON action plans for a knowledge graph.
ACTIONS:
- add_node: params { kind, title, content?, tags?, status? } → returns { id }. kind must be: memory, component, task, or decision.
- update_node: params { id, title?, content?, status?, tags? }
- remove_node: params { id }
- add_edge: params { fromId, toId, type }. type must be: depends_on, contains, implements, blocked_by, subtask_of, relates_to, supersedes, about
- remove_edge: params { id }
- bulk_tag: params { action: "add"|"remove", tags, nodeIds? }
- reorganize: params { nodeId, newParentId }
Use "$result[N].id" to reference the id from action N's result.
IMPORTANT: To group nodes, first add_node to create a parent, then reorganize each node under it. Use node IDs from context — do NOT query.
Output JSON: {"reasoning":"...","actions":[{"type":"...","params":{...},"description":"..."}]}`;

  // Few-shot example showing the exact grouping pattern
  const fewShotUser = `AVAILABLE NODES:
1. id="aaa-111" kind=memory title="DB Config" tags=[config]
2. id="bbb-222" kind=memory title="API Config" tags=[config]
3. id="ccc-333" kind=memory title="Unrelated Note" tags=[misc]
INSTRUCTION: group the config nodes /no_think`;
  const fewShotAssistant = `{"reasoning":"Create a parent Configuration group and move the two config nodes under it.","actions":[{"type":"add_node","params":{"kind":"memory","title":"Configuration","content":"Parent group for configuration nodes.","tags":["configuration","group"]},"description":"Create Configuration parent node"},{"type":"reorganize","params":{"nodeId":"aaa-111","newParentId":"$result[0].id"},"description":"Move DB Config under Configuration"},{"type":"reorganize","params":{"nodeId":"bbb-222","newParentId":"$result[0].id"},"description":"Move API Config under Configuration"}]}`;

  // One numbered line per context hit; an empty context yields '' and is
  // rendered as "(none)" below.
  const nodeLines = context.map((hit, idx) =>
    `${idx + 1}. id="${hit.node.id}" kind=${hit.node.kind} title="${hit.node.title}"${hit.node.tags.length ? ` tags=[${hit.node.tags.join(',')}]` : ''}`
  ).join('\n');

  const finalUser = `AVAILABLE NODES:
${nodeLines || '(none)'}
INSTRUCTION: ${userPrompt} /no_think`;

  return [
    { role: 'system', content: systemMessage },
    { role: 'user', content: fewShotUser },
    { role: 'assistant', content: fewShotAssistant },
    { role: 'user', content: finalUser },
  ];
}

35
src/core/prompt/types.ts Normal file
View File

@@ -0,0 +1,35 @@
import { z } from 'zod/v3';
/** The eight action types the interpreter accepts from the LLM. */
export const ActionTypeSchema = z.enum([
  'query', 'add_node', 'update_node', 'remove_node',
  'add_edge', 'remove_edge', 'bulk_tag', 'reorganize',
]);
export type ActionType = z.infer<typeof ActionTypeSchema>;

/**
 * One plan step: discriminator, loosely-typed params (validated per-action at
 * execution time, not here), and a human-readable description for the log.
 */
export const ActionSchema = z.object({
  type: ActionTypeSchema,
  params: z.record(z.any()),
  description: z.string(),
});
export type Action = z.infer<typeof ActionSchema>;

/** Full LLM output: stated reasoning plus up to 20 ordered actions. */
export const ActionPlanSchema = z.object({
  reasoning: z.string(),
  actions: z.array(ActionSchema).max(20),
});
export type ActionPlan = z.infer<typeof ActionPlanSchema>;

/** Outcome of a single executed action, as recorded in the execution log. */
export interface ActionResult {
  // Action type, e.g. 'add_node'.
  action: string;
  // Model-provided step description.
  description: string;
  status: 'completed' | 'failed';
  // Present when status === 'completed'.
  result?: any;
  // Present when status === 'failed'.
  error?: string;
}

/** Top-level result returned to MCP / portal callers. */
export interface PromptResult {
  // False if planning failed or any individual action failed.
  success: boolean;
  // Model's stated reasoning ('' when unavailable).
  reasoning: string;
  executionLog: ActionResult[];
  // Human-readable tally or error message.
  summary: string;
}

View File

@@ -1,5 +1,5 @@
const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434';
const GEN_MODEL = process.env.OLLAMA_GEN_MODEL || 'llama3';
const GEN_MODEL = process.env.OLLAMA_GEN_MODEL || 'llama3.2';
let _available: boolean | null = null;

View File

@@ -367,6 +367,21 @@ server.tool(
}
);
// --- memory_prompt ---
// MCP tool: forwards a free-form instruction to the AI interpreter, which
// plans and executes graph actions, then returns the serialized result
// (success flag, reasoning, execution log, summary) as text content.
// NOTE(review): mid-file import — ES module imports are hoisted so this works,
// but convention would place it with the top-of-file imports.
import { interpretAndExecute } from '../core/prompt/interpreter';
server.tool(
  'memory_prompt',
  'Execute a natural language instruction against the memory graph. Uses AI to generate and run an action plan. Returns log of actions performed.',
  {
    // Single free-form instruction; planning/validation happens server-side via Ollama.
    prompt: z.string().describe('Natural language instruction'),
  },
  async ({ prompt }) => {
    const result = await interpretAndExecute(prompt);
    return { content: [{ type: 'text' as const, text: serialize(result) }] };
  }
);
async function main() {
const transport = new StdioServerTransport();
await server.connect(transport);

View File

@@ -166,4 +166,19 @@ router.post('/maintenance/run', async (_req: Request, res: Response) => {
}
});
// Prompt — AI-driven natural language instruction
// POST /prompt — run a natural-language instruction through the AI interpreter.
// Responds 400 on a missing/empty prompt, 500 on unexpected failure; otherwise
// returns the PromptResult JSON (which itself carries success/failure detail).
router.post('/prompt', async (req: Request, res: Response) => {
  try {
    const { prompt } = req.body;
    // Guard: prompt must be a non-empty string.
    if (typeof prompt !== 'string' || prompt.length === 0) {
      return res.status(400).json({ error: 'prompt is required' });
    }
    // Lazy import keeps Ollama-related modules out of the route module's load path.
    const { interpretAndExecute } = await import('../core/prompt/interpreter');
    const outcome = await interpretAndExecute(prompt);
    res.json(outcome);
  } catch (err: any) {
    res.status(500).json({ error: err.message });
  }
});
export default router;