From 661325a235ab80fa0dd72d97a2632f391add2bb2 Mon Sep 17 00:00:00 2001 From: omigamedev Date: Tue, 3 Feb 2026 02:40:01 +0100 Subject: [PATCH] Add AI prompt tool for natural language graph operations New MCP tool and portal UI for executing natural language instructions against the memory graph via Ollama (qwen3-coder:30b). Single LLM call generates a JSON action plan which is executed sequentially. Supports 8 action types: add_node, update_node, remove_node, add_edge, remove_edge, bulk_tag, reorganize, query. Actions can reference previous results via $result[N].field interpolation. Uses /api/chat with few-shot assistant example, format:json, and temperature:0 for reliable output. --- portal/src/App.tsx | 28 ++++- portal/src/api.ts | 5 +- portal/src/components/PromptPanel.tsx | 137 +++++++++++++++++++++ portal/src/types.ts | 15 +++ src/core/prompt/executor.ts | 166 ++++++++++++++++++++++++++ src/core/prompt/interpreter.ts | 105 ++++++++++++++++ src/core/prompt/templates.ts | 49 ++++++++ src/core/prompt/types.ts | 35 ++++++ src/core/search/ollamaGen.ts | 2 +- src/mcp/index.ts | 15 +++ src/server/routes.ts | 15 +++ 11 files changed, 569 insertions(+), 3 deletions(-) create mode 100644 portal/src/components/PromptPanel.tsx create mode 100644 src/core/prompt/executor.ts create mode 100644 src/core/prompt/interpreter.ts create mode 100644 src/core/prompt/templates.ts create mode 100644 src/core/prompt/types.ts diff --git a/portal/src/App.tsx b/portal/src/App.tsx index d6b1dec..63b8a87 100644 --- a/portal/src/App.tsx +++ b/portal/src/App.tsx @@ -8,6 +8,7 @@ import QueryBar from './components/QueryBar'; import LinkModal from './components/LinkModal'; import Toast from './components/Toast'; import MaintenancePanel from './components/MaintenancePanel'; +import PromptPanel from './components/PromptPanel'; export default function App() { const [selectedId, setSelectedId] = useState(null); @@ -17,6 +18,7 @@ export default function App() { const [drawerOpen, setDrawerOpen] = 
useState(false); const [showQuery, setShowQuery] = useState(false); const [showMaintenance, setShowMaintenance] = useState(false); + const [showPrompt, setShowPrompt] = useState(false); const qc = useQueryClient(); const refresh = useCallback(() => { @@ -38,13 +40,14 @@ export default function App() { const handler = (e: KeyboardEvent) => { if (e.key === 'Escape') { if (selectedId) setSelectedId(null); + else if (showPrompt) setShowPrompt(false); else if (showQuery) setShowQuery(false); else if (drawerOpen) setDrawerOpen(false); } }; window.addEventListener('keydown', handler); return () => window.removeEventListener('keydown', handler); - }, [selectedId, drawerOpen, showQuery]); + }, [selectedId, drawerOpen, showQuery, showPrompt]); return (
@@ -76,6 +79,13 @@ export default function App() { > ? +
)} + {/* Prompt panel — slides up from bottom */} + {showPrompt && ( +
setShowPrompt(false)}> +
+
e.stopPropagation()} + > + setShowPrompt(false)} + onDone={() => { refresh(); notify('Prompt executed'); }} + /> +
+
+ )} + {showAddNode && ( setShowAddNode(false)} diff --git a/portal/src/api.ts b/portal/src/api.ts index 8b55b45..e6484ff 100644 --- a/portal/src/api.ts +++ b/portal/src/api.ts @@ -1,4 +1,4 @@ -import type { CortexNode, CortexEdge, GraphData, NodeWithConnections, SearchResult, NodeKind, EdgeType, GroupedQueryResult } from './types'; +import type { CortexNode, CortexEdge, GraphData, NodeWithConnections, SearchResult, NodeKind, EdgeType, GroupedQueryResult, PromptResult } from './types'; const BASE = '/api'; @@ -50,4 +50,7 @@ export const api = { runMaintenance: () => request>('/maintenance/run', { method: 'POST' }), + + prompt: (prompt: string) => + request('/prompt', { method: 'POST', body: JSON.stringify({ prompt }) }), }; diff --git a/portal/src/components/PromptPanel.tsx b/portal/src/components/PromptPanel.tsx new file mode 100644 index 0000000..36e3172 --- /dev/null +++ b/portal/src/components/PromptPanel.tsx @@ -0,0 +1,137 @@ +import { useState } from 'react'; +import { api } from '../api'; +import type { PromptResult } from '../types'; + +interface Props { + onClose: () => void; + onDone: () => void; +} + +export default function PromptPanel({ onClose, onDone }: Props) { + const [text, setText] = useState(''); + const [result, setResult] = useState(null); + const [loading, setLoading] = useState(false); + const [error, setError] = useState(null); + + const submit = async () => { + if (!text.trim()) return; + setLoading(true); + setError(null); + setResult(null); + try { + const data = await api.prompt(text); + setResult(data); + if (data.success) onDone(); + } catch (err: any) { + setError(err.message || 'Request failed'); + } finally { + setLoading(false); + } + }; + + return ( +
+ {/* Header */} +
+ AI Prompt + +
+ + {/* Input */} +
+ setText(e.target.value)} + onKeyDown={e => e.key === 'Enter' && !loading && submit()} + placeholder='e.g. "create a decision node about using Redis for caching"' + className="flex-1 bg-gray-800 border border-gray-600 rounded-lg px-3 py-2 text-sm text-gray-200 placeholder-gray-500 focus:outline-none focus:border-amber-500" + autoFocus + disabled={loading} + /> + +
+ + {/* Results */} +
+ {!result && !loading && !error && ( +
+

Give a natural language instruction to modify the graph:

+

"create a decision node about using Redis for caching"

+

"tag all task nodes with 'backlog'"

+

"create a component for auth and link it to the API gateway"

+
+ )} + + {loading && ( +
+ Generating and executing action plan... +
+ )} + + {error && ( +
+ {error} +
+ )} + + {result && ( +
+ {/* Summary */} +
+
{result.success ? 'Completed' : 'Completed with errors'}
+
{result.summary}
+
+ + {/* Reasoning */} + {result.reasoning && ( +
+ Reasoning: + {result.reasoning} +
+ )} + + {/* Execution log */} + {result.executionLog.length > 0 && ( +
+
Execution Log
+ {result.executionLog.map((entry, i) => ( +
+ + {entry.status === 'completed' ? '✓' : '✗'} + +
+
{entry.description}
+
{entry.action}
+ {entry.error && ( +
{entry.error}
+ )} +
+
+ ))} +
+ )} +
+ )} +
+
+ ); +} diff --git a/portal/src/types.ts b/portal/src/types.ts index a0526ff..be94417 100644 --- a/portal/src/types.ts +++ b/portal/src/types.ts @@ -54,3 +54,18 @@ export interface GroupedQueryResult { groups: ResultGroup[]; totalResults: number; } + +export interface ActionResultEntry { + action: string; + description: string; + status: 'completed' | 'failed'; + result?: any; + error?: string; +} + +export interface PromptResult { + success: boolean; + reasoning: string; + executionLog: ActionResultEntry[]; + summary: string; +} diff --git a/src/core/prompt/executor.ts b/src/core/prompt/executor.ts new file mode 100644 index 0000000..fab0ac9 --- /dev/null +++ b/src/core/prompt/executor.ts @@ -0,0 +1,166 @@ +import { ActionPlan, ActionResult } from './types'; +import { query, addNode, updateNode, removeNode, addEdge, removeEdge } from '../store'; +import { getNode, findNodeByPrefix, listNodes } from '../store'; +import { getDb } from '../db'; +import { NodeKind, EdgeType } from '../../types'; + +function resolveRefs(value: any, results: any[]): any { + if (typeof value === 'string') { + return value.replace(/\$result\[(\d+)\]((?:\.\w+|\[\d+\])*)/g, (_match, idx, path) => { + let obj = results[parseInt(idx)]; + if (obj === undefined) return _match; + // Parse path like .id, [0].node.id + const parts = path.match(/\.(\w+)|\[(\d+)\]/g) || []; + for (const part of parts) { + if (obj == null) return _match; + if (part.startsWith('.')) { + obj = obj[part.slice(1)]; + } else { + const i = parseInt(part.slice(1, -1)); + obj = obj[i]; + } + } + return typeof obj === 'string' ? 
obj : JSON.stringify(obj); + }); + } + if (Array.isArray(value)) return value.map(v => resolveRefs(v, results)); + if (value && typeof value === 'object') { + const out: Record = {}; + for (const [k, v] of Object.entries(value)) { + out[k] = resolveRefs(v, results); + } + return out; + } + return value; +} + +const VALID_KINDS = new Set(['memory', 'component', 'task', 'decision']); +const VALID_EDGE_TYPES = new Set(['depends_on', 'contains', 'implements', 'blocked_by', 'subtask_of', 'relates_to', 'supersedes', 'about']); + +async function executeAction(type: string, params: Record): Promise { + switch (type) { + case 'query': { + const results = await query(params.text, { + kind: params.kind as NodeKind, + limit: params.limit, + }); + return results.map(r => ({ node: { id: r.node.id, kind: r.node.kind, title: r.node.title, content: r.node.content, tags: r.node.tags }, score: r.score })); + } + case 'add_node': { + if (!VALID_KINDS.has(params.kind)) throw new Error(`Invalid kind: ${params.kind}`); + const node = await addNode({ + kind: params.kind as NodeKind, + title: params.title, + content: params.content, + tags: params.tags, + status: params.status, + }); + return { id: node.id, kind: node.kind, title: node.title, content: node.content, tags: node.tags, status: node.status }; + } + case 'update_node': { + const node = await updateNode(params.id, { + title: params.title, + content: params.content, + status: params.status, + tags: params.tags, + }); + if (!node) throw new Error(`Node not found: ${params.id}`); + return { id: node.id, kind: node.kind, title: node.title, content: node.content, tags: node.tags, status: node.status }; + } + case 'remove_node': { + // Soft delete only + const ok = removeNode(params.id, false); + if (!ok) throw new Error(`Node not found: ${params.id}`); + return { ok: true }; + } + case 'add_edge': { + if (!VALID_EDGE_TYPES.has(params.type)) throw new Error(`Invalid edge type: ${params.type}`); + const edge = addEdge(params.fromId, 
params.toId, params.type as EdgeType); + return { id: edge.id, fromId: edge.fromId, toId: edge.toId, type: edge.type }; + } + case 'remove_edge': { + const ok = removeEdge(params.id); + if (!ok) throw new Error(`Edge not found: ${params.id}`); + return { ok: true }; + } + case 'bulk_tag': { + let targets: { id: string; tags: string[] }[]; + if (params.nodeIds?.length) { + targets = params.nodeIds.map((id: string) => { + const n = getNode(id) ?? findNodeByPrefix(id); + return n ? { id: n.id, tags: n.tags } : null; + }).filter(Boolean); + } else if (params.filter) { + targets = listNodes({ + kind: params.filter.kind as NodeKind, + status: params.filter.status, + tags: params.filter.tags, + }).map(n => ({ id: n.id, tags: n.tags })); + } else { + throw new Error('bulk_tag requires nodeIds or filter'); + } + let modified = 0; + for (const t of targets) { + let newTags: string[]; + if (params.action === 'add') { + newTags = [...new Set([...t.tags, ...params.tags])]; + } else { + newTags = t.tags.filter((tag: string) => !params.tags.includes(tag)); + } + if (JSON.stringify(newTags) !== JSON.stringify(t.tags)) { + await updateNode(t.id, { tags: newTags }); + modified++; + } + } + return { action: params.action, tags: params.tags, modified, total: targets.length }; + } + case 'reorganize': { + const node = getNode(params.nodeId) ?? findNodeByPrefix(params.nodeId); + const parent = getNode(params.newParentId) ?? findNodeByPrefix(params.newParentId); + if (!node) throw new Error(`Node not found: ${params.nodeId}`); + if (!parent) throw new Error(`Parent not found: ${params.newParentId}`); + const db = getDb(); + const incomingContains = db.prepare('SELECT id FROM edges WHERE to_id = ? 
AND type = ?').all(node.id, 'contains') as any[]; + for (const e of incomingContains) { + removeEdge(e.id); + } + const edge = addEdge(parent.id, node.id, 'contains'); + return { moved: node.id, newParent: parent.id, edge: edge.id }; + } + default: + throw new Error(`Unknown action type: ${type}`); + } +} + +export async function executePlan(plan: ActionPlan): Promise<{ log: ActionResult[]; summary: string }> { + const log: ActionResult[] = []; + const results: any[] = []; + const counts: Record = {}; + + for (const action of plan.actions) { + try { + const resolvedParams = resolveRefs(action.params, results); + const result = await executeAction(action.type, resolvedParams); + results.push(result); + log.push({ action: action.type, description: action.description, status: 'completed', result }); + counts[action.type] = (counts[action.type] || 0) + 1; + } catch (err: any) { + results.push(null); + log.push({ action: action.type, description: action.description, status: 'failed', error: err.message }); + } + } + + const parts: string[] = []; + const labels: Record = { + query: 'queries', add_node: 'nodes created', update_node: 'nodes updated', + remove_node: 'nodes removed', add_edge: 'edges created', remove_edge: 'edges removed', + bulk_tag: 'bulk tag ops', reorganize: 'reorganizations', + }; + for (const [type, count] of Object.entries(counts)) { + parts.push(`${count} ${labels[type] || type}`); + } + const failed = log.filter(l => l.status === 'failed').length; + if (failed > 0) parts.push(`${failed} failed`); + + return { log, summary: parts.join(', ') || 'No actions executed' }; +} diff --git a/src/core/prompt/interpreter.ts b/src/core/prompt/interpreter.ts new file mode 100644 index 0000000..21e1c54 --- /dev/null +++ b/src/core/prompt/interpreter.ts @@ -0,0 +1,105 @@ +import { isGenAvailable } from '../search/ollamaGen'; +import { query } from '../store'; +import { buildMessages } from './templates'; +import { ActionPlanSchema, PromptResult } from './types'; 
+import { executePlan } from './executor'; + +const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434'; +const GEN_MODEL = process.env.OLLAMA_GEN_MODEL || 'qwen3-coder:30b'; +const PROMPT_TIMEOUT = 120000; // 2 minutes for large prompts + +function fail(summary: string): PromptResult { + return { success: false, reasoning: '', executionLog: [], summary }; +} + +export async function interpretAndExecute(prompt: string): Promise { + // Check Ollama availability + if (!(await isGenAvailable())) { + return fail('Ollama is not available. Make sure Ollama is running and the model is pulled (e.g. `ollama pull llama3`).'); + } + + // Gather context + const context = await query(prompt, { limit: 10 }); + + // Build and send chat messages + const messages = buildMessages(prompt, context); + + let raw: string; + try { + const res = await fetch(`${OLLAMA_URL}/api/chat`, { + method: 'POST', + headers: { 'Content-Type': 'application/json' }, + body: JSON.stringify({ model: GEN_MODEL, messages, stream: false, format: 'json', options: { num_predict: 4096, temperature: 0 } }), + signal: AbortSignal.timeout(PROMPT_TIMEOUT), + }); + if (!res.ok) { + const body = await res.text().catch(() => ''); + return fail(`Ollama returned HTTP ${res.status}: ${body.slice(0, 200)}`); + } + const data = await res.json() as { message?: { content?: string } }; + if (!data.message?.content?.trim()) { + return fail('Ollama returned an empty response.'); + } + raw = data.message.content.trim(); + } catch (err: any) { + if (err.name === 'TimeoutError' || err.name === 'AbortError') { + return fail(`Ollama timed out after ${PROMPT_TIMEOUT / 1000}s. Try a simpler prompt or check if the model is loaded.`); + } + return fail(`Ollama request failed: ${err.message}`); + } + + // Strip thinking tags (qwen3 models output ... 
before the answer) + let jsonStr = raw.replace(/<think>[\s\S]*?<\/think>/g, '').trim(); + + // Strip markdown fences if present + const fenceMatch = jsonStr.match(/```(?:json)?\s*([\s\S]*?)```/); + if (fenceMatch) jsonStr = fenceMatch[1].trim(); + + // Try to extract JSON object if there's surrounding text + if (!jsonStr.startsWith('{')) { + const objMatch = jsonStr.match(/(\{[\s\S]*\})/); + if (objMatch) jsonStr = objMatch[1]; + } + + let parsed: any; + try { + parsed = JSON.parse(jsonStr); + } catch { + return { + success: false, + reasoning: '', + executionLog: [], + summary: `Failed to parse LLM response as JSON. Raw response: ${raw.slice(0, 500)}`, + }; + } + + // Validate — be lenient: filter out unknown action types instead of rejecting the whole plan + const VALID_TYPES = new Set(['query', 'add_node', 'update_node', 'remove_node', 'add_edge', 'remove_edge', 'bulk_tag', 'reorganize']); + + if (!parsed.actions || !Array.isArray(parsed.actions)) { + return fail(`Invalid action plan: missing actions array. Raw: ${raw.slice(0, 300)}`); + } + + const validActions = parsed.actions.filter((a: any) => a && VALID_TYPES.has(a.type)); + const skipped = parsed.actions.length - validActions.length; + + if (validActions.length === 0) { + return fail(`No valid actions in plan. The model used unsupported action types: ${parsed.actions.map((a: any) => a?.type).join(', ')}`); + } + + const plan = { + reasoning: parsed.reasoning || '', + actions: validActions.map((a: any) => ({ type: a.type, params: a.params || {}, description: a.description || '' })), + }; + + // Execute + const { log, summary } = await executePlan(plan); + const anyFailed = log.some(l => l.status === 'failed'); + + return { + success: !anyFailed, + reasoning: plan.reasoning, + executionLog: log, + summary: summary + (skipped > 0 ?
` (${skipped} unsupported actions skipped)` : ''), + }; +} diff --git a/src/core/prompt/templates.ts b/src/core/prompt/templates.ts new file mode 100644 index 0000000..4f49234 --- /dev/null +++ b/src/core/prompt/templates.ts @@ -0,0 +1,49 @@ +import { SearchResult } from '../../types'; + +export function buildMessages(userPrompt: string, context: SearchResult[]): { role: string; content: string }[] { + const system = `You produce JSON action plans for a knowledge graph. + +ACTIONS: +- add_node: params { kind, title, content?, tags?, status? } → returns { id }. kind must be: memory, component, task, or decision. +- update_node: params { id, title?, content?, status?, tags? } +- remove_node: params { id } +- add_edge: params { fromId, toId, type }. type must be: depends_on, contains, implements, blocked_by, subtask_of, relates_to, supersedes, about +- remove_edge: params { id } +- bulk_tag: params { action: "add"|"remove", tags, nodeIds? } +- reorganize: params { nodeId, newParentId } + +Use "$result[N].id" to reference the id from action N's result. + +IMPORTANT: To group nodes, first add_node to create a parent, then reorganize each node under it. Use node IDs from context — do NOT query. + +Output JSON: {"reasoning":"...","actions":[{"type":"...","params":{...},"description":"..."}]}`; + + // Few-shot example showing the exact grouping pattern + const exampleUser = `AVAILABLE NODES: +1. id="aaa-111" kind=memory title="DB Config" tags=[config] +2. id="bbb-222" kind=memory title="API Config" tags=[config] +3. 
id="ccc-333" kind=memory title="Unrelated Note" tags=[misc] + +INSTRUCTION: group the config nodes /no_think`; + + const exampleAssistant = `{"reasoning":"Create a parent Configuration group and move the two config nodes under it.","actions":[{"type":"add_node","params":{"kind":"memory","title":"Configuration","content":"Parent group for configuration nodes.","tags":["configuration","group"]},"description":"Create Configuration parent node"},{"type":"reorganize","params":{"nodeId":"aaa-111","newParentId":"$result[0].id"},"description":"Move DB Config under Configuration"},{"type":"reorganize","params":{"nodeId":"bbb-222","newParentId":"$result[0].id"},"description":"Move API Config under Configuration"}]}`; + + let contextList = ''; + if (context.length > 0) { + contextList = context.map((r, i) => + `${i + 1}. id="${r.node.id}" kind=${r.node.kind} title="${r.node.title}"${r.node.tags.length ? ` tags=[${r.node.tags.join(',')}]` : ''}` + ).join('\n'); + } + + const user = `AVAILABLE NODES: +${contextList || '(none)'} + +INSTRUCTION: ${userPrompt} /no_think`; + + return [ + { role: 'system', content: system }, + { role: 'user', content: exampleUser }, + { role: 'assistant', content: exampleAssistant }, + { role: 'user', content: user }, + ]; +} diff --git a/src/core/prompt/types.ts b/src/core/prompt/types.ts new file mode 100644 index 0000000..5922835 --- /dev/null +++ b/src/core/prompt/types.ts @@ -0,0 +1,35 @@ +import { z } from 'zod/v3'; + +export const ActionTypeSchema = z.enum([ + 'query', 'add_node', 'update_node', 'remove_node', + 'add_edge', 'remove_edge', 'bulk_tag', 'reorganize', +]); +export type ActionType = z.infer; + +export const ActionSchema = z.object({ + type: ActionTypeSchema, + params: z.record(z.any()), + description: z.string(), +}); +export type Action = z.infer; + +export const ActionPlanSchema = z.object({ + reasoning: z.string(), + actions: z.array(ActionSchema).max(20), +}); +export type ActionPlan = z.infer; + +export interface ActionResult 
{ + action: string; + description: string; + status: 'completed' | 'failed'; + result?: any; + error?: string; +} + +export interface PromptResult { + success: boolean; + reasoning: string; + executionLog: ActionResult[]; + summary: string; +} diff --git a/src/core/search/ollamaGen.ts b/src/core/search/ollamaGen.ts index 76dc108..69ff717 100644 --- a/src/core/search/ollamaGen.ts +++ b/src/core/search/ollamaGen.ts @@ -1,5 +1,5 @@ const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434'; -const GEN_MODEL = process.env.OLLAMA_GEN_MODEL || 'llama3'; +const GEN_MODEL = process.env.OLLAMA_GEN_MODEL || 'llama3.2'; let _available: boolean | null = null; diff --git a/src/mcp/index.ts b/src/mcp/index.ts index 7314abf..755a93e 100644 --- a/src/mcp/index.ts +++ b/src/mcp/index.ts @@ -367,6 +367,21 @@ server.tool( } ); +// --- memory_prompt --- +import { interpretAndExecute } from '../core/prompt/interpreter'; + +server.tool( + 'memory_prompt', + 'Execute a natural language instruction against the memory graph. Uses AI to generate and run an action plan. 
Returns log of actions performed.', + { + prompt: z.string().describe('Natural language instruction'), + }, + async ({ prompt }) => { + const result = await interpretAndExecute(prompt); + return { content: [{ type: 'text' as const, text: serialize(result) }] }; + } +); + async function main() { const transport = new StdioServerTransport(); await server.connect(transport); diff --git a/src/server/routes.ts b/src/server/routes.ts index 712e1d1..cad09cd 100644 --- a/src/server/routes.ts +++ b/src/server/routes.ts @@ -166,4 +166,19 @@ router.post('/maintenance/run', async (_req: Request, res: Response) => { } }); +// Prompt — AI-driven natural language instruction +router.post('/prompt', async (req: Request, res: Response) => { + try { + const { prompt } = req.body; + if (!prompt || typeof prompt !== 'string') { + return res.status(400).json({ error: 'prompt is required' }); + } + const { interpretAndExecute } = await import('../core/prompt/interpreter'); + const result = await interpretAndExecute(prompt); + res.json(result); + } catch (err: any) { + res.status(500).json({ error: err.message }); + } +}); + export default router;