Add Ollama generation module for AI-powered maintenance
Provides text generation via Ollama (llama3) for heartbeat operations like auto-dedupe, auto-tag, auto-summarize, auto-split, and auto-archive.
This commit is contained in:
34
src/core/search/ollamaGen.ts
Normal file
34
src/core/search/ollamaGen.ts
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
// Base URL of the local Ollama server; override via the OLLAMA_URL env var.
// NOTE(review): '||' (not '??') means an empty-string env var also falls back
// to the default — presumably intended for env-var handling.
const OLLAMA_URL = process.env.OLLAMA_URL || 'http://localhost:11434';

// Name (or name prefix, e.g. 'llama3' matches 'llama3:latest') of the model
// used for generation; override via OLLAMA_GEN_MODEL.
const GEN_MODEL = process.env.OLLAMA_GEN_MODEL || 'llama3';

// Process-lifetime cache of the availability probe performed by
// isGenAvailable(): null = not yet probed, true/false = probe result.
// Once set, the server is never re-probed for the life of the process.
let _available: boolean | null = null;
export async function isGenAvailable(): Promise<boolean> {
|
||||||
|
if (_available !== null) return _available;
|
||||||
|
try {
|
||||||
|
const res = await fetch(`${OLLAMA_URL}/api/tags`, { signal: AbortSignal.timeout(2000) });
|
||||||
|
if (!res.ok) { _available = false; return false; }
|
||||||
|
const data = await res.json() as { models?: { name: string }[] };
|
||||||
|
_available = !!data.models?.some(m => m.name.startsWith(GEN_MODEL));
|
||||||
|
} catch {
|
||||||
|
_available = false;
|
||||||
|
}
|
||||||
|
return _available;
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function generate(prompt: string): Promise<string | null> {
|
||||||
|
if (!(await isGenAvailable())) return null;
|
||||||
|
try {
|
||||||
|
const res = await fetch(`${OLLAMA_URL}/api/generate`, {
|
||||||
|
method: 'POST',
|
||||||
|
headers: { 'Content-Type': 'application/json' },
|
||||||
|
body: JSON.stringify({ model: GEN_MODEL, prompt, stream: false }),
|
||||||
|
signal: AbortSignal.timeout(60000),
|
||||||
|
});
|
||||||
|
if (!res.ok) return null;
|
||||||
|
const data = await res.json() as { response?: string };
|
||||||
|
return data.response?.trim() ?? null;
|
||||||
|
} catch {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user