Implement dual-model filing pipeline with Ollama extraction

2026-02-28 16:31:25 -05:00
parent 0615534f4b
commit a09001501e
16 changed files with 872 additions and 51 deletions


@@ -2,6 +2,7 @@ import { beforeEach, describe, expect, it, mock } from 'bun:test';
import {
  __resetAiWarningsForTests,
  getAiConfig,
  getExtractionAiConfig,
  runAiAnalysis
} from './ai';
@@ -154,4 +155,65 @@ describe('ai config and runtime', () => {
      })
    ).rejects.toThrow('AI SDK returned an empty response');
  });
  it('uses ollama defaults for extraction workload config', () => {
    const config = getExtractionAiConfig({
      env: {},
      warn: () => {}
    });
    expect(config.provider).toBe('ollama');
    expect(config.baseUrl).toBe('http://127.0.0.1:11434');
    expect(config.model).toBe('qwen3:8b');
    expect(config.apiKey).toBe('ollama');
    expect(config.temperature).toBe(0);
  });
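
  // The defaults asserted above suggest a config helper along these lines.
  // This is a hedged sketch, not the shipped implementation: the
  // AiConfigOptions type name and the OLLAMA_BASE_URL override are
  // assumptions; only the OLLAMA_MODEL override is exercised in this file.
  //
  //   export function getExtractionAiConfig({ env }: AiConfigOptions) {
  //     return {
  //       provider: 'ollama' as const,
  //       baseUrl: env.OLLAMA_BASE_URL ?? 'http://127.0.0.1:11434',
  //       model: env.OLLAMA_MODEL ?? 'qwen3:8b',
  //       apiKey: 'ollama', // Ollama ignores the key; a placeholder satisfies SDK clients
  //       temperature: 0 // deterministic decoding suits structured extraction
  //     };
  //   }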
  it('uses extraction workload and returns ollama provider on success', async () => {
    const createModel = mock((config: {
      provider: string;
      apiKey?: string;
      model: string;
      baseUrl: string;
      temperature: number;
    }) => {
      expect(config.provider).toBe('ollama');
      expect(config.baseUrl).toBe('http://127.0.0.1:11434');
      expect(config.model).toBe('qwen3:8b');
      expect(config.temperature).toBe(0);
      return { modelId: config.model };
    });
    const generate = mock(async () => ({
      text: '{"summary":"ok","keyPoints":[],"redFlags":[],"followUpQuestions":[],"portfolioSignals":[],"confidence":0.6}'
    }));
    const result = await runAiAnalysis('Extract this filing', 'Return JSON', {
      env: {
        OLLAMA_MODEL: 'qwen3:8b'
      },
      warn: () => {},
      workload: 'extraction',
      createModel,
      generate
    });
    expect(createModel).toHaveBeenCalledTimes(1);
    expect(generate).toHaveBeenCalledTimes(1);
    expect(result.provider).toBe('ollama');
    expect(result.model).toBe('qwen3:8b');
  });
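
  // The success-path test implies runAiAnalysis selects its config by
  // workload before building the model. A rough sketch of that dispatch
  // (assumed; the generate call shape is a guess based on the mocks above):
  //
  //   const config = options.workload === 'extraction'
  //     ? getExtractionAiConfig(options)
  //     : getAiConfig(options);
  //   const model = options.createModel(config);
  //   const { text } = await options.generate({ model, prompt, system });
  //   return { provider: config.provider, model: config.model, text };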
  it('falls back to local text when extraction workload generation fails', async () => {
    const result = await runAiAnalysis('Extract this filing', 'Return JSON', {
      env: {},
      warn: () => {},
      workload: 'extraction',
      createModel: () => ({}),
      generate: async () => {
        throw new Error('ollama unavailable');
      }
    });
    expect(result.provider).toBe('local-fallback');
    expect(result.model).toBe('qwen3:8b');
    expect(result.text).toContain('AI SDK fallback mode is active');
  });
});
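
The fallback test implies that generation failures are caught inside runAiAnalysis rather than thrown to the caller. A minimal sketch of that recovery path, assuming a hypothetical runWithFallback helper; the exact fallback text is unknown beyond the substring the test asserts:

type AiResult = { provider: string; model: string; text: string };

async function runWithFallback(
  config: { provider: string; model: string },
  opts: {
    warn: (message: string) => void;
    generate: () => Promise<{ text: string }>;
  }
): Promise<AiResult> {
  try {
    const { text } = await opts.generate();
    return { provider: config.provider, model: config.model, text };
  } catch (error) {
    // Degrade gracefully: report the failure, then return locally generated text.
    opts.warn(`AI generation failed: ${String(error)}`);
    return {
      provider: 'local-fallback',
      model: config.model, // the configured model is still reported, per the test
      text: 'AI SDK fallback mode is active; returning locally generated analysis.'
    };
  }
}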