Implement dual-model filing pipeline with Ollama extraction
This commit is contained in:
@@ -19,6 +19,12 @@ ZHIPU_API_KEY=
ZHIPU_MODEL=glm-4.7-flashx
AI_TEMPERATURE=0.2

# Local extraction model (Ollama, OpenAI-compatible API)
# For host Ollama from Docker, use http://host.docker.internal:11434
OLLAMA_BASE_URL=http://127.0.0.1:11434
OLLAMA_MODEL=qwen3:8b
OLLAMA_API_KEY=ollama

# SEC API etiquette
SEC_USER_AGENT=Fiscal Clone <support@fiscal.local>
Reference in New Issue
Block a user