# Mirrored from https://github.com/Frooodle/Stirling-PDF.git (synced 2026-03-13)
# Spring AI OpenAI Configuration
# Uses GPT-5-nano as primary model and GPT-5-mini as fallback (configured in settings.yml)
spring.ai.openai.enabled=true
#spring.ai.openai.api-key=# todo <API-KEY-HERE>
spring.ai.openai.base-url=https://api.openai.com
spring.ai.openai.chat.enabled=true
spring.ai.openai.chat.options.model=gpt-5-nano
# Note: Some models only support default temperature value of 1.0
spring.ai.openai.chat.options.temperature=1.0
# For newer models, use max-completion-tokens instead of max-tokens
spring.ai.openai.chat.options.max-completion-tokens=4000
spring.ai.openai.embedding.enabled=true
spring.ai.openai.embedding.options.model=text-embedding-ada-002
# Increase timeout for OpenAI API calls (default is 10 seconds)
spring.ai.openai.chat.options.connection-timeout=60s
spring.ai.openai.chat.options.read-timeout=60s
spring.ai.openai.embedding.options.connection-timeout=60s
spring.ai.openai.embedding.options.read-timeout=60s

# Spring AI Ollama Configuration (disabled to avoid bean conflicts)
spring.ai.ollama.enabled=false
spring.ai.ollama.base-url=http://localhost:11434
spring.ai.ollama.chat.enabled=false
spring.ai.ollama.chat.options.model=llama3
spring.ai.ollama.chat.options.temperature=1.0
spring.ai.ollama.embedding.enabled=false
spring.ai.ollama.embedding.options.model=nomic-embed-text

# Spring Data Redis connection settings
spring.data.redis.host=localhost
spring.data.redis.port=6379
spring.data.redis.password=
spring.data.redis.timeout=60000
spring.data.redis.ssl.enabled=false