@@ -23,6 +23,11 @@ CATALOG_ENTITIES_EXTRACT_DIR=/extensions
2323# AUTH_GITHUB_CLIENT_ID=
2424# AUTH_GITHUB_CLIENT_SECRET=
2525
26+ # Max entry size for dynamic plugin installation (in bytes)
27+ # Note: Lightspeed needs >= 40000000
28+ # and Orchestrator needs >= 30000000
29+ MAX_ENTRY_SIZE = 40000000
30+
2631# Node environment for extensions plugin installation
2732NODE_ENV = development
2833
@@ -52,67 +57,71 @@ SEGMENT_WRITE_KEY=gGVM6sYRK0D0ndVX22BOtS7NRcxPej8t
5257# Developer Lightspeed Configuration
5358# ==============================================================================
5459# For detailed documentation, see: developer-lightspeed/README.md
55- # Note: Ollama is pre-configured by default - no setup needed. External providers require configuration below.
5660
5761# ------------------------------------------------------------------------------
58- # Ollama Provider Configuration (Default - Pre-configured)
62+ # Inference Provider Enablement
5963# ------------------------------------------------------------------------------
60- # Pre-configured and ready to use - typically no changes needed
61- OLLAMA_URL = http://ollama:11434
62- # Change to use different model
63- OLLAMA_MODEL = llama3.2:1b
64- # Optional: mount local models directory
65- # OLLAMA_MODELS_PATH=/path/to/.ollama
64+ # # Set to 'true' to enable models from that provider, e.g. ENABLE_VLLM=true
65+ # # Leave blank to disable that provider, e.g. ENABLE_VLLM=
66+ # # Note: You can have multiple providers enabled, e.g. ENABLE_VLLM=true ENABLE_OPENAI=true
67+ ENABLE_VLLM =
68+ ENABLE_VERTEX_AI =
69+ ENABLE_OPENAI =
70+ ENABLE_OLLAMA =
71+
6672
6773# ------------------------------------------------------------------------------
68- # External Provider Enablement
74+ # Ollama Provider Configuration
75+ # Required if ENABLE_OLLAMA=true
6976# ------------------------------------------------------------------------------
70- # Set ONE to 'true' if using external provider instead of Ollama
71- # ENABLE_VLLM=
72- # ENABLE_OPENAI=
73- # ENABLE_VERTEX_AI=
77+ # # Must end with /v1
78+ OLLAMA_URL =
79+
7480
7581# ------------------------------------------------------------------------------
7682# vLLM Provider Configuration
77- # ------------------------------------------------------------------------------
7883# Required if ENABLE_VLLM=true
79- # Must end with /v1
84+ # ------------------------------------------------------------------------------
85+ # # Must end with /v1
8086VLLM_URL =
81- # Leave empty if no auth required
87+ # # Leave empty if no auth required
8288VLLM_API_KEY =
83- # Optional: max tokens (default: 4096)
89+ # # Optional: max tokens (default: 4096)
8490# VLLM_MAX_TOKENS=4096
85- # Optional: TLS verification (default: true)
91+ # # Optional: TLS verification (default: true)
8692# VLLM_TLS_VERIFY=true
8793
94+
8895# ------------------------------------------------------------------------------
8996# OpenAI Provider Configuration
90- # ------------------------------------------------------------------------------
9197# Required if ENABLE_OPENAI=true
92- # Get from https://platform.openai.com/api-keys
98+ # ------------------------------------------------------------------------------
99+ # # Get from https://platform.openai.com/api-keys
93100OPENAI_API_KEY =
94101
102+
95103# ------------------------------------------------------------------------------
96104# Vertex AI Provider Configuration (Experimental)
97- # ------------------------------------------------------------------------------
98105# Required if ENABLE_VERTEX_AI=true
99- # Absolute path to GCP service account JSON
106+ # ------------------------------------------------------------------------------
107+ # # Absolute path to GCP service account JSON on the host
100108VERTEX_AI_CREDENTIALS_PATH =
101- # Your GCP project ID
109+ # # Your GCP project ID
102110VERTEX_AI_PROJECT =
103- # Optional: GCP region (default: us-central1)
111+ # # Optional: GCP region (default: us-central1)
104112# VERTEX_AI_LOCATION=us-central1
105113
114+
106115# ------------------------------------------------------------------------------
107- # Llama Guard Settings
116+ # Validation Settings
108117# ------------------------------------------------------------------------------
109- # The safety provider uses inline::llama-guard, so SAFETY_MODEL must be a
110- # llama-guard variant (e.g. llama-guard3:1b) .
111- # Defaults to llama-guard3:1b if not set.
112- SAFETY_MODEL =
113- # SAFETY_URL is auto-configured by the compose files.
114- # You should not need to set this manually.
115- SAFETY_URL =
116- # Only required if connecting to a remote safety endpoint that requires auth .
117- # Not needed when using the local safety-ollama container (the default).
118- SAFETY_API_KEY =
118+ # # Enables query validation.
119+ # # Will limit queries to RHDH-specific requests.
120+ ENABLE_VALIDATION =
121+ # # Required if ENABLE_VALIDATION=true
122+ # # Must be one of the enabled providers, e.g. if ENABLE_OPENAI=true then VALIDATION_PROVIDER=openai
123+ VALIDATION_PROVIDER =
124+ # # Required if ENABLE_VALIDATION=true
125+ # # Must be an available model for an enabled provider.
126+ # # E.g. ENABLE_OPENAI=true VALIDATION_MODEL_NAME=gpt-4o-mini
127+ VALIDATION_MODEL_NAME =
0 commit comments