-
Notifications
You must be signed in to change notification settings - Fork 70
Expand file tree
/
Copy pathdefault.env
More file actions
127 lines (102 loc) · 4.57 KB
/
default.env
File metadata and controls
127 lines (102 loc) · 4.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
# Database connection
POSTGRES_DB=postgres
POSTGRES_HOST=db
POSTGRES_PORT=5432
POSTGRES_USER=postgres
POSTGRES_PASSWORD=postgres
BASE_URL=http://localhost:7007
# Configure which image to use: defaults to the latest stable tag of the community builds, which are built for both amd64 and arm64 architectures
# to use bleeding edge :next images or commercially supported images, see README.md for details
# RHDH_IMAGE=quay.io/rhdh-community/rhdh:1.9
# Default plugin catalog index image
# Requires RHDH 1.9+ to be handled.
CATALOG_INDEX_IMAGE=quay.io/rhdh/plugin-catalog-index:1.9
# Path in the install-dynamic-plugins container where the extensions catalog entities should be extracted to, from the catalog index image
# Requires RHDH 1.9+ to be handled
CATALOG_ENTITIES_EXTRACT_DIR=/extensions
# GitHub auth (you need to uncomment github auth section in configs/app-config.local.yaml to enable this)
#AUTH_GITHUB_CLIENT_ID=
#AUTH_GITHUB_CLIENT_SECRET=
# Max entry size for dynamic plugin installation (in bytes)
# Note: Lightspeed needs >= 40000000
# and Orchestrator needs >= 30000000
MAX_ENTRY_SIZE=40000000
# Node environment for extensions plugin installation
NODE_ENV=development
# Disable TLS certificate validation (development only)
NODE_TLS_REJECT_UNAUTHORIZED=0
# Use development segment key
SEGMENT_WRITE_KEY=gGVM6sYRK0D0ndVX22BOtS7NRcxPej8t
# uncomment the following line to disable telemetry
#SEGMENT_TEST_MODE=true
# Backstage log level
#LOG_LEVEL=debug
# Logs from global-agent to inspect the 'node-fetch' behavior with proxy settings
#ROARR_LOG=true
# Logs from fetch.
# Might be useful to inspect the 'fetch' behavior with proxy settings (handled by 'undici', not 'global-agent')
#NODE_DEBUG=fetch
# NO_PROXY will take effect only if HTTP(S)_PROXY env vars are set.
# See the compose-with-corporate-proxy.yaml file.
#NO_PROXY=localhost,127.0.0.1
# ==============================================================================
# Developer Lightspeed Configuration
# ==============================================================================
# For detailed documentation, see: developer-lightspeed/README.md
# ------------------------------------------------------------------------------
# Inference Provider Enablement
# ------------------------------------------------------------------------------
## Set to 'true' to enable models from that provider. E.g. ENABLE_VLLM=true
## Leave blank to disable that provider. E.g. ENABLE_VLLM=
## Note: You can have multiple providers enabled at once. E.g. ENABLE_VLLM=true ENABLE_OPENAI=true
ENABLE_VLLM=
ENABLE_VERTEX_AI=
ENABLE_OPENAI=
ENABLE_OLLAMA=
# ------------------------------------------------------------------------------
# Ollama Provider Configuration
# Required if ENABLE_OLLAMA=true
# ------------------------------------------------------------------------------
## Must end with /v1
OLLAMA_URL=
# ------------------------------------------------------------------------------
# vLLM Provider Configuration
# Required if ENABLE_VLLM=true
# ------------------------------------------------------------------------------
## Must end with /v1
VLLM_URL=
## Leave empty if no auth required
VLLM_API_KEY=
## Optional: max tokens (default: 4096)
# VLLM_MAX_TOKENS=4096
## Optional: TLS verification (default: true)
# VLLM_TLS_VERIFY=true
# ------------------------------------------------------------------------------
# OpenAI Provider Configuration
# Required if ENABLE_OPENAI=true
# ------------------------------------------------------------------------------
## Get from https://platform.openai.com/api-keys
OPENAI_API_KEY=
# ------------------------------------------------------------------------------
# Vertex AI Provider Configuration (Experimental)
# Required if ENABLE_VERTEX_AI=true
# ------------------------------------------------------------------------------
## Absolute path to GCP service account JSON on the host
VERTEX_AI_CREDENTIALS_PATH=
## Your GCP project ID
VERTEX_AI_PROJECT=
## Optional: GCP region (default: us-central1)
# VERTEX_AI_LOCATION=us-central1
# ------------------------------------------------------------------------------
# Validation Settings
# ------------------------------------------------------------------------------
## Enables query validation.
## Will limit queries to RHDH specific requests.
ENABLE_VALIDATION=
## Required if ENABLE_VALIDATION=true
## Must be one of the enabled providers. E.g. if ENABLE_OPENAI=true then VALIDATION_PROVIDER=openai
VALIDATION_PROVIDER=
## Required if ENABLE_VALIDATION=true
## Must be an available model for an enabled provider.
## E.g. ENABLE_OPENAI=true VALIDATION_MODEL_NAME=gpt-4o-mini
VALIDATION_MODEL_NAME=