# HMLR CognitiveLattice Configuration
# Copy this file to .env, then uncomment the settings for the provider you want to use
# =============================================================================
# OPTION 1: OpenAI (Default)
# =============================================================================
API_PROVIDER=openai
OPENAI_API_KEY=sk-proj-your-key-here
# Model Configuration
HMLR_DEFAULT_MODEL=gpt-4.1-mini # Cheap workers
HMLR_MAIN_MODEL=gpt-4.1-pro # Premium user-facing (optional)
# Temperature Settings
HMLR_DEFAULT_TEMPERATURE=0.1 # Deterministic workers
HMLR_MAIN_TEMPERATURE=0.6 # Natural conversation (optional)
# Advanced Model Parameters (Optional - only for thinking models)
# HMLR_DEFAULT_REASONING_EFFORT=medium # For o1/o3: low, medium, high
# HMLR_MAIN_REASONING_EFFORT=high # Override for main conversation
# HMLR_DEFAULT_TOP_P=0.95 # Nucleus sampling (0.0-1.0)
# HMLR_DEFAULT_TOP_K=40 # Top-K sampling (Gemini)
# HMLR_DEFAULT_FREQUENCY_PENALTY=0.0 # Reduce repetition (-2.0 to 2.0)
# HMLR_DEFAULT_PRESENCE_PENALTY=0.0 # Encourage new topics (-2.0 to 2.0)
# =============================================================================
# OPTION 2: Google Gemini
# =============================================================================
# API_PROVIDER=gemini
# GEMINI_API_KEY=AIza-your-key-here
# Model Configuration
# HMLR_DEFAULT_MODEL=gemini-2.0-flash-thinking-exp
# HMLR_MAIN_MODEL=gemini-1.5-pro # Optional override
# Advanced Parameters for Gemini Thinking
# HMLR_DEFAULT_REASONING_EFFORT=5 # Thinking budget 1-10 (higher = more thinking)
# HMLR_MAIN_REASONING_EFFORT=8 # Use more thinking for user conversations
# HMLR_DEFAULT_TOP_K=40 # Top-K sampling
# =============================================================================
# OPTION 3: xAI Grok
# =============================================================================
# API_PROVIDER=grok
# XAI_API_KEY=xai-your-key-here
# Model Configuration
# HMLR_DEFAULT_MODEL=grok-2-latest
# =============================================================================
# OPTION 4: Local Ollama (Free!)
# =============================================================================
# Note: Ollama serves an OpenAI-compatible API, so the "openai" provider is
# used here on purpose — only the base URL points at the local server.
# API_PROVIDER=openai
# OPENAI_API_KEY=ollama              # Dummy key (Ollama ignores it, but the client requires one)
# OPENAI_API_BASE=http://localhost:11434/v1
# Model Configuration
# HMLR_DEFAULT_MODEL=llama3.2:3b
# =============================================================================
# TOKEN BUDGETS (Optional overrides)
# =============================================================================
# CONTEXT_BUDGET_TOKENS=6000
# MAX_RESPONSE_TOKENS=2000
# FACT_EXTRACTION_MAX_TOKENS=500
# USER_PROFILE_MAX_TOKENS=300
# HYDRATOR_MAX_TOKENS=50000
# =============================================================================
# EMBEDDING CONFIGURATION (Optional overrides)
# =============================================================================
# HMLR_EMBEDDING_MODEL=BAAI/bge-large-en-v1.5
# HMLR_EMBEDDING_DIM=1024
# =============================================================================
# OTHER SETTINGS
# =============================================================================
# SLIDING_WINDOW_SIZE=20
# COGNITIVE_LATTICE_DB=/path/to/custom/db.sqlite