# FastFlowLM-Docker Configuration
# Copy this file to .env and adjust values as needed
# ── Mode Selection ──────────────────────────────────────────
# Enable/disable individual Wyoming services
FLM_ASR_ENABLED=true
FLM_LLM_ENABLED=true
# ── Model Configuration ────────────────────────────────────
# LLM model to load for conversation (any FLM-supported model)
FLM_LLM_MODEL=llama3.2:1b
# Whisper model for ASR (speech-to-text)
FLM_ASR_MODEL=whisper-v3:turbo
# ── Network Ports ──────────────────────────────────────────
# FastFlowLM internal API port
FLM_SERVER_PORT=52625
# Wyoming protocol listening ports
WYOMING_ASR_PORT=10300
WYOMING_LLM_PORT=10400
# ── ASR Settings ───────────────────────────────────────────
# Default language for speech recognition
FLM_ASR_LANGUAGE=en
# ── LLM Settings ──────────────────────────────────────────
# System prompt for the LLM conversation handler
FLM_LLM_SYSTEM_PROMPT=You are a helpful voice assistant for a smart home. Keep responses concise and conversational. When asked to control devices, describe what you would do.
# ── Storage ────────────────────────────────────────────────
# Model storage path inside container
FLM_MODEL_PATH=/data/models
# ── Logging ────────────────────────────────────────────────
FLM_LOG_LEVEL=INFO