forked from Open-LLM-VTuber/Open-LLM-VTuber
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathserver.py
More file actions
371 lines (313 loc) · 14.5 KB
/
server.py
File metadata and controls
371 lines (313 loc) · 14.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
import os
import re
import shutil
import atexit
import json
import asyncio
from typing import List, Dict
import yaml
import numpy as np
from fastapi import FastAPI, WebSocket, APIRouter
from fastapi.staticfiles import StaticFiles
from starlette.websockets import WebSocketDisconnect
from main import OpenLLMVTuberMain
from live2d_model import Live2dModel
from tts.stream_audio import AudioPayloadPreparer
import chardet
from loguru import logger
class WebSocketServer:
    """
    WebSocketServer initializes a FastAPI application with WebSocket endpoints and a broadcast endpoint.

    Attributes:
        config (dict): Configuration dictionary.
        app (FastAPI): FastAPI application instance.
        router (APIRouter): APIRouter instance for routing.
        connected_clients (List[WebSocket]): List of connected WebSocket clients for "/client-ws".
        server_ws_clients (List[WebSocket]): List of connected WebSocket clients for "/server-ws".
    """

    def __init__(self, open_llm_vtuber_main_config: Dict | None = None):
        """
        Initializes the WebSocketServer with the given configuration.

        Args:
            open_llm_vtuber_main_config: Parsed configuration dictionary; it is
                passed to every per-connection OpenLLMVTuberMain/Live2dModel
                created in _initialize_components.
        """
        self.app = FastAPI()
        self.router = APIRouter()
        # NOTE(review): new_connected_clients is never read or written anywhere
        # else in this class — presumably dead state; confirm before removing.
        self.new_connected_clients: List[WebSocket] = []
        self.connected_clients: List[WebSocket] = []
        self.server_ws_clients: List[WebSocket] = []
        self.open_llm_vtuber_main_config: Dict | None = open_llm_vtuber_main_config
        self._setup_routes()
        self._mount_static_files()
        self.app.include_router(self.router)

    def _initialize_components(
        self, websocket: WebSocket
    ) -> tuple[Live2dModel, OpenLLMVTuberMain, AudioPayloadPreparer]:
        """
        Initialize or reinitialize all necessary components with current configuration.

        Args:
            websocket: The WebSocket connection to send messages through

        Returns:
            tuple: (Live2dModel instance, OpenLLMVTuberMain instance, AudioPayloadPreparer instance)
        """
        l2d = Live2dModel(self.open_llm_vtuber_main_config["LIVE2D_MODEL"])
        open_llm_vtuber = OpenLLMVTuberMain(self.open_llm_vtuber_main_config)
        audio_preparer = AudioPayloadPreparer()

        # Set up the audio playback function
        def _play_audio_file(sentence: str | None, filepath: str | None) -> None:
            # Callback handed to OpenLLMVTuberMain; "plays" audio by pushing a
            # payload to the frontend over the websocket, then sleeping for the
            # clip's duration so playback paces the conversation chain.
            if filepath is None:
                print("No audio to be streamed. Response is empty.")
                return
            if sentence is None:
                sentence = ""
            print(f">> Playing {filepath}...")
            payload, duration = audio_preparer.prepare_audio_payload(
                audio_path=filepath,
                display_text=sentence,
                expression_list=l2d.extract_emotion(sentence),
            )
            print("Payload send.")

            async def _send_audio():
                await websocket.send_text(json.dumps(payload))
                await asyncio.sleep(duration)

            # This callback runs in a worker thread (via asyncio.to_thread in
            # the endpoint below), so it spins up its own event loop to await
            # the send. NOTE(review): sending on a websocket from a loop other
            # than the one that owns it is fragile — confirm this is safe with
            # the FastAPI/Starlette version in use.
            new_loop = asyncio.new_event_loop()
            asyncio.set_event_loop(new_loop)
            new_loop.run_until_complete(_send_audio())
            new_loop.close()
            print("Audio played.")

        open_llm_vtuber.set_audio_output_func(_play_audio_file)
        return l2d, open_llm_vtuber, audio_preparer

    def _setup_routes(self):
        """Sets up the WebSocket and broadcast routes."""

        # the connection between this server and the frontend client
        # The version 2 of the client-ws. Introduces breaking changes.
        # This route will initiate its own main.py instance and conversation loop
        @self.app.websocket("/client-ws")
        async def websocket_endpoint(websocket: WebSocket):
            await websocket.accept()
            await websocket.send_text(
                json.dumps({"type": "full-text", "text": "Connection established"})
            )
            self.connected_clients.append(websocket)
            print("Connection established")
            # Initialize components (fresh Live2D model + VTuber core per connection)
            l2d, open_llm_vtuber, _ = self._initialize_components(websocket)
            await websocket.send_text(
                json.dumps({"type": "set-model", "text": l2d.model_info})
            )
            print("Model set")
            # Raw mic samples accumulate here until a "mic-audio-end" message arrives.
            received_data_buffer = np.array([])
            # start mic
            await websocket.send_text(
                json.dumps({"type": "control", "text": "start-mic"})
            )
            # Holds the currently running conversation task, if any.
            # NOTE(review): never reset to None after a conversation finishes,
            # so later interrupt-signals always take the interrupt branch.
            conversation_task = None
            try:
                while True:
                    print(".", end="")
                    message = await websocket.receive_text()
                    data = json.loads(message)
                    # print(f"\033\n Received ws req: {data.get('type')}\033[0m\n")
                    if data.get("type") == "interrupt-signal":
                        print("Start receiving audio data from front end.")
                        if conversation_task is not None:
                            print(
                                "\033[91mLLM hadn't finish itself. Interrupting it...",
                                "heard response: \n",
                                data.get("text"),
                                "\033[0m\n",
                            )
                            # Pass along the partial response the user heard
                            # before interrupting.
                            open_llm_vtuber.interrupt(data.get("text"))
                            # conversation_task.cancel()
                    elif data.get("type") == "mic-audio-data":
                        # Audio arrives as a JSON object whose values are float
                        # samples; append them to the rolling buffer.
                        received_data_buffer = np.append(
                            received_data_buffer,
                            np.array(
                                list(data.get("audio").values()), dtype=np.float32
                            ),
                        )
                        print("*", end="")
                    elif data.get("type") == "mic-audio-end":
                        print("Received audio data end from front end.")
                        await websocket.send_text(
                            json.dumps({"type": "full-text", "text": "Thinking..."})
                        )
                        # Snapshot and clear the buffer so new mic data can
                        # accumulate while the conversation runs.
                        audio = received_data_buffer
                        received_data_buffer = np.array([])

                        async def _run_conversation():
                            # Runs the blocking conversation chain in a worker
                            # thread, bracketed by start/end control messages.
                            try:
                                await websocket.send_text(
                                    json.dumps(
                                        {
                                            "type": "control",
                                            "text": "conversation-chain-start",
                                        }
                                    )
                                )
                                await asyncio.to_thread(
                                    open_llm_vtuber.conversation_chain,
                                    user_input=audio,
                                )
                                await websocket.send_text(
                                    json.dumps(
                                        {
                                            "type": "control",
                                            "text": "conversation-chain-end",
                                        }
                                    )
                                )
                                print("One Conversation Loop Completed")
                            except asyncio.CancelledError:
                                print("Conversation task was cancelled.")
                            except InterruptedError as e:
                                print(f"😢Conversation was interrupted. {e}")

                        conversation_task = asyncio.create_task(_run_conversation())
                    elif data.get("type") == "fetch-configs":
                        # Report the available alternative config files.
                        config_files = self._scan_config_alts_directory()
                        await websocket.send_text(
                            json.dumps({"type": "config-files", "files": config_files})
                        )
                    elif data.get("type") == "switch-config":
                        config_file = data.get("file")
                        if config_file:
                            new_config = self._load_config_from_file(config_file)
                            if new_config:
                                # Update configuration (merged into the existing
                                # dict, so keys absent from the new file persist)
                                self.open_llm_vtuber_main_config.update(new_config)
                                # Reinitialize components with new configuration
                                l2d, open_llm_vtuber, _ = self._initialize_components(
                                    websocket
                                )
                                # Send confirmation and model info
                                await websocket.send_text(
                                    json.dumps(
                                        {
                                            "type": "config-switched",
                                            "message": f"Switched to config: {config_file}",
                                        }
                                    )
                                )
                                await websocket.send_text(
                                    json.dumps(
                                        {"type": "set-model", "text": l2d.model_info}
                                    )
                                )
                                print(f"Configuration switched to {config_file}")
                    elif data.get("type") == "fetch-backgrounds":
                        bg_files = self._scan_bg_directory()
                        await websocket.send_text(
                            json.dumps({"type": "background-files", "files": bg_files})
                        )
                    else:
                        print("Unknown data type received.")
            except WebSocketDisconnect:
                self.connected_clients.remove(websocket)
                # Drop the per-connection VTuber instance.
                open_llm_vtuber = None

    def _scan_config_alts_directory(self) -> List[str]:
        """Return "conf.yaml" plus every .yaml filename found (recursively)
        under the configured CONFIG_ALTS_DIR.

        Note: only bare filenames are collected, so files in subdirectories
        lose their relative path.
        """
        config_files = ["conf.yaml"]  # default config file
        config_alts_dir = self.open_llm_vtuber_main_config.get(
            "CONFIG_ALTS_DIR", "config_alts"
        )
        for root, _, files in os.walk(config_alts_dir):
            for file in files:
                if file.endswith(".yaml"):
                    config_files.append(file)
        return config_files

    def _load_config_from_file(self, filename: str) -> Dict | None:
        """
        Load configuration from a YAML file with robust encoding handling.

        Args:
            filename: Name of the config file

        Returns:
            Dict: Loaded configuration or None if loading fails
        """
        if filename == "conf.yaml":
            # The default config goes through env-var substitution.
            return load_config_with_env("conf.yaml")
        config_alts_dir = self.open_llm_vtuber_main_config.get("CONFIG_ALTS_DIR", "config_alts")
        file_path = os.path.join(config_alts_dir, filename)
        if not os.path.exists(file_path):
            logger.error(f"Config file not found: {file_path}")
            return None

        # Try common encodings first
        encodings = ['utf-8', 'utf-8-sig', 'gbk', 'gb2312', 'ascii']
        content = None
        for encoding in encodings:
            try:
                with open(file_path, 'r', encoding=encoding) as file:
                    content = file.read()
                break
            except UnicodeDecodeError:
                continue
        if content is None:
            # Try detecting encoding as last resort
            try:
                with open(file_path, 'rb') as file:
                    raw_data = file.read()
                detected = chardet.detect(raw_data)
                if detected['encoding']:
                    content = raw_data.decode(detected['encoding'])
            except Exception as e:
                logger.error(f"Error detecting encoding for config file {file_path}: {e}")
                return None
        try:
            return yaml.safe_load(content)
        except yaml.YAMLError as e:
            logger.error(f"Error parsing YAML from {file_path}: {e}")
            return None

    def _scan_bg_directory(self) -> List[str]:
        """Return every image filename found (recursively) under static/bg."""
        bg_files = []
        bg_dir = os.path.join("static", "bg")
        for root, _, files in os.walk(bg_dir):
            for file in files:
                if file.endswith((".jpg", ".jpeg", ".png", ".gif")):
                    bg_files.append(file)
        return bg_files

    def _mount_static_files(self):
        """Mounts static file directories."""
        self.app.mount(
            "/live2d-models",
            StaticFiles(directory="live2d-models"),
            name="live2d-models",
        )
        # Mounted last so specific routes/mounts take precedence over "/".
        self.app.mount("/", StaticFiles(directory="./static", html=True), name="static")

    def run(self, host: str = "127.0.0.1", port: int = 8000, log_level: str = "info"):
        """Runs the FastAPI application using Uvicorn."""
        import uvicorn

        uvicorn.run(self.app, host=host, port=port, log_level=log_level)

    @staticmethod
    def clean_cache():
        """Clean the cache directory by removing and recreating it."""
        cache_dir = "./cache"
        if (os.path.exists(cache_dir)):
            shutil.rmtree(cache_dir)
            os.makedirs(cache_dir)
def load_config_with_env(path) -> dict:
    """
    Load a YAML configuration file, expanding ``${VAR_NAME}`` placeholders
    with the corresponding environment variables before parsing.

    Parameters:
    - path (str): The path to the configuration file.

    Returns:
    - dict: The configuration dictionary.

    Raises:
    - FileNotFoundError if the configuration file is not found.
    - yaml.YAMLError if the configuration file is not a valid YAML file.
    """
    with open(path, "r", encoding="utf-8") as config_file:
        raw_text = config_file.read()

    def _substitute(match) -> str:
        # Leave the literal ${...} text in place when the variable is unset.
        return os.getenv(match.group(1), match.group(0))

    # Replace every ${VAR_NAME} occurrence, then parse the expanded text.
    expanded = re.sub(r"\$\{(\w+)\}", _substitute, raw_text)
    return yaml.safe_load(expanded)
if __name__ == "__main__":
    # Purge cached audio files when the process exits.
    atexit.register(WebSocketServer.clean_cache)

    # Load the base configuration, with ${ENV_VAR} substitution applied.
    main_config = load_config_with_env("conf.yaml")
    main_config["LIVE2D"] = True  # make sure the live2d is enabled

    # Initialize and run the WebSocket server on the configured endpoint.
    server = WebSocketServer(open_llm_vtuber_main_config=main_config)
    server.run(host=main_config["HOST"], port=main_config["PORT"])