Skip to content

Commit 3eef514

Browse files
committed
feat: 集成 LLM 客户端,支持与大语言模型交互;更新依赖项并优化配置
1 parent 5f556f2 commit 3eef514

File tree

8 files changed

+242
-14
lines changed

8 files changed

+242
-14
lines changed

backend/Cargo.lock

Lines changed: 3 additions & 2 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

backend/Cargo.toml

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -46,3 +46,6 @@ anyhow = "1"
4646
jsonwebtoken = "9.3.1"
4747
bcrypt = "0.17.0"
4848
wechat-oa-sdk = "1.0.3"
49+
50+
# HTTP 客户端(LLM API 调用)
51+
reqwest = { version = "0.12", features = ["json"] }

backend/src/api/wechatapi.rs

Lines changed: 32 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ use wechat_oa_sdk::{
1919
};
2020

2121
use crate::error::{AppError, AppErrorType};
22+
use crate::llm::LlmClient;
2223
use crate::state::AppState;
2324

2425
// ============================================================================
@@ -164,7 +165,7 @@ pub async fn receive_message(
164165
};
165166

166167
// 处理消息并生成回复
167-
let reply_xml = handle_message(message).await;
168+
let reply_xml = handle_message(message, state.llm_client.as_ref()).await;
168169

169170
// 如果是加密模式,加密回复
170171
if is_encrypted && reply_xml != "success" {
@@ -181,23 +182,45 @@ pub async fn receive_message(
181182
}
182183

183184
/// 处理接收到的消息/事件
184-
async fn handle_message(message: IncomingMessage) -> String {
185+
async fn handle_message(message: IncomingMessage, llm_client: Option<&Arc<LlmClient>>) -> String {
185186
match message {
186187
IncomingMessage::Text(msg) => {
187188
info!("收到文本消息: {} from {}", msg.content, msg.from_user_name);
188-
let reply_content = match msg.content.as_str() {
189-
"你好" | "hello" | "hi" => "你好!欢迎关注我的博客!".to_string(),
190-
"帮助" | "help" => {
191-
"可用命令:\n- 博客:获取博客链接\n- 最新:获取最新文章".to_string()
189+
190+
// 先处理特殊命令
191+
let reply_content = match msg.content.trim().to_lowercase().as_str() {
192+
"帮助" | "help" | "?" => {
193+
"可用命令:\n- 博客:获取博客链接\n- 清除:清除对话历史\n\n其他消息我会用 AI 回复你!".to_string()
194+
}
195+
"博客" | "blog" => "访问我的博客:https://blog.exquisitecore.xyz".to_string(),
196+
"清除" | "clear" | "reset" => {
197+
// 清除对话历史
198+
if let Some(client) = llm_client {
199+
client.clear_history(&msg.from_user_name).await;
200+
}
201+
"对话历史已清除!".to_string()
202+
}
203+
_ => {
204+
// 使用 LLM 回复
205+
if let Some(client) = llm_client {
206+
match client.chat(&msg.from_user_name, &msg.content).await {
207+
Ok(response) => response,
208+
Err(e) => {
209+
error!("LLM 调用失败: {}", e);
210+
format!("抱歉,AI 服务暂时不可用:{}", e)
211+
}
212+
}
213+
} else {
214+
// 没有配置 LLM,使用简单回复
215+
format!("收到:{}", msg.content)
216+
}
192217
}
193-
"博客" => "访问我的博客:https://blog.exquisitecore.xyz".to_string(),
194-
_ => format!("收到:{}", msg.content),
195218
};
196219
TextReply::new(&msg.from_user_name, &msg.to_user_name, reply_content).to_xml()
197220
}
198221
IncomingMessage::SubscribeEvent(event) => {
199222
info!("用户关注: {}", event.from_user_name);
200-
let welcome = "感谢关注!\n\n这里是我的个人博客公众号,会不定期分享技术文章。\n\n回复「博客」获取博客链接";
223+
let welcome = "感谢关注!\n\n这里是我的个人博客公众号,会不定期分享技术文章。\n\n💡 你可以直接向我发消息,我会用 AI 回复你!\n\n回复「帮助」查看更多命令";
201224
TextReply::new(&event.from_user_name, &event.to_user_name, welcome).to_xml()
202225
}
203226
IncomingMessage::UnsubscribeEvent(event) => {

backend/src/config.rs

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,18 @@ pub struct Config {
2727
pub cors: CorsConfig,
2828
#[serde(default)]
2929
pub wechat: Option<WeChatConfig>,
30+
#[serde(default)]
31+
pub llm: Option<LlmConfig>,
32+
}
33+
34+
/// LLM integration settings; when the section is absent, AI replies are disabled.
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct LlmConfig {
    /// Provider label; currently only logged at startup, not used for dispatch.
    pub provider: String, // "deepseek", "openai", etc.
    /// Bearer token sent in the `Authorization` header of API calls.
    pub api_key: String,
    /// API base URL; `None` falls back to DeepSeek's endpoint.
    pub base_url: Option<String>,
    /// Model id; `None` falls back to "deepseek-chat".
    pub model: Option<String>,
    /// Per-reply token cap; `None` falls back to 500.
    pub max_tokens: Option<u32>,
    /// HTTP request timeout in seconds; `None` falls back to 30.
    pub timeout_secs: Option<u64>,
}
3143

3244
#[derive(Debug, Serialize, Deserialize, Clone)]
@@ -106,6 +118,7 @@ impl Default for Config {
106118
allow_credentials: true,
107119
},
108120
wechat: None,
121+
llm: None,
109122
}
110123
}
111124
}

backend/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
pub mod api;
22
pub mod config;
33
pub mod error;
4+
pub mod llm;
45
pub mod logger;
56
pub mod middleware;
67
pub mod model;

backend/src/llm.rs

Lines changed: 172 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,172 @@
1+
//! LLM 集成模块
2+
//!
3+
//! 支持 DeepSeek 等大语言模型 API
4+
5+
use reqwest::Client;
6+
use serde::{Deserialize, Serialize};
7+
use std::sync::Arc;
8+
use std::time::Duration;
9+
use tokio::sync::RwLock;
10+
use std::collections::HashMap;
11+
use tracing::{error, info};
12+
13+
use crate::config::LlmConfig;
14+
15+
/// Client for an OpenAI-compatible chat-completions LLM API (DeepSeek by default).
pub struct LlmClient {
    // Provider settings: API key plus optional base URL / model / limits.
    config: LlmConfig,
    // Reusable HTTP client; the request timeout is fixed at construction time.
    http: Client,
    /// Per-user conversation history cache (WeChat openid -> message list).
    conversations: Arc<RwLock<HashMap<String, Vec<ChatMessage>>>>,
}
22+
23+
/// One message in an OpenAI-style chat exchange; serialized as-is into the
/// request payload and parsed back from the response.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ChatMessage {
    // OpenAI chat role string.
    pub role: String, // "system", "user", "assistant"
    pub content: String,
}
28+
29+
/// Request body for `POST /v1/chat/completions` (OpenAI-compatible schema).
#[derive(Debug, Serialize)]
struct ChatRequest {
    model: String,
    messages: Vec<ChatMessage>,
    // None fields are still serialized as JSON null here; providers accept
    // that, but #[serde(skip_serializing_if = "Option::is_none")] would be
    // the stricter choice — NOTE(review): confirm provider tolerance.
    max_tokens: Option<u32>,
    temperature: Option<f32>,
    // Always false in this client; responses are read as one JSON body.
    stream: bool,
}
37+
38+
/// Successful chat-completions response; only the fields this client reads
/// are modeled (serde ignores the rest of the payload).
#[derive(Debug, Deserialize)]
struct ChatResponse {
    choices: Vec<Choice>,
}

/// One completion choice; the assistant reply lives in `message.content`.
#[derive(Debug, Deserialize)]
struct Choice {
    message: ChatMessage,
}

/// Error envelope returned by the API on non-2xx statuses.
#[derive(Debug, Deserialize)]
struct ErrorResponse {
    error: ApiError,
}

/// Provider-supplied error detail; only the human-readable message is used.
#[derive(Debug, Deserialize)]
struct ApiError {
    message: String,
}
57+
58+
impl LlmClient {
59+
pub fn new(config: LlmConfig) -> Self {
60+
let timeout = config.timeout_secs.unwrap_or(30);
61+
let http = Client::builder()
62+
.timeout(Duration::from_secs(timeout))
63+
.build()
64+
.expect("Failed to create HTTP client");
65+
66+
Self {
67+
config,
68+
http,
69+
conversations: Arc::new(RwLock::new(HashMap::new())),
70+
}
71+
}
72+
73+
/// 与 LLM 聊天
74+
///
75+
/// `user_id`: 用户标识(微信 openid),用于维护对话上下文
76+
/// `message`: 用户消息
77+
pub async fn chat(&self, user_id: &str, message: &str) -> Result<String, String> {
78+
// 获取或创建对话历史
79+
let mut conversations = self.conversations.write().await;
80+
let history = conversations.entry(user_id.to_string()).or_insert_with(|| {
81+
vec![ChatMessage {
82+
role: "system".to_string(),
83+
content: "你是一个友好的助手,用简洁的中文回复用户问题。回复尽量控制在100字以内。".to_string(),
84+
}]
85+
});
86+
87+
// 添加用户消息
88+
history.push(ChatMessage {
89+
role: "user".to_string(),
90+
content: message.to_string(),
91+
});
92+
93+
// 限制历史长度(保留最近10轮对话)
94+
if history.len() > 21 { // 1 system + 20 user/assistant
95+
let system_msg = history[0].clone();
96+
let recent: Vec<_> = history.iter().skip(history.len() - 20).cloned().collect();
97+
history.clear();
98+
history.push(system_msg);
99+
history.extend(recent);
100+
}
101+
102+
// 构建请求
103+
let base_url = self.config.base_url.as_deref()
104+
.unwrap_or("https://api.deepseek.com");
105+
let model = self.config.model.as_deref()
106+
.unwrap_or("deepseek-chat");
107+
108+
let request = ChatRequest {
109+
model: model.to_string(),
110+
messages: history.clone(),
111+
max_tokens: self.config.max_tokens.or(Some(500)),
112+
temperature: Some(0.7),
113+
stream: false,
114+
};
115+
116+
info!("Calling LLM API for user {}", user_id);
117+
118+
// 发送请求
119+
let response = self.http
120+
.post(format!("{}/v1/chat/completions", base_url))
121+
.header("Authorization", format!("Bearer {}", self.config.api_key))
122+
.header("Content-Type", "application/json")
123+
.json(&request)
124+
.send()
125+
.await
126+
.map_err(|e| {
127+
error!("LLM API request failed: {}", e);
128+
format!("请求失败: {}", e)
129+
})?;
130+
131+
let status = response.status();
132+
let body = response.text().await.map_err(|e| format!("读取响应失败: {}", e))?;
133+
134+
if !status.is_success() {
135+
error!("LLM API error: {} - {}", status, body);
136+
// 尝试解析错误信息
137+
if let Ok(err) = serde_json::from_str::<ErrorResponse>(&body) {
138+
return Err(format!("API错误: {}", err.error.message));
139+
}
140+
return Err(format!("API错误: {}", status));
141+
}
142+
143+
// 解析响应
144+
let chat_response: ChatResponse = serde_json::from_str(&body)
145+
.map_err(|e| {
146+
error!("Failed to parse LLM response: {} - {}", e, body);
147+
format!("解析响应失败: {}", e)
148+
})?;
149+
150+
let assistant_message = chat_response.choices
151+
.first()
152+
.map(|c| c.message.content.clone())
153+
.unwrap_or_else(|| "抱歉,我没有生成回复。".to_string());
154+
155+
// 保存助手回复到历史
156+
history.push(ChatMessage {
157+
role: "assistant".to_string(),
158+
content: assistant_message.clone(),
159+
});
160+
161+
info!("LLM response for user {}: {} chars", user_id, assistant_message.len());
162+
163+
Ok(assistant_message)
164+
}
165+
166+
/// 清除用户对话历史
167+
pub async fn clear_history(&self, user_id: &str) {
168+
let mut conversations = self.conversations.write().await;
169+
conversations.remove(user_id);
170+
info!("Cleared conversation history for user {}", user_id);
171+
}
172+
}

backend/src/main.rs

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
1-
use backend::{config, logger, model, routes, state::AppState};
1+
use backend::{config, llm::LlmClient, logger, model, routes, state::AppState};
22
use std::net::SocketAddr;
33
use std::path::Path;
44
use std::sync::Arc;
@@ -40,8 +40,14 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
4040
WeChatClient::new(sdk_config)
4141
});
4242

43+
// 初始化 LLM 客户端(如果配置了)
44+
let llm_client = config.llm.map(|lc| {
45+
info!("初始化 LLM 客户端,provider: {}", lc.provider);
46+
LlmClient::new(lc)
47+
});
48+
4349
// 创建应用状态
44-
let app_state = AppState::new(pool, wechat_client);
50+
let app_state = AppState::new(pool, wechat_client, llm_client);
4551

4652
// 创建应用路由
4753
let app = routes::create_routes(app_state);

backend/src/state.rs

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,21 +6,30 @@ use sqlx::{Pool, Postgres};
66
use std::sync::Arc;
77
use wechat_oa_sdk::WeChatClient;
88

9+
use crate::llm::LlmClient;
10+
911
/// Shared application state, cloned cheaply into each request handler
/// (all heavy members are behind `Arc`).
#[derive(Clone)]
pub struct AppState {
    /// Postgres connection pool.
    pub pool: Arc<Pool<Postgres>>,
    /// WeChat Official Account client (`None` when not configured).
    pub wechat_client: Option<Arc<WeChatClient>>,
    /// LLM client (`None` when not configured).
    pub llm_client: Option<Arc<LlmClient>>,
}
1721

1822
impl AppState {
1923
/// 创建新的应用状态
20-
pub fn new(pool: Arc<Pool<Postgres>>, wechat_client: Option<WeChatClient>) -> Self {
24+
pub fn new(
25+
pool: Arc<Pool<Postgres>>,
26+
wechat_client: Option<WeChatClient>,
27+
llm_client: Option<LlmClient>,
28+
) -> Self {
2129
Self {
2230
pool,
2331
wechat_client: wechat_client.map(Arc::new),
32+
llm_client: llm_client.map(Arc::new),
2433
}
2534
}
2635
}

0 commit comments

Comments
 (0)