-
Notifications
You must be signed in to change notification settings - Fork 310
Expand file tree
/
Copy pathmain.rs
More file actions
110 lines (96 loc) · 2.83 KB
/
main.rs
File metadata and controls
110 lines (96 loc) · 2.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
// <complete_code>
// <imports>
use foundry_local_sdk::{
ChatCompletionRequestMessage,
ChatCompletionRequestSystemMessage,
ChatCompletionRequestUserMessage,
FoundryLocalConfig, FoundryLocalManager,
};
use std::io::{self, Write};
// </imports>
#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // <init>
    // Initialize the Foundry Local SDK for this application.
    let manager = FoundryLocalManager::create(
        FoundryLocalConfig::new("note-taker"),
    )?;
    // </init>
    // <transcription>
    // Load the speech-to-text model (downloaded on first run, then cached).
    let speech_model = manager
        .catalog()
        .get_model("whisper-tiny")
        .await?;
    if !speech_model.is_cached().await? {
        println!("Downloading speech model...");
        speech_model
            .download(Some(|progress: &str| {
                // "\r" rewrites the same console line with the latest progress.
                print!("\r  {progress}");
                io::stdout().flush().ok();
            }))
            .await?;
        println!();
    }
    speech_model.load().await?;
    println!("Speech model loaded.");
    // Transcribe the audio file
    let audio_client = speech_model.create_audio_client();
    let transcription = audio_client
        .transcribe("meeting-notes.wav")
        .await?;
    println!("\nTranscription:\n{}", transcription.text);
    // Unload the speech model to free memory before loading the chat model.
    speech_model.unload().await?;
    // </transcription>
    // <summarization>
    // Load the chat model for summarization (same cache-then-load flow as above).
    let chat_model = manager
        .catalog()
        .get_model("qwen2.5-0.5b")
        .await?;
    if !chat_model.is_cached().await? {
        println!("Downloading chat model...");
        chat_model
            .download(Some(|progress: &str| {
                print!("\r  {progress}");
                io::stdout().flush().ok();
            }))
            .await?;
        println!();
    }
    chat_model.load().await?;
    println!("Chat model loaded.");
    // Summarize the transcription into organized notes
    let client = chat_model
        .create_chat_client()
        .temperature(0.7)
        .max_tokens(512);
    let messages: Vec<ChatCompletionRequestMessage> = vec![
        ChatCompletionRequestSystemMessage::from(
            "You are a note-taking assistant. Summarize \
             the following transcription into organized, \
             concise notes with bullet points.",
        )
        .into(),
        ChatCompletionRequestUserMessage::from(
            transcription.text.as_str(),
        )
        .into(),
    ];
    let response = client
        .complete_chat(&messages, None)
        .await?;
    // Guard against an empty `choices` array: indexing with `[0]` would panic
    // if the model returned no choices. Fall back to "" as before when the
    // message has no content.
    let summary = response
        .choices
        .first()
        .and_then(|choice| choice.message.content.as_deref())
        .unwrap_or("");
    println!("\nSummary:\n{summary}");
    // Clean up
    chat_model.unload().await?;
    println!("\nDone. Models unloaded.");
    // </summarization>
    Ok(())
}
// </complete_code>