-
Notifications
You must be signed in to change notification settings - Fork 993
Expand file tree
/
Copy pathllava_runner.cpp
More file actions
186 lines (154 loc) · 5.36 KB
/
llava_runner.cpp
File metadata and controls
186 lines (154 loc) · 5.36 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
/*
* Copyright (c) Meta Platforms, Inc. and affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// A simple LLaVA runner that includes preprocessing and post processing logic.
// The runner takes in a prompt string as well as a list of images as input and
// emits a string as output.
#include <executorch/examples/models/llava/runner/llava_image_prefiller.h>
#include <executorch/examples/models/llava/runner/llava_runner.h>
#include <executorch/examples/models/llava/runner/llava_text_decoder_runner.h>
#include <pytorch/tokenizers/llama2c_tokenizer.h>
#include <ctime>
#include <memory>
#include <sstream>
#include <vector>
namespace llm = ::executorch::extension::llm;
using ::executorch::runtime::Error;
using ::executorch::runtime::Result;
namespace example {
bool LlavaRunner::is_loaded() {
  // Every sub-component must have been instantiated before we can ask any
  // of them about their load state.
  if (!(tokenizer_ && text_decoder_runner_ && text_prefiller_ &&
        image_prefiller_ && text_token_generator_)) {
    return false;
  }
  // Loaded means both the text decoder method and the image prefiller
  // method are resident in the runtime.
  return text_decoder_runner_->is_method_loaded() &&
      image_prefiller_->is_method_loaded();
}
// Instantiates and loads every sub-component of the runner: tokenizer,
// text decoder runner, text prefiller, image prefiller, and token
// generator. Idempotent — returns Error::Ok immediately if already loaded.
// Records model load start/end timestamps into stats_.
Error LlavaRunner::load() {
  if (is_loaded()) {
    return Error::Ok;
  }
  stats_.model_load_start_ms = llm::time_in_ms();

  // Load the tokenizer.
  // NOTE(review): the tokenizer's load() result is still discarded here —
  // its error type comes from the tokenizers library, not
  // executorch::runtime::Error; confirm whether a tokenizer load failure
  // should abort the whole load.
  tokenizer_ = std::make_unique<tokenizers::Llama2cTokenizer>();
  tokenizer_->load(tokenizer_path_);

  // Load the text decoder runner. Previously the Error returned by load()
  // was silently discarded, so a failed method load went undetected until
  // first use; propagate it now.
  text_decoder_runner_ = std::make_unique<LlavaTextDecoderRunner>(
      module_.get(), tokenizer_->vocab_size(), temperature_);
  ET_CHECK_OK_OR_RETURN_ERROR(text_decoder_runner_->load());

  // Construct the text prefiller (shares the decoder runner; no separate
  // method of its own to load here).
  text_prefiller_ = std::make_unique<llm::TextPrefiller>(
      text_decoder_runner_.get(),
      /*use_kv_cache=*/true,
      /*enable_parallel_prefill=*/true,
      /*max_seq_len=*/128);

  // Load the image prefiller; propagate a failed method load as above.
  image_prefiller_ = std::make_unique<LlavaImagePrefiller>(module_.get());
  ET_CHECK_OK_OR_RETURN_ERROR(image_prefiller_->load());

  // Construct the text token generator; generation stops at the
  // tokenizer's EOS token.
  text_token_generator_ = std::make_unique<llm::TextTokenGenerator>(
      tokenizer_.get(),
      text_decoder_runner_.get(),
      /*use_kv_cache=*/true,
      std::make_unique<std::unordered_set<uint64_t>>(
          std::unordered_set<uint64_t>{tokenizer_->eos_tok()}),
      &stats_);

  stats_.model_load_end_ms = llm::time_in_ms();
  return Error::Ok;
}
// Runs each image through the image prefiller, in order. start_pos is
// advanced by the prefiller itself as image tokens are consumed.
Error LlavaRunner::prefill_images(
    std::vector<llm::Image>& images,
    int64_t& start_pos) {
  for (size_t idx = 0; idx < images.size(); ++idx) {
    // ET_UNWRAP propagates any prefill failure to our caller; the unwrapped
    // value itself is not needed.
    ET_UNWRAP(image_prefiller_->prefill(images[idx], start_pos));
  }
  return Error::Ok;
}
// Tokenizes `prompt` (optionally framed with BOS/EOS per the flags) and
// feeds the tokens to the text prefiller. start_pos is advanced by the
// prefiller; the returned value is the next predicted token.
Result<uint64_t> LlavaRunner::prefill_prompt(
    const std::string& prompt,
    int64_t& start_pos,
    int8_t bos,
    int8_t eos) {
  // Encoding failures are converted and propagated by ET_UNWRAP_TOKENIZER.
  std::vector<uint64_t> tokens =
      ET_UNWRAP_TOKENIZER(tokenizer_->encode(prompt, bos, eos));
  return text_prefiller_->prefill(tokens, start_pos);
}
// Generates text starting from an already-prefilled context position.
// Prefills the user prompt (no BOS — the preset prompt already carries
// it), then runs autoregressive generation up to seq_len tokens.
// Timing/count stats are recorded into stats_ and, if requested,
// reported through stats_callback.
Error LlavaRunner::generate_from_pos(
    const std::string& prompt,
    int32_t seq_len,
    int64_t start_pos,
    std::function<void(const std::string&)> token_callback,
    std::function<void(const ::executorch::extension::llm::Stats&)>
        stats_callback,
    bool echo) {
  // Optionally echo the user prompt back through the token callback.
  if (echo) {
    token_callback(prompt);
  }

  // Prefill the user prompt; the unwrapped value is the first token to
  // seed generation with.
  uint64_t next_token =
      ET_UNWRAP(prefill_prompt(prompt, start_pos, /*bos=*/0, /*eos*/ 0));
  stats_.first_token_ms = llm::time_in_ms();
  stats_.prompt_eval_end_ms = llm::time_in_ms();
  stats_.num_prompt_tokens = start_pos;

  // Autoregressive generation until seq_len (or the generator's stop set).
  int64_t generated_count = ET_UNWRAP(text_token_generator_->generate(
      {next_token}, start_pos, seq_len, token_callback));

  // Bookkeeping: record the count and report stats if the caller asked.
  stats_.num_generated_tokens = generated_count;
  if (stats_callback) {
    stats_callback(stats_);
  }
  return Error::Ok;
}
// End-to-end generation: loads the model if needed, prefills the preset
// prompt and the input images, then generates text from the user prompt.
// Every emitted token piece is printed to stdout and forwarded to
// token_callback; timing stats are printed and passed to stats_callback.
Error LlavaRunner::generate(
    std::vector<llm::Image> images,
    const std::string& prompt,
    int32_t seq_len,
    std::function<void(const std::string&)> token_callback,
    std::function<void(const llm::Stats&)> stats_callback,
    bool echo) {
  ET_CHECK_MSG(!prompt.empty(), "Prompt cannot be null");
  if (!is_loaded()) {
    ET_CHECK_OK_OR_RETURN_ERROR(load());
  }
  ET_LOG(
      Info,
      "RSS after loading model: %f MiB (0 if unsupported)",
      llm::get_rss_bytes() / 1024.0 / 1024.0);

  // Wrap the token_callback so every generated piece is also printed to
  // stdout as it arrives.
  std::function<void(const std::string&)> wrapped_callback =
      [token_callback](const std::string& piece) {
        llm::safe_printf(piece.c_str());
        fflush(stdout);
        if (token_callback) {
          token_callback(piece);
        }
      };

  int64_t pos = 0;
  stats_.inference_start_ms = llm::time_in_ms();

  // Prefill the preset prompt (carries the BOS token). The Result was
  // previously discarded, so a failed prefill went unnoticed and generation
  // continued on a broken context; propagate the error instead.
  ET_UNWRAP(prefill_prompt(kPresetPrompt, pos, /*bos=*/1, /*eos*/ 0));
  // Prefill the images; pos is advanced inside. Propagate failures as well
  // (previously also silently discarded).
  ET_CHECK_OK_OR_RETURN_ERROR(prefill_images(images, pos));
  ET_LOG(
      Info,
      "RSS after prompt and image prefill: %f MiB (0 if unsupported)",
      llm::get_rss_bytes() / 1024.0 / 1024.0);

  // Generate tokens from the combined (preset + images + user) context.
  Error err = generate_from_pos(
      prompt, seq_len, pos, wrapped_callback, stats_callback, echo);

  stats_.inference_end_ms = llm::time_in_ms();
  ::executorch::llm::print_report(stats_);
  ET_LOG(
      Info,
      "RSS after finishing text generation: %f MiB (0 if unsupported)",
      llm::get_rss_bytes() / 1024.0 / 1024.0);
  return err;
}
} // namespace example