@@ -161,54 +161,106 @@ TEST_F(LLMTest, EmptyPromptThrows) {
161161 EXPECT_THROW ((void )model.generate (" " , nullptr ), RnExecutorchError);
162162}
163163
164+ TEST_F (LLMTest, CountTextTokensPositive) {
165+ LLM model (kValidModelPath , kValidTokenizerPath , {}, mockInvoker_);
166+ EXPECT_GT (model.countTextTokens (" hello world" ), 0 );
167+ }
168+
169+ TEST_F (LLMTest, CountTextTokensEmptyString) {
170+ LLM model (kValidModelPath , kValidTokenizerPath , {}, mockInvoker_);
171+ EXPECT_GE (model.countTextTokens (" " ), 0 );
172+ }
173+
174+ TEST_F (LLMTest, GetMaxContextLengthPositive) {
175+ LLM model (kValidModelPath , kValidTokenizerPath , {}, mockInvoker_);
176+ EXPECT_GT (model.getMaxContextLength (), 0 );
177+ }
178+
179+ TEST_F (LLMTest, ResetZerosGeneratedTokenCount) {
180+ LLM model (kValidModelPath , kValidTokenizerPath , {}, mockInvoker_);
181+ model.generate (formatChatML (kSystemPrompt , " Hi" ), nullptr );
182+ EXPECT_GT (model.getGeneratedTokenCount (), 0 );
183+ model.reset ();
184+ EXPECT_EQ (model.getGeneratedTokenCount (), 0 );
185+ }
186+
187+ TEST_F (LLMTest, PromptTokenCountNonZeroAfterGenerate) {
188+ LLM model (kValidModelPath , kValidTokenizerPath , {}, mockInvoker_);
189+ model.generate (formatChatML (kSystemPrompt , " Hi" ), nullptr );
190+ EXPECT_GT (model.getPromptTokenCount (), 0 );
191+ }
192+
164193TEST (VisionEncoderTest, LoadFailsWithClearErrorWhenMethodMissing) {
165194 // smolLm2_135M_8da4w.pte has no vision_encoder method
166195 auto module = std::make_unique<::executorch::extension::Module>(
167196 " smolLm2_135M_8da4w.pte" ,
168197 ::executorch::extension::Module::LoadMode::File);
169198
170199 auto encoder =
171- std::make_unique<executorch::extension::llm::VisionEncoder>(module . get () );
200+ std::make_unique<executorch::extension::llm::VisionEncoder>(* module );
172201
173202 EXPECT_THROW (encoder->load (), rnexecutorch::RnExecutorchError);
174203}
175204
176- #include < runner/base_llm_runner.h>
// ============================================================================
// VLM-specific tests
// ============================================================================
// Test fixtures for the multimodal (vision-language) model path.
// NOTE(review): paths are resolved relative to the test working directory
// except kTestImagePath, which is a device-absolute file:// URI — confirm the
// test harness pushes the image there.
constexpr auto kVlmModelPath = "lfm2_5_vl_quantized_xnnpack_v2.pte";
constexpr auto kVlmTokenizerPath = "lfm2_vl_tokenizer.json";
constexpr auto kVlmImageToken = "<image>";
constexpr auto kTestImagePath =
    "file:///data/local/tmp/rnexecutorch_tests/test_image.jpg";
213+
214+ TEST_F (LLMTest, TextModelIsNotMultimodal) {
215+ LLM model (kValidModelPath , kValidTokenizerPath , {}, mockInvoker_);
216+ EXPECT_EQ (model.getVisualTokenCount (), 0 );
217+ }
218+
219+ TEST_F (LLMTest, GenerateMultimodalOnTextModelThrows) {
220+ LLM model (kValidModelPath , kValidTokenizerPath , {}, mockInvoker_);
221+ EXPECT_THROW (model.generateMultimodal (" hello" , {}, " <image>" , nullptr ),
222+ RnExecutorchError);
223+ }
177224
178- // Minimal concrete subclass — only used in tests to verify base class behavior
179- class StubRunner : public rnexecutorch ::llm::runner::BaseLLMRunner {
180- public :
181- using BaseLLMRunner::BaseLLMRunner;
182- bool is_loaded () const override { return loaded_; }
183- ::executorch::runtime::Error load_subcomponents () override {
184- loaded_ = true ;
185- return ::executorch::runtime::Error::Ok ;
225+ // Fixture that loads the VLM model once for all VLM tests
226+ class VLMTest : public ::testing::Test {
227+ protected :
228+ static void SetUpTestSuite () {
229+ invoker_ = createMockCallInvoker ();
230+ model_ =
231+ std::make_unique<LLM>( kVlmModelPath , kVlmTokenizerPath ,
232+ std::vector<std::string>{ " vision " }, invoker_) ;
186233 }
187- ::executorch::runtime::Error generate_internal (
188- const std::vector<::executorch::extension::llm::MultimodalInput> &,
189- std::function< void ( const std::string &)>) override {
190- return ::executorch::runtime::Error::Ok ;
234+
235+ static void TearDownTestSuite () {
236+ model_. reset ();
237+ invoker_. reset () ;
191238 }
192- void stop_impl () override {}
193- void set_temperature_impl (float t) override { last_temp_ = t; }
194- void set_topp_impl (float ) override {}
195- void set_count_interval_impl (size_t ) override {}
196- void set_time_interval_impl (size_t ) override {}
197-
198- bool loaded_ = false ;
199- float last_temp_ = -1 .f;
239+
240+ static std::shared_ptr<facebook::react::CallInvoker> invoker_;
241+ static std::unique_ptr<LLM> model_;
200242};
201243
202- TEST (BaseLLMRunnerTest, SetTemperatureWritesConfigAndCallsImpl) {
203- StubRunner runner (nullptr , " dummy_tokenizer.json" );
204- runner.set_temperature (0 .5f );
205- EXPECT_FLOAT_EQ (runner.config_ .temperature , 0 .5f );
206- EXPECT_FLOAT_EQ (runner.last_temp_ , 0 .5f );
244+ std::shared_ptr<facebook::react::CallInvoker> VLMTest::invoker_;
245+ std::unique_ptr<LLM> VLMTest::model_;
246+
247+ TEST_F (VLMTest, GenerateMultimodalEmptyImageTokenThrows) {
248+ EXPECT_THROW (
249+ model_->generateMultimodal (" hello" , {kTestImagePath }, " " , nullptr ),
250+ RnExecutorchError);
207251}
208252
209- TEST (BaseLLMRunnerTest, ResetZerosPos) {
210- StubRunner runner (nullptr , " dummy_tokenizer.json" );
211- runner.pos_ = 42 ;
212- runner.reset ();
213- EXPECT_EQ (runner.pos_ , 0 );
253+ TEST_F (VLMTest, GenerateMultimodalMorePlaceholdersThanImagePaths) {
254+ std::string prompt = std::string (kVlmImageToken ) + " and " + kVlmImageToken ;
255+ EXPECT_THROW (model_->generateMultimodal (prompt, {kTestImagePath },
256+ kVlmImageToken , nullptr ),
257+ RnExecutorchError);
258+ }
259+
260+ TEST_F (VLMTest, GenerateMultimodalMoreImagePathsThanPlaceholders) {
261+ std::string prompt = std::string (kVlmImageToken ) + " describe" ;
262+ EXPECT_THROW (model_->generateMultimodal (prompt,
263+ {kTestImagePath , kTestImagePath },
264+ kVlmImageToken , nullptr ),
265+ RnExecutorchError);
214266}
0 commit comments