 
 import torch
 from executorch.examples.models.lfm2_5_vl.export_lfm2_5_vl import export_all
-from executorch.examples.models.lfm2_5_vl.model import IMAGE_SIZE, MAX_SEQ_LEN, Lfm2p5VlModel
-
-# import order matters: portable_lib must come first so its static op registry
-# is in place before custom_ops registers against it.
-from executorch.extension.pybindings.portable_lib import (  # noqa # usort: skip
+from executorch.examples.models.lfm2_5_vl.model import IMAGE_SIZE, Lfm2p5VlModel
+from executorch.extension.pybindings.portable_lib import (
     _load_for_executorch_from_buffer,
 )
-from executorch.extension.llm.custom_ops import custom_ops  # noqa # usort: skip
-from executorch.kernels import quantized  # noqa # usort: skip
 
 logging.basicConfig(level=logging.INFO)
 logger = logging.getLogger(__name__)
@@ -33,7 +28,9 @@ def setUpClass(cls):
 
     def test_vision_encoder_shape(self):
         """Vision encoder must produce [1, 256, 2048] embeddings."""
-        pixels = torch.randint(0, 256, (1, 3, IMAGE_SIZE, IMAGE_SIZE), dtype=torch.float32)
+        pixels = torch.randint(
+            0, 256, (1, 3, IMAGE_SIZE, IMAGE_SIZE), dtype=torch.float32
+        )
         with torch.no_grad():
             embeds = self.lfm2.image_embedding(pixels)
         self.assertEqual(embeds.shape, (1, 256, 2048))
@@ -73,7 +70,10 @@ def test_export_and_run(self):
         before_embeds = module.run_method("token_embedding", (prompt_before,))[0]
         module.run_method(
             "text_decoder",
-            (before_embeds, torch.arange(start_pos, start_pos + before_embeds.shape[1])),
+            (
+                before_embeds,
+                torch.arange(start_pos, start_pos + before_embeds.shape[1]),
+            ),
         )
         start_pos += before_embeds.shape[1]
 
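For context, a minimal sketch of how this interleaved prefill might continue past the hunk above: the pre-image tokens have been fed to the decoder and start_pos now points at the next free position, so the image embeddings would be spliced in there. This reuses module, start_pos, IMAGE_SIZE, and torch from the test, and assumes the exported module also exposes an "image_embedding" method mirroring the eager model's; only "token_embedding" and "text_decoder" are confirmed by this diff.

# Sketch only: "image_embedding" as an exported method is an assumption,
# mirroring self.lfm2.image_embedding from the eager-mode test above.
pixels = torch.randint(0, 256, (1, 3, IMAGE_SIZE, IMAGE_SIZE), dtype=torch.float32)
image_embeds = module.run_method("image_embedding", (pixels,))[0]
module.run_method(
    "text_decoder",
    (
        image_embeds,
        torch.arange(start_pos, start_pos + image_embeds.shape[1]),
    ),
)
start_pos += image_embeds.shape[1]  # 256 image tokens, per the shape test above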