Skip to content

Commit 4a09493

Browse files
refactor: define the public API (exports and __all__) in __init__.py
1 parent 68a408a commit 4a09493

2 files changed

Lines changed: 17 additions & 9 deletions

File tree

examples/models/lfm2_5_vl/__init__.py

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3,3 +3,11 @@
33
#
44
# This source code is licensed under the BSD-style license found in the
55
# LICENSE file in the root directory of this source tree.
6+
7+
from executorch.examples.models.lfm2_5_vl.convert_weights import convert_weights
8+
from executorch.examples.models.lfm2_5_vl.model import Lfm2p5VlModel
9+
10+
__all__ = [
11+
"convert_weights",
12+
"Lfm2p5VlModel",
13+
]

examples/models/lfm2_5_vl/test/test_lfm2_5_vl.py

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -9,15 +9,10 @@
99

1010
import torch
1111
from executorch.examples.models.lfm2_5_vl.export_lfm2_5_vl import export_all
12-
from executorch.examples.models.lfm2_5_vl.model import IMAGE_SIZE, MAX_SEQ_LEN, Lfm2p5VlModel
13-
14-
# import order matters: portable_lib must come first so its static op registry
15-
# is in place before custom_ops registers against it.
16-
from executorch.extension.pybindings.portable_lib import ( # noqa # usort: skip
12+
from executorch.examples.models.lfm2_5_vl.model import IMAGE_SIZE, Lfm2p5VlModel
13+
from executorch.extension.pybindings.portable_lib import (
1714
_load_for_executorch_from_buffer,
1815
)
19-
from executorch.extension.llm.custom_ops import custom_ops # noqa # usort: skip
20-
from executorch.kernels import quantized # noqa # usort: skip
2116

2217
logging.basicConfig(level=logging.INFO)
2318
logger = logging.getLogger(__name__)
@@ -33,7 +28,9 @@ def setUpClass(cls):
3328

3429
def test_vision_encoder_shape(self):
3530
"""Vision encoder must produce [1, 256, 2048] embeddings."""
36-
pixels = torch.randint(0, 256, (1, 3, IMAGE_SIZE, IMAGE_SIZE), dtype=torch.float32)
31+
pixels = torch.randint(
32+
0, 256, (1, 3, IMAGE_SIZE, IMAGE_SIZE), dtype=torch.float32
33+
)
3734
with torch.no_grad():
3835
embeds = self.lfm2.image_embedding(pixels)
3936
self.assertEqual(embeds.shape, (1, 256, 2048))
@@ -73,7 +70,10 @@ def test_export_and_run(self):
7370
before_embeds = module.run_method("token_embedding", (prompt_before,))[0]
7471
module.run_method(
7572
"text_decoder",
76-
(before_embeds, torch.arange(start_pos, start_pos + before_embeds.shape[1])),
73+
(
74+
before_embeds,
75+
torch.arange(start_pos, start_pos + before_embeds.shape[1]),
76+
),
7777
)
7878
start_pos += before_embeds.shape[1]
7979

0 commit comments

Comments
 (0)