Skip to content

Commit 4cbbfcd

Browse files
feat: add lfm 2.5 350M as consts (#1042)
## Description As the name of PR states HF repo: https://huggingface.co/software-mansion/react-native-executorch-lfm2.5-350M ### Introduces a breaking change? - [ ] Yes - [x] No ### Type of change - [ ] Bug fix (change which fixes an issue) - [ ] New feature (change which adds functionality) - [ ] Documentation update (improves or adds clarity to existing documentation) - [x] Other (chores, tests, code style improvements etc.) ### Tested on - [x] iOS - [ ] Android ### Testing instructions <!-- Provide step-by-step instructions on how to test your changes. Include setup details if necessary. --> ### Screenshots <!-- Add screenshots here, if applicable --> ### Related issues <!-- Link related issues here using #issue-number --> ### Checklist - [x] I have performed a self-review of my code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have updated the documentation accordingly - [ ] My changes generate no new warnings ### Additional notes <!-- Include any additional information, assumptions, or context that reviewers might need to understand this PR. --> --------- Co-authored-by: Mateusz Kopciński <mateusz.kopcinski@swmansion.com>
1 parent 3f02766 commit 4cbbfcd

File tree

5 files changed

+61
-27
lines changed

5 files changed

+61
-27
lines changed

apps/llm/components/llmModels.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,8 @@ import {
3131
QWEN2_5_3B_QUANTIZED,
3232
PHI_4_MINI_4B,
3333
PHI_4_MINI_4B_QUANTIZED,
34+
LFM2_5_350M,
35+
LFM2_5_350M_QUANTIZED,
3436
LFM2_5_1_2B_INSTRUCT,
3537
LFM2_5_1_2B_INSTRUCT_QUANTIZED,
3638
LLMProps,
@@ -79,6 +81,8 @@ export const LLM_MODELS: ModelOption<LLMModelSources>[] = [
7981
{ label: 'Phi-4 Mini 4B', value: PHI_4_MINI_4B },
8082
{ label: 'Phi-4 Mini 4B Quantized', value: PHI_4_MINI_4B_QUANTIZED },
8183
// LFM2.5
84+
{ label: 'LFM2.5 350M', value: LFM2_5_350M },
85+
{ label: 'LFM2.5 350M Quantized', value: LFM2_5_350M_QUANTIZED },
8286
{ label: 'LFM2.5 1.2B Instruct', value: LFM2_5_1_2B_INSTRUCT },
8387
{
8488
label: 'LFM2.5 1.2B Instruct Quantized',

docs/docs/03-hooks/01-natural-language-processing/useLLM.md

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -555,13 +555,13 @@ const handleGenerate = async () => {
555555

556556
## Available models
557557

558-
| Model Family | Sizes | Quantized | Capabilities |
559-
| ------------------------------------------------------------------------------------------------------------ | :--------------: | :-------: | :----------: |
560-
| [Hammer 2.1](https://huggingface.co/software-mansion/react-native-executorch-hammer-2.1) | 0.5B, 1.5B, 3B | ✅ | - |
561-
| [Qwen 2.5](https://huggingface.co/software-mansion/react-native-executorch-qwen-2.5) | 0.5B, 1.5B, 3B | ✅ | - |
562-
| [Qwen 3](https://huggingface.co/software-mansion/react-native-executorch-qwen-3) | 0.6B, 1.7B, 4B | ✅ | - |
563-
| [Phi 4 Mini](https://huggingface.co/software-mansion/react-native-executorch-phi-4-mini) | 4B | ✅ | - |
564-
| [SmolLM 2](https://huggingface.co/software-mansion/react-native-executorch-smolLm-2) | 135M, 360M, 1.7B | ✅ | - |
565-
| [LLaMA 3.2](https://huggingface.co/software-mansion/react-native-executorch-llama-3.2) | 1B, 3B | ✅ | - |
566-
| [LFM2.5-1.2B-Instruct](https://huggingface.co/software-mansion/react-native-executorch-lfm2.5-1.2B-instruct) | 1.2B | ✅ | - |
567-
| [LFM2.5-VL-1.6B](https://huggingface.co/software-mansion/react-native-executorch-lfm2.5-VL-1.6B) | 1.6B | ✅ | vision |
558+
| Model Family | Sizes | Quantized | Capabilities |
559+
| ------------------------------------------------------------------------------------------------------------------ | :-----------------: | :-------: | :----------: |
560+
| [Hammer 2.1](https://huggingface.co/software-mansion/react-native-executorch-hammer-2.1) | 0.5B, 1.5B, 3B | ✅ | - |
561+
| [Qwen 2.5](https://huggingface.co/software-mansion/react-native-executorch-qwen-2.5) | 0.5B, 1.5B, 3B | ✅ | - |
562+
| [Qwen 3](https://huggingface.co/software-mansion/react-native-executorch-qwen-3) | 0.6B, 1.7B, 4B | ✅ | - |
563+
| [Phi 4 Mini](https://huggingface.co/software-mansion/react-native-executorch-phi-4-mini) | 4B | ✅ | - |
564+
| [SmolLM 2](https://huggingface.co/software-mansion/react-native-executorch-smolLm-2) | 135M, 360M, 1.7B | ✅ | - |
565+
| [LLaMA 3.2](https://huggingface.co/software-mansion/react-native-executorch-llama-3.2) | 1B, 3B | ✅ | - |
566+
| [LFM2.5](https://huggingface.co/software-mansion/react-native-executorch-lfm-2.5) | 350M, 1.2B, 1.6B-VL | ✅ | - |
567+
| [LFM2.5-VL-1.6B](https://huggingface.co/software-mansion/react-native-executorch-lfm-2.5/tree/main/lfm2.5-VL-1.6B) | 1.6B | ✅ | vision |

docs/versioned_docs/version-0.8.x/03-hooks/01-natural-language-processing/useLLM.md

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -555,13 +555,13 @@ const handleGenerate = async () => {
555555

556556
## Available models
557557

558-
| Model Family | Sizes | Quantized | Capabilities |
559-
| ------------------------------------------------------------------------------------------------------------ | :--------------: | :-------: | :----------: |
560-
| [Hammer 2.1](https://huggingface.co/software-mansion/react-native-executorch-hammer-2.1) | 0.5B, 1.5B, 3B | ✅ | - |
561-
| [Qwen 2.5](https://huggingface.co/software-mansion/react-native-executorch-qwen-2.5) | 0.5B, 1.5B, 3B | ✅ | - |
562-
| [Qwen 3](https://huggingface.co/software-mansion/react-native-executorch-qwen-3) | 0.6B, 1.7B, 4B | ✅ | - |
563-
| [Phi 4 Mini](https://huggingface.co/software-mansion/react-native-executorch-phi-4-mini) | 4B | ✅ | - |
564-
| [SmolLM 2](https://huggingface.co/software-mansion/react-native-executorch-smolLm-2) | 135M, 360M, 1.7B | ✅ | - |
565-
| [LLaMA 3.2](https://huggingface.co/software-mansion/react-native-executorch-llama-3.2) | 1B, 3B | ✅ | - |
566-
| [LFM2.5-1.2B-Instruct](https://huggingface.co/software-mansion/react-native-executorch-lfm2.5-1.2B-instruct) | 1.2B | ✅ | - |
567-
| [LFM2.5-VL-1.6B](https://huggingface.co/software-mansion/react-native-executorch-lfm2.5-VL-1.6B) | 1.6B | ✅ | vision |
558+
| Model Family | Sizes | Quantized | Capabilities |
559+
| -------------------------------------------------------------------------------------------------------------------- | :-----------------: | :-------: | :----------: |
560+
| [Hammer 2.1](https://huggingface.co/software-mansion/react-native-executorch-hammer-2.1) | 0.5B, 1.5B, 3B | ✅ | - |
561+
| [Qwen 2.5](https://huggingface.co/software-mansion/react-native-executorch-qwen-2.5) | 0.5B, 1.5B, 3B | ✅ | - |
562+
| [Qwen 3](https://huggingface.co/software-mansion/react-native-executorch-qwen-3) | 0.6B, 1.7B, 4B | ✅ | - |
563+
| [Phi 4 Mini](https://huggingface.co/software-mansion/react-native-executorch-phi-4-mini) | 4B | ✅ | - |
564+
| [SmolLM 2](https://huggingface.co/software-mansion/react-native-executorch-smolLm-2) | 135M, 360M, 1.7B | ✅ | - |
565+
| [LLaMA 3.2](https://huggingface.co/software-mansion/react-native-executorch-llama-3.2) | 1B, 3B | ✅ | - |
566+
| [LFM2.5](https://huggingface.co/software-mansion/react-native-executorch-lfm-2.5) | 350M, 1.2B, 1.6B-VL | ✅ | - |
567+
| [LFM2.5-VL-1.6B](https://huggingface.co/software-mansion/react-native-executorch-lfm-2.5/tree/v0.8.0/lfm2.5-VL-1.6B) | 1.6B | ✅ | vision |

packages/react-native-executorch/src/constants/modelUrls.ts

Lines changed: 35 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -380,10 +380,10 @@ export const PHI_4_MINI_4B_QUANTIZED = {
380380
} as const;
381381

382382
// LFM2.5-1.2B-Instruct
383-
const LFM2_5_1_2B_INSTRUCT_MODEL = `${URL_PREFIX}-lfm2.5-1.2B-instruct/${VERSION_TAG}/original/lfm2_5_1_2b_fp16.pte`;
384-
const LFM2_5_1_2B_INSTRUCT_QUANTIZED_MODEL = `${URL_PREFIX}-lfm2.5-1.2B-instruct/${VERSION_TAG}/quantized/lfm2_5_1_2b_8da4w.pte`;
385-
const LFM2_5_1_2B_TOKENIZER = `${URL_PREFIX}-lfm2.5-1.2B-instruct/${VERSION_TAG}/tokenizer.json`;
386-
const LFM2_5_1_2B_TOKENIZER_CONFIG = `${URL_PREFIX}-lfm2.5-1.2B-instruct/${VERSION_TAG}/tokenizer_config.json`;
383+
const LFM2_5_1_2B_INSTRUCT_MODEL = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-1.2B-instruct/original/lfm2_5_1_2b_fp16.pte`;
384+
const LFM2_5_1_2B_INSTRUCT_QUANTIZED_MODEL = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-1.2B-instruct/quantized/lfm2_5_1_2b_8da4w.pte`;
385+
const LFM2_5_1_2B_TOKENIZER = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-1.2B-instruct/tokenizer.json`;
386+
const LFM2_5_1_2B_TOKENIZER_CONFIG = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-1.2B-instruct/tokenizer_config.json`;
387387

388388
/**
389389
* @category Models - LLM
@@ -405,10 +405,36 @@ export const LFM2_5_1_2B_INSTRUCT_QUANTIZED = {
405405
tokenizerConfigSource: LFM2_5_1_2B_TOKENIZER_CONFIG,
406406
} as const;
407407

408+
// LFM2.5-350M
409+
const LFM2_5_350M_MODEL = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-350M/xnnpack/lfm2_5_350m_xnnpack_fp16.pte`;
410+
const LFM2_5_350M_QUANTIZED_MODEL = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-350M/xnnpack/lfm2_5_350m_xnnpack_8w4da.pte`;
411+
const LFM2_5_350M_TOKENIZER = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-350M/tokenizer.json`;
412+
const LFM2_5_350M_TOKENIZER_CONFIG = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-350M/tokenizer_config.json`;
413+
414+
/**
415+
* @category Models - LLM
416+
*/
417+
export const LFM2_5_350M = {
418+
modelName: 'lfm2.5-350m',
419+
modelSource: LFM2_5_350M_MODEL,
420+
tokenizerSource: LFM2_5_350M_TOKENIZER,
421+
tokenizerConfigSource: LFM2_5_350M_TOKENIZER_CONFIG,
422+
} as const;
423+
424+
/**
425+
* @category Models - LLM
426+
*/
427+
export const LFM2_5_350M_QUANTIZED = {
428+
modelName: 'lfm2.5-350m-quantized',
429+
modelSource: LFM2_5_350M_QUANTIZED_MODEL,
430+
tokenizerSource: LFM2_5_350M_TOKENIZER,
431+
tokenizerConfigSource: LFM2_5_350M_TOKENIZER_CONFIG,
432+
} as const;
433+
408434
// LFM2.5-VL-1.6B
409-
const LFM2_VL_1_6B_QUANTIZED_MODEL = `${URL_PREFIX}-lfm2.5-VL-1.6B/${VERSION_TAG}/quantized/lfm2_5_vl_1_6b_8da4w_xnnpack.pte`;
410-
const LFM2_VL_TOKENIZER = `${URL_PREFIX}-lfm2.5-VL-1.6B/${VERSION_TAG}/tokenizer.json`;
411-
const LFM2_VL_TOKENIZER_CONFIG = `${URL_PREFIX}-lfm2.5-VL-1.6B/${VERSION_TAG}/tokenizer_config.json`;
435+
const LFM2_VL_1_6B_QUANTIZED_MODEL = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-VL-1.6B/quantized/lfm2_5_vl_1_6b_8da4w_xnnpack.pte`;
436+
const LFM2_VL_TOKENIZER = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-VL-1.6B/tokenizer.json`;
437+
const LFM2_VL_TOKENIZER_CONFIG = `${URL_PREFIX}-lfm-2.5/${VERSION_TAG}/lfm2.5-VL-1.6B/tokenizer_config.json`;
412438

413439
/**
414440
* @category Models - VLM
@@ -1081,6 +1107,8 @@ export const MODEL_REGISTRY = {
10811107
QWEN2_5_3B_QUANTIZED,
10821108
PHI_4_MINI_4B,
10831109
PHI_4_MINI_4B_QUANTIZED,
1110+
LFM2_5_350M,
1111+
LFM2_5_350M_QUANTIZED,
10841112
LFM2_5_1_2B_INSTRUCT,
10851113
LFM2_5_1_2B_INSTRUCT_QUANTIZED,
10861114
LFM2_VL_1_6B_QUANTIZED,

packages/react-native-executorch/src/types/llm.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,8 @@ export type LLMModelName =
5151
| 'qwen2.5-3b-quantized'
5252
| 'phi-4-mini-4b'
5353
| 'phi-4-mini-4b-quantized'
54+
| 'lfm2.5-350m'
55+
| 'lfm2.5-350m-quantized'
5456
| 'lfm2.5-1.2b-instruct'
5557
| 'lfm2.5-1.2b-instruct-quantized'
5658
| 'lfm2.5-vl-1.6b-quantized';

0 commit comments

Comments
 (0)