diff --git a/.gitignore b/.gitignore
index 351e54b8..0f2765e6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -131,3 +131,4 @@ temp/
*.zip
models/*/models/*
models/*/output/*
+hf_models/*/eval/*
diff --git a/ci/get_changed_bundle.py b/ci/get_changed_bundle.py
index 054ab0d3..a177ea86 100644
--- a/ci/get_changed_bundle.py
+++ b/ci/get_changed_bundle.py
@@ -29,9 +29,26 @@ def get_changed_bundle(changed_dirs):
print(bundle_names)
+def get_changed_hf_model(changed_dirs):
+ """
+    This function is used to get all changed hf models. A string containing
+    all changed hf model names is printed so that it can be consumed by shell scripts.
+ """
+ hf_model_names = ""
+ root_path = "hf_models"
+ hf_model_list = get_changed_bundle_list(changed_dirs, root_path=root_path)
+ for hf_model in hf_model_list:
+ hf_model_names += f"{hf_model} "
+ print(hf_model_names)
+
+
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="")
parser.add_argument("-f", "--f", type=str, help="changed files.")
+ parser.add_argument("--hf_model", type=bool, default=False, help="if true, get changed hf models.")
args = parser.parse_args()
changed_dirs = args.f.splitlines()
- get_changed_bundle(changed_dirs)
+ if args.hf_model:
+ get_changed_hf_model(changed_dirs)
+ else:
+ get_changed_bundle(changed_dirs)
diff --git a/ci/prepare_schema.py b/ci/prepare_schema.py
index c22576cf..4d140c00 100644
--- a/ci/prepare_schema.py
+++ b/ci/prepare_schema.py
@@ -16,7 +16,11 @@
def main(bundle_list, models_path):
- prepare_schema(bundle_list, root_path=models_path)
+ if "hf_models" in models_path:
+ hf_model = True
+ else:
+ hf_model = False
+ prepare_schema(bundle_list, root_path=models_path, hf_model=hf_model)
if __name__ == "__main__":
diff --git a/ci/run_premerge_cpu.sh b/ci/run_premerge_cpu.sh
index f7326adc..5312d0a2 100755
--- a/ci/run_premerge_cpu.sh
+++ b/ci/run_premerge_cpu.sh
@@ -89,6 +89,28 @@ verify_bundle() {
else
echo "this pull request does not change any files in 'models', skip verify."
fi
+ # check hf models
+ hf_model_changes=$(git diff --name-only $head_ref origin/dev -- hf_models)
+ if [ ! -z "$hf_model_changes" ]
+ then
+ # get all changed hf models
+        hf_model_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$hf_model_changes" --hf_model)
+ if [ ! -z "$hf_model_list" ]
+ then
+ python $(pwd)/ci/prepare_schema.py --l "$hf_model_list" --p "hf_models"
+        echo "$hf_model_list"
+ for hf_model in $hf_model_list;
+ do
+ echo "verify hf model: $hf_model"
+ # verify hf model
+ python $(pwd)/ci/verify_hf_model.py -b "$hf_model"
+ done
+ else
+ echo "this pull request does not change any hf models, skip verify."
+ fi
+ else
+ echo "this pull request does not change any hf models, skip verify."
+ fi
}
diff --git a/ci/utils.py b/ci/utils.py
index ba1db0b1..49f2d823 100644
--- a/ci/utils.py
+++ b/ci/utils.py
@@ -67,7 +67,7 @@ def get_changed_bundle_list(changed_dirs: List[str], root_path: str = "models"):
return list(set(changed_bundle_list))
-def prepare_schema(bundle_list: List[str], root_path: str = "models"):
+def prepare_schema(bundle_list: List[str], root_path: str = "models", hf_model: bool = False):
"""
This function is used to prepare schema for changed bundles.
Due to Github's limitation (see: https://github.com/Project-MONAI/model-zoo/issues/111),
@@ -79,7 +79,10 @@ def prepare_schema(bundle_list: List[str], root_path: str = "models"):
for bundle_name in bundle_list:
bundle_path = os.path.join(root_path, bundle_name)
if os.path.exists(bundle_path):
- meta_file_path = os.path.join(bundle_path, "configs/metadata.json")
+ if hf_model:
+ meta_file_path = os.path.join(bundle_path, "metadata.json")
+ else:
+ meta_file_path = os.path.join(bundle_path, "configs/metadata.json")
metadata = get_json_dict(meta_file_path)
schema_url = metadata["schema"]
schema_name = schema_url.split("/")[-1]
diff --git a/ci/verify_hf_model.py b/ci/verify_hf_model.py
new file mode 100644
index 00000000..cedd59f5
--- /dev/null
+++ b/ci/verify_hf_model.py
@@ -0,0 +1,98 @@
+# Copyright (c) MONAI Consortium
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import os
+import sys
+
+from monai.bundle import verify_metadata
+from utils import get_json_dict
+
+
+def verify_hf_model_directory(models_path: str, model_name: str):
+ """
+    Verify that the hf model directory contains the required files:
+ - README.md
+ - LICENSE
+ - metadata.json
+
+ """
+
+ necessary_files_list = ["README.md", "LICENSE", "metadata.json"]
+
+ model_path = os.path.join(models_path, model_name)
+ # verify necessary files are included
+ for file in necessary_files_list:
+ if not os.path.exists(os.path.join(model_path, file)):
+ raise ValueError(f"necessary file {file} is not existing.")
+
+
+def verify_version_changes(models_path: str, model_name: str):
+ """
+ This function is used to verify if "version" and "changelog" are correct in "metadata.json".
+    In addition, when changing an existing hf model, a new version number must be provided.
+
+ """
+
+ model_path = os.path.join(models_path, model_name)
+
+ meta_file_path = os.path.join(model_path, "metadata.json")
+ metadata = get_json_dict(meta_file_path)
+ if "version" not in metadata:
+ raise ValueError(f"'version' is missing in metadata.json of hf model: {model_name}.")
+ if "changelog" not in metadata:
+ raise ValueError(f"'changelog' is missing in metadata.json of hf model: {model_name}.")
+
+ # version number should be in changelog
+ latest_version = metadata["version"]
+ if latest_version not in metadata["changelog"].keys():
+ raise ValueError(
+ f"version number: {latest_version} is missing in 'changelog' in metadata.json of hf model: {model_name}."
+ )
+
+
+def verify_metadata_format(model_path: str):
+ """
+ This function is used to verify the metadata format.
+
+ """
+ verify_metadata(
+ meta_file=os.path.join(model_path, "metadata.json"), filepath=os.path.join(model_path, "eval/schema.json")
+ )
+
+
+def verify(model_name, models_path="hf_models", mode="full"):
+ print(f"start verifying {model_name}:")
+    # add the model path so that custom code shipped with the model can be imported
+    sys.path = [os.path.join(models_path, model_name)] + sys.path
+    # verify the hf model directory structure
+    verify_hf_model_directory(models_path, model_name)
+ print("directory is verified correctly.")
+ if mode != "regular":
+ # verify version, changelog
+ verify_version_changes(models_path, model_name)
+ print("version and changelog are verified correctly.")
+ # verify metadata format and data
+ model_path = os.path.join(models_path, model_name)
+ verify_metadata_format(model_path)
+ print("metadata format is verified correctly.")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="")
+ parser.add_argument("-b", "--b", type=str, help="model name.")
+ parser.add_argument("-p", "--p", type=str, default="hf_models", help="models path.")
+ parser.add_argument("-m", "--mode", type=str, default="full", help="verify model mode (full/min).")
+ args = parser.parse_args()
+ model_name = args.b
+ models_path = args.p
+ mode = args.mode
+ verify(model_name, models_path, mode)
diff --git a/hf_models/README.md b/hf_models/README.md
new file mode 100644
index 00000000..9d36fa47
--- /dev/null
+++ b/hf_models/README.md
@@ -0,0 +1,23 @@
+# Hugging Face Models
+
+This directory contains models that are hosted on Hugging Face. **Important: These models do not follow the traditional MONAI Bundle format and cannot be run using the standard MONAI Bundle APIs.**
+
+Each model directory contains:
+
+1. `metadata.json` - Model metadata following a similar schema to MONAI Bundles
+2. `README.md` - Detailed documentation about the model
+3. `LICENSE` - Model license
+
+## Using HF Models
+
+These models must be accessed directly from Hugging Face using the `huggingface_hub` and `transformers` libraries. For complete usage instructions and examples, please visit the corresponding Hugging Face model repository linked below.
+
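+As a minimal sketch of the download step (each model's full loading code lives in its HF repository), a repository snapshot can be fetched with `huggingface_hub`; the repository id below is one entry from the table that follows, and gated models may additionally require an access token:
+
+```python
+from huggingface_hub import snapshot_download
+
+# download every file of the chosen HF repository and return the local snapshot path
+local_dir = snapshot_download(repo_id="MONAI/Llama3-VILA-M3-3B")
+print(f"model files downloaded to: {local_dir}")
+```
+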
+### Available Models
+
+| Model | Description | HF Repository |
+|-------|-------------|--------------|
+| exaonepath | EXAONEPath is a patch-level pathology pretrained model with 86 million parameters | [LGAI-EXAONE/EXAONEPath](https://huggingface.co/LGAI-EXAONE/EXAONEPath) |
+| llama3_vila_m3_3b | Lightweight medical vision language model that enhances VLMs with medical expert knowledge (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) |
+| llama3_vila_m3_8b | Medical vision language model that utilizes domain-expert models to improve precision in medical imaging tasks (8B parameters) | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) |
+| llama3_vila_m3_13b | Enhanced medical vision language model with improved capabilities for various medical imaging tasks (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) |
+| ct_chat | Vision-language foundational chat model for 3D chest CT volumes | [ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) |
diff --git a/hf_models/ct_chat/LICENSE b/hf_models/ct_chat/LICENSE
new file mode 100644
index 00000000..64fade3f
--- /dev/null
+++ b/hf_models/ct_chat/LICENSE
@@ -0,0 +1,18 @@
+Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)
+
+This is a human-readable summary of (and not a substitute for) the license. See the full license text at: https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
+
+You are free to:
+- Share — copy and redistribute the material in any medium or format
+- Adapt — remix, transform, and build upon the material
+
+Under the following terms:
+- Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made. You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use.
+- NonCommercial — You may not use the material for commercial purposes.
+- ShareAlike — If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original.
+
+No additional restrictions — You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits.
+
+Notices:
+You do not have to comply with the license for elements of the material in the public domain or where your use is permitted by an applicable exception or limitation.
+No warranties are given. The license may not give you all of the permissions necessary for your intended use. For example, other rights such as publicity, privacy, or moral rights may limit how you use the material.
diff --git a/hf_models/ct_chat/README.md b/hf_models/ct_chat/README.md
new file mode 100644
index 00000000..dba2b4f3
--- /dev/null
+++ b/hf_models/ct_chat/README.md
@@ -0,0 +1,88 @@
+---
+license: cc-by-nc-sa-4.0
+tags:
+- computed-tomography
+- chest-ct
+- medical-imaging
+- vision-language-model
+- multimodal
+- medical-assistant
+---
+
+# CT-CHAT Model
+
+## [Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography](https://arxiv.org/abs/2403.17834)
+
+## Model Overview
+
+CT-CHAT is a vision-language foundational chat model for 3D chest CT volumes. Leveraging the VQA dataset derived from CT-RATE and the pretrained 3D vision encoder from CT-CLIP, we developed this multimodal AI assistant, designed specifically to enhance the interpretation and diagnostic capabilities of 3D chest CT imaging.
+
+Building on the strong foundation of CT-CLIP, CT-CHAT integrates both visual and language processing to handle diverse tasks including:
+- Visual question answering
+- Radiology report generation
+- Multiple-choice diagnostic questions
+
+Trained on over 2.7 million question-answer pairs from the CT-RATE dataset, CT-CHAT leverages 3D spatial information, making it superior to 2D-based models. The model not only improves radiologist workflows by reducing interpretation time but also delivers highly accurate and clinically relevant responses, pushing the boundaries of 3D medical imaging analysis.
+
+## Technical Foundation
+
+CT-CHAT builds upon two key technological innovations:
+
+### CT-CLIP
+A CT-focused contrastive language-image pre-training framework that serves as the visual encoder for CT-CHAT. As a versatile, self-supervised model, CT-CLIP is designed for broad application and outperforms state-of-the-art, fully supervised methods in multi-abnormality detection.
+
+### CT-RATE Dataset
+A pioneering dataset of 25,692 non-contrast chest CT volumes (expanded to 50,188 through various reconstructions) paired with corresponding radiology text reports, multi-abnormality labels, and metadata from 21,304 unique patients.
+
+## Model Capabilities
+
+1. **Visual Question Answering**: Answer free-form questions about 3D CT volumes
+2. **Report Generation**: Create comprehensive radiology reports from CT scans
+3. **Diagnostic Support**: Assist with differential diagnoses and abnormality detection
+4. **Educational Use**: Train medical students and residents on CT interpretation
+
+## Terms and Conditions
+
+Users of the CT-CHAT model must agree to the [Terms and Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) which specify:
+
+- The model is intended solely for academic, research, and educational purposes
+- Any commercial exploitation is forbidden without permission
+- Users must maintain data confidentiality and comply with data protection laws
+- Proper attribution is required in any publications resulting from model use
+- Redistribution of the model is not allowed
+
+## Citation
+
+When using this model, please consider citing the following related papers:
+
+```bibtex
+@misc{hamamci2024foundation,
+ title={Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography},
+ author={Ibrahim Ethem Hamamci and Sezgin Er and Furkan Almas and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Irem Dogan and Muhammed Furkan Dasdelen and Omer Faruk Durugol and Bastian Wittmann and Tamaz Amiranashvili and Enis Simsar and Mehmet Simsar and Emine Bensu Erdemir and Abdullah Alanbay and Anjany Sekuboyina and Berkan Lafci and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze},
+ year={2024},
+ eprint={2403.17834},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV},
+ url={https://arxiv.org/abs/2403.17834},
+}
+
+@misc{hamamci2024generatect,
+ title={GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes},
+ author={Ibrahim Ethem Hamamci and Sezgin Er and Anjany Sekuboyina and Enis Simsar and Alperen Tezcan and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Furkan Almas and Irem Dogan and Muhammed Furkan Dasdelen and Chinmay Prabhakar and Hadrien Reynaud and Sarthak Pati and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze},
+ year={2024},
+ eprint={2305.16037},
+ archivePrefix={arXiv},
+ primaryClass={cs.CV},
+ url={https://arxiv.org/abs/2305.16037},
+}
+
+@misc{hamamci2024ct2rep,
+ title={CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging},
+ author={Ibrahim Ethem Hamamci and Sezgin Er and Bjoern Menze},
+ year={2024},
+ eprint={2403.06801},
+ archivePrefix={arXiv},
+ primaryClass={eess.IV},
+ url={https://arxiv.org/abs/2403.06801},
+}
+```
diff --git a/hf_models/ct_chat/metadata.json b/hf_models/ct_chat/metadata.json
new file mode 100644
index 00000000..9f9cef0b
--- /dev/null
+++ b/hf_models/ct_chat/metadata.json
@@ -0,0 +1,37 @@
+{
+ "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json",
+ "version": "1.0.0",
+ "changelog": {
+ "1.0.0": "initial release of CT_CHAT model"
+ },
+ "monai_version": "1.4.0",
+ "pytorch_version": "2.4.0",
+ "numpy_version": "1.24.4",
+ "required_packages_version": {
+ "torch": "2.4.0",
+ "nibabel": "5.2.1",
+ "pandas": "2.2.1",
+ "huggingface_hub": "0.24.2",
+ "datasets": "2.18.0"
+ },
+ "supported_apps": {
+ "ct_clip": "",
+ "ct_chat": ""
+ },
+ "name": "CT_CHAT",
+ "task": "Vision-language foundational chat model for 3D chest CT volumes",
+ "description": "CT-CHAT is a multimodal AI assistant designed to enhance the interpretation and diagnostic capabilities of 3D chest CT imaging. Building on the strong foundation of CT-CLIP, it integrates both visual and language processing to handle diverse tasks like visual question answering, report generation, and multiple-choice questions. Trained on over 2.7 million question-answer pairs from CT-RATE, it leverages 3D spatial information, making it superior to 2D-based models.",
+ "authors": "Ibrahim Ethem Hamamci, Sezgin Er, Furkan Almas, et al.",
+ "copyright": "Ibrahim Ethem Hamamci and collaborators",
+ "data_source": "CT-RATE dataset",
+ "data_type": "3D CT volumes and text",
+ "image_classes": "3D chest CT volumes with radiology reports and Q&A",
+ "huggingface_dataset_id": "ibrahimhamamci/CT-RATE",
+ "huggingface_url": "https://huggingface.co/datasets/ibrahimhamamci/CT-RATE",
+ "intended_use": "Research on multimodal medical AI assistants for radiology interpretation and diagnosis",
+ "references": [
+ "Hamamci, Ibrahim Ethem, et al. 'Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography.' arXiv preprint arXiv:2403.17834 (2024).",
+ "Hamamci, Ibrahim Ethem, et al. 'GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes.' arXiv preprint arXiv:2305.16037 (2024).",
+ "Hamamci, Ibrahim Ethem, et al. 'CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging.' arXiv preprint arXiv:2403.06801 (2024)."
+ ]
+}
diff --git a/hf_models/exaonepath/LICENSE b/hf_models/exaonepath/LICENSE
new file mode 100644
index 00000000..0edcddc0
--- /dev/null
+++ b/hf_models/exaonepath/LICENSE
@@ -0,0 +1,34 @@
+EXAONEPath AI Model License Agreement 1.0 - NC
+
+This EXAONEPath AI Model License Agreement (the "Agreement") is entered into by and between LG AI Research ("Licensor") and the individual or entity exercising the rights under this Agreement ("Licensee").
+
+1. Definitions
+ a. "Model" means the EXAONEPath AI Model, a machine learning model, including all associated weights, parameters, and other components.
+ b. "Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation.
+
+2. License Grant
+ Subject to the terms and conditions of this Agreement, Licensor hereby grants to Licensee a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only.
+
+3. Restrictions
+ a. Commercial Use is not permitted under this license.
+ b. Licensee shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use.
+ c. Licensee shall not use the Model to create, train, or improve any foundation models.
+ d. Licensee shall not rent, lease, lend, sell, redistribute, or sublicense the Model.
+
+4. Disclaimer of Warranties
+ THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT.
+
+5. Limitation of Liability
+ IN NO EVENT SHALL LICENSOR BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL.
+
+6. Attribution
+ Any use of the Model shall include appropriate attribution to LG AI Research and reference to the research paper: "EXAONEPath 1.0 Patch-level Foundation Model for Pathology" (https://arxiv.org/abs/2408.00380).
+
+7. Termination
+ This Agreement will terminate automatically if Licensee breaches any of its terms.
+
+8. Governing Law
+ This Agreement shall be governed by and construed in accordance with the laws of South Korea, without regard to its conflict of law provisions.
+
+9. Entire Agreement
+ This Agreement constitutes the entire agreement between the parties with respect to the use of the Model.
diff --git a/hf_models/exaonepath/README.md b/hf_models/exaonepath/README.md
new file mode 100644
index 00000000..3e52777e
--- /dev/null
+++ b/hf_models/exaonepath/README.md
@@ -0,0 +1,161 @@
+---
+license: other
+license_name: exaonepath
+license_link: LICENSE
+tags:
+- lg-ai
+- EXAONEPath-1.0
+- pathology
+---
+
+# EXAONEPath
+
+## EXAONEPath 1.0 Patch-level Foundation Model for Pathology
+
+[[`Paper`](https://arxiv.org/abs/2408.00380)] [[`Github`](https://github.com/LG-AI-EXAONE/EXAONEPath)] [[`Model`](https://github.com/LG-AI-EXAONE/EXAONEPath/releases/download/1.0.0/EXAONEPath.ckpt)] [[`BibTeX`](#Citation)]
+
+
+## Introduction
+We introduce EXAONEPath, a patch-level pathology pretrained model with 86 million parameters.
+The model was pretrained on 285,153,903 patches extracted from a total of 34,795 WSIs.
+EXAONEPath demonstrates superior performance considering the number of WSIs used and the model's parameter count.
+
+## Quickstart
+Load EXAONEPath and run inference on tile-level images.
+
+### 1. Hardware Requirements ###
+- NVIDIA GPU is required
+- Minimum 8GB GPU memory recommended
+- NVIDIA driver version >= 450.80.02 required
+
+Note: the provided environment setup specifically uses CUDA-enabled PyTorch, so an NVIDIA GPU with a compatible driver is mandatory for running the model.
+
+### 2. Environment Setup ###
+First, install Conda if you haven't already. You can find installation instructions [here](https://docs.anaconda.com/miniconda/).
+Then create and activate the environment using the provided configuration:
+```bash
+git clone https://github.com/LG-AI-EXAONE/EXAONEPath.git
+cd EXAONEPath
+conda env create -f environment.yaml
+conda activate exaonepath
+```
+
+### 3. Load the model & Inference
+#### Load with HuggingFace
+
+
+```python
+import torch
+from PIL import Image
+from macenko import macenko_normalizer
+import torchvision.transforms as transforms
+from vision_transformer import VisionTransformer
+
+hf_token = "YOUR_HUGGING_FACE_ACCESS_TOKEN"
+model = VisionTransformer.from_pretrained("LGAI-EXAONE/EXAONEPath", use_auth_token=hf_token)
+
+transform = transforms.Compose(
+    [
+        transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),  # convert the PIL image to a tensor; Normalize expects tensor input
+        transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+    ]
+)
+
+normalizer = macenko_normalizer()
+img_path = "images/MHIST_aaa.png"
+image = Image.open(img_path).convert("RGB")
+image_macenko = normalizer(image)
+
+sample_input = transform(image_macenko).unsqueeze(0)
+model.cuda()
+model.eval()
+
+features = model(sample_input.cuda())
+```
+
+#### Load Manually
+First, download the EXAONEPath model checkpoint from [here](https://github.com/LG-AI-EXAONE/EXAONEPath/releases/download/1.0.0/EXAONEPath.ckpt)
+
+```python
+import torch
+from PIL import Image
+from macenko import macenko_normalizer
+import torchvision.transforms as transforms
+from vision_transformer import vit_base
+
+file_path = "MODEL_CHECKPOINT_PATH"
+checkpoint = torch.load(file_path, map_location=torch.device('cpu'))
+state_dict = checkpoint['state_dict']
+model = vit_base(patch_size=16, num_classes=0)
+msg = model.load_state_dict(state_dict, strict=False)
+print(f'Pretrained weights found at {file_path} and loaded with msg: {msg}')
+
+transform = transforms.Compose(
+    [
+        transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC),
+        transforms.CenterCrop(224),
+        transforms.ToTensor(),  # convert the PIL image to a tensor; Normalize expects tensor input
+        transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
+    ]
+)
+
+normalizer = macenko_normalizer()
+img_path = "images/MHIST_aaa.png"
+image = Image.open(img_path).convert("RGB")
+image_macenko = normalizer(image)
+
+sample_input = transform(image_macenko).unsqueeze(0)
+model.cuda()
+model.eval()
+
+features = model(sample_input.cuda())
+```
+
+## Model Performance Comparison
+
+We report linear evaluation results on six downstream tasks. Top-1 accuracy is shown; values for models other than GigaPath are taken from the RudolfV paper.
+
+| Model | PCAM | MHIST | CRC-100K | TIL Det. | MSI CRC | MSI STAD | Avg |
+|--------------------------|-----------|-----------|-----------|-----------|-----------|-----------|-----------|
+| ResNet50 ImageNet | 0.833 | 0.806 | 0.849 | 0.915 | 0.653 | 0.664 | 0.787 |
+| ViT-L/16 ImageNet | 0.852 | 0.796 | 0.847 | 0.924 | 0.669 | 0.671 | 0.793 |
+| Lunit | 0.918 | 0.771 | 0.949 | 0.943 | 0.745 | 0.756 | 0.847 |
+| CTransPath | 0.872 | 0.817 | 0.840 | 0.930 | 0.694 | 0.726 | 0.813 |
+| Phikon | 0.906 | 0.795 | 0.883 | **0.946** | 0.733 | 0.751 | 0.836 |
+| Virchow | 0.933 | **0.834** | 0.968 | - | - | - | - |
+| RudolfV | 0.944 | 0.821 | **0.973** | 0.943 | 0.755 | 0.788 | **0.871** |
+| GigaPath (patch encoder) | **0.947** | 0.822 | 0.964 | 0.938 | 0.753 | 0.748 | 0.862 |
+| EXAONEPath (ours) | 0.901 | 0.818 | 0.946 | 0.939 | **0.756** | **0.804** | 0.861 |
+
+Figure 1. Performance comparison of models based on the number of parameters and the number of WSIs used for training. The average Top-1 accuracy represents the mean linear evaluation performance across six downstream tasks.
+
+## License
+The model is licensed under the [EXAONEPath AI Model License Agreement 1.0 - NC](./LICENSE).
+
+## Citation
+If you find EXAONEPath useful, please cite it using this BibTeX:
+```
+@article{yun2024exaonepath,
+ title={EXAONEPath 1.0 Patch-level Foundation Model for Pathology},
+ author={Yun, Juseung and Hu, Yi and Kim, Jinhyung and Jang, Jongseong and Lee, Soonyoung},
+ journal={arXiv preprint arXiv:2408.00380},
+ year={2024}
+}
+```
+
+## Contact
+LG AI Research Technical Support: contact_us1@lgresearch.ai
diff --git a/hf_models/exaonepath/metadata.json b/hf_models/exaonepath/metadata.json
new file mode 100644
index 00000000..5c4e92fe
--- /dev/null
+++ b/hf_models/exaonepath/metadata.json
@@ -0,0 +1,35 @@
+{
+ "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json",
+ "version": "1.0.0",
+ "changelog": {
+ "1.0.0": "initial release of EXAONEPath 1.0"
+ },
+ "monai_version": "1.4.0",
+ "pytorch_version": "2.4.0",
+ "numpy_version": "1.24.4",
+ "required_packages_version": {
+ "torch": "2.4.0",
+ "torchvision": "0.15.0",
+ "torchstain": "1.3.0",
+ "pillow": "10.0.0",
+ "huggingface_hub": "0.24.2",
+ "transformers": "4.43.3"
+ },
+ "supported_apps": {
+ "exaonepath": ""
+ },
+ "name": "EXAONEPath",
+ "task": "Pathology foundation model",
+ "description": "EXAONEPath is a patch-level pathology pretrained model with 86 million parameters, pretrained on 285,153,903 patches extracted from 34,795 WSIs.",
+ "authors": "LG AI Research",
+ "copyright": "LG AI Research",
+ "data_source": "LG AI Research",
+ "data_type": "WSI patches",
+ "image_classes": "RGB pathology image patches",
+ "huggingface_model_id": "LGAI-EXAONE/EXAONEPath",
+ "huggingface_url": "https://huggingface.co/LGAI-EXAONE/EXAONEPath",
+ "intended_use": "Research and clinical support for pathology image analysis",
+ "references": [
+ "Yun, Juseung, et al. 'EXAONEPath 1.0 Patch-level Foundation Model for Pathology', arXiv preprint arXiv:2408.00380 (2024)."
+ ]
+}
diff --git a/hf_models/llama3_vila_m3_13b/LICENSE b/hf_models/llama3_vila_m3_13b/LICENSE
new file mode 100644
index 00000000..20d5ee6b
--- /dev/null
+++ b/hf_models/llama3_vila_m3_13b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 NVIDIA Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/hf_models/llama3_vila_m3_13b/README.md b/hf_models/llama3_vila_m3_13b/README.md
new file mode 100644
index 00000000..a8b16183
--- /dev/null
+++ b/hf_models/llama3_vila_m3_13b/README.md
@@ -0,0 +1,16 @@
+# VILA_M3_13B
+
+VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.
+
+This model is available at: [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B)
+
+## Citation
+
+```
+@article{nath2025vila,
+ title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge},
+ author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang},
+ journal={arXiv preprint arXiv:2411.12915},
+ year={2025}
+}
+```
diff --git a/hf_models/llama3_vila_m3_13b/metadata.json b/hf_models/llama3_vila_m3_13b/metadata.json
new file mode 100644
index 00000000..18b1d117
--- /dev/null
+++ b/hf_models/llama3_vila_m3_13b/metadata.json
@@ -0,0 +1,29 @@
+{
+ "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json",
+ "version": "1.0.0",
+ "changelog": {
+ "1.0.0": "initial release of VILA_M3_13B model"
+ },
+ "monai_version": "1.4.0",
+ "pytorch_version": "2.4.0",
+ "numpy_version": "1.24.4",
+ "required_packages_version": {
+ "torch": "2.4.0",
+ "huggingface_hub": "0.24.2",
+ "transformers": "4.43.3"
+ },
+ "name": "VILA_M3_13B",
+ "task": "Medical vision-language model",
+ "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.",
+ "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH",
+ "copyright": "NVIDIA",
+ "data_source": "NVIDIA",
+ "data_type": "Medical images and text",
+ "image_classes": "Various medical imaging modalities",
+ "huggingface_model_id": "MONAI/Llama3-VILA-M3-13B",
+ "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-13B",
+ "intended_use": "Research in medical vision-language tasks",
+ "references": [
+ "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)."
+ ]
+}
diff --git a/hf_models/llama3_vila_m3_3b/LICENSE b/hf_models/llama3_vila_m3_3b/LICENSE
new file mode 100644
index 00000000..20d5ee6b
--- /dev/null
+++ b/hf_models/llama3_vila_m3_3b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 NVIDIA Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/hf_models/llama3_vila_m3_3b/README.md b/hf_models/llama3_vila_m3_3b/README.md
new file mode 100644
index 00000000..c633cc73
--- /dev/null
+++ b/hf_models/llama3_vila_m3_3b/README.md
@@ -0,0 +1,16 @@
+# VILA_M3_3B
+
+VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.
+
+This model is available at: [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B)
+
+## Citation
+
+```
+@article{nath2025vila,
+ title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge},
+ author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang},
+ journal={arXiv preprint arXiv:2411.12915},
+ year={2025}
+}
+```
diff --git a/hf_models/llama3_vila_m3_3b/metadata.json b/hf_models/llama3_vila_m3_3b/metadata.json
new file mode 100644
index 00000000..16542074
--- /dev/null
+++ b/hf_models/llama3_vila_m3_3b/metadata.json
@@ -0,0 +1,29 @@
+{
+ "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json",
+ "version": "1.0.0",
+ "changelog": {
+ "1.0.0": "initial release of VILA_M3_3B model"
+ },
+ "monai_version": "1.4.0",
+ "pytorch_version": "2.4.0",
+ "numpy_version": "1.24.4",
+ "required_packages_version": {
+ "torch": "2.4.0",
+ "huggingface_hub": "0.24.2",
+ "transformers": "4.43.3"
+ },
+ "name": "VILA_M3_3B",
+ "task": "Medical vision-language model",
+ "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.",
+ "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH",
+ "copyright": "NVIDIA",
+ "data_source": "NVIDIA",
+ "data_type": "Medical images and text",
+ "image_classes": "Various medical imaging modalities",
+ "huggingface_model_id": "MONAI/Llama3-VILA-M3-3B",
+ "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-3B",
+ "intended_use": "Research in medical vision-language tasks",
+ "references": [
+ "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)."
+ ]
+}
diff --git a/hf_models/llama3_vila_m3_8b/LICENSE b/hf_models/llama3_vila_m3_8b/LICENSE
new file mode 100644
index 00000000..20d5ee6b
--- /dev/null
+++ b/hf_models/llama3_vila_m3_8b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2025 NVIDIA Corporation
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/hf_models/llama3_vila_m3_8b/README.md b/hf_models/llama3_vila_m3_8b/README.md
new file mode 100644
index 00000000..a62b5601
--- /dev/null
+++ b/hf_models/llama3_vila_m3_8b/README.md
@@ -0,0 +1,16 @@
+# VILA_M3_8B
+
+VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.
+
+This model is available at: [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B)
+
+## Citation
+
+```
+@article{nath2025vila,
+ title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge},
+ author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang},
+ journal={arXiv preprint arXiv:2411.12915},
+ year={2025}
+}
+```
diff --git a/hf_models/llama3_vila_m3_8b/metadata.json b/hf_models/llama3_vila_m3_8b/metadata.json
new file mode 100644
index 00000000..9fea8354
--- /dev/null
+++ b/hf_models/llama3_vila_m3_8b/metadata.json
@@ -0,0 +1,29 @@
+{
+ "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json",
+ "version": "1.0.0",
+ "changelog": {
+ "1.0.0": "initial release of VILA_M3_8B model"
+ },
+ "monai_version": "1.4.0",
+ "pytorch_version": "2.4.0",
+ "numpy_version": "1.24.4",
+ "required_packages_version": {
+ "torch": "2.4.0",
+ "huggingface_hub": "0.24.2",
+ "transformers": "4.43.3"
+ },
+ "name": "VILA_M3_8B",
+ "task": "Medical vision-language model",
+ "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.",
+ "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH",
+ "copyright": "NVIDIA",
+ "data_source": "NVIDIA",
+ "data_type": "Medical images and text",
+ "image_classes": "Various medical imaging modalities",
+ "huggingface_model_id": "MONAI/Llama3-VILA-M3-8B",
+ "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-8B",
+ "intended_use": "Research in medical vision-language tasks",
+ "references": [
+ "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)."
+ ]
+}
diff --git a/requirements.txt b/requirements.txt
index b9e71fc2..6d29a604 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,2 +1,3 @@
monai>=1.0.1
huggingface_hub==0.29.3
+jsonschema