From 54045aa3f247b67b6906ca942f763b56e21f9101 Mon Sep 17 00:00:00 2001 From: Michael Zephyr Date: Tue, 18 Mar 2025 07:28:42 -0700 Subject: [PATCH 01/11] Add HF Models structure and initial models --- hf_models/README.md | 34 +++++ hf_models/ct-rate/LICENSE | 18 +++ hf_models/ct-rate/README.md | 92 ++++++++++++ hf_models/ct-rate/metadata.json | 36 +++++ hf_models/exaonepath/LICENSE | 34 +++++ hf_models/exaonepath/README.md | 161 +++++++++++++++++++++ hf_models/exaonepath/metadata.json | 33 +++++ hf_models/llama3-vila-m3-13b/LICENSE | 33 +++++ hf_models/llama3-vila-m3-13b/README.md | 54 +++++++ hf_models/llama3-vila-m3-13b/metadata.json | 34 +++++ hf_models/llama3-vila-m3-3b/LICENSE | 33 +++++ hf_models/llama3-vila-m3-3b/README.md | 54 +++++++ hf_models/llama3-vila-m3-3b/metadata.json | 34 +++++ hf_models/llama3-vila-m3-8b/LICENSE | 33 +++++ hf_models/llama3-vila-m3-8b/README.md | 52 +++++++ hf_models/llama3-vila-m3-8b/metadata.json | 34 +++++ 16 files changed, 769 insertions(+) create mode 100644 hf_models/README.md create mode 100644 hf_models/ct-rate/LICENSE create mode 100644 hf_models/ct-rate/README.md create mode 100644 hf_models/ct-rate/metadata.json create mode 100644 hf_models/exaonepath/LICENSE create mode 100644 hf_models/exaonepath/README.md create mode 100644 hf_models/exaonepath/metadata.json create mode 100644 hf_models/llama3-vila-m3-13b/LICENSE create mode 100644 hf_models/llama3-vila-m3-13b/README.md create mode 100644 hf_models/llama3-vila-m3-13b/metadata.json create mode 100644 hf_models/llama3-vila-m3-3b/LICENSE create mode 100644 hf_models/llama3-vila-m3-3b/README.md create mode 100644 hf_models/llama3-vila-m3-3b/metadata.json create mode 100644 hf_models/llama3-vila-m3-8b/LICENSE create mode 100644 hf_models/llama3-vila-m3-8b/README.md create mode 100644 hf_models/llama3-vila-m3-8b/metadata.json diff --git a/hf_models/README.md b/hf_models/README.md new file mode 100644 index 00000000..32606978 --- /dev/null +++ b/hf_models/README.md @@ -0,0 
+1,34 @@ +# Hugging Face Models + +This directory contains models that are hosted on Hugging Face. **Important: These models do not follow the traditional MONAI Bundle format and cannot be run using the standard MONAI Bundle APIs.** + +Each model directory contains: + +1. `configs/metadata.json` - Model metadata following a similar schema to MONAI Bundles +2. `configs/inference.json` - Configuration that references the HF model but may not be directly executable +3. `configs/logging.conf` - Logging configuration +4. `docs/README.md` - Detailed documentation about the model +5. `large_files.yml` - References the Hugging Face model repository +6. `LICENSE` - Model license + +## Using HF Models + +These models must be accessed directly from Hugging Face using the `huggingface_hub` and `transformers` libraries. For complete usage instructions and examples, please visit the corresponding Hugging Face model repository linked below. + +### Authentication + +Some models may require authentication with a Hugging Face token. 
You can set your token as an environment variable: + +```bash +export HF_TOKEN=your_huggingface_token +``` + +### Available Models + +| Model | Description | HF Repository | +|-------|-------------|--------------| +| exaonepath | EXAONEPath is a patch-level pathology pretrained model with 86 million parameters | [LGAI-EXAONE/EXAONEPath](https://huggingface.co/LGAI-EXAONE/EXAONEPath) | +| llama3-vila-m3-3b | Lightweight medical visual language model based on VILA and Llama 3 (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) | +| llama3-vila-m3-8b | Medical visual language model based on VILA and Llama 3 that supports medical image analysis | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) | +| llama3-vila-m3-13b | Enhanced medical visual language model based on VILA and Llama 3 with improved reasoning capabilities (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) | +| ct-rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | [ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) | \ No newline at end of file diff --git a/hf_models/ct-rate/LICENSE b/hf_models/ct-rate/LICENSE new file mode 100644 index 00000000..92b8f0f7 --- /dev/null +++ b/hf_models/ct-rate/LICENSE @@ -0,0 +1,18 @@ +Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0) + +This is a human-readable summary of (and not a substitute for) the license. See the full license text at: https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode + +You are free to: +- Share — copy and redistribute the material in any medium or format +- Adapt — remix, transform, and build upon the material + +Under the following terms: +- Attribution — You must give appropriate credit, provide a link to the license, and indicate if changes were made. 
You may do so in any reasonable manner, but not in any way that suggests the licensor endorses you or your use. +- NonCommercial — You may not use the material for commercial purposes. +- ShareAlike — If you remix, transform, or build upon the material, you must distribute your contributions under the same license as the original. + +No additional restrictions — You may not apply legal terms or technological measures that legally restrict others from doing anything the license permits. + +Notices: +You do not have to comply with the license for elements of the material in the public domain or where your use is permitted by an applicable exception or limitation. +No warranties are given. The license may not give you all of the permissions necessary for your intended use. For example, other rights such as publicity, privacy, or moral rights may limit how you use the material. \ No newline at end of file diff --git a/hf_models/ct-rate/README.md b/hf_models/ct-rate/README.md new file mode 100644 index 00000000..9409f3a0 --- /dev/null +++ b/hf_models/ct-rate/README.md @@ -0,0 +1,92 @@ +--- +license: cc-by-nc-sa-4.0 +tags: +- computed-tomography +- chest-ct +- medical-imaging +- dataset +- multimodal +--- + +# CT-RATE Dataset + +## [Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography](https://arxiv.org/abs/2403.17834) + +CT-RATE is a pioneering dataset in 3D medical imaging that uniquely pairs textual data with image data focused on chest CT volumes. The dataset comprises chest CT volumes paired with corresponding radiology text reports, multi-abnormality labels, and metadata, all freely accessible to researchers. + +## Dataset Overview + +CT-RATE consists of 25,692 non-contrast chest CT volumes, expanded to 50,188 through various reconstructions, from 21,304 unique patients, along with corresponding radiology text reports, multi-abnormality labels, and metadata. 
+ +The dataset is divided into: +- Training set: 20,000 patients +- Validation set: 1,304 patients + +File naming convention: `split_patientID_scanID_reconstructionID` +For example, "valid_53_a_1" indicates a CT volume from the validation set, scan "a" from patient 53, and reconstruction 1 of scan "a". + +## Applications + +This dataset has been used to develop several groundbreaking models: + +### CT-CLIP +A CT-focused contrastive language-image pre-training framework. As a versatile, self-supervised model, CT-CLIP is designed for broad application and does not require task-specific training. CT-CLIP outperforms state-of-the-art, fully supervised methods in multi-abnormality detection across all key metrics. + +### CT-CHAT +A multimodal AI assistant designed to enhance the interpretation and diagnostic capabilities of 3D chest CT imaging. Building on CT-CLIP, it integrates both visual and language processing to handle diverse tasks like visual question answering, report generation, and multiple-choice questions. + +## Dataset Configurations + +1. **Labels**: Multi-abnormality labels for the CT volumes +2. **Reports**: Corresponding radiology text reports +3. **Metadata**: Additional metadata for each CT volume + +## Terms and Conditions + +Users of the CT-RATE dataset must agree to the [Terms and Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) which specify: + +- The dataset is intended solely for academic, research, and educational purposes +- Any commercial exploitation is forbidden without permission +- Users must maintain data confidentiality and comply with data protection laws +- Proper attribution is required in any publications resulting from dataset use +- Redistribution of the dataset is not allowed + +## Ethical Approval + +Ethical approval documentation is available for researchers who require it for grant applications. 
+ +## Citation + +When using this dataset, please consider citing the following related papers: + +```bibtex +@misc{hamamci2024foundation, + title={Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography}, + author={Ibrahim Ethem Hamamci and Sezgin Er and Furkan Almas and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Irem Dogan and Muhammed Furkan Dasdelen and Omer Faruk Durugol and Bastian Wittmann and Tamaz Amiranashvili and Enis Simsar and Mehmet Simsar and Emine Bensu Erdemir and Abdullah Alanbay and Anjany Sekuboyina and Berkan Lafci and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, + year={2024}, + eprint={2403.17834}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2403.17834}, +} + +@misc{hamamci2024generatect, + title={GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes}, + author={Ibrahim Ethem Hamamci and Sezgin Er and Anjany Sekuboyina and Enis Simsar and Alperen Tezcan and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Furkan Almas and Irem Dogan and Muhammed Furkan Dasdelen and Chinmay Prabhakar and Hadrien Reynaud and Sarthak Pati and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, + year={2024}, + eprint={2305.16037}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2305.16037}, +} + +@misc{hamamci2024ct2rep, + title={CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging}, + author={Ibrahim Ethem Hamamci and Sezgin Er and Bjoern Menze}, + year={2024}, + eprint={2403.06801}, + archivePrefix={arXiv}, + primaryClass={eess.IV}, + url={https://arxiv.org/abs/2403.06801}, +} +``` \ No newline at end of file diff --git a/hf_models/ct-rate/metadata.json b/hf_models/ct-rate/metadata.json new file mode 100644 index 00000000..b8cb7dde --- /dev/null +++ b/hf_models/ct-rate/metadata.json @@ -0,0 +1,36 @@ +{ + "version": "1.0.0", + "changelog": { + "1.0.0": "initial release of CT-RATE dataset" + }, + 
"monai_version": "1.4.0", + "pytorch_version": "2.4.0", + "numpy_version": "1.24.4", + "required_packages_version": { + "torch": "2.4.0", + "nibabel": "5.2.1", + "pandas": "2.2.1", + "huggingface_hub": "0.24.2", + "datasets": "2.18.0" + }, + "supported_apps": { + "ct-clip": "", + "ct-chat": "" + }, + "name": "CT-RATE", + "task": "3D Chest CT multimodal dataset", + "description": "CT-RATE is a pioneering dataset of 25,692 non-contrast chest CT volumes (expanded to 50,188 through various reconstructions) paired with corresponding radiology text reports, multi-abnormality labels, and metadata from 21,304 unique patients.", + "authors": "Ibrahim Ethem Hamamci, Sezgin Er, Furkan Almas, et al.", + "copyright": "Ibrahim Ethem Hamamci and collaborators", + "data_source": "CT-RATE dataset", + "data_type": "3D CT volumes and text", + "image_classes": "3D chest CT volumes with various abnormalities", + "huggingface_dataset_id": "ibrahimhamamci/CT-RATE", + "huggingface_url": "https://huggingface.co/datasets/ibrahimhamamci/CT-RATE", + "intended_use": "Research and development of AI models for 3D CT analysis", + "references": [ + "Hamamci, Ibrahim Ethem, et al. 'Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography.' arXiv preprint arXiv:2403.17834 (2024).", + "Hamamci, Ibrahim Ethem, et al. 'GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes.' arXiv preprint arXiv:2305.16037 (2024).", + "Hamamci, Ibrahim Ethem, et al. 'CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging.' arXiv preprint arXiv:2403.06801 (2024)." 
+ ] +} \ No newline at end of file diff --git a/hf_models/exaonepath/LICENSE b/hf_models/exaonepath/LICENSE new file mode 100644 index 00000000..151c62a7 --- /dev/null +++ b/hf_models/exaonepath/LICENSE @@ -0,0 +1,34 @@ +EXAONEPath AI Model License Agreement 1.0 - NC + +This EXAONEPath AI Model License Agreement (the "Agreement") is entered into by and between LG AI Research ("Licensor") and the individual or entity exercising the rights under this Agreement ("Licensee"). + +1. Definitions + a. "Model" means the EXAONEPath AI Model, a machine learning model, including all associated weights, parameters, and other components. + b. "Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation. + +2. License Grant + Subject to the terms and conditions of this Agreement, Licensor hereby grants to Licensee a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only. + +3. Restrictions + a. Commercial Use is not permitted under this license. + b. Licensee shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use. + c. Licensee shall not use the Model to create, train, or improve any foundation models. + d. Licensee shall not rent, lease, lend, sell, redistribute, or sublicense the Model. + +4. Disclaimer of Warranties + THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. + +5. Limitation of Liability + IN NO EVENT SHALL LICENSOR BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL. + +6. 
Attribution + Any use of the Model shall include appropriate attribution to LG AI Research and reference to the research paper: "EXAONEPath 1.0 Patch-level Foundation Model for Pathology" (https://arxiv.org/abs/2408.00380). + +7. Termination + This Agreement will terminate automatically if Licensee breaches any of its terms. + +8. Governing Law + This Agreement shall be governed by and construed in accordance with the laws of South Korea, without regard to its conflict of law provisions. + +9. Entire Agreement + This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. \ No newline at end of file diff --git a/hf_models/exaonepath/README.md b/hf_models/exaonepath/README.md new file mode 100644 index 00000000..29fd6c17 --- /dev/null +++ b/hf_models/exaonepath/README.md @@ -0,0 +1,161 @@ +--- +license: other +license_name: exaonepath +license_link: LICENSE +tags: +- lg-ai +- EXAONEPath-1.0 +- pathology +- lg-ai +--- + +# EXAONEPath + +## EXAONEPath 1.0 Patch-level Foundation Model for Pathology + +[[`Paper`](https://arxiv.org/abs/2408.00380)] [[`Github`](https://github.com/LG-AI-EXAONE/EXAONEPath)] [[`Model`](https://github.com/LG-AI-EXAONE/EXAONEPath/releases/download/1.0.0/EXAONEPath.ckpt)] [[`BibTeX`](#Citation)] + + +## Introduction +We introduce EXAONEPath, a patch-level pathology pretrained model with 86 million parameters. +The model was pretrained on 285,153,903 patches extracted from a total of 34,795 WSIs. +EXAONEPath demonstrates superior performance considering the number of WSIs used and the model's parameter count. + + + + +## Quickstart +Load EXAONEPath and run inference on tile-level images. + +### 1. Hardware Requirements ### +- NVIDIA GPU is required +- Minimum 8GB GPU memory recommended +- NVIDIA driver version >= 450.80.02 required + +Note: This implementation requires NVIDIA GPU and drivers. 
The provided environment setup specifically uses CUDA-enabled PyTorch, making NVIDIA GPU mandatory for running the model. + +### 2. Environment Setup ### +First, install Conda if you haven't already. You can find installation instructions [here](https://docs.anaconda.com/miniconda/). +Then create and activate the environment using the provided configuration: +```bash +git clone https://github.com/LG-AI-EXAONE/EXAONEPath.git +cd EXAONEPath +conda env create -f environment.yaml +conda activate exaonepath +``` + +### 3. Load the model & Inference +#### Load with HuggingFace + + +```python +import torch +from PIL import Image +from macenko import macenko_normalizer +import torchvision.transforms as transforms +from vision_transformer import VisionTransformer + +hf_token = "YOUR_HUGGING_FACE_ACCESS_TOKEN" +model = VisionTransformer.from_pretrained("LGAI-EXAONE/EXAONEPath", use_auth_token=hf_token) + +transform = transforms.Compose( + [ + transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC), + transforms.CenterCrop(224), + transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + ] +) + +normalizer = macenko_normalizer() +img_path = "images/MHIST_aaa.png" +image = Image.open(img_path).convert("RGB") +image_macenko = normalizer(image) + +sample_input = transform(image_macenko).unsqueeze(0) +model.cuda() +model.eval() + +features = model(sample_input.cuda()) +``` + +#### Load Manually +First, download the EXAONEPath model checkpoint from [here](https://github.com/LG-AI-EXAONE/EXAONEPath/releases/download/1.0.0/EXAONEPath.ckpt) + +```python +import torch +from PIL import Image +from macenko import macenko_normalizer +import torchvision.transforms as transforms +from vision_transformer import vit_base + +file_path = "MODEL_CHECKPOINT_PATH" +checkpoint = torch.load(file_path, map_location=torch.device('cpu')) +state_dict = checkpoint['state_dict'] +model = vit_base(patch_size=16, num_classes=0) +msg = 
model.load_state_dict(state_dict, strict=False) +print(f'Pretrained weights found at {file_path} and loaded with msg: {msg}') + +transform = transforms.Compose( + [ + transforms.Resize(256, interpolation=transforms.InterpolationMode.BICUBIC), + transforms.CenterCrop(224), + transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)), + ] +) + +normalizer = macenko_normalizer() +img_path = "images/MHIST_aaa.png" +image = Image.open(img_path).convert("RGB") +image_macenko = normalizer(image) + +sample_input = transform(image_macenko).unsqueeze(0) +model.cuda() +model.eval() + +features = model(sample_input.cuda()) +``` + +## Model Performance Comparison + +We report linear evaluation result on six downstream tasks. Top-1 accuracy is shown, with values for models other than Gigapath taken from the RudolfV paper. + +| Model | PCAM | MHIST | CRC-100K | TIL Det. | MSI CRC | MSI STAD | Avg | +|--------------------------|-----------|-----------|-----------|-----------|-----------|-----------|-----------| +| ResNet50 ImageNet | 0.833 | 0.806 | 0.849 | 0.915 | 0.653 | 0.664 | 0.787 | +| ViT-L/16 ImageNet | 0.852 | 0.796 | 0.847 | 0.924 | 0.669 | 0.671 | 0.793 | +| Lunit | 0.918 | 0.771 | 0.949 | 0.943 | 0.745 | 0.756 | 0.847 | +| CTransPath | 0.872 | 0.817 | 0.840 | 0.930 | 0.694 | 0.726 | 0.813 | +| Phikon | 0.906 | 0.795 | 0.883 | **0.946** | 0.733 | 0.751 | 0.836 | +| Virchow | 0.933 | **0.834** | 0.968 | - | - | - | - | +| RudolfV | 0.944 | 0.821 | **0.973** | 0.943 | 0.755 | 0.788 | **0.871** | +| GigaPath (patch encoder) | **0.947** | 0.822 | 0.964 | 0.938 | 0.753 | 0.748 | 0.862 | +| EXAONEPath (ours) | 0.901 | 0.818 | 0.946 | 0.939 | **0.756** | **0.804** | 0.861 | + +
+ +
+
+ Model Comparison Param + Model Comparison WSIS +
+
+ Figure 1. Performance comparison of models based on the number of parameters and the number of WSIs used for training. The average Top-1 accuracy represents the mean linear evaluation performance across six downstream tasks. +
+
+ 
+
+## License
+The model is licensed under [EXAONEPath AI Model License Agreement 1.0 - NC](./LICENSE)
+
+## Citation
+If you find EXAONEPath useful, please cite it using this BibTeX:
+```
+@article{yun2024exaonepath,
+    title={EXAONEPath 1.0 Patch-level Foundation Model for Pathology},
+    author={Yun, Juseung and Hu, Yi and Kim, Jinhyung and Jang, Jongseong and Lee, Soonyoung},
+    journal={arXiv preprint arXiv:2408.00380},
+    year={2024}
+}
+```
+
+## Contact
+LG AI Research Technical Support: contact_us1@lgresearch.ai
\ No newline at end of file
diff --git a/hf_models/exaonepath/metadata.json b/hf_models/exaonepath/metadata.json
new file mode 100644
index 00000000..87777e92
--- /dev/null
+++ b/hf_models/exaonepath/metadata.json
@@ -0,0 +1,33 @@
+{
+    "version": "1.0.0",
+    "changelog": {
+        "1.0.0": "initial release of EXAONEPath 1.0"
+    },
+    "monai_version": "1.4.0",
+    "pytorch_version": "2.4.0",
+    "numpy_version": "1.24.4",
+    "required_packages_version": {
+        "torch": "2.4.0",
+        "torchvision": "0.19.0",
+        "torchstain": "1.3.0",
+        "pillow": "10.0.0",
+        "huggingface_hub": "0.24.2",
+        "transformers": "4.43.3"
+    },
+    "supported_apps": {
+        "exaonepath": ""
+    },
+    "name": "EXAONEPath",
+    "task": "Pathology foundation model",
+    "description": "EXAONEPath is a patch-level pathology pretrained model with 86 million parameters, pretrained on 285,153,903 patches extracted from 34,795 WSIs.",
+    "authors": "LG AI Research",
+    "copyright": "LG AI Research",
+    "data_source": "LG AI Research",
+    "data_type": "WSI patches",
+    "image_classes": "RGB pathology image patches",
+    "huggingface_model_id": "LGAI-EXAONE/EXAONEPath",
+    "huggingface_url": "https://huggingface.co/LGAI-EXAONE/EXAONEPath",
+    "intended_use": "Research and clinical support for pathology image analysis",
+    "references": [
+        "Yun, Juseung, et al. 
'EXAONEPath 1.0 Patch-level Foundation Model for Pathology', arXiv preprint arXiv:2408.00380 (2024)."] +} diff --git a/hf_models/llama3-vila-m3-13b/LICENSE b/hf_models/llama3-vila-m3-13b/LICENSE new file mode 100644 index 00000000..3180a112 --- /dev/null +++ b/hf_models/llama3-vila-m3-13b/LICENSE @@ -0,0 +1,33 @@ +NVIDIA One-Way Non-Commercial License Agreement + +By downloading, using, or accessing the model (the "Model"), you are agreeing to the following terms. + +1. Definitions + a. "Model" means the Llama3-VILA-M3-13B AI model, including all associated weights, parameters, and other components. + b. "Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation. + +2. License Grant + Subject to the terms and conditions of this Agreement, NVIDIA hereby grants to you a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only. + +3. Restrictions + a. Commercial Use is not permitted under this license without obtaining a separate commercial license from NVIDIA. + b. You shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use. + c. You shall not rent, lease, lend, sell, redistribute, or sublicense the Model. + +4. Disclaimer of Warranties + THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. + +5. Limitation of Liability + IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL. + +6. Attribution + Any use of the Model shall include appropriate attribution to NVIDIA and MONAI. + +7. 
Termination + This Agreement will terminate automatically if you breach any of its terms. + +8. Governing Law + This Agreement shall be governed by and construed in accordance with the laws of the United States, without regard to its conflict of law provisions. + +9. Entire Agreement + This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-13b/README.md b/hf_models/llama3-vila-m3-13b/README.md new file mode 100644 index 00000000..8dfa559b --- /dev/null +++ b/hf_models/llama3-vila-m3-13b/README.md @@ -0,0 +1,54 @@ +--- +license: other +license_name: nvidia-oneway-noncommercial-license +license_link: LICENSE +tags: +- medical-imaging +- visual-language-model +- multimodal +- vila +- llama3 +--- + +# Llama3-VILA-M3-13B + +> Built with Meta Llama 3 + +## Model Overview + +## Description: +M3 is a medical visual language model that empowers medical imaging professionals, researchers, and healthcare enterprises by enhancing medical imaging workflows across various modalities. + +Key features include: +- Integration with expert models from the MONAI Model Zoo +- Support for multiple imaging modalities +- Enhanced reasoning capabilities with the larger 13B parameter model + +For more details, see our [repo](https://github.com/Project-MONAI/VLM) + +### Core Capabilities +M3 NIM provides a comprehensive suite of 2D medical image analysis tools, including: +1. Segmentation +2. Classification +3. Visual Question Answering (VQA) +4. Report/Findings Generation + +These capabilities are applicable across various medical imaging modalities, leveraging expert models from the MONAI Model Zoo to ensure high-quality results. 
+ +## Model Architecture: +**Architecture Type:** Auto-Regressive Vision Language Model +**Network Architecture:** [VILA](https://github.com/NVlabs/VILA) with Llama +**Parameters:** 13 billion parameters + +## Input: +**Input Type(s):** Text and Image +**Input Format(s):** Text: String, Image +**Input Parameters:** Text: 1D, Image: 2D + +## Output: +**Output Type(s):** Text and Image +**Output Format:** Text: String and Image +**Output Parameters:** Text: 1D, Image: 2D/3D + +## Ethical Considerations +NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-13b/metadata.json b/hf_models/llama3-vila-m3-13b/metadata.json new file mode 100644 index 00000000..ba9de34d --- /dev/null +++ b/hf_models/llama3-vila-m3-13b/metadata.json @@ -0,0 +1,34 @@ +{ + "version": "1.0.0", + "changelog": { + "1.0.0": "initial release of Llama3-VILA-M3-13B" + }, + "monai_version": "1.4.0", + "pytorch_version": "2.4.0", + "numpy_version": "1.24.4", + "required_packages_version": { + "torch": "2.4.0", + "torchvision": "0.19.0", + "pillow": "10.4.0", + "huggingface_hub": "0.24.2", + "transformers": "4.43.3" + }, + "supported_apps": { + "m3-nim": "" + }, + "name": "Llama3-VILA-M3-13B", + "task": "Medical visual language model", + "description": "M3 is a medical visual language model based on VILA and Llama 3 that supports medical image analysis including segmentation, classification, visual question answering, and report generation across multiple imaging modalities.", + "authors": "MONAI team", + "copyright": "NVIDIA", + "data_source": "MONAI", + "data_type": "Medical images", + 
"image_classes": "2D medical images from various modalities", + "huggingface_model_id": "MONAI/Llama3-VILA-M3-13B", + "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-13B", + "intended_use": "Research and clinical support for medical image analysis", + "references": [ + "VILA: https://github.com/NVlabs/VILA", + "Meta Llama 3: https://ai.meta.com/llama/" + ] +} \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-3b/LICENSE b/hf_models/llama3-vila-m3-3b/LICENSE new file mode 100644 index 00000000..6f093683 --- /dev/null +++ b/hf_models/llama3-vila-m3-3b/LICENSE @@ -0,0 +1,33 @@ +NVIDIA One-Way Non-Commercial License Agreement + +By downloading, using, or accessing the model (the "Model"), you are agreeing to the following terms. + +1. Definitions + a. "Model" means the Llama3-VILA-M3-3B AI model, including all associated weights, parameters, and other components. + b. "Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation. + +2. License Grant + Subject to the terms and conditions of this Agreement, NVIDIA hereby grants to you a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only. + +3. Restrictions + a. Commercial Use is not permitted under this license without obtaining a separate commercial license from NVIDIA. + b. You shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use. + c. You shall not rent, lease, lend, sell, redistribute, or sublicense the Model. + +4. Disclaimer of Warranties + THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. + +5. 
Limitation of Liability + IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL. + +6. Attribution + Any use of the Model shall include appropriate attribution to NVIDIA and MONAI. + +7. Termination + This Agreement will terminate automatically if you breach any of its terms. + +8. Governing Law + This Agreement shall be governed by and construed in accordance with the laws of the United States, without regard to its conflict of law provisions. + +9. Entire Agreement + This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-3b/README.md b/hf_models/llama3-vila-m3-3b/README.md new file mode 100644 index 00000000..9d37aade --- /dev/null +++ b/hf_models/llama3-vila-m3-3b/README.md @@ -0,0 +1,54 @@ +--- +license: other +license_name: nvidia-oneway-noncommercial-license +license_link: LICENSE +tags: +- medical-imaging +- visual-language-model +- multimodal +- vila +- llama3 +--- + +# Llama3-VILA-M3-3B + +> Built with Meta Llama 3 + +## Model Overview + +## Description: +M3 is a medical visual language model that empowers medical imaging professionals, researchers, and healthcare enterprises by enhancing medical imaging workflows across various modalities. + +Key features include: +- Integration with expert models from the MONAI Model Zoo +- Support for multiple imaging modalities +- Lightweight 3B parameter model for faster inference and reduced hardware requirements + +For more details, see our [repo](https://github.com/Project-MONAI/VLM) + +### Core Capabilities +M3 NIM provides a comprehensive suite of 2D medical image analysis tools, including: +1. Segmentation +2. Classification +3. Visual Question Answering (VQA) +4. 
Report/Findings Generation + +These capabilities are applicable across various medical imaging modalities, leveraging expert models from the MONAI Model Zoo to ensure high-quality results. + +## Model Architecture: +**Architecture Type:** Auto-Regressive Vision Language Model +**Network Architecture:** [VILA](https://github.com/NVlabs/VILA) with Llama +**Parameters:** 3 billion parameters + +## Input: +**Input Type(s):** Text and Image +**Input Format(s):** Text: String, Image +**Input Parameters:** Text: 1D, Image: 2D + +## Output: +**Output Type(s):** Text and Image +**Output Format:** Text: String and Image +**Output Parameters:** Text: 1D, Image: 2D/3D + +## Ethical Considerations +NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. 
\ No newline at end of file diff --git a/hf_models/llama3-vila-m3-3b/metadata.json b/hf_models/llama3-vila-m3-3b/metadata.json new file mode 100644 index 00000000..d9dce4f9 --- /dev/null +++ b/hf_models/llama3-vila-m3-3b/metadata.json @@ -0,0 +1,34 @@ +{ + "version": "1.0.0", + "changelog": { + "1.0.0": "initial release of Llama3-VILA-M3-3B" + }, + "monai_version": "1.4.0", + "pytorch_version": "2.4.0", + "numpy_version": "1.24.4", + "required_packages_version": { + "torch": "2.4.0", + "torchvision": "0.19.0", + "pillow": "10.4.0", + "huggingface_hub": "0.24.2", + "transformers": "4.43.3" + }, + "supported_apps": { + "m3-nim": "" + }, + "name": "Llama3-VILA-M3-3B", + "task": "Medical visual language model", + "description": "M3 is a medical visual language model based on VILA and Llama 3 that supports medical image analysis including segmentation, classification, visual question answering, and report generation across multiple imaging modalities.", + "authors": "MONAI team", + "copyright": "NVIDIA", + "data_source": "MONAI", + "data_type": "Medical images", + "image_classes": "2D medical images from various modalities", + "huggingface_model_id": "MONAI/Llama3-VILA-M3-3B", + "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-3B", + "intended_use": "Research and clinical support for medical image analysis", + "references": [ + "VILA: https://github.com/NVlabs/VILA", + "Meta Llama 3: https://ai.meta.com/llama/" + ] +} \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-8b/LICENSE b/hf_models/llama3-vila-m3-8b/LICENSE new file mode 100644 index 00000000..bd18d81c --- /dev/null +++ b/hf_models/llama3-vila-m3-8b/LICENSE @@ -0,0 +1,33 @@ +NVIDIA One-Way Non-Commercial License Agreement + +By downloading, using, or accessing the model (the "Model"), you are agreeing to the following terms. + +1. Definitions + a. "Model" means the Llama3-VILA-M3-8B AI model, including all associated weights, parameters, and other components. + b. 
"Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation. + +2. License Grant + Subject to the terms and conditions of this Agreement, NVIDIA hereby grants to you a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only. + +3. Restrictions + a. Commercial Use is not permitted under this license without obtaining a separate commercial license from NVIDIA. + b. You shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use. + c. You shall not rent, lease, lend, sell, redistribute, or sublicense the Model. + +4. Disclaimer of Warranties + THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. + +5. Limitation of Liability + IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL. + +6. Attribution + Any use of the Model shall include appropriate attribution to NVIDIA and MONAI. + +7. Termination + This Agreement will terminate automatically if you breach any of its terms. + +8. Governing Law + This Agreement shall be governed by and construed in accordance with the laws of the United States, without regard to its conflict of law provisions. + +9. Entire Agreement + This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. 
\ No newline at end of file diff --git a/hf_models/llama3-vila-m3-8b/README.md b/hf_models/llama3-vila-m3-8b/README.md new file mode 100644 index 00000000..66ea31da --- /dev/null +++ b/hf_models/llama3-vila-m3-8b/README.md @@ -0,0 +1,52 @@ +--- +license: other +license_name: nvidia-oneway-noncommercial-license +license_link: LICENSE +tags: +- medical-imaging +- visual-language-model +- multimodal +- vila +- llama3 +--- + +# Llama3-VILA-M3-8B + +> Built with Meta Llama 3 + +## Model Overview + +## Description: +M3 is a medical visual language model that empowers medical imaging professionals, researchers, and healthcare enterprises by enhancing medical imaging workflows across various modalities. + +Key features include: +- Integration with expert models from the MONAI Model Zoo +- Support for multiple imaging modalities + +For more details, see our [repo](https://github.com/Project-MONAI/VLM) + +### Core Capabilities +M3 NIM provides a comprehensive suite of 2D medical image analysis tools, including: +1. Segmentation +2. Classification +3. Visual Question Answering (VQA) +4. Report/Findings Generation + +These capabilities are applicable across various medical imaging modalities, leveraging expert models from the MONAI Model Zoo to ensure high-quality results. + +## Model Architecture: +**Architecture Type:** Auto-Regressive Vision Language Model +**Network Architecture:** [VILA](https://github.com/NVlabs/VILA) with Llama + +## Input: +**Input Type(s):** Text and Image +**Input Format(s):** Text: String, Image +**Input Parameters:** Text: 1D, Image: 2D + +## Output: +**Output Type(s):** Text and Image +**Output Format:** Text: String and Image +**Output Parameters:** Text: 1D, Image: 2D/3D + +## Ethical Considerations +NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. 
When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-8b/metadata.json b/hf_models/llama3-vila-m3-8b/metadata.json new file mode 100644 index 00000000..df8131b4 --- /dev/null +++ b/hf_models/llama3-vila-m3-8b/metadata.json @@ -0,0 +1,34 @@ +{ + "version": "1.0.0", + "changelog": { + "1.0.0": "initial release of Llama3-VILA-M3-8B" + }, + "monai_version": "1.4.0", + "pytorch_version": "2.4.0", + "numpy_version": "1.24.4", + "required_packages_version": { + "torch": "2.4.0", + "torchvision": "0.19.0", + "pillow": "10.4.0", + "huggingface_hub": "0.24.2", + "transformers": "4.43.3" + }, + "supported_apps": { + "m3-nim": "" + }, + "name": "Llama3-VILA-M3-8B", + "task": "Medical visual language model", + "description": "M3 is a medical visual language model based on VILA and Llama 3 that supports medical image analysis including segmentation, classification, visual question answering, and report generation across multiple imaging modalities.", + "authors": "MONAI team", + "copyright": "NVIDIA", + "data_source": "MONAI", + "data_type": "Medical images", + "image_classes": "2D medical images from various modalities", + "huggingface_model_id": "MONAI/Llama3-VILA-M3-8B", + "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-8B", + "intended_use": "Research and clinical support for medical image analysis", + "references": [ + "VILA: https://github.com/NVlabs/VILA", + "Meta Llama 3: https://ai.meta.com/llama/" + ] +} \ No newline at end of file From 98e2bdeb00b89c49266441379a163698292230a2 Mon Sep 17 00:00:00 2001 From: Michael Zephyr Date: Tue, 18 Mar 2025 07:56:17 -0700 Subject: [PATCH 02/11] Update naming to be underscores instead of dashes --- hf_models/README.md | 17 +++--- hf_models/{ct-rate => ct_rate}/LICENSE | 
0 hf_models/{ct-rate => ct_rate}/README.md | 8 +-- hf_models/{ct-rate => ct_rate}/metadata.json | 16 +++--- hf_models/llama3-vila-m3-13b/LICENSE | 33 ------------ hf_models/llama3-vila-m3-13b/README.md | 54 -------------------- hf_models/llama3-vila-m3-13b/metadata.json | 34 ------------ hf_models/llama3-vila-m3-3b/LICENSE | 33 ------------ hf_models/llama3-vila-m3-3b/README.md | 54 -------------------- hf_models/llama3-vila-m3-3b/metadata.json | 34 ------------ hf_models/llama3-vila-m3-8b/LICENSE | 33 ------------ hf_models/llama3-vila-m3-8b/README.md | 52 ------------------- hf_models/llama3-vila-m3-8b/metadata.json | 34 ------------ hf_models/llama3_vila_m3_13b/LICENSE | 21 ++++++++ hf_models/llama3_vila_m3_13b/README.md | 16 ++++++ hf_models/llama3_vila_m3_13b/metadata.json | 31 +++++++++++ hf_models/llama3_vila_m3_3b/LICENSE | 21 ++++++++ hf_models/llama3_vila_m3_3b/README.md | 16 ++++++ hf_models/llama3_vila_m3_3b/metadata.json | 31 +++++++++++ hf_models/llama3_vila_m3_8b/LICENSE | 21 ++++++++ hf_models/llama3_vila_m3_8b/README.md | 16 ++++++ hf_models/llama3_vila_m3_8b/metadata.json | 31 +++++++++++ 22 files changed, 223 insertions(+), 383 deletions(-) rename hf_models/{ct-rate => ct_rate}/LICENSE (100%) rename hf_models/{ct-rate => ct_rate}/README.md (93%) rename hf_models/{ct-rate => ct_rate}/metadata.json (83%) delete mode 100644 hf_models/llama3-vila-m3-13b/LICENSE delete mode 100644 hf_models/llama3-vila-m3-13b/README.md delete mode 100644 hf_models/llama3-vila-m3-13b/metadata.json delete mode 100644 hf_models/llama3-vila-m3-3b/LICENSE delete mode 100644 hf_models/llama3-vila-m3-3b/README.md delete mode 100644 hf_models/llama3-vila-m3-3b/metadata.json delete mode 100644 hf_models/llama3-vila-m3-8b/LICENSE delete mode 100644 hf_models/llama3-vila-m3-8b/README.md delete mode 100644 hf_models/llama3-vila-m3-8b/metadata.json create mode 100644 hf_models/llama3_vila_m3_13b/LICENSE create mode 100644 hf_models/llama3_vila_m3_13b/README.md create mode 100644 
hf_models/llama3_vila_m3_13b/metadata.json create mode 100644 hf_models/llama3_vila_m3_3b/LICENSE create mode 100644 hf_models/llama3_vila_m3_3b/README.md create mode 100644 hf_models/llama3_vila_m3_3b/metadata.json create mode 100644 hf_models/llama3_vila_m3_8b/LICENSE create mode 100644 hf_models/llama3_vila_m3_8b/README.md create mode 100644 hf_models/llama3_vila_m3_8b/metadata.json diff --git a/hf_models/README.md b/hf_models/README.md index 32606978..c3461b1e 100644 --- a/hf_models/README.md +++ b/hf_models/README.md @@ -4,12 +4,9 @@ This directory contains models that are hosted on Hugging Face. **Important: The Each model directory contains: -1. `configs/metadata.json` - Model metadata following a similar schema to MONAI Bundles -2. `configs/inference.json` - Configuration that references the HF model but may not be directly executable -3. `configs/logging.conf` - Logging configuration -4. `docs/README.md` - Detailed documentation about the model -5. `large_files.yml` - References the Hugging Face model repository -6. `LICENSE` - Model license +1. `metadata.json` - Model metadata following a similar schema to MONAI Bundles +2. `README.md` - Detailed documentation about the model +3. 
`LICENSE` - Model license ## Using HF Models @@ -28,7 +25,7 @@ export HF_TOKEN=your_huggingface_token | Model | Description | HF Repository | |-------|-------------|--------------| | exaonepath | EXAONEPath is a patch-level pathology pretrained model with 86 million parameters | [LGAI-EXAONE/EXAONEPath](https://huggingface.co/LGAI-EXAONE/EXAONEPath) | -| llama3-vila-m3-3b | Lightweight medical visual language model based on VILA and Llama 3 (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) | -| llama3-vila-m3-8b | Medical visual language model based on VILA and Llama 3 that supports medical image analysis | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) | -| llama3-vila-m3-13b | Enhanced medical visual language model based on VILA and Llama 3 with improved reasoning capabilities (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) | -| ct-rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | [ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) | \ No newline at end of file +| llama3_vila_m3_3b | Lightweight medical vision language model that enhances VLMs with medical expert knowledge (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) | +| llama3_vila_m3_8b | Medical vision language model that utilizes domain-expert models to improve precision in medical imaging tasks (8B parameters) | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) | +| llama3_vila_m3_13b | Enhanced medical vision language model with improved capabilities for various medical imaging tasks (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) | +| ct_rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | 
[ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) | \ No newline at end of file diff --git a/hf_models/ct-rate/LICENSE b/hf_models/ct_rate/LICENSE similarity index 100% rename from hf_models/ct-rate/LICENSE rename to hf_models/ct_rate/LICENSE diff --git a/hf_models/ct-rate/README.md b/hf_models/ct_rate/README.md similarity index 93% rename from hf_models/ct-rate/README.md rename to hf_models/ct_rate/README.md index 9409f3a0..67514971 100644 --- a/hf_models/ct-rate/README.md +++ b/hf_models/ct_rate/README.md @@ -8,15 +8,15 @@ tags: - multimodal --- -# CT-RATE Dataset +# CT_RATE Dataset ## [Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography](https://arxiv.org/abs/2403.17834) -CT-RATE is a pioneering dataset in 3D medical imaging that uniquely pairs textual data with image data focused on chest CT volumes. The dataset comprises chest CT volumes paired with corresponding radiology text reports, multi-abnormality labels, and metadata, all freely accessible to researchers. +CT_RATE is a pioneering dataset in 3D medical imaging that uniquely pairs textual data with image data focused on chest CT volumes. The dataset comprises chest CT volumes paired with corresponding radiology text reports, multi-abnormality labels, and metadata, all freely accessible to researchers. ## Dataset Overview -CT-RATE consists of 25,692 non-contrast chest CT volumes, expanded to 50,188 through various reconstructions, from 21,304 unique patients, along with corresponding radiology text reports, multi-abnormality labels, and metadata. +CT_RATE consists of 25,692 non-contrast chest CT volumes, expanded to 50,188 through various reconstructions, from 21,304 unique patients, along with corresponding radiology text reports, multi-abnormality labels, and metadata. 
The dataset is divided into: - Training set: 20,000 patients @@ -43,7 +43,7 @@ A multimodal AI assistant designed to enhance the interpretation and diagnostic ## Terms and Conditions -Users of the CT-RATE dataset must agree to the [Terms and Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) which specify: +Users of the CT_RATE dataset must agree to the [Terms and Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) which specify: - The dataset is intended solely for academic, research, and educational purposes - Any commercial exploitation is forbidden without permission diff --git a/hf_models/ct-rate/metadata.json b/hf_models/ct_rate/metadata.json similarity index 83% rename from hf_models/ct-rate/metadata.json rename to hf_models/ct_rate/metadata.json index b8cb7dde..941e8aba 100644 --- a/hf_models/ct-rate/metadata.json +++ b/hf_models/ct_rate/metadata.json @@ -1,7 +1,7 @@ { "version": "1.0.0", "changelog": { - "1.0.0": "initial release of CT-RATE dataset" + "1.0.0": "initial release of CT_RATE dataset" }, "monai_version": "1.4.0", "pytorch_version": "2.4.0", @@ -14,19 +14,19 @@ "datasets": "2.18.0" }, "supported_apps": { - "ct-clip": "", - "ct-chat": "" + "ct_clip": "", + "ct_chat": "" }, - "name": "CT-RATE", + "name": "CT_RATE", "task": "3D Chest CT multimodal dataset", - "description": "CT-RATE is a pioneering dataset of 25,692 non-contrast chest CT volumes (expanded to 50,188 through various reconstructions) paired with corresponding radiology text reports, multi-abnormality labels, and metadata from 21,304 unique patients.", + "description": "CT_RATE is a pioneering dataset of 25,692 non-contrast chest CT volumes (expanded to 50,188 through various reconstructions) paired with corresponding radiology text reports, multi-abnormality labels, and metadata from 21,304 unique patients.", "authors": "Ibrahim Ethem Hamamci, Sezgin Er, Furkan Almas, et al.", "copyright": "Ibrahim Ethem Hamamci and collaborators", - "data_source": 
"CT-RATE dataset", + "data_source": "CT_RATE dataset", "data_type": "3D CT volumes and text", "image_classes": "3D chest CT volumes with various abnormalities", - "huggingface_dataset_id": "ibrahimhamamci/CT-RATE", - "huggingface_url": "https://huggingface.co/datasets/ibrahimhamamci/CT-RATE", + "huggingface_dataset_id": "ibrahimhamamci/CT_RATE", + "huggingface_url": "https://huggingface.co/datasets/ibrahimhamamci/CT_RATE", "intended_use": "Research and development of AI models for 3D CT analysis", "references": [ "Hamamci, Ibrahim Ethem, et al. 'Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography.' arXiv preprint arXiv:2403.17834 (2024).", diff --git a/hf_models/llama3-vila-m3-13b/LICENSE b/hf_models/llama3-vila-m3-13b/LICENSE deleted file mode 100644 index 3180a112..00000000 --- a/hf_models/llama3-vila-m3-13b/LICENSE +++ /dev/null @@ -1,33 +0,0 @@ -NVIDIA One-Way Non-Commercial License Agreement - -By downloading, using, or accessing the model (the "Model"), you are agreeing to the following terms. - -1. Definitions - a. "Model" means the Llama3-VILA-M3-13B AI model, including all associated weights, parameters, and other components. - b. "Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation. - -2. License Grant - Subject to the terms and conditions of this Agreement, NVIDIA hereby grants to you a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only. - -3. Restrictions - a. Commercial Use is not permitted under this license without obtaining a separate commercial license from NVIDIA. - b. You shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use. - c. You shall not rent, lease, lend, sell, redistribute, or sublicense the Model. - -4. 
Disclaimer of Warranties - THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. - -5. Limitation of Liability - IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL. - -6. Attribution - Any use of the Model shall include appropriate attribution to NVIDIA and MONAI. - -7. Termination - This Agreement will terminate automatically if you breach any of its terms. - -8. Governing Law - This Agreement shall be governed by and construed in accordance with the laws of the United States, without regard to its conflict of law provisions. - -9. Entire Agreement - This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-13b/README.md b/hf_models/llama3-vila-m3-13b/README.md deleted file mode 100644 index 8dfa559b..00000000 --- a/hf_models/llama3-vila-m3-13b/README.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -license: other -license_name: nvidia-oneway-noncommercial-license -license_link: LICENSE -tags: -- medical-imaging -- visual-language-model -- multimodal -- vila -- llama3 ---- - -# Llama3-VILA-M3-13B - -> Built with Meta Llama 3 - -## Model Overview - -## Description: -M3 is a medical visual language model that empowers medical imaging professionals, researchers, and healthcare enterprises by enhancing medical imaging workflows across various modalities. 
- -Key features include: -- Integration with expert models from the MONAI Model Zoo -- Support for multiple imaging modalities -- Enhanced reasoning capabilities with the larger 13B parameter model - -For more details, see our [repo](https://github.com/Project-MONAI/VLM) - -### Core Capabilities -M3 NIM provides a comprehensive suite of 2D medical image analysis tools, including: -1. Segmentation -2. Classification -3. Visual Question Answering (VQA) -4. Report/Findings Generation - -These capabilities are applicable across various medical imaging modalities, leveraging expert models from the MONAI Model Zoo to ensure high-quality results. - -## Model Architecture: -**Architecture Type:** Auto-Regressive Vision Language Model -**Network Architecture:** [VILA](https://github.com/NVlabs/VILA) with Llama -**Parameters:** 13 billion parameters - -## Input: -**Input Type(s):** Text and Image -**Input Format(s):** Text: String, Image -**Input Parameters:** Text: 1D, Image: 2D - -## Output: -**Output Type(s):** Text and Image -**Output Format:** Text: String and Image -**Output Parameters:** Text: 1D, Image: 2D/3D - -## Ethical Considerations -NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. 
\ No newline at end of file diff --git a/hf_models/llama3-vila-m3-13b/metadata.json b/hf_models/llama3-vila-m3-13b/metadata.json deleted file mode 100644 index ba9de34d..00000000 --- a/hf_models/llama3-vila-m3-13b/metadata.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "version": "1.0.0", - "changelog": { - "1.0.0": "initial release of Llama3-VILA-M3-13B" - }, - "monai_version": "1.4.0", - "pytorch_version": "2.4.0", - "numpy_version": "1.24.4", - "required_packages_version": { - "torch": "2.4.0", - "torchvision": "0.19.0", - "pillow": "10.4.0", - "huggingface_hub": "0.24.2", - "transformers": "4.43.3" - }, - "supported_apps": { - "m3-nim": "" - }, - "name": "Llama3-VILA-M3-13B", - "task": "Medical visual language model", - "description": "M3 is a medical visual language model based on VILA and Llama 3 that supports medical image analysis including segmentation, classification, visual question answering, and report generation across multiple imaging modalities.", - "authors": "MONAI team", - "copyright": "NVIDIA", - "data_source": "MONAI", - "data_type": "Medical images", - "image_classes": "2D medical images from various modalities", - "huggingface_model_id": "MONAI/Llama3-VILA-M3-13B", - "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-13B", - "intended_use": "Research and clinical support for medical image analysis", - "references": [ - "VILA: https://github.com/NVlabs/VILA", - "Meta Llama 3: https://ai.meta.com/llama/" - ] -} \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-3b/LICENSE b/hf_models/llama3-vila-m3-3b/LICENSE deleted file mode 100644 index 6f093683..00000000 --- a/hf_models/llama3-vila-m3-3b/LICENSE +++ /dev/null @@ -1,33 +0,0 @@ -NVIDIA One-Way Non-Commercial License Agreement - -By downloading, using, or accessing the model (the "Model"), you are agreeing to the following terms. - -1. Definitions - a. "Model" means the Llama3-VILA-M3-3B AI model, including all associated weights, parameters, and other components. - b. 
"Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation. - -2. License Grant - Subject to the terms and conditions of this Agreement, NVIDIA hereby grants to you a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only. - -3. Restrictions - a. Commercial Use is not permitted under this license without obtaining a separate commercial license from NVIDIA. - b. You shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use. - c. You shall not rent, lease, lend, sell, redistribute, or sublicense the Model. - -4. Disclaimer of Warranties - THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. - -5. Limitation of Liability - IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL. - -6. Attribution - Any use of the Model shall include appropriate attribution to NVIDIA and MONAI. - -7. Termination - This Agreement will terminate automatically if you breach any of its terms. - -8. Governing Law - This Agreement shall be governed by and construed in accordance with the laws of the United States, without regard to its conflict of law provisions. - -9. Entire Agreement - This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. 
\ No newline at end of file diff --git a/hf_models/llama3-vila-m3-3b/README.md b/hf_models/llama3-vila-m3-3b/README.md deleted file mode 100644 index 9d37aade..00000000 --- a/hf_models/llama3-vila-m3-3b/README.md +++ /dev/null @@ -1,54 +0,0 @@ ---- -license: other -license_name: nvidia-oneway-noncommercial-license -license_link: LICENSE -tags: -- medical-imaging -- visual-language-model -- multimodal -- vila -- llama3 ---- - -# Llama3-VILA-M3-3B - -> Built with Meta Llama 3 - -## Model Overview - -## Description: -M3 is a medical visual language model that empowers medical imaging professionals, researchers, and healthcare enterprises by enhancing medical imaging workflows across various modalities. - -Key features include: -- Integration with expert models from the MONAI Model Zoo -- Support for multiple imaging modalities -- Lightweight 3B parameter model for faster inference and reduced hardware requirements - -For more details, see our [repo](https://github.com/Project-MONAI/VLM) - -### Core Capabilities -M3 NIM provides a comprehensive suite of 2D medical image analysis tools, including: -1. Segmentation -2. Classification -3. Visual Question Answering (VQA) -4. Report/Findings Generation - -These capabilities are applicable across various medical imaging modalities, leveraging expert models from the MONAI Model Zoo to ensure high-quality results. 
- -## Model Architecture: -**Architecture Type:** Auto-Regressive Vision Language Model -**Network Architecture:** [VILA](https://github.com/NVlabs/VILA) with Llama -**Parameters:** 3 billion parameters - -## Input: -**Input Type(s):** Text and Image -**Input Format(s):** Text: String, Image -**Input Parameters:** Text: 1D, Image: 2D - -## Output: -**Output Type(s):** Text and Image -**Output Format:** Text: String and Image -**Output Parameters:** Text: 1D, Image: 2D/3D - -## Ethical Considerations -NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-3b/metadata.json b/hf_models/llama3-vila-m3-3b/metadata.json deleted file mode 100644 index d9dce4f9..00000000 --- a/hf_models/llama3-vila-m3-3b/metadata.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "version": "1.0.0", - "changelog": { - "1.0.0": "initial release of Llama3-VILA-M3-3B" - }, - "monai_version": "1.4.0", - "pytorch_version": "2.4.0", - "numpy_version": "1.24.4", - "required_packages_version": { - "torch": "2.4.0", - "torchvision": "0.19.0", - "pillow": "10.4.0", - "huggingface_hub": "0.24.2", - "transformers": "4.43.3" - }, - "supported_apps": { - "m3-nim": "" - }, - "name": "Llama3-VILA-M3-3B", - "task": "Medical visual language model", - "description": "M3 is a medical visual language model based on VILA and Llama 3 that supports medical image analysis including segmentation, classification, visual question answering, and report generation across multiple imaging modalities.", - "authors": "MONAI team", - "copyright": "NVIDIA", - "data_source": "MONAI", - "data_type": "Medical images", - 
"image_classes": "2D medical images from various modalities", - "huggingface_model_id": "MONAI/Llama3-VILA-M3-3B", - "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-3B", - "intended_use": "Research and clinical support for medical image analysis", - "references": [ - "VILA: https://github.com/NVlabs/VILA", - "Meta Llama 3: https://ai.meta.com/llama/" - ] -} \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-8b/LICENSE b/hf_models/llama3-vila-m3-8b/LICENSE deleted file mode 100644 index bd18d81c..00000000 --- a/hf_models/llama3-vila-m3-8b/LICENSE +++ /dev/null @@ -1,33 +0,0 @@ -NVIDIA One-Way Non-Commercial License Agreement - -By downloading, using, or accessing the model (the "Model"), you are agreeing to the following terms. - -1. Definitions - a. "Model" means the Llama3-VILA-M3-8B AI model, including all associated weights, parameters, and other components. - b. "Commercial Use" means any use of the Model primarily intended for or directed toward commercial advantage or monetary compensation. - -2. License Grant - Subject to the terms and conditions of this Agreement, NVIDIA hereby grants to you a worldwide, non-exclusive, non-transferable, non-sublicensable, royalty-free license to use, reproduce, and create derivative works of the Model for non-commercial purposes only. - -3. Restrictions - a. Commercial Use is not permitted under this license without obtaining a separate commercial license from NVIDIA. - b. You shall not use the Model in connection with any illegal, harmful, fraudulent, infringing, or offensive use. - c. You shall not rent, lease, lend, sell, redistribute, or sublicense the Model. - -4. Disclaimer of Warranties - THE MODEL IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NONINFRINGEMENT. - -5. 
Limitation of Liability - IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT, OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION WITH THE MODEL OR THE USE OR OTHER DEALINGS IN THE MODEL. - -6. Attribution - Any use of the Model shall include appropriate attribution to NVIDIA and MONAI. - -7. Termination - This Agreement will terminate automatically if you breach any of its terms. - -8. Governing Law - This Agreement shall be governed by and construed in accordance with the laws of the United States, without regard to its conflict of law provisions. - -9. Entire Agreement - This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. \ No newline at end of file diff --git a/hf_models/llama3-vila-m3-8b/README.md b/hf_models/llama3-vila-m3-8b/README.md deleted file mode 100644 index 66ea31da..00000000 --- a/hf_models/llama3-vila-m3-8b/README.md +++ /dev/null @@ -1,52 +0,0 @@ ---- -license: other -license_name: nvidia-oneway-noncommercial-license -license_link: LICENSE -tags: -- medical-imaging -- visual-language-model -- multimodal -- vila -- llama3 ---- - -# Llama3-VILA-M3-8B - -> Built with Meta Llama 3 - -## Model Overview - -## Description: -M3 is a medical visual language model that empowers medical imaging professionals, researchers, and healthcare enterprises by enhancing medical imaging workflows across various modalities. - -Key features include: -- Integration with expert models from the MONAI Model Zoo -- Support for multiple imaging modalities - -For more details, see our [repo](https://github.com/Project-MONAI/VLM) - -### Core Capabilities -M3 NIM provides a comprehensive suite of 2D medical image analysis tools, including: -1. Segmentation -2. Classification -3. Visual Question Answering (VQA) -4. 
Report/Findings Generation - -These capabilities are applicable across various medical imaging modalities, leveraging expert models from the MONAI Model Zoo to ensure high-quality results. - -## Model Architecture: -**Architecture Type:** Auto-Regressive Vision Language Model -**Network Architecture:** [VILA](https://github.com/NVlabs/VILA) with Llama - -## Input: -**Input Type(s):** Text and Image -**Input Format(s):** Text: String, Image -**Input Parameters:** Text: 1D, Image: 2D - -## Output: -**Output Type(s):** Text and Image -**Output Format:** Text: String and Image -**Output Parameters:** Text: 1D, Image: 2D/3D - -## Ethical Considerations -NVIDIA believes Trustworthy AI is a shared responsibility and we have established policies and practices to enable development for a wide array of AI applications. When downloaded or used in accordance with our terms of service, developers should work with their internal model team to ensure this model meets requirements for the relevant industry and use case and addresses unforeseen product misuse. 
\ No newline at end of file diff --git a/hf_models/llama3-vila-m3-8b/metadata.json b/hf_models/llama3-vila-m3-8b/metadata.json deleted file mode 100644 index df8131b4..00000000 --- a/hf_models/llama3-vila-m3-8b/metadata.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "version": "1.0.0", - "changelog": { - "1.0.0": "initial release of Llama3-VILA-M3-8B" - }, - "monai_version": "1.4.0", - "pytorch_version": "2.4.0", - "numpy_version": "1.24.4", - "required_packages_version": { - "torch": "2.4.0", - "torchvision": "0.19.0", - "pillow": "10.4.0", - "huggingface_hub": "0.24.2", - "transformers": "4.43.3" - }, - "supported_apps": { - "m3-nim": "" - }, - "name": "Llama3-VILA-M3-8B", - "task": "Medical visual language model", - "description": "M3 is a medical visual language model based on VILA and Llama 3 that supports medical image analysis including segmentation, classification, visual question answering, and report generation across multiple imaging modalities.", - "authors": "MONAI team", - "copyright": "NVIDIA", - "data_source": "MONAI", - "data_type": "Medical images", - "image_classes": "2D medical images from various modalities", - "huggingface_model_id": "MONAI/Llama3-VILA-M3-8B", - "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-8B", - "intended_use": "Research and clinical support for medical image analysis", - "references": [ - "VILA: https://github.com/NVlabs/VILA", - "Meta Llama 3: https://ai.meta.com/llama/" - ] -} \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_13b/LICENSE b/hf_models/llama3_vila_m3_13b/LICENSE new file mode 100644 index 00000000..586ce9f0 --- /dev/null +++ b/hf_models/llama3_vila_m3_13b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 NVIDIA Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_13b/README.md b/hf_models/llama3_vila_m3_13b/README.md new file mode 100644 index 00000000..93b55ce5 --- /dev/null +++ b/hf_models/llama3_vila_m3_13b/README.md @@ -0,0 +1,16 @@ +# VILA_M3_13B + +VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks. 
+ +This model is available at: [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) + +## Citation + +``` +@article{nath2025vila, + title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge}, + author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang}, + journal={arXiv preprint arXiv:2411.12915}, + year={2025} +} +``` \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_13b/metadata.json b/hf_models/llama3_vila_m3_13b/metadata.json new file mode 100644 index 00000000..d62ad07c --- /dev/null +++ b/hf_models/llama3_vila_m3_13b/metadata.json @@ -0,0 +1,31 @@ +{ + "version": "1.0.0", + "changelog": { + "1.0.0": "initial release of VILA_M3_13B model" + }, + "monai_version": "1.4.0", + "pytorch_version": "2.4.0", + "numpy_version": "1.24.4", + "required_packages_version": { + "torch": "2.4.0", + "huggingface_hub": "0.24.2", + "transformers": "4.43.3" + }, + "supported_apps": { + "vila_m3": "" + }, + "name": "VILA_M3_13B", + "task": "Medical vision-language model", + "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.", + "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. 
from NVIDIA, SingHealth, and NIH", + "copyright": "NVIDIA", + "data_source": "NVIDIA", + "data_type": "Medical images and text", + "image_classes": "Various medical imaging modalities", + "huggingface_model_id": "MONAI/Llama3-VILA-M3-13B", + "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-13B", + "intended_use": "Research in medical vision-language tasks", + "references": [ + "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." + ] +} \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_3b/LICENSE b/hf_models/llama3_vila_m3_3b/LICENSE new file mode 100644 index 00000000..586ce9f0 --- /dev/null +++ b/hf_models/llama3_vila_m3_3b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 NVIDIA Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/hf_models/llama3_vila_m3_3b/README.md b/hf_models/llama3_vila_m3_3b/README.md new file mode 100644 index 00000000..5a0072a5 --- /dev/null +++ b/hf_models/llama3_vila_m3_3b/README.md @@ -0,0 +1,16 @@ +# VILA_M3_3B + +VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks. + +This model is available at: [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) + +## Citation + +``` +@article{nath2025vila, + title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge}, + author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang}, + journal={arXiv preprint arXiv:2411.12915}, + year={2025} +} +``` \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_3b/metadata.json b/hf_models/llama3_vila_m3_3b/metadata.json new file mode 100644 index 00000000..dd84131d --- /dev/null +++ b/hf_models/llama3_vila_m3_3b/metadata.json @@ -0,0 +1,31 @@ +{ + "version": "1.0.0", + "changelog": { + "1.0.0": "initial release of VILA_M3_3B model" + }, + "monai_version": "1.4.0", + "pytorch_version": "2.4.0", + "numpy_version": "1.24.4", + "required_packages_version": { + "torch": "2.4.0", + "huggingface_hub": "0.24.2", + "transformers": "4.43.3" + }, + "supported_apps": { + "vila_m3": "" + }, + "name": "VILA_M3_3B", + "task": "Medical vision-language model", + "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in 
medical imaging tasks.", + "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH", + "copyright": "NVIDIA", + "data_source": "NVIDIA", + "data_type": "Medical images and text", + "image_classes": "Various medical imaging modalities", + "huggingface_model_id": "MONAI/Llama3-VILA-M3-3B", + "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-3B", + "intended_use": "Research in medical vision-language tasks", + "references": [ + "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." + ] +} \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_8b/LICENSE b/hf_models/llama3_vila_m3_8b/LICENSE new file mode 100644 index 00000000..586ce9f0 --- /dev/null +++ b/hf_models/llama3_vila_m3_8b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 NVIDIA Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/hf_models/llama3_vila_m3_8b/README.md b/hf_models/llama3_vila_m3_8b/README.md new file mode 100644 index 00000000..42ff902b --- /dev/null +++ b/hf_models/llama3_vila_m3_8b/README.md @@ -0,0 +1,16 @@ +# VILA_M3_8B + +VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks. + +This model is available at: [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) + +## Citation + +``` +@article{nath2025vila, + title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge}, + author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang}, + journal={arXiv preprint arXiv:2411.12915}, + year={2025} +} +``` \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_8b/metadata.json b/hf_models/llama3_vila_m3_8b/metadata.json new file mode 100644 index 00000000..f6bcdb63 --- /dev/null +++ b/hf_models/llama3_vila_m3_8b/metadata.json @@ -0,0 +1,31 @@ +{ + "version": "1.0.0", + "changelog": { + "1.0.0": "initial release of VILA_M3_8B model" + }, + "monai_version": "1.4.0", + "pytorch_version": "2.4.0", + "numpy_version": "1.24.4", + "required_packages_version": { + "torch": "2.4.0", + "huggingface_hub": "0.24.2", + "transformers": "4.43.3" + }, + "supported_apps": { + "vila_m3": "" + }, + "name": "VILA_M3_8B", + "task": "Medical vision-language model", + "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in 
medical imaging tasks.", + "authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH", + "copyright": "NVIDIA", + "data_source": "NVIDIA", + "data_type": "Medical images and text", + "image_classes": "Various medical imaging modalities", + "huggingface_model_id": "MONAI/Llama3-VILA-M3-8B", + "huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-8B", + "intended_use": "Research in medical vision-language tasks", + "references": [ + "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." + ] +} \ No newline at end of file From 36a15b76b897c1e6df5831728c304ff0e701a30e Mon Sep 17 00:00:00 2001 From: Michael Zephyr Date: Tue, 18 Mar 2025 07:58:50 -0700 Subject: [PATCH 03/11] Remove HF Authentication from README --- hf_models/README.md | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/hf_models/README.md b/hf_models/README.md index c3461b1e..d3eab4fd 100644 --- a/hf_models/README.md +++ b/hf_models/README.md @@ -12,14 +12,6 @@ Each model directory contains: These models must be accessed directly from Hugging Face using the `huggingface_hub` and `transformers` libraries. For complete usage instructions and examples, please visit the corresponding Hugging Face model repository linked below. -### Authentication - -Some models may require authentication with a Hugging Face token. 
You can set your token as an environment variable: - -```bash -export HF_TOKEN=your_huggingface_token -``` - ### Available Models | Model | Description | HF Repository | @@ -28,4 +20,4 @@ export HF_TOKEN=your_huggingface_token | llama3_vila_m3_3b | Lightweight medical vision language model that enhances VLMs with medical expert knowledge (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) | | llama3_vila_m3_8b | Medical vision language model that utilizes domain-expert models to improve precision in medical imaging tasks (8B parameters) | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) | | llama3_vila_m3_13b | Enhanced medical vision language model with improved capabilities for various medical imaging tasks (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) | -| ct_rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | [ibrahimhamamci/CT_RATE](https://huggingface.co/datasets/ibrahimhamamci/CT_RATE) | \ No newline at end of file +| ct_rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | [ibrahimhamamci/CT_RATE](https://huggingface.co/datasets/ibrahimhamamci/CT_RATE) | From fe8b67a76d054a630778d56f69c83d67f4e1485d Mon Sep 17 00:00:00 2001 From: Michael Zephyr Date: Tue, 18 Mar 2025 09:11:02 -0700 Subject: [PATCH 04/11] Fix CT-RATE Links to not use underscore --- hf_models/ct_rate/README.md | 2 +- hf_models/ct_rate/metadata.json | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hf_models/ct_rate/README.md b/hf_models/ct_rate/README.md index 67514971..29915e14 100644 --- a/hf_models/ct_rate/README.md +++ b/hf_models/ct_rate/README.md @@ -43,7 +43,7 @@ A multimodal AI assistant designed to enhance the interpretation and diagnostic ## Terms and Conditions -Users of the CT_RATE dataset must agree to the [Terms and 
Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT_RATE) which specify: +Users of the CT_RATE dataset must agree to the [Terms and Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) which specify: - The dataset is intended solely for academic, research, and educational purposes - Any commercial exploitation is forbidden without permission diff --git a/hf_models/ct_rate/metadata.json b/hf_models/ct_rate/metadata.json index 941e8aba..c096821f 100644 --- a/hf_models/ct_rate/metadata.json +++ b/hf_models/ct_rate/metadata.json @@ -25,8 +25,8 @@ "data_source": "CT_RATE dataset", "data_type": "3D CT volumes and text", "image_classes": "3D chest CT volumes with various abnormalities", - "huggingface_dataset_id": "ibrahimhamamci/CT_RATE", - "huggingface_url": "https://huggingface.co/datasets/ibrahimhamamci/CT_RATE", + "huggingface_dataset_id": "ibrahimhamamci/CT-RATE", + "huggingface_url": "https://huggingface.co/datasets/ibrahimhamamci/CT-RATE", "intended_use": "Research and development of AI models for 3D CT analysis", "references": [ "Hamamci, Ibrahim Ethem, et al. 'Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography.' 
arXiv preprint arXiv:2403.17834 (2024).", From 5a3a455d4c62c74b6abd5d5642cffc939269b012 Mon Sep 17 00:00:00 2001 From: Michael Zephyr Date: Tue, 18 Mar 2025 09:13:02 -0700 Subject: [PATCH 05/11] Fix link issue in HF Model Readme --- hf_models/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hf_models/README.md b/hf_models/README.md index d3eab4fd..51054998 100644 --- a/hf_models/README.md +++ b/hf_models/README.md @@ -20,4 +20,4 @@ These models must be accessed directly from Hugging Face using the `huggingface_ | llama3_vila_m3_3b | Lightweight medical vision language model that enhances VLMs with medical expert knowledge (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) | | llama3_vila_m3_8b | Medical vision language model that utilizes domain-expert models to improve precision in medical imaging tasks (8B parameters) | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) | | llama3_vila_m3_13b | Enhanced medical vision language model with improved capabilities for various medical imaging tasks (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) | -| ct_rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | [ibrahimhamamci/CT_RATE](https://huggingface.co/datasets/ibrahimhamamci/CT_RATE) | +| ct_rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | [ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) | From b6f9672a7333db1858fdb726b9b6b09ae5423bc1 Mon Sep 17 00:00:00 2001 From: Michael Zephyr Date: Tue, 18 Mar 2025 09:27:41 -0700 Subject: [PATCH 06/11] Fix metadata.json for VILA-M3 and updated from CT-RATE to CT-CHAT model. 
--- hf_models/README.md | 2 +- hf_models/{ct_rate => ct_chat}/LICENSE | 0 hf_models/ct_chat/README.md | 88 +++++++++++++++++++ hf_models/{ct_rate => ct_chat}/metadata.json | 14 +-- hf_models/ct_rate/README.md | 92 -------------------- hf_models/llama3_vila_m3_13b/metadata.json | 3 - hf_models/llama3_vila_m3_3b/metadata.json | 3 - hf_models/llama3_vila_m3_8b/metadata.json | 3 - 8 files changed, 96 insertions(+), 109 deletions(-) rename hf_models/{ct_rate => ct_chat}/LICENSE (100%) create mode 100644 hf_models/ct_chat/README.md rename hf_models/{ct_rate => ct_chat}/metadata.json (58%) delete mode 100644 hf_models/ct_rate/README.md diff --git a/hf_models/README.md b/hf_models/README.md index 51054998..9d36fa47 100644 --- a/hf_models/README.md +++ b/hf_models/README.md @@ -20,4 +20,4 @@ These models must be accessed directly from Hugging Face using the `huggingface_ | llama3_vila_m3_3b | Lightweight medical vision language model that enhances VLMs with medical expert knowledge (3B parameters) | [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MONAI/Llama3-VILA-M3-3B) | | llama3_vila_m3_8b | Medical vision language model that utilizes domain-expert models to improve precision in medical imaging tasks (8B parameters) | [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MONAI/Llama3-VILA-M3-8B) | | llama3_vila_m3_13b | Enhanced medical vision language model with improved capabilities for various medical imaging tasks (13B parameters) | [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MONAI/Llama3-VILA-M3-13B) | -| ct_rate | Pioneering dataset of chest CT volumes paired with radiology reports, multi-abnormality labels, and metadata | [ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) | +| ct_chat | Vision-language foundational chat model for 3D chest CT volumes | [ibrahimhamamci/CT-RATE](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) | diff --git a/hf_models/ct_rate/LICENSE b/hf_models/ct_chat/LICENSE similarity index 100% rename from 
hf_models/ct_rate/LICENSE rename to hf_models/ct_chat/LICENSE diff --git a/hf_models/ct_chat/README.md b/hf_models/ct_chat/README.md new file mode 100644 index 00000000..d85ad889 --- /dev/null +++ b/hf_models/ct_chat/README.md @@ -0,0 +1,88 @@ +--- +license: cc-by-nc-sa-4.0 +tags: +- computed-tomography +- chest-ct +- medical-imaging +- vision-language-model +- multimodal +- medical-assistant +--- + +# CT-CHAT Model + +## [Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography](https://arxiv.org/abs/2403.17834) + +## Model Overview + +CT-CHAT is a vision-language foundational chat model for 3D chest CT volumes. Leveraging the VQA dataset derived from CT-RATE and pretrained 3D vision encoder from CT-CLIP, we developed this multimodal AI assistant specifically designed to enhance the interpretation and diagnostic capabilities of 3D chest CT imaging. + +Building on the strong foundation of CT-CLIP, CT-CHAT integrates both visual and language processing to handle diverse tasks including: +- Visual question answering +- Radiology report generation +- Multiple-choice diagnostic questions + +Trained on over 2.7 million question-answer pairs from the CT-RATE dataset, CT-CHAT leverages 3D spatial information, making it superior to 2D-based models. The model not only improves radiologist workflows by reducing interpretation time but also delivers highly accurate and clinically relevant responses, pushing the boundaries of 3D medical imaging analysis. + +## Technical Foundation + +CT-CHAT builds upon two key technological innovations: + +### CT-CLIP +A CT-focused contrastive language-image pre-training framework that serves as the visual encoder for CT-CHAT. As a versatile, self-supervised model, CT-CLIP is designed for broad application and outperforms state-of-the-art, fully supervised methods in multi-abnormality detection. 
+ +### CT-RATE Dataset +A pioneering dataset of 25,692 non-contrast chest CT volumes (expanded to 50,188 through various reconstructions) paired with corresponding radiology text reports, multi-abnormality labels, and metadata from 21,304 unique patients. + +## Model Capabilities + +1. **Visual Question Answering**: Answer free-form questions about 3D CT volumes +2. **Report Generation**: Create comprehensive radiology reports from CT scans +3. **Diagnostic Support**: Assist with differential diagnoses and abnormality detection +4. **Educational Use**: Train medical students and residents on CT interpretation + +## Terms and Conditions + +Users of the CT-CHAT model must agree to the [Terms and Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) which specify: + +- The model is intended solely for academic, research, and educational purposes +- Any commercial exploitation is forbidden without permission +- Users must maintain data confidentiality and comply with data protection laws +- Proper attribution is required in any publications resulting from model use +- Redistribution of the model is not allowed + +## Citation + +When using this model, please consider citing the following related papers: + +```bibtex +@misc{hamamci2024foundation, + title={Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography}, + author={Ibrahim Ethem Hamamci and Sezgin Er and Furkan Almas and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Irem Dogan and Muhammed Furkan Dasdelen and Omer Faruk Durugol and Bastian Wittmann and Tamaz Amiranashvili and Enis Simsar and Mehmet Simsar and Emine Bensu Erdemir and Abdullah Alanbay and Anjany Sekuboyina and Berkan Lafci and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, + year={2024}, + eprint={2403.17834}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2403.17834}, +} + +@misc{hamamci2024generatect, + title={GenerateCT: Text-Conditional 
Generation of 3D Chest CT Volumes}, + author={Ibrahim Ethem Hamamci and Sezgin Er and Anjany Sekuboyina and Enis Simsar and Alperen Tezcan and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Furkan Almas and Irem Dogan and Muhammed Furkan Dasdelen and Chinmay Prabhakar and Hadrien Reynaud and Sarthak Pati and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, + year={2024}, + eprint={2305.16037}, + archivePrefix={arXiv}, + primaryClass={cs.CV}, + url={https://arxiv.org/abs/2305.16037}, +} + +@misc{hamamci2024ct2rep, + title={CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging}, + author={Ibrahim Ethem Hamamci and Sezgin Er and Bjoern Menze}, + year={2024}, + eprint={2403.06801}, + archivePrefix={arXiv}, + primaryClass={eess.IV}, + url={https://arxiv.org/abs/2403.06801}, +} +``` \ No newline at end of file diff --git a/hf_models/ct_rate/metadata.json b/hf_models/ct_chat/metadata.json similarity index 58% rename from hf_models/ct_rate/metadata.json rename to hf_models/ct_chat/metadata.json index c096821f..71c9fc64 100644 --- a/hf_models/ct_rate/metadata.json +++ b/hf_models/ct_chat/metadata.json @@ -1,7 +1,7 @@ { "version": "1.0.0", "changelog": { - "1.0.0": "initial release of CT_RATE dataset" + "1.0.0": "initial release of CT_CHAT model" }, "monai_version": "1.4.0", "pytorch_version": "2.4.0", @@ -17,17 +17,17 @@ "ct_clip": "", "ct_chat": "" }, - "name": "CT_RATE", - "task": "3D Chest CT multimodal dataset", - "description": "CT_RATE is a pioneering dataset of 25,692 non-contrast chest CT volumes (expanded to 50,188 through various reconstructions) paired with corresponding radiology text reports, multi-abnormality labels, and metadata from 21,304 unique patients.", + "name": "CT_CHAT", + "task": "Vision-language foundational chat model for 3D chest CT volumes", + "description": "CT-CHAT is a multimodal AI assistant designed to enhance the interpretation and diagnostic capabilities of 3D chest CT imaging. 
Building on the strong foundation of CT-CLIP, it integrates both visual and language processing to handle diverse tasks like visual question answering, report generation, and multiple-choice questions. Trained on over 2.7 million question-answer pairs from CT-RATE, it leverages 3D spatial information, making it superior to 2D-based models.", "authors": "Ibrahim Ethem Hamamci, Sezgin Er, Furkan Almas, et al.", "copyright": "Ibrahim Ethem Hamamci and collaborators", - "data_source": "CT_RATE dataset", + "data_source": "CT-RATE dataset", "data_type": "3D CT volumes and text", - "image_classes": "3D chest CT volumes with various abnormalities", + "image_classes": "3D chest CT volumes with radiology reports and Q&A", "huggingface_dataset_id": "ibrahimhamamci/CT-RATE", "huggingface_url": "https://huggingface.co/datasets/ibrahimhamamci/CT-RATE", - "intended_use": "Research and development of AI models for 3D CT analysis", + "intended_use": "Research on multimodal medical AI assistants for radiology interpretation and diagnosis", "references": [ "Hamamci, Ibrahim Ethem, et al. 'Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography.' arXiv preprint arXiv:2403.17834 (2024).", "Hamamci, Ibrahim Ethem, et al. 'GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes.' arXiv preprint arXiv:2305.16037 (2024).", diff --git a/hf_models/ct_rate/README.md b/hf_models/ct_rate/README.md deleted file mode 100644 index 29915e14..00000000 --- a/hf_models/ct_rate/README.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -license: cc-by-nc-sa-4.0 -tags: -- computed-tomography -- chest-ct -- medical-imaging -- dataset -- multimodal ---- - -# CT_RATE Dataset - -## [Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography](https://arxiv.org/abs/2403.17834) - -CT_RATE is a pioneering dataset in 3D medical imaging that uniquely pairs textual data with image data focused on chest CT volumes. 
The dataset comprises chest CT volumes paired with corresponding radiology text reports, multi-abnormality labels, and metadata, all freely accessible to researchers. - -## Dataset Overview - -CT_RATE consists of 25,692 non-contrast chest CT volumes, expanded to 50,188 through various reconstructions, from 21,304 unique patients, along with corresponding radiology text reports, multi-abnormality labels, and metadata. - -The dataset is divided into: -- Training set: 20,000 patients -- Validation set: 1,304 patients - -File naming convention: `split_patientID_scanID_reconstructionID` -For example, "valid_53_a_1" indicates a CT volume from the validation set, scan "a" from patient 53, and reconstruction 1 of scan "a". - -## Applications - -This dataset has been used to develop several groundbreaking models: - -### CT-CLIP -A CT-focused contrastive language-image pre-training framework. As a versatile, self-supervised model, CT-CLIP is designed for broad application and does not require task-specific training. CT-CLIP outperforms state-of-the-art, fully supervised methods in multi-abnormality detection across all key metrics. - -### CT-CHAT -A multimodal AI assistant designed to enhance the interpretation and diagnostic capabilities of 3D chest CT imaging. Building on CT-CLIP, it integrates both visual and language processing to handle diverse tasks like visual question answering, report generation, and multiple-choice questions. - -## Dataset Configurations - -1. **Labels**: Multi-abnormality labels for the CT volumes -2. **Reports**: Corresponding radiology text reports -3. 
**Metadata**: Additional metadata for each CT volume - -## Terms and Conditions - -Users of the CT_RATE dataset must agree to the [Terms and Conditions](https://huggingface.co/datasets/ibrahimhamamci/CT-RATE) which specify: - -- The dataset is intended solely for academic, research, and educational purposes -- Any commercial exploitation is forbidden without permission -- Users must maintain data confidentiality and comply with data protection laws -- Proper attribution is required in any publications resulting from dataset use -- Redistribution of the dataset is not allowed - -## Ethical Approval - -Ethical approval documentation is available for researchers who require it for grant applications. - -## Citation - -When using this dataset, please consider citing the following related papers: - -```bibtex -@misc{hamamci2024foundation, - title={Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography}, - author={Ibrahim Ethem Hamamci and Sezgin Er and Furkan Almas and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Irem Dogan and Muhammed Furkan Dasdelen and Omer Faruk Durugol and Bastian Wittmann and Tamaz Amiranashvili and Enis Simsar and Mehmet Simsar and Emine Bensu Erdemir and Abdullah Alanbay and Anjany Sekuboyina and Berkan Lafci and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, - year={2024}, - eprint={2403.17834}, - archivePrefix={arXiv}, - primaryClass={cs.CV}, - url={https://arxiv.org/abs/2403.17834}, -} - -@misc{hamamci2024generatect, - title={GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes}, - author={Ibrahim Ethem Hamamci and Sezgin Er and Anjany Sekuboyina and Enis Simsar and Alperen Tezcan and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Furkan Almas and Irem Dogan and Muhammed Furkan Dasdelen and Chinmay Prabhakar and Hadrien Reynaud and Sarthak Pati and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, - year={2024}, - eprint={2305.16037}, - archivePrefix={arXiv}, 
- primaryClass={cs.CV}, - url={https://arxiv.org/abs/2305.16037}, -} - -@misc{hamamci2024ct2rep, - title={CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging}, - author={Ibrahim Ethem Hamamci and Sezgin Er and Bjoern Menze}, - year={2024}, - eprint={2403.06801}, - archivePrefix={arXiv}, - primaryClass={eess.IV}, - url={https://arxiv.org/abs/2403.06801}, -} -``` \ No newline at end of file diff --git a/hf_models/llama3_vila_m3_13b/metadata.json b/hf_models/llama3_vila_m3_13b/metadata.json index d62ad07c..39a66d6c 100644 --- a/hf_models/llama3_vila_m3_13b/metadata.json +++ b/hf_models/llama3_vila_m3_13b/metadata.json @@ -11,9 +11,6 @@ "huggingface_hub": "0.24.2", "transformers": "4.43.3" }, - "supported_apps": { - "vila_m3": "" - }, "name": "VILA_M3_13B", "task": "Medical vision-language model", "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.", diff --git a/hf_models/llama3_vila_m3_3b/metadata.json b/hf_models/llama3_vila_m3_3b/metadata.json index dd84131d..037e7ba2 100644 --- a/hf_models/llama3_vila_m3_3b/metadata.json +++ b/hf_models/llama3_vila_m3_3b/metadata.json @@ -11,9 +11,6 @@ "huggingface_hub": "0.24.2", "transformers": "4.43.3" }, - "supported_apps": { - "vila_m3": "" - }, "name": "VILA_M3_3B", "task": "Medical vision-language model", "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.", diff --git a/hf_models/llama3_vila_m3_8b/metadata.json b/hf_models/llama3_vila_m3_8b/metadata.json index f6bcdb63..b80c3c2d 100644 --- a/hf_models/llama3_vila_m3_8b/metadata.json +++ b/hf_models/llama3_vila_m3_8b/metadata.json @@ -11,9 +11,6 @@ "huggingface_hub": "0.24.2", "transformers": "4.43.3" }, - "supported_apps": { - "vila_m3": "" - }, "name": "VILA_M3_8B", "task": "Medical 
vision-language model", "description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.", From 4cb5b5f08e5aedb158adb27ef0286d09fa05af32 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 18 Mar 2025 16:28:45 +0000 Subject: [PATCH 07/11] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- hf_models/ct_chat/LICENSE | 2 +- hf_models/ct_chat/README.md | 14 +++++++------- hf_models/ct_chat/metadata.json | 2 +- hf_models/exaonepath/LICENSE | 2 +- hf_models/exaonepath/README.md | 6 +++--- hf_models/exaonepath/metadata.json | 3 ++- hf_models/llama3_vila_m3_13b/LICENSE | 2 +- hf_models/llama3_vila_m3_13b/README.md | 2 +- hf_models/llama3_vila_m3_13b/metadata.json | 2 +- hf_models/llama3_vila_m3_3b/LICENSE | 2 +- hf_models/llama3_vila_m3_3b/README.md | 2 +- hf_models/llama3_vila_m3_3b/metadata.json | 2 +- hf_models/llama3_vila_m3_8b/LICENSE | 2 +- hf_models/llama3_vila_m3_8b/README.md | 2 +- hf_models/llama3_vila_m3_8b/metadata.json | 2 +- 15 files changed, 24 insertions(+), 23 deletions(-) diff --git a/hf_models/ct_chat/LICENSE b/hf_models/ct_chat/LICENSE index 92b8f0f7..64fade3f 100644 --- a/hf_models/ct_chat/LICENSE +++ b/hf_models/ct_chat/LICENSE @@ -15,4 +15,4 @@ No additional restrictions — You may not apply legal terms or technological me Notices: You do not have to comply with the license for elements of the material in the public domain or where your use is permitted by an applicable exception or limitation. -No warranties are given. The license may not give you all of the permissions necessary for your intended use. For example, other rights such as publicity, privacy, or moral rights may limit how you use the material. \ No newline at end of file +No warranties are given. 
The license may not give you all of the permissions necessary for your intended use. For example, other rights such as publicity, privacy, or moral rights may limit how you use the material. diff --git a/hf_models/ct_chat/README.md b/hf_models/ct_chat/README.md index d85ad889..dba2b4f3 100644 --- a/hf_models/ct_chat/README.md +++ b/hf_models/ct_chat/README.md @@ -57,32 +57,32 @@ When using this model, please consider citing the following related papers: ```bibtex @misc{hamamci2024foundation, - title={Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography}, + title={Developing Generalist Foundation Models from a Multimodal Dataset for 3D Computed Tomography}, author={Ibrahim Ethem Hamamci and Sezgin Er and Furkan Almas and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Irem Dogan and Muhammed Furkan Dasdelen and Omer Faruk Durugol and Bastian Wittmann and Tamaz Amiranashvili and Enis Simsar and Mehmet Simsar and Emine Bensu Erdemir and Abdullah Alanbay and Anjany Sekuboyina and Berkan Lafci and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, year={2024}, eprint={2403.17834}, archivePrefix={arXiv}, primaryClass={cs.CV}, - url={https://arxiv.org/abs/2403.17834}, + url={https://arxiv.org/abs/2403.17834}, } @misc{hamamci2024generatect, - title={GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes}, + title={GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes}, author={Ibrahim Ethem Hamamci and Sezgin Er and Anjany Sekuboyina and Enis Simsar and Alperen Tezcan and Ayse Gulnihan Simsek and Sevval Nil Esirgun and Furkan Almas and Irem Dogan and Muhammed Furkan Dasdelen and Chinmay Prabhakar and Hadrien Reynaud and Sarthak Pati and Christian Bluethgen and Mehmet Kemal Ozdemir and Bjoern Menze}, year={2024}, eprint={2305.16037}, archivePrefix={arXiv}, primaryClass={cs.CV}, - url={https://arxiv.org/abs/2305.16037}, + url={https://arxiv.org/abs/2305.16037}, } @misc{hamamci2024ct2rep, - title={CT2Rep: 
Automated Radiology Report Generation for 3D Medical Imaging}, + title={CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging}, author={Ibrahim Ethem Hamamci and Sezgin Er and Bjoern Menze}, year={2024}, eprint={2403.06801}, archivePrefix={arXiv}, primaryClass={eess.IV}, - url={https://arxiv.org/abs/2403.06801}, + url={https://arxiv.org/abs/2403.06801}, } -``` \ No newline at end of file +``` diff --git a/hf_models/ct_chat/metadata.json b/hf_models/ct_chat/metadata.json index 71c9fc64..b81cd65b 100644 --- a/hf_models/ct_chat/metadata.json +++ b/hf_models/ct_chat/metadata.json @@ -33,4 +33,4 @@ "Hamamci, Ibrahim Ethem, et al. 'GenerateCT: Text-Conditional Generation of 3D Chest CT Volumes.' arXiv preprint arXiv:2305.16037 (2024).", "Hamamci, Ibrahim Ethem, et al. 'CT2Rep: Automated Radiology Report Generation for 3D Medical Imaging.' arXiv preprint arXiv:2403.06801 (2024)." ] -} \ No newline at end of file +} diff --git a/hf_models/exaonepath/LICENSE b/hf_models/exaonepath/LICENSE index 151c62a7..0edcddc0 100644 --- a/hf_models/exaonepath/LICENSE +++ b/hf_models/exaonepath/LICENSE @@ -31,4 +31,4 @@ This EXAONEPath AI Model License Agreement (the "Agreement") is entered into by This Agreement shall be governed by and construed in accordance with the laws of South Korea, without regard to its conflict of law provisions. 9. Entire Agreement - This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. \ No newline at end of file + This Agreement constitutes the entire agreement between the parties with respect to the use of the Model. diff --git a/hf_models/exaonepath/README.md b/hf_models/exaonepath/README.md index 29fd6c17..3e52777e 100644 --- a/hf_models/exaonepath/README.md +++ b/hf_models/exaonepath/README.md @@ -17,8 +17,8 @@ tags: ## Introduction -We introduce EXAONEPath, a patch-level pathology pretrained model with 86 million parameters. 
-The model was pretrained on 285,153,903 patches extracted from a total of 34,795 WSIs. +We introduce EXAONEPath, a patch-level pathology pretrained model with 86 million parameters. +The model was pretrained on 285,153,903 patches extracted from a total of 34,795 WSIs. EXAONEPath demonstrates superior performance considering the number of WSIs used and the model's parameter count. @@ -158,4 +158,4 @@ If you find EXAONEPath useful, please cite it using this BibTeX: ``` ## Contact -LG AI Research Technical Support: contact_us1@lgresearch.ai \ No newline at end of file +LG AI Research Technical Support: contact_us1@lgresearch.ai diff --git a/hf_models/exaonepath/metadata.json b/hf_models/exaonepath/metadata.json index 87777e92..e92b6be9 100644 --- a/hf_models/exaonepath/metadata.json +++ b/hf_models/exaonepath/metadata.json @@ -29,5 +29,6 @@ "huggingface_url": "https://huggingface.co/LGAI-EXAONE/EXAONEPath", "intended_use": "Research and clinical support for pathology image analysis", "references": [ - "Yun, Juseung, et al. 'EXAONEPath 1.0 Patch-level Foundation Model for Pathology', arXiv preprint arXiv:2408.00380 (2024)."] + "Yun, Juseung, et al. 'EXAONEPath 1.0 Patch-level Foundation Model for Pathology', arXiv preprint arXiv:2408.00380 (2024)." + ] } diff --git a/hf_models/llama3_vila_m3_13b/LICENSE b/hf_models/llama3_vila_m3_13b/LICENSE index 586ce9f0..20d5ee6b 100644 --- a/hf_models/llama3_vila_m3_13b/LICENSE +++ b/hf_models/llama3_vila_m3_13b/LICENSE @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +SOFTWARE. 
diff --git a/hf_models/llama3_vila_m3_13b/README.md b/hf_models/llama3_vila_m3_13b/README.md index 93b55ce5..a8b16183 100644 --- a/hf_models/llama3_vila_m3_13b/README.md +++ b/hf_models/llama3_vila_m3_13b/README.md @@ -13,4 +13,4 @@ This model is available at: [MONAI/Llama3-VILA-M3-13B](https://huggingface.co/MO journal={arXiv preprint arXiv:2411.12915}, year={2025} } -``` \ No newline at end of file +``` diff --git a/hf_models/llama3_vila_m3_13b/metadata.json b/hf_models/llama3_vila_m3_13b/metadata.json index 39a66d6c..beafec2a 100644 --- a/hf_models/llama3_vila_m3_13b/metadata.json +++ b/hf_models/llama3_vila_m3_13b/metadata.json @@ -25,4 +25,4 @@ "references": [ "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." ] -} \ No newline at end of file +} diff --git a/hf_models/llama3_vila_m3_3b/LICENSE b/hf_models/llama3_vila_m3_3b/LICENSE index 586ce9f0..20d5ee6b 100644 --- a/hf_models/llama3_vila_m3_3b/LICENSE +++ b/hf_models/llama3_vila_m3_3b/LICENSE @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +SOFTWARE. 
diff --git a/hf_models/llama3_vila_m3_3b/README.md b/hf_models/llama3_vila_m3_3b/README.md index 5a0072a5..c633cc73 100644 --- a/hf_models/llama3_vila_m3_3b/README.md +++ b/hf_models/llama3_vila_m3_3b/README.md @@ -13,4 +13,4 @@ This model is available at: [MONAI/Llama3-VILA-M3-3B](https://huggingface.co/MON journal={arXiv preprint arXiv:2411.12915}, year={2025} } -``` \ No newline at end of file +``` diff --git a/hf_models/llama3_vila_m3_3b/metadata.json b/hf_models/llama3_vila_m3_3b/metadata.json index 037e7ba2..d94873af 100644 --- a/hf_models/llama3_vila_m3_3b/metadata.json +++ b/hf_models/llama3_vila_m3_3b/metadata.json @@ -25,4 +25,4 @@ "references": [ "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." ] -} \ No newline at end of file +} diff --git a/hf_models/llama3_vila_m3_8b/LICENSE b/hf_models/llama3_vila_m3_8b/LICENSE index 586ce9f0..20d5ee6b 100644 --- a/hf_models/llama3_vila_m3_8b/LICENSE +++ b/hf_models/llama3_vila_m3_8b/LICENSE @@ -18,4 +18,4 @@ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. \ No newline at end of file +SOFTWARE. 
diff --git a/hf_models/llama3_vila_m3_8b/README.md b/hf_models/llama3_vila_m3_8b/README.md index 42ff902b..a62b5601 100644 --- a/hf_models/llama3_vila_m3_8b/README.md +++ b/hf_models/llama3_vila_m3_8b/README.md @@ -13,4 +13,4 @@ This model is available at: [MONAI/Llama3-VILA-M3-8B](https://huggingface.co/MON journal={arXiv preprint arXiv:2411.12915}, year={2025} } -``` \ No newline at end of file +``` diff --git a/hf_models/llama3_vila_m3_8b/metadata.json b/hf_models/llama3_vila_m3_8b/metadata.json index b80c3c2d..96d77280 100644 --- a/hf_models/llama3_vila_m3_8b/metadata.json +++ b/hf_models/llama3_vila_m3_8b/metadata.json @@ -25,4 +25,4 @@ "references": [ "Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)." ] -} \ No newline at end of file +} From 3813faa88641dfd68cb373e711c7f56898921db3 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Fri, 21 Mar 2025 15:34:31 +0800 Subject: [PATCH 08/11] add check Signed-off-by: Yiheng Wang --- ci/get_changed_bundle.py | 18 +++- ci/run_premerge_cpu.sh | 22 +++++ ci/verify_hf_model.py | 103 +++++++++++++++++++++ hf_models/ct_chat/metadata.json | 1 + hf_models/exaonepath/metadata.json | 1 + hf_models/llama3_vila_m3_13b/metadata.json | 1 + hf_models/llama3_vila_m3_3b/metadata.json | 1 + hf_models/llama3_vila_m3_8b/metadata.json | 1 + 8 files changed, 147 insertions(+), 1 deletion(-) create mode 100644 ci/verify_hf_model.py diff --git a/ci/get_changed_bundle.py b/ci/get_changed_bundle.py index 054ab0d3..b13e1edb 100644 --- a/ci/get_changed_bundle.py +++ b/ci/get_changed_bundle.py @@ -28,10 +28,26 @@ def get_changed_bundle(changed_dirs): bundle_names += f"{bundle} " print(bundle_names) +def get_changed_hf_model(changed_dirs): + """ + This function is used to get all changed hf models, a string which + contains all hf model names will be printed, and can be used in shell scripts. 
+ """ + hf_model_names = "" + root_path = "hf_models" + hf_model_list = get_changed_bundle_list(changed_dirs, root_path=root_path) + for hf_model in hf_model_list: + hf_model_names += f"{hf_model} " + print(hf_model_names) + if __name__ == "__main__": parser = argparse.ArgumentParser(description="") parser.add_argument("-f", "--f", type=str, help="changed files.") + parser.add_argument("--hf_model", type=bool, default=False, help="if true, get changed hf models.") args = parser.parse_args() changed_dirs = args.f.splitlines() - get_changed_bundle(changed_dirs) + if args.hf_model: + get_changed_hf_model(changed_dirs) + else: + get_changed_bundle(changed_dirs) diff --git a/ci/run_premerge_cpu.sh b/ci/run_premerge_cpu.sh index f7326adc..19d18c2e 100755 --- a/ci/run_premerge_cpu.sh +++ b/ci/run_premerge_cpu.sh @@ -89,6 +89,28 @@ verify_bundle() { else echo "this pull request does not change any files in 'models', skip verify." fi + # check hf models + hf_model_changes=$(git diff --name-only $head_ref origin/dev -- hf_models) + if [ ! -z "$hf_model_changes" ] + then + # get all changed hf models + hf_model_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$hf_model_changes" --hf_model) + if [ ! -z "$hf_model_list" ] + then + python $(pwd)/ci/prepare_schema.py --l "$hf_model_list" --p "hf_models" + echo $hf_model_list + for hf_model in $hf_model_list; + do + echo "verify hf model: $hf_model" + # verify hf model + python $(pwd)/ci/verify_hf_model.py -b "$hf_model" + done + else + echo "this pull request does not change any hf models, skip verify." + fi + else + echo "this pull request does not change any hf models, skip verify." + fi } diff --git a/ci/verify_hf_model.py b/ci/verify_hf_model.py new file mode 100644 index 00000000..d60af457 --- /dev/null +++ b/ci/verify_hf_model.py @@ -0,0 +1,103 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import argparse +import os +import shutil +import sys + +import torch +from monai.bundle import verify_metadata +from monai.bundle.config_parser import ConfigParser +from monai.utils.module import optional_import +from utils import get_json_dict + + +def verify_hf_model_directory(models_path: str, model_name: str): + """ + Required files: + - README.md + - LICENSE + - metadata.json + + """ + + necessary_files_list = ["README.md", "LICENSE", "metadata.json"] + + model_path = os.path.join(models_path, model_name) + # verify necessary files are included + for file in necessary_files_list: + if not os.path.exists(os.path.join(model_path, file)): + raise ValueError(f"necessary file {file} does not exist.") + + +def verify_version_changes(models_path: str, model_name: str): + """ + This function is used to verify if "version" and "changelog" are correct in "configs/metadata.json". + In addition, if changing an existing hf model, a new version number should be provided.
+ + """ + + model_path = os.path.join(models_path, model_name) + + meta_file_path = os.path.join(model_path, "metadata.json") + metadata = get_json_dict(meta_file_path) + if "version" not in metadata: + raise ValueError(f"'version' is missing in configs/metadata.json of hf model: {model_name}.") + if "changelog" not in metadata: + raise ValueError(f"'changelog' is missing in configs/metadata.json of hf model: {model_name}.") + + # version number should be in changelog + latest_version = metadata["version"] + if latest_version not in metadata["changelog"].keys(): + raise ValueError( + f"version number: {latest_version} is missing in 'changelog' in configs/metadata.json of hf model: {model_name}." + ) + + +def verify_metadata_format(model_path: str): + """ + This function is used to verify the metadata format. + + """ + verify_metadata( + meta_file=os.path.join(model_path, "metadata.json"), + filepath=os.path.join(model_path, "eval/schema.json"), + ) + + +def verify(model_name, models_path="hf_models", mode="full"): + print(f"start verifying {model_name}:") + # add bundle path to ensure custom code can be used + sys.path = [os.path.join(models_path, model_name)] + sys.path + # verify bundle directory + verify_hf_model_directory(models_path, model_name) + print("directory is verified correctly.") + if mode != "regular": + # verify version, changelog + verify_version_changes(models_path, model_name) + print("version and changelog are verified correctly.") + # verify metadata format and data + model_path = os.path.join(models_path, model_name) + verify_metadata_format(model_path) + print("metadata format is verified correctly.") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="") + parser.add_argument("-b", "--b", type=str, help="model name.") + parser.add_argument("-p", "--p", type=str, default="hf_models", help="models path.") + parser.add_argument("-m", "--mode", type=str, default="full", help="verify model mode (full/min).") + args = 
parser.parse_args() + model_name = args.m + models_path = args.p + mode = args.mode + verify(model_name, models_path, mode) diff --git a/hf_models/ct_chat/metadata.json b/hf_models/ct_chat/metadata.json index b81cd65b..9f9cef0b 100644 --- a/hf_models/ct_chat/metadata.json +++ b/hf_models/ct_chat/metadata.json @@ -1,4 +1,5 @@ { + "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", "version": "1.0.0", "changelog": { "1.0.0": "initial release of CT_CHAT model" diff --git a/hf_models/exaonepath/metadata.json b/hf_models/exaonepath/metadata.json index e92b6be9..5c4e92fe 100644 --- a/hf_models/exaonepath/metadata.json +++ b/hf_models/exaonepath/metadata.json @@ -1,4 +1,5 @@ { + "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", "version": "1.0.0", "changelog": { "1.0.0": "initial release of EXAONEPath 1.0" diff --git a/hf_models/llama3_vila_m3_13b/metadata.json b/hf_models/llama3_vila_m3_13b/metadata.json index beafec2a..18b1d117 100644 --- a/hf_models/llama3_vila_m3_13b/metadata.json +++ b/hf_models/llama3_vila_m3_13b/metadata.json @@ -1,4 +1,5 @@ { + "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", "version": "1.0.0", "changelog": { "1.0.0": "initial release of VILA_M3_13B model" diff --git a/hf_models/llama3_vila_m3_3b/metadata.json b/hf_models/llama3_vila_m3_3b/metadata.json index d94873af..16542074 100644 --- a/hf_models/llama3_vila_m3_3b/metadata.json +++ b/hf_models/llama3_vila_m3_3b/metadata.json @@ -1,4 +1,5 @@ { + "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", "version": "1.0.0", "changelog": { "1.0.0": "initial release of VILA_M3_3B model" diff --git a/hf_models/llama3_vila_m3_8b/metadata.json b/hf_models/llama3_vila_m3_8b/metadata.json index 96d77280..9fea8354 100644 --- 
a/hf_models/llama3_vila_m3_8b/metadata.json +++ b/hf_models/llama3_vila_m3_8b/metadata.json @@ -1,4 +1,5 @@ { + "schema": "https://github.com/Project-MONAI/MONAI-extra-test-data/releases/download/0.8.1/meta_schema_hf_20250321.json", "version": "1.0.0", "changelog": { "1.0.0": "initial release of VILA_M3_8B model" From 53a40f54927dab28985f8ac6c29dfdda02cf3105 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Fri, 21 Mar 2025 15:59:49 +0800 Subject: [PATCH 09/11] fix format issue Signed-off-by: Yiheng Wang --- .gitignore | 1 + ci/get_changed_bundle.py | 1 + ci/prepare_schema.py | 6 +++- ci/run_premerge_cpu.sh | 76 ++++++++++++++++++++-------------------- ci/utils.py | 7 ++-- ci/verify_hf_model.py | 17 ++++----- 6 files changed, 56 insertions(+), 52 deletions(-) diff --git a/.gitignore b/.gitignore index 351e54b8..0f2765e6 100644 --- a/.gitignore +++ b/.gitignore @@ -131,3 +131,4 @@ temp/ *.zip models/*/models/* models/*/output/* +hf_models/*/eval/* diff --git a/ci/get_changed_bundle.py b/ci/get_changed_bundle.py index b13e1edb..a177ea86 100644 --- a/ci/get_changed_bundle.py +++ b/ci/get_changed_bundle.py @@ -28,6 +28,7 @@ def get_changed_bundle(changed_dirs): bundle_names += f"{bundle} " print(bundle_names) + def get_changed_hf_model(changed_dirs): """ This function is used to get all changed hf models, a string which diff --git a/ci/prepare_schema.py b/ci/prepare_schema.py index c22576cf..4d140c00 100644 --- a/ci/prepare_schema.py +++ b/ci/prepare_schema.py @@ -16,7 +16,11 @@ def main(bundle_list, models_path): - prepare_schema(bundle_list, root_path=models_path) + if "hf_models" in models_path: + hf_model = True + else: + hf_model = False + prepare_schema(bundle_list, root_path=models_path, hf_model=hf_model) if __name__ == "__main__": diff --git a/ci/run_premerge_cpu.sh b/ci/run_premerge_cpu.sh index 19d18c2e..8fbf9b4b 100755 --- a/ci/run_premerge_cpu.sh +++ b/ci/run_premerge_cpu.sh @@ -52,49 +52,49 @@ verify_bundle() { head_ref=$(git rev-parse HEAD) git fetch 
origin dev $head_ref # achieve all changed files in 'models' - changes=$(git diff --name-only $head_ref origin/dev -- models) - if [ ! -z "$changes" ] - then - # get all changed bundles - bundle_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$changes") - if [ ! -z "$bundle_list" ] - then - python $(pwd)/ci/prepare_schema.py --l "$bundle_list" - echo $bundle_list - for bundle in $bundle_list; - do - if is_excluded "$bundle"; then - echo "skip '$bundle' cpu premerge tests." - else - pip install -r requirements-dev.txt - # get required libraries according to the bundle's metadata file - requirements=$(python $(pwd)/ci/get_bundle_requirements.py --b "$bundle") - # check if ALLOW_MONAI_RC is set to 1, if so, append --pre to the pip install command - if [ $ALLOW_MONAI_RC = true ]; then - include_pre_release="--pre" - else - include_pre_release="" - fi - if [ ! -z "$requirements" ]; then - echo "install required libraries for bundle: $bundle" - pip install $include_pre_release -r "$requirements" - fi - # verify bundle - python $(pwd)/ci/verify_bundle.py -b "$bundle" -m "min" # min tests on cpu - fi - done - else - echo "this pull request does not change any bundles, skip verify." - fi - else - echo "this pull request does not change any files in 'models', skip verify." - fi + # changes=$(git diff --name-only $head_ref origin/dev -- models) + # if [ ! -z "$changes" ] + # then + # # get all changed bundles + # bundle_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$changes") + # if [ ! -z "$bundle_list" ] + # then + # python $(pwd)/ci/prepare_schema.py --l "$bundle_list" + # echo $bundle_list + # for bundle in $bundle_list; + # do + # if is_excluded "$bundle"; then + # echo "skip '$bundle' cpu premerge tests." 
+ # else + # pip install -r requirements-dev.txt + # # get required libraries according to the bundle's metadata file + # requirements=$(python $(pwd)/ci/get_bundle_requirements.py --b "$bundle") + # # check if ALLOW_MONAI_RC is set to 1, if so, append --pre to the pip install command + # if [ $ALLOW_MONAI_RC = true ]; then + # include_pre_release="--pre" + # else + # include_pre_release="" + # fi + # if [ ! -z "$requirements" ]; then + # echo "install required libraries for bundle: $bundle" + # pip install $include_pre_release -r "$requirements" + # fi + # # verify bundle + # python $(pwd)/ci/verify_bundle.py -b "$bundle" -m "min" # min tests on cpu + # fi + # done + # else + # echo "this pull request does not change any bundles, skip verify." + # fi + # else + # echo "this pull request does not change any files in 'models', skip verify." + # fi # check hf models hf_model_changes=$(git diff --name-only $head_ref origin/dev -- hf_models) if [ ! -z "$hf_model_changes" ] then # get all changed hf models - hf_model_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$hf_model_changes" --hf_model) + hf_model_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$hf_model_changes" --hf_model True) if [ ! -z "$hf_model_list" ] then python $(pwd)/ci/prepare_schema.py --l "$hf_model_list" --p "hf_models" diff --git a/ci/utils.py b/ci/utils.py index ba1db0b1..49f2d823 100644 --- a/ci/utils.py +++ b/ci/utils.py @@ -67,7 +67,7 @@ def get_changed_bundle_list(changed_dirs: List[str], root_path: str = "models"): return list(set(changed_bundle_list)) -def prepare_schema(bundle_list: List[str], root_path: str = "models"): +def prepare_schema(bundle_list: List[str], root_path: str = "models", hf_model: bool = False): """ This function is used to prepare schema for changed bundles. 
Due to Github's limitation (see: https://github.com/Project-MONAI/model-zoo/issues/111), @@ -79,7 +79,10 @@ def prepare_schema(bundle_list: List[str], root_path: str = "models"): for bundle_name in bundle_list: bundle_path = os.path.join(root_path, bundle_name) if os.path.exists(bundle_path): - meta_file_path = os.path.join(bundle_path, "configs/metadata.json") + if hf_model: + meta_file_path = os.path.join(bundle_path, "metadata.json") + else: + meta_file_path = os.path.join(bundle_path, "configs/metadata.json") metadata = get_json_dict(meta_file_path) schema_url = metadata["schema"] schema_name = schema_url.split("/")[-1] diff --git a/ci/verify_hf_model.py b/ci/verify_hf_model.py index d60af457..cedd59f5 100644 --- a/ci/verify_hf_model.py +++ b/ci/verify_hf_model.py @@ -11,13 +11,9 @@ import argparse import os -import shutil import sys -import torch from monai.bundle import verify_metadata -from monai.bundle.config_parser import ConfigParser -from monai.utils.module import optional_import from utils import get_json_dict @@ -41,7 +37,7 @@ def verify_hf_model_directory(models_path: str, model_name: str): def verify_version_changes(models_path: str, model_name: str): """ - This function is used to verify if "version" and "changelog" are correct in "configs/metadata.json". + This function is used to verify if "version" and "changelog" are correct in "metadata.json". In addition, if changing an existing hf model, a new version number should be provided. 
""" @@ -51,15 +47,15 @@ def verify_version_changes(models_path: str, model_name: str): meta_file_path = os.path.join(model_path, "metadata.json") metadata = get_json_dict(meta_file_path) if "version" not in metadata: - raise ValueError(f"'version' is missing in configs/metadata.json of hf model: {model_name}.") + raise ValueError(f"'version' is missing in metadata.json of hf model: {model_name}.") if "changelog" not in metadata: - raise ValueError(f"'changelog' is missing in configs/metadata.json of hf model: {model_name}.") + raise ValueError(f"'changelog' is missing in metadata.json of hf model: {model_name}.") # version number should be in changelog latest_version = metadata["version"] if latest_version not in metadata["changelog"].keys(): raise ValueError( - f"version number: {latest_version} is missing in 'changelog' in configs/metadata.json of hf model: {model_name}." + f"version number: {latest_version} is missing in 'changelog' in metadata.json of hf model: {model_name}." ) @@ -69,8 +65,7 @@ def verify_metadata_format(model_path: str): """ verify_metadata( - meta_file=os.path.join(model_path, "metadata.json"), - filepath=os.path.join(model_path, "eval/schema.json"), + meta_file=os.path.join(model_path, "metadata.json"), filepath=os.path.join(model_path, "eval/schema.json") ) @@ -97,7 +92,7 @@ def verify(model_name, models_path="hf_models", mode="full"): parser.add_argument("-p", "--p", type=str, default="hf_models", help="models path.") parser.add_argument("-m", "--mode", type=str, default="full", help="verify model mode (full/min).") args = parser.parse_args() - model_name = args.m + model_name = args.b models_path = args.p mode = args.mode verify(model_name, models_path, mode) From d8d83639f5518ec6cea81e614e60fb586e2f0c6c Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Fri, 21 Mar 2025 16:04:52 +0800 Subject: [PATCH 10/11] revert tmp changes Signed-off-by: Yiheng Wang --- ci/run_premerge_cpu.sh | 74 +++++++++++++++++++++--------------------- 1 file 
changed, 37 insertions(+), 37 deletions(-) diff --git a/ci/run_premerge_cpu.sh b/ci/run_premerge_cpu.sh index 8fbf9b4b..5312d0a2 100755 --- a/ci/run_premerge_cpu.sh +++ b/ci/run_premerge_cpu.sh @@ -52,43 +52,43 @@ verify_bundle() { head_ref=$(git rev-parse HEAD) git fetch origin dev $head_ref # achieve all changed files in 'models' - # changes=$(git diff --name-only $head_ref origin/dev -- models) - # if [ ! -z "$changes" ] - # then - # # get all changed bundles - # bundle_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$changes") - # if [ ! -z "$bundle_list" ] - # then - # python $(pwd)/ci/prepare_schema.py --l "$bundle_list" - # echo $bundle_list - # for bundle in $bundle_list; - # do - # if is_excluded "$bundle"; then - # echo "skip '$bundle' cpu premerge tests." - # else - # pip install -r requirements-dev.txt - # # get required libraries according to the bundle's metadata file - # requirements=$(python $(pwd)/ci/get_bundle_requirements.py --b "$bundle") - # # check if ALLOW_MONAI_RC is set to 1, if so, append --pre to the pip install command - # if [ $ALLOW_MONAI_RC = true ]; then - # include_pre_release="--pre" - # else - # include_pre_release="" - # fi - # if [ ! -z "$requirements" ]; then - # echo "install required libraries for bundle: $bundle" - # pip install $include_pre_release -r "$requirements" - # fi - # # verify bundle - # python $(pwd)/ci/verify_bundle.py -b "$bundle" -m "min" # min tests on cpu - # fi - # done - # else - # echo "this pull request does not change any bundles, skip verify." - # fi - # else - # echo "this pull request does not change any files in 'models', skip verify." - # fi + changes=$(git diff --name-only $head_ref origin/dev -- models) + if [ ! -z "$changes" ] + then + # get all changed bundles + bundle_list=$(python $(pwd)/ci/get_changed_bundle.py --f "$changes") + if [ ! 
-z "$bundle_list" ] + then + python $(pwd)/ci/prepare_schema.py --l "$bundle_list" + echo $bundle_list + for bundle in $bundle_list; + do + if is_excluded "$bundle"; then + echo "skip '$bundle' cpu premerge tests." + else + pip install -r requirements-dev.txt + # get required libraries according to the bundle's metadata file + requirements=$(python $(pwd)/ci/get_bundle_requirements.py --b "$bundle") + # check if ALLOW_MONAI_RC is set to 1, if so, append --pre to the pip install command + if [ $ALLOW_MONAI_RC = true ]; then + include_pre_release="--pre" + else + include_pre_release="" + fi + if [ ! -z "$requirements" ]; then + echo "install required libraries for bundle: $bundle" + pip install $include_pre_release -r "$requirements" + fi + # verify bundle + python $(pwd)/ci/verify_bundle.py -b "$bundle" -m "min" # min tests on cpu + fi + done + else + echo "this pull request does not change any bundles, skip verify." + fi + else + echo "this pull request does not change any files in 'models', skip verify." + fi # check hf models hf_model_changes=$(git diff --name-only $head_ref origin/dev -- hf_models) if [ ! -z "$hf_model_changes" ] From 08fe5043667956de870042f229296cbfa11f8171 Mon Sep 17 00:00:00 2001 From: Yiheng Wang Date: Fri, 21 Mar 2025 16:12:44 +0800 Subject: [PATCH 11/11] add requirement Signed-off-by: Yiheng Wang --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index b9e71fc2..6d29a604 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,2 +1,3 @@ monai>=1.0.1 huggingface_hub==0.29.3 +jsonschema