Skip to content

Commit cb014b8

Browse files
committed
Deploying to gh-pages from @ dbc0541 🚀
1 parent 2374b3e commit cb014b8

2 files changed

Lines changed: 62 additions & 30 deletions

File tree

model_data.json

Lines changed: 30 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -965,6 +965,36 @@
965965
"1.0.0": "Initial release"
966966
}
967967
},
968+
"hf_llama3_vila_m3_3b": {
969+
"model_name": "VILA_M3_3B",
970+
"description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.",
971+
"authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH",
972+
"papers": [
973+
"Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)."
974+
],
975+
"version": "1.0.0",
976+
"model_id": "hf_llama3_vila_m3_3b",
977+
"readme": "<h1>VILA_M3_3B</h1>\n<p>VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.</p>\n<p>This model is available at: <a href=\"https://huggingface.co/MONAI/Llama3-VILA-M3-3B\">MONAI/Llama3-VILA-M3-3B</a></p>\n<h2>Citation</h2>\n<pre><code>@article{nath2025vila,\n title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge},\n author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang},\n journal={arXiv preprint arXiv:2411.12915},\n year={2025}\n}\n</code></pre>",
978+
"huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-3B",
979+
"changelog": {
980+
"1.0.0": "initial release of VILA_M3_3B model"
981+
}
982+
},
983+
"hf_llama3_vila_m3_13b": {
984+
"model_name": "VILA_M3_13B",
985+
"description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.",
986+
"authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH",
987+
"papers": [
988+
"Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)."
989+
],
990+
"version": "1.0.0",
991+
"model_id": "hf_llama3_vila_m3_13b",
992+
"readme": "<h1>VILA_M3_13B</h1>\n<p>VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.</p>\n<p>This model is available at: <a href=\"https://huggingface.co/MONAI/Llama3-VILA-M3-13B\">MONAI/Llama3-VILA-M3-13B</a></p>\n<h2>Citation</h2>\n<pre><code>@article{nath2025vila,\n title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge},\n author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang},\n journal={arXiv preprint arXiv:2411.12915},\n year={2025}\n}\n</code></pre>",
993+
"huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-13B",
994+
"changelog": {
995+
"1.0.0": "initial release of VILA_M3_13B model"
996+
}
997+
},
968998
"hf_exaonepath": {
969999
"model_name": "EXAONEPath",
9701000
"description": "EXAONEPath is a patch-level pathology pretrained model with 86 million parameters, pretrained on 285,153,903 patches extracted from 34,795 WSIs.",
@@ -1011,35 +1041,5 @@
10111041
"changelog": {
10121042
"1.0.0": "initial release of CT_CHAT model"
10131043
}
1014-
},
1015-
"hf_llama3_vila_m3_3b": {
1016-
"model_name": "VILA_M3_3B",
1017-
"description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.",
1018-
"authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH",
1019-
"papers": [
1020-
"Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)."
1021-
],
1022-
"version": "1.0.0",
1023-
"model_id": "hf_llama3_vila_m3_3b",
1024-
"readme": "<h1>VILA_M3_3B</h1>\n<p>VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.</p>\n<p>This model is available at: <a href=\"https://huggingface.co/MONAI/Llama3-VILA-M3-3B\">MONAI/Llama3-VILA-M3-3B</a></p>\n<h2>Citation</h2>\n<pre><code>@article{nath2025vila,\n title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge},\n author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang},\n journal={arXiv preprint arXiv:2411.12915},\n year={2025}\n}\n</code></pre>",
1025-
"huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-3B",
1026-
"changelog": {
1027-
"1.0.0": "initial release of VILA_M3_3B model"
1028-
}
1029-
},
1030-
"hf_llama3_vila_m3_13b": {
1031-
"model_name": "VILA_M3_13B",
1032-
"description": "VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.",
1033-
"authors": "Vishwesh Nath, Wenqi Li, Dong Yang, Andriy Myronenko, et al. from NVIDIA, SingHealth, and NIH",
1034-
"papers": [
1035-
"Nath, Vishwesh, et al. 'VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge', arXiv preprint arXiv:2411.12915 (2025)."
1036-
],
1037-
"version": "1.0.0",
1038-
"model_id": "hf_llama3_vila_m3_13b",
1039-
"readme": "<h1>VILA_M3_13B</h1>\n<p>VILA_M3 is a medical vision language model that enhances VLMs with medical expert knowledge, utilizing domain-expert models to improve precision in medical imaging tasks.</p>\n<p>This model is available at: <a href=\"https://huggingface.co/MONAI/Llama3-VILA-M3-13B\">MONAI/Llama3-VILA-M3-13B</a></p>\n<h2>Citation</h2>\n<pre><code>@article{nath2025vila,\n title={VILA_M3: Enhancing Vision-Language Models with Medical Expert Knowledge},\n author={Nath, Vishwesh and Li, Wenqi and Yang, Dong and Myronenko, Andriy and Zheng, Mingxin and Lu, Yao and Liu, Zhijian and Yin, Hongxu and Tang, Yucheng and Guo, Pengfei and Zhao, Can and Xu, Ziyue and He, Yufan and Law, Yee Man and Simon, Benjamin and Harmon, Stephanie and Heinrich, Greg and Aylward, Stephen and Edgar, Marc and Zephyr, Michael and Han, Song and Molchanov, Pavlo and Turkbey, Baris and Roth, Holger and Xu, Daguang},\n journal={arXiv preprint arXiv:2411.12915},\n year={2025}\n}\n</code></pre>",
1040-
"huggingface_url": "https://huggingface.co/MONAI/Llama3-VILA-M3-13B",
1041-
"changelog": {
1042-
"1.0.0": "initial release of VILA_M3_13B model"
1043-
}
10441044
}
10451045
}

wg_federated_learning.html

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -404,6 +404,38 @@ <h3 class="text-xl font-semibold text-brand-primary mb-6 pb-2 border-b border-ne
404404
</div>
405405
</section>
406406

407+
<!-- Meeting Recordings Section -->
408+
<section class="py-24 bg-white">
409+
<div class="container">
410+
<h2 class="text-3xl font-bold text-neutral-darkestblack mb-8">Meeting Recordings</h2>
411+
<div class="grid grid-cols-1 md:grid-cols-2 gap-8">
412+
<div class="bg-white p-8 rounded-lg border border-neutral-200 hover:border-brand-primary hover:shadow-md transition-all duration-300">
413+
<h3 class="text-xl font-semibold text-brand-primary mb-6 pb-2 border-b border-neutral-200">YouTube Playlist</h3>
414+
<div class="aspect-w-16 aspect-h-9 mb-4">
415+
<iframe
416+
class="w-full h-full rounded-lg"
417+
src="https://www.youtube.com/embed/videoseries?list=PLtoSVSQ2XzyAKQa4AkoQEviFn9pN9OZY8"
418+
title="MONAI Federated Learning Working Group Meetings"
419+
frameborder="0"
420+
allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture"
421+
allowfullscreen>
422+
</iframe>
423+
</div>
424+
<p class="text-gray-700">Watch all our working group meetings and discussions on our YouTube playlist. Subscribe to stay updated with the latest developments in federated learning.</p>
425+
<a href="https://www.youtube.com/playlist?list=PLtoSVSQ2XzyAKQa4AkoQEviFn9pN9OZY8"
426+
target="_blank"
427+
rel="noopener noreferrer"
428+
class="inline-flex items-center mt-4 text-brand-primary hover:text-brand-dark transition-colors duration-300">
429+
<span>View Full Playlist</span>
430+
<svg class="w-4 h-4 ml-1" fill="none" stroke="currentColor" viewBox="0 0 24 24">
431+
<path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M10 6H6a2 2 0 00-2 2v10a2 2 0 002 2h10a2 2 0 002-2v-4M14 4h6m0 0v6m0-6L10 14"></path>
432+
</svg>
433+
</a>
434+
</div>
435+
</div>
436+
</div>
437+
</section>
438+
407439
<!-- Collaboration Opportunities Section -->
408440
<section class="py-24 bg-white">
409441
<div class="container">

0 commit comments

Comments (0)