   author = {He, Tingting and Zhai, Tianle and Huang, Xuhang and Li, Min},
   doi = {10.1016/j.cnsns.2025.109183},
@@ -8547,6 +8560,15 @@ @article{HeEtAl2026
   year = {2026},
 }
 
+@unpublished{JiangEtAl2026,
+  abstract = {We present a new training methodology for transformers using a multilevel, layer-parallel approach. Through a neural ODE formulation of transformers, our application of a multilevel parallel-in-time algorithm for the forward and backpropagation phases of training achieves parallel acceleration over the layer dimension. This dramatically enhances parallel scalability as the network depth increases, which is particularly useful for increasingly large foundational models. However, achieving this introduces errors that cause systematic bias in the gradients, which in turn reduces convergence when closer to the minima. We develop an algorithm to detect this critical transition and either switch to serial training or systematically increase the accuracy of layer-parallel training. Results, including BERT, GPT2, ViT, and machine translation architectures, demonstrate parallel-acceleration as well as accuracy commensurate with serial pre-training while fine-tuning is unaffected.},
+  author = {Jiang, Shuai and Salvado, Marc and Cyr, Eric C. and Kopaničáková, Alena and Krause, Rolf and Schroder, Jacob B.},
+  howpublished = {arXiv:2601.09026v1 [cs.LG]},
+  title = {Layer-Parallel Training for Transformers},
+  url = {https://arxiv.org/abs/2601.09026v1},
+  year = {2026},
+}
+
 @inproceedings{YodaEtAl2026,
   author = {Yoda, Ryo and Bolten, Matthias},
   booktitle = {Proceedings of the Supercomputing Asia and International Conference on High Performance Computing in Asia Pacific Region},