
Commit 1921f3f

Merge pull request #1031 from Parallel-in-Time/bibtex-bibbot-1030-73b8b4a
pint.bib updates
2 parents 73b8b4a + 63cd14c

1 file changed: 22 additions & 0 deletions

File tree

_bibliography/pint.bib

@@ -8533,6 +8533,19 @@ @article{AluthgeEtAl2026
   year = {2026},
 }
 
+@article{GaraiEtAl2026,
+  author = {Garai, Gobinda and Mandal, Bankim C.},
+  doi = {10.1080/00207160.2026.2614077},
+  issn = {1029-0265},
+  journal = {International Journal of Computer Mathematics},
+  month = {January},
+  pages = {1–23},
+  publisher = {Informa UK Limited},
+  title = {Domain decomposition-based space-time preconditioner for the optimal control problem with constraints imposed by the Cahn-Hilliard equation},
+  url = {http://dx.doi.org/10.1080/00207160.2026.2614077},
+  year = {2026},
+}
+
 @article{HeEtAl2026,
   author = {He, Tingting and Zhai, Tianle and Huang, Xuhang and Li, Min},
   doi = {10.1016/j.cnsns.2025.109183},
@@ -8547,6 +8560,15 @@ @article{HeEtAl2026
   year = {2026},
 }
 
+@unpublished{JiangEtAl2026,
+  abstract = {We present a new training methodology for transformers using a multilevel, layer-parallel approach. Through a neural ODE formulation of transformers, our application of a multilevel parallel-in-time algorithm for the forward and backpropagation phases of training achieves parallel acceleration over the layer dimension. This dramatically enhances parallel scalability as the network depth increases, which is particularly useful for increasingly large foundational models. However, achieving this introduces errors that cause systematic bias in the gradients, which in turn reduces convergence when closer to the minima. We develop an algorithm to detect this critical transition and either switch to serial training or systematically increase the accuracy of layer-parallel training. Results, including BERT, GPT2, ViT, and machine translation architectures, demonstrate parallel-acceleration as well as accuracy commensurate with serial pre-training while fine-tuning is unaffected.},
+  author = {Shuai Jiang and Marc Salvado and Eric C. Cyr and Alena Kopaničáková and Rolf Krause and Jacob B. Schroder},
+  howpublished = {arXiv:2601.09026v1 [cs.LG]},
+  title = {Layer-Parallel Training for Transformers},
+  url = {https://arxiv.org/abs/2601.09026v1},
+  year = {2026},
+}
+
 @inproceedings{YodaEtAl2026,
   author = {Yoda, Ryo and Bolten, Matthias},
   booktitle = {Proceedings of the Supercomputing Asia and International Conference on High Performance Computing in Asia Pacific Region},
