@article{Holland_2021,
  title    = {Scaling-Up Robust Gradient Descent Techniques},
  author   = {Holland, Matthew J.},
  year     = {2021},
  month    = {May},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {35},
  number   = {9},
  pages    = {7694--7701},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/16940},
  doi      = {10.1609/aaai.v35i9.16940},
  abstract = {We study a scalable alternative to robust gradient descent (RGD) techniques that can be used when losses and/or gradients can be heavy-tailed, though this will be unknown to the learner. The core technique is simple: instead of trying to robustly aggregate gradients at each step, which is costly and leads to sub-optimal dimension dependence in risk bounds, we choose a candidate which does not diverge too far from the majority of cheap stochastic sub-processes run over partitioned data. This lets us retain the formal strength of RGD methods at a fraction of the cost.}
}