@article{Ding_Zhao_Hoi_Ong_2015,
  title        = {An Adaptive Gradient Method for Online AUC Maximization},
  volume       = {29},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/9577},
  DOI          = {10.1609/aaai.v29i1.9577},
  abstractNote = {Learning to maximize AUC performance is an important research problem in machine learning. Unlike traditional batch learning methods for maximizing AUC, which often suffer from poor scalability, recent years have witnessed emerging studies that attempt to maximize AUC with single-pass online learning approaches. Despite the encouraging results reported, existing online AUC maximization algorithms often adopt simple stochastic gradient descent approaches, which fail to exploit the geometric knowledge of the data observed during the online learning process and thus can suffer from relatively slow convergence. To overcome this limitation, in this paper we propose a novel algorithm, Adaptive Online AUC Maximization (AdaOAM), which applies an adaptive gradient method that exploits the knowledge of historical gradients to perform more informative online learning. The new adaptive updating strategy of AdaOAM is less sensitive to parameter settings because of its natural effect of tuning the learning rate, and the time complexity of the new algorithm remains the same as that of previous non-adaptive algorithms. To demonstrate the effectiveness of the proposed algorithm, we analyze its theoretical bound and evaluate its empirical performance on both public benchmark datasets and anomaly detection datasets. The encouraging empirical results clearly show the effectiveness and efficiency of the proposed algorithm.},
  number       = {1},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Ding, Yi and Zhao, Peilin and Hoi, Steven and Ong, Yew-Soon},
  year         = {2015},
  month        = feb
}
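
The abstract above describes the idea only at a high level; below is a minimal, hedged Python sketch of that general recipe (a pairwise loss for AUC combined with a per-coordinate adaptive step size accumulated from historical gradients, in the style of AdaGrad). The buffer scheme, the hinge loss, the step size eta, and all function and variable names are illustrative assumptions, not the authors' exact algorithm. BibTeX ignores text outside entries, so this note and sketch do not affect parsing.

import numpy as np

def adaptive_online_auc_step(w, G, x, y, pos_buf, neg_buf,
                             eta=0.1, eps=1e-8, buf_size=100):
    """One online round: pair the new example (x, y) against buffered
    examples of the opposite class, take a subgradient of a pairwise
    hinge loss, and update w with AdaGrad-style per-coordinate rates.
    Illustrative sketch only, not the paper's exact update rule."""
    same, other = (pos_buf, neg_buf) if y == 1 else (neg_buf, pos_buf)
    grad = np.zeros_like(w)
    for x_op in other:
        # The score of the positive example should exceed the negative
        # example's score by a margin of 1, else the hinge loss is active.
        diff = (x - x_op) if y == 1 else (x_op - x)
        if np.dot(w, diff) < 1.0:
            grad -= diff
    if other:
        grad /= len(other)
    G += grad ** 2                        # accumulate squared historical gradients
    w -= eta * grad / (np.sqrt(G) + eps)  # adaptive per-coordinate step
    same.append(x)                        # simple FIFO buffer (an assumption)
    if len(same) > buf_size:
        same.pop(0)
    return w, G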