@article{Chaudhury_2020,
  author       = {Chaudhury, Subhajit},
  title        = {Understanding Generalization in Neural Networks for Robustness against Adversarial Vulnerabilities},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {34},
  number       = {10},
  pages        = {13714--13715},
  year         = {2020},
  month        = apr,
  doi          = {10.1609/aaai.v34i10.7129},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/7129},
  abstractNote = {Neural networks have contributed to tremendous progress in the domains of computer vision, speech processing, and other real-world applications. However, recent studies have shown that these state-of-the-art models can be easily compromised by adding small imperceptible perturbations. My thesis summary frames the problem of adversarial robustness as an equivalent problem of learning suitable features that leads to good generalization in neural networks. This is motivated from learning in humans which is not trivially fooled by such perturbations due to robust feature learning which shows good out-of-sample generalization.},
}