@article{Ghosh_Losalka_Black_2019,
  title        = {Resisting Adversarial Attacks Using Gaussian Mixture Variational Autoencoders},
  author       = {Ghosh, Partha and Losalka, Arpan and Black, Michael J.},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {33},
  number       = {01},
  pages        = {541--548},
  year         = {2019},
  month        = jul,
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/3828},
  doi          = {10.1609/aaai.v33i01.3301541},
  abstractNote = {Susceptibility of deep neural networks to adversarial attacks poses a major theoretical and practical challenge. All efforts to harden classifiers against such attacks have seen limited success till now. Two distinct categories of samples against which deep neural networks are vulnerable, ``adversarial samples'' and ``fooling samples'', have been tackled separately so far due to the difficulty posed when considered together. In this work, we show how one can defend against them both under a unified framework. Our model has the form of a variational autoencoder with a Gaussian mixture prior on the latent variable, such that each mixture component corresponds to a single class. We show how selective classification can be performed using this model, thereby causing the adversarial objective to entail a conflict. The proposed method leads to the rejection of adversarial samples instead of misclassification, while maintaining high precision and recall on test data. It also inherently provides a way of learning a selective classifier in a semi-supervised scenario, which can similarly resist adversarial attacks. We further show how one can reclassify the detected adversarial samples by iterative optimization.}
}