@article{Zhang_Benz_Imtiaz_Kweon_2020,
  title        = {CD-UAP: Class Discriminative Universal Adversarial Perturbation},
  author       = {Zhang, Chaoning and Benz, Philipp and Imtiaz, Tooba and Kweon, In-So},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  year         = {2020},
  month        = {Apr.},
  volume       = {34},
  number       = {04},
  pages        = {6754--6761},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6154},
  doi          = {10.1609/aaai.v34i04.6154},
  abstractNote = {A single universal adversarial perturbation (UAP) can be added to all natural images to change most of their predicted class labels. It is of high practical relevance for an attacker to have flexible control over the targeted classes to be attacked; however, the existing UAP method attacks samples from all classes. In this work, we propose a new universal attack method to generate a single perturbation that fools a target network into misclassifying only a chosen group of classes, while having limited influence on the remaining classes. Since the proposed attack generates a universal adversarial perturbation that is discriminative to targeted and non-targeted classes, we term it class discriminative universal adversarial perturbation (CD-UAP). We propose a simple yet effective algorithm framework, under which we design and compare various loss function configurations tailored for the class discriminative universal attack. The proposed approach has been evaluated with extensive experiments on various benchmark datasets. Additionally, it achieves state-of-the-art performance on the original task of UAP attacking all classes, which demonstrates its effectiveness.}
}