@article{Yang_Tsai_Yu_Ho_Jin_2020,
  title    = {Beyond Digital Domain: Fooling Deep Learning Based Recognition System in Physical World},
  author   = {Yang, Kaichen and Tsai, Tzungyu and Yu, Honggang and Ho, Tsung-Yi and Jin, Yier},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {34},
  number   = {01},
  pages    = {1088--1095},
  year     = {2020},
  month    = apr,
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/5459},
  doi      = {10.1609/aaai.v34i01.5459},
  abstract = {Adversarial examples that can fool deep neural network (DNN) models in computer vision present a growing threat. Current methods of launching adversarial attacks concentrate on attacking image classifiers by adding noise to digital inputs. The problems of attacking object detection models and of mounting adversarial attacks in the physical world are rarely touched. Some prior works have proposed physical adversarial attacks against object detection models, but they are limited in certain aspects. In this paper, we propose a novel physical adversarial attack targeting object detection models. Instead of simply printing images, we manufacture real metal objects that achieve the adversarial effect. In both indoor and outdoor experiments, we show that our physical adversarial objects can fool widely applied object detection models, including SSD, YOLO, and Faster R-CNN, in various environments. We also test our attack on a variety of commercial object detection platforms and demonstrate that it remains valid on these platforms. Considering the potential defense mechanisms our adversarial objects may encounter, we conduct a series of experiments to evaluate the effect of existing defense methods on our physical attack.}
}