@article{Xiao_Tsai_Sohn_Chandraker_Yang_2020,
  title        = {Adversarial Learning of Privacy-Preserving and Task-Oriented Representations},
  author       = {Xiao, Taihong and Tsai, Yi-Hsuan and Sohn, Kihyuk and Chandraker, Manmohan and Yang, Ming-Hsuan},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {34},
  number       = {07},
  pages        = {12434--12441},
  year         = {2020},
  month        = {Apr.},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6930},
  doi          = {10.1609/aaai.v34i07.6930},
  abstractNote = {Data privacy has emerged as an important issue as data-driven deep learning has become an essential component of modern machine learning systems. For instance, machine learning systems face a potential privacy risk via the model inversion attack, whose goal is to reconstruct the input data from the latent representation of deep networks. Our work aims at learning a privacy-preserving and task-oriented representation to defend against such model inversion attacks. Specifically, we propose an adversarial reconstruction learning framework that prevents the latent representations from being decoded into the original input data. By simulating the expected behavior of the adversary, our framework is realized by minimizing the negative pixel reconstruction loss or the negative feature reconstruction (i.e., perceptual distance) loss. We validate the proposed method on face attribute prediction, showing that our method protects visual privacy with only a small decrease in utility performance. In addition, we show the utility-privacy trade-off for different choices of the hyperparameter on the negative perceptual distance loss during training, allowing service providers to determine the right level of privacy protection for a given utility performance. Moreover, we provide an extensive study with different selections of features, tasks, and data to further analyze their influence on privacy protection.}
}