@article{Liu_Zhao_Sun_2020,
  author   = {Liu, Xiao and Zhao, Jing and Sun, Shiliang},
  title    = {{Bayesian} Adversarial Attack on {Graph Neural Networks} (Student Abstract)},
  journal  = {Proceedings of the {AAAI} Conference on Artificial Intelligence},
  volume   = {34},
  number   = {10},
  pages    = {13867--13868},
  year     = {2020},
  month    = apr,
  doi      = {10.1609/aaai.v34i10.7206},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/7206},
  abstract = {Adversarial attack on graph neural network (GNN) is distinctive as it often jointly trains the available nodes to generate a graph as an adversarial example. Existing attacking approaches usually consider the case that all the training set is available which may be impractical. In this paper, we propose a novel Bayesian adversarial attack approach based on projected gradient descent optimization, called Bayesian PGD attack, which gets more general attack examples than deterministic attack approaches. The generated adversarial examples by our approach using the same partial dataset as deterministic attack approaches would make the GNN have higher misclassification rate on graph node classification. Specifically, in our approach, the edge perturbation $Z$ is used for generating adversarial examples, which is viewed as a random variable with scale constraint, and the optimization target of the edge perturbation is to maximize the KL divergence between its true posterior distribution $p(Z \mid D)$ and its approximate variational distribution $q_{\theta}(Z)$. We experimentally find that the attack performance will decrease with the reduction of available nodes, and the effect of attack using different nodes varies greatly especially when the number of nodes is small. Through experimental comparison with the state-of-the-art attack approaches on GNNs, our approach is demonstrated to have better and robust attack performance.},
}