@article{Qin_Li_Che_Ni_Liu_2021,
  title        = {Co-GAT: A Co-Interactive Graph Attention Network for Joint Dialog Act Recognition and Sentiment Classification},
  author       = {Qin, Libo and Li, Zhouyang and Che, Wanxiang and Ni, Minheng and Liu, Ting},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {15},
  pages        = {13709--13717},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/17616},
  doi          = {10.1609/aaai.v35i15.17616},
  abstractNote = {In a dialog system, dialog act recognition and sentiment classification are two correlated tasks for capturing speakers' intentions, where dialog act and sentiment indicate the explicit and the implicit intentions, respectively. The dialog context information (contextual information) and the mutual interaction information are two key factors that contribute to the two related tasks. Unfortunately, none of the existing approaches consider these two important sources of information simultaneously. In this paper, we propose a Co-Interactive Graph Attention Network (Co-GAT) to jointly perform the two tasks. The core module is a proposed co-interactive graph interaction layer in which a cross-utterance connection and a cross-task connection are constructed and iteratively updated with each other, allowing the two types of information to be considered simultaneously. Experimental results on two public datasets show that our model successfully captures the two sources of information and achieves state-of-the-art performance. In addition, we find that the contributions from the contextual and mutual interaction information do not fully overlap with contextualized word representations (BERT, RoBERTa, XLNet).}
}