@article{Zhou_Jia_Wang_Dong_Yin_Lei_2018,
  title        = {Inferring Emotion from Conversational Voice Data: A Semi-Supervised Multi-Path Generative Neural Network Approach},
  author       = {Zhou, Suping and Jia, Jia and Wang, Qi and Dong, Yufei and Yin, Yufeng and Lei, Kehua},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {32},
  number       = {1},
  year         = {2018},
  month        = {Apr.},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/11280},
  doi          = {10.1609/aaai.v32i1.11280},
  abstractNote = {To give a more humanized response in Voice Dialogue Applications (VDAs), inferring emotional states from users’ queries may play an important role. However, VDAs have a tremendous number of users and massive amounts of unlabeled data with high-dimensional features drawn from multimodal information, which challenge traditional speech emotion recognition methods. In this paper, to better infer emotion from conversational voice data, we propose a semi-supervised multi-path generative neural network. Specifically, we first build a novel supervised multi-path deep neural network framework. To avoid high-dimensional input, raw features are trained in groups by local classifiers. The high-level features of each local classifier are then concatenated as input to a global classifier. These two kinds of classifiers are trained simultaneously through a single objective function to achieve more effective and discriminative emotion inference. To further address the labeled-data-scarcity problem, we extend the multi-path deep neural network to a generative model based on a semi-supervised variational autoencoder (semi-VAE), which can be trained on labeled and unlabeled data simultaneously. Experiments on a real-world dataset of 24,000 samples collected from Sogou Voice Assistant (SVAD13) and the benchmark IEMOCAP dataset show that our method significantly outperforms existing state-of-the-art results.}
}