@article{Wang_Li_Wan_Ogunbona_Liu_2018,
  title        = {Cooperative Training of Deep Aggregation Networks for RGB-D Action Recognition},
  author       = {Wang, Pichao and Li, Wanqing and Wan, Jun and Ogunbona, Philip and Liu, Xinwang},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {32},
  number       = {1},
  year         = {2018},
  month        = {Apr.},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/12228},
  DOI          = {10.1609/aaai.v32i1.12228},
  abstractNote = {A novel deep neural network training paradigm that exploits the conjoint information in multiple heterogeneous sources is proposed. Specifically, in an RGB-D based action recognition task, it cooperatively trains a single convolutional neural network (named c-ConvNet) on both RGB visual features and depth features, and deeply aggregates the two kinds of features for action recognition. Unlike the conventional ConvNet, which learns deep separable features for homogeneous modality-based classification with only one softmax loss function, the c-ConvNet enhances the discriminative power of the deeply learned features and weakens the undesired modality discrepancy by jointly optimizing a ranking loss and a softmax loss for both homogeneous and heterogeneous modalities. The ranking loss consists of intra-modality and cross-modality triplet losses, and it reduces both the intra-modality and cross-modality feature variations. Furthermore, the correlations between RGB and depth data are embedded in the c-ConvNet, can be retrieved by either of the modalities, and contribute to the recognition even when only one of the modalities is available. The proposed method was extensively evaluated on two large RGB-D action recognition datasets, ChaLearn LAP IsoGD and NTU RGB+D, and one small dataset, SYSU 3D HOI, and achieved state-of-the-art results.}
}
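
The abstract above describes a joint objective: a softmax classification loss combined with a ranking loss built from intra-modality and cross-modality triplet terms. Below is a minimal PyTorch sketch of such a combined objective, not the authors' released implementation; the margin, the weighting factor lam, and all tensor/function names are illustrative assumptions.

import torch
import torch.nn as nn

# NOTE: a minimal sketch of the joint objective described in the abstract,
# not the authors' code. Margin, weight `lam`, and argument names are
# illustrative assumptions.
softmax_loss = nn.CrossEntropyLoss()
triplet_loss = nn.TripletMarginLoss(margin=1.0)

def cconvnet_objective(logits_rgb, logits_depth,
                       anc_rgb, pos_rgb, neg_rgb,
                       anc_depth, pos_depth, neg_depth,
                       labels, lam=0.5):
    # Softmax terms: per-modality classification against the shared labels.
    cls = softmax_loss(logits_rgb, labels) + softmax_loss(logits_depth, labels)
    # Intra-modality triplets: anchor, positive, and negative embeddings all
    # drawn from the same modality, reducing intra-modality feature variation.
    intra = (triplet_loss(anc_rgb, pos_rgb, neg_rgb)
             + triplet_loss(anc_depth, pos_depth, neg_depth))
    # Cross-modality triplets: anchor from one modality, positive/negative
    # embeddings from the other, pulling the two feature spaces together.
    cross = (triplet_loss(anc_rgb, pos_depth, neg_depth)
             + triplet_loss(anc_depth, pos_rgb, neg_rgb))
    return cls + lam * (intra + cross)

In this sketch, the cross-modality triplet terms are what embed the RGB-depth correlations into a shared feature space, which is consistent with the abstract's claim that either modality alone can retrieve those correlations at test time.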