@article{Gao_Lian_Wang_Sun_2020,
  author   = {Gao, Quanxue and Lian, Huanhuan and Wang, Qianqian and Sun, Gan},
  title    = {Cross-Modal Subspace Clustering via Deep Canonical Correlation Analysis},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {34},
  number   = {04},
  pages    = {3938--3945},
  year     = {2020},
  month    = apr,
  doi      = {10.1609/aaai.v34i04.5808},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/5808},
  abstract = {For cross-modal subspace clustering, the key point is how to exploit the correlation information between cross-modal data. However, most hierarchical and structural correlation information among cross-modal data cannot be well exploited due to its high-dimensional non-linear property. To tackle this problem, in this paper, we propose an unsupervised framework named Cross-Modal Subspace Clustering via Deep Canonical Correlation Analysis (CMSC-DCCA), which incorporates the correlation constraint with a self-expressive layer to make full use of information among the inter-modal data and the intra-modal data. More specifically, the proposed model consists of three components: 1) deep canonical correlation analysis (Deep CCA) model; 2) self-expressive layer; 3) Deep CCA decoders. The Deep CCA model consists of convolutional encoders and correlation constraint. Convolutional encoders are used to obtain the latent representations of cross-modal data, while adding the correlation constraint for the latent representations can make full use of the information of the inter-modal data. Furthermore, self-expressive layer works on latent representations and constrain it perform self-expression properties, which makes the shared coefficient matrix could capture the hierarchical intra-modal correlations of each modality. Then Deep CCA decoders reconstruct data to ensure that the encoded features can preserve the structure of the original data. Experimental results on several real-world datasets demonstrate the proposed method outperforms the state-of-the-art methods.},
}