@article{Zhao_Liu_Chu_Lu_Yu_2021,
  title={Joint Color-irrelevant Consistency Learning and Identity-aware Modality Adaptation for Visible-infrared Cross Modality Person Re-identification},
  author={Zhao, Zhiwei and Liu, Bin and Chu, Qi and Lu, Yan and Yu, Nenghai},
  journal={Proceedings of the AAAI Conference on Artificial Intelligence},
  volume={35},
  number={4},
  pages={3520--3528},
  year={2021},
  month={May},
  url={https://ojs.aaai.org/index.php/AAAI/article/view/16466},
  DOI={10.1609/aaai.v35i4.16466},
  abstractNote={Visible-infrared cross modality person re-identification (VI-ReID) is a core but challenging technology in 24-hour intelligent surveillance systems. Eliminating the large modality gap lies at the heart of VI-ReID. Conventional methods mainly focus on directly aligning the heterogeneous modalities into the same space. However, due to the unbalanced color information between visible and infrared images, the features of visible images tend to overfit the clothing color information, which is harmful to modality alignment. Besides, these methods mainly align the heterogeneous feature distributions at the dataset level while ignoring valuable identity information, which may cause feature misalignment for some identities and weaken the discrimination of the features. To tackle the above problems, we propose a novel approach for VI-ReID. It learns color-irrelevant features through color-irrelevant consistency learning (CICL) and aligns identity-level feature distributions via identity-aware modality adaptation (IAMA). CICL and IAMA are integrated into a joint learning framework and can promote each other. Extensive experiments on two popular datasets, SYSU-MM01 and RegDB, demonstrate the superiority and effectiveness of our approach over state-of-the-art methods.}
}