@article{Liang_Guo_Li_Wang_Song_2019,
  author   = {Liang, Mingyang and Guo, Xiaoyang and Li, Hongsheng and Wang, Xiaogang and Song, You},
  title    = {Unsupervised Cross-Spectral Stereo Matching by Learning to Synthesize},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {8706--8713},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.33018706},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/4894},
  abstract = {Unsupervised cross-spectral stereo matching aims at recovering disparity given cross-spectral image pairs without any depth or disparity supervision. The estimated depth provides additional information complementary to original images, which can be helpful for other vision tasks such as tracking, recognition and detection. However, there are large appearance variations between images from different spectral bands, which is a challenge for cross-spectral stereo matching. Existing deep unsupervised stereo matching methods are sensitive to the appearance variations and do not perform well on cross-spectral data. We propose a novel unsupervised crossspectral stereo matching framework based on image-to-image translation. First, a style adaptation network transforms images across different spectral bands by cycle consistency and adversarial learning, during which appearance variations are minimized. Then, a stereo matching network is trained with image pairs from the same spectra using view reconstruction loss. At last, the estimated disparity is utilized to supervise the spectral translation network in an end-to-end way. Moreover, a novel style adaptation network F-cycleGAN is proposed to improve the robustness of spectral translation. Our method can tackle appearance variations and enhance the robustness of unsupervised cross-spectral stereo matching. Experimental results show that our method achieves good performance without using depth supervision or explicit semantic information.},
}