@article{Lyu_Liu_Wang_Kong_Liu_Liu_Chen_Yuan_2021,
  title        = {HR-Depth: High Resolution Self-Supervised Monocular Depth Estimation},
  volume       = {35},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/16329},
  DOI          = {10.1609/aaai.v35i3.16329},
  abstractNote = {Self-supervised learning shows great potential in monocular depth estimation, using image sequences as the only source of supervision. Although people have tried to use high-resolution images for depth estimation, the accuracy of prediction has not been significantly improved. In this work, we find that the core reason is inaccurate depth estimation in large gradient regions, making the bilinear interpolation error gradually disappear as the resolution increases. To obtain more accurate depth estimation in large gradient regions, it is necessary to obtain high-resolution features with spatial and semantic information. Therefore, we present an improved DepthNet, HR-Depth, with two effective strategies: (1) re-designing the skip-connections in DepthNet to obtain better high-resolution features and (2) proposing a feature fusion Squeeze-and-Excitation (fSE) module to fuse features more efficiently. Using ResNet-18 as the encoder, HR-Depth surpasses all previous state-of-the-art (SoTA) methods with the fewest parameters at both high and low resolution. Moreover, previous SoTA methods are based on fairly complex and deep networks with a mass of parameters, which limits their real applications. Thus we also construct a lightweight network which uses MobileNetV3 as the encoder. Experiments show that the lightweight network can perform on par with many large models like Monodepth2 at high resolution with only 20% of the parameters. All codes and models will be available at https://github.com/shawLyu/HR-Depth.},
  number       = {3},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Lyu, Xiaoyang and Liu, Liang and Wang, Mengmeng and Kong, Xin and Liu, Lina and Liu, Yong and Chen, Xinxin and Yuan, Yi},
  year         = {2021},
  month        = {May},
  pages        = {2294--2301}
}