@article{Tang_Chen_Wang_Zeng_2022,
  title        = {Not All Voxels Are Equal: Semantic Scene Completion from the Point-Voxel Perspective},
  author       = {Tang, Jiaxiang and Chen, Xiaokang and Wang, Jingbo and Zeng, Gang},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {36},
  number       = {2},
  pages        = {2352--2360},
  year         = {2022},
  month        = {June},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/20134},
  doi          = {10.1609/aaai.v36i2.20134},
  abstractNote = {In this paper we revisit Semantic Scene Completion (SSC), the task of predicting the semantic and occupancy representation of 3D scenes. Methods for this task are typically based on voxelized scene representations. Although voxel representations preserve the local structure of the scene, these methods suffer from heavy computational redundancy due to visible empty voxels as the network goes deeper. To address this dilemma, we propose a novel point-voxel aggregation network for this task. We first convert the voxelized scenes to point clouds by removing these visible empty voxels and adopt a deep point stream to capture semantic information from the scene efficiently. Meanwhile, a light-weight voxel stream containing only two 3D convolution layers preserves the local structure of the voxelized scenes. Furthermore, we design an anisotropic voxel aggregation operator to fuse structural details from the voxel stream into the point stream, and a semantic-aware propagation module that uses semantic labels to enhance the up-sampling process in the point stream. We demonstrate that our model surpasses the state of the art on two benchmarks by a large margin, with only depth images as input.},
}
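
The abstract above describes a two-stream design: a deep point stream over the non-empty voxels, a light-weight voxel stream of two 3D convolutions, and a voxel-to-point fusion step. The sketch below is a minimal, hypothetical PyTorch layout of that idea, not the authors' implementation: the class name PointVoxelSketch, the hidden width, and the trilinear grid_sample fusion (a simple stand-in for the paper's anisotropic voxel aggregation and semantic-aware propagation) are all assumptions.

```python
# Minimal sketch of a two-stream point-voxel network, assuming a dense
# occupancy grid and point coordinates normalized to [-1, 1].
# Illustrative only; the fusion here is plain trilinear sampling,
# not the paper's anisotropic voxel aggregation.
import torch
import torch.nn as nn
import torch.nn.functional as F


class PointVoxelSketch(nn.Module):
    def __init__(self, num_classes=12, point_dim=3, hidden=64):
        super().__init__()
        # Light-weight voxel stream: only two 3D convolution layers,
        # as described in the abstract.
        self.voxel_stream = nn.Sequential(
            nn.Conv3d(1, hidden, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv3d(hidden, hidden, kernel_size=3, padding=1),
        )
        # Point stream: a shared MLP over per-point coordinates
        # (a deeper PointNet-style backbone in practice).
        self.point_stream = nn.Sequential(
            nn.Linear(point_dim, hidden),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, hidden),
            nn.ReLU(inplace=True),
        )
        self.head = nn.Linear(2 * hidden, num_classes)

    def forward(self, voxels, points):
        # voxels: (B, 1, D, H, W) occupancy grid
        # points: (B, N, 3) coordinates of non-empty voxels in [-1, 1]
        voxel_feat = self.voxel_stream(voxels)            # (B, C, D, H, W)
        point_feat = self.point_stream(points)            # (B, N, C)

        # Sample voxel features at the point locations and fuse them
        # into the point stream by concatenation.
        grid = points.view(points.size(0), -1, 1, 1, 3)   # (B, N, 1, 1, 3)
        sampled = F.grid_sample(voxel_feat, grid, align_corners=True)
        sampled = sampled.view(voxel_feat.size(0), voxel_feat.size(1), -1)
        sampled = sampled.permute(0, 2, 1)                # (B, N, C)

        fused = torch.cat([point_feat, sampled], dim=-1)  # (B, N, 2C)
        return self.head(fused)                           # per-point logits


if __name__ == "__main__":
    model = PointVoxelSketch()
    occ = (torch.rand(1, 1, 16, 16, 16) > 0.9).float()    # sparse occupancy
    pts = torch.rand(1, 1024, 3) * 2 - 1                  # normalized coords
    print(model(occ, pts).shape)                          # torch.Size([1, 1024, 12])
```

The design choice the abstract motivates is visible in the shapes: the expensive per-point work scales with the number of non-empty voxels N rather than the full D x H x W grid, while the two-layer voxel stream keeps local structure cheaply.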