@article{Xu_Wan_Ye_Zou_Cao_2022,
  author   = {Xu, Shuangjie and Wan, Rui and Ye, Maosheng and Zou, Xiaoyi and Cao, Tongyi},
  title    = {Sparse Cross-Scale Attention Network for Efficient {LiDAR} Panoptic Segmentation},
  journal  = {Proceedings of the {AAAI} Conference on Artificial Intelligence},
  volume   = {36},
  number   = {3},
  pages    = {2920--2928},
  year     = {2022},
  month    = jun,
  doi      = {10.1609/aaai.v36i3.20197},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/20197},
  abstract = {Two major challenges of 3D LiDAR Panoptic Segmentation (PS) are that point clouds of an object are surface-aggregated and thus hard to model the long-range dependency especially for large instances, and that objects are too close to separate each other. Recent literature addresses these problems by time-consuming grouping processes such as dual-clustering, mean-shift offsets and etc., or by bird-eye-view (BEV) dense centroid representation that downplays geometry. However, the long-range geometry relationship has not been sufficiently modeled by local feature learning from the above methods. To this end, we present SCAN, a novel sparse cross-scale attention network to first align multi-scale sparse features with global voxel-encoded attention to capture the long-range relationship of instance context, which is able to boost the regression accuracy of the over-segmented large objects. For the surface-aggregated points, SCAN adopts a novel sparse class-agnostic representation of instance centroids, which can not only maintain the sparsity of aligned features to solve the under-segmentation on small objects, but also reduce the computation amount of the network through sparse convolution. Our method outperforms previous methods by a large margin in the SemanticKITTI dataset for the challenging 3D PS task, achieving 1st place with a real-time inference speed.},
}