@article{Duan_Lin_Zou_Yuan_Qian_Zhang_2022,
  author       = {Duan, Yinglin and Lin, Yue and Zou, Zhengxia and Yuan, Yi and Qian, Zhehui and Zhang, Bohan},
  title        = {A Unified Framework for Real Time Motion Completion},
  journal      = {Proceedings of the {AAAI} Conference on Artificial Intelligence},
  volume       = {36},
  number       = {4},
  pages        = {4459--4467},
  year         = {2022},
  month        = jun,
  doi          = {10.1609/aaai.v36i4.20368},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/20368},
  abstractnote = {Motion completion, as a challenging and fundamental problem, is of great significance in film and game applications. For different motion completion application scenarios (in-betweening, in-filling, and blending), most previous methods deal with the completion problems with case-by-case methodology designs. In this work, we propose a simple but effective method to solve multiple motion completion problems under a unified framework and achieves a new state-of-the-art accuracy on LaFAN1 (+17% better than previous sota) under multiple evaluation settings. Inspired by the recent great success of self-attention-based transformer models, we consider the completion as a sequence-to-sequence prediction problem. Our method consists of three modules - a standard transformer encoder with self-attention that learns long-range dependencies of input motions, a trainable mixture embedding module that models temporal information and encodes different key-frame combinations in a unified form, and a new motion perceptual loss for better capturing high-frequency movements. Our method can predict multiple missing frames within a single forward propagation in real-time and get rid of the post-processing requirement. We also introduce a novel large-scale dance movement dataset for exploring the scaling capability of our method and its effectiveness in complex motion applications.},
}