@article{Hu_Zhou_Li_2021,
  title        = {Hand-Model-Aware Sign Language Recognition},
  author       = {Hu, Hezhen and Zhou, Wengang and Li, Houqiang},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {2},
  pages        = {1558--1566},
  year         = {2021},
  month        = {May},
  doi          = {10.1609/aaai.v35i2.16247},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/16247},
  abstractNote = {Hand gestures play a dominant role in the expression of sign language. Current deep-learning based video sign language recognition (SLR) methods usually follow a data-driven paradigm under the supervision of the category label. However, these methods suffer from limited interpretability and may encounter overfitting due to limited sign data sources. In this paper, we introduce the hand prior and propose a new hand-model-aware framework for isolated SLR, with the modeled hand as the intermediate representation. We first transform the cropped hand sequence into a latent semantic feature. Then the hand model introduces the hand prior and provides a mapping from the semantic feature to a compact hand pose representation. Finally, the inference module enhances the spatio-temporal pose representation and performs the final recognition. Since current sign language datasets lack hand pose annotations, we further guide its learning with multiple weakly-supervised losses that constrain its spatial and temporal consistency. To validate the effectiveness of our method, we perform extensive experiments on four benchmark datasets: NMFs-CSL, SLR500, MSASL, and WLASL. Experimental results demonstrate that our method achieves state-of-the-art performance on all four benchmarks by a notable margin.}
}