@article{Chen_Jiang_2019,
  title={Motion Guided Spatial Attention for Video Captioning},
  author={Chen, Shaoxiang and Jiang, Yu-Gang},
  journal={Proceedings of the AAAI Conference on Artificial Intelligence},
  volume={33},
  number={01},
  pages={8191--8198},
  year={2019},
  month={Jul.},
  DOI={10.1609/aaai.v33i01.33018191},
  url={https://ojs.aaai.org/index.php/AAAI/article/view/4829},
  abstractNote={Sequence-to-sequence models incorporated with attention mechanisms have shown promising improvements on video captioning. While there is rich information both inside and between frames, spatial attention is rarely explored and motion information is usually handled by 3D-CNNs as just another modality for fusion. On the other hand, research on human perception suggests that apparent motion can attract attention. Motivated by this, we aim to learn spatial attention on video frames under the guidance of motion information for caption generation. We present a novel video captioning framework utilizing Motion Guided Spatial Attention (MGSA). The proposed MGSA exploits the motion between video frames by learning spatial attention from stacked optical flow images with a custom CNN. To further relate the spatial attention maps of video frames, we design a Gated Attention Recurrent Unit (GARU) to adaptively incorporate previous attention maps. The whole framework can be trained in an end-to-end manner. We evaluate our approach on two benchmark datasets, MSVD and MSR-VTT. The experiments show that our model generates better video representations, and state-of-the-art results are obtained under popular evaluation metrics such as BLEU@4, CIDEr, and METEOR.}
}