@article{Zhang_Peng_Fu_Luo_2020,
  title        = {Learning 2D Temporal Adjacent Networks for Moment Localization with Natural Language},
  author       = {Zhang, Songyang and Peng, Houwen and Fu, Jianlong and Luo, Jiebo},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {34},
  number       = {07},
  pages        = {12870--12877},
  year         = {2020},
  month        = {Apr.},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6984},
  doi          = {10.1609/aaai.v34i07.6984},
  abstractNote = {We address the problem of retrieving a specific moment from an untrimmed video by a query sentence. This is a challenging problem because a target moment may take place in relation to other temporal moments in the untrimmed video. Existing methods cannot tackle this challenge well since they consider temporal moments individually and neglect the temporal dependencies. In this paper, we model the temporal relations between video moments by a two-dimensional map, where one dimension indicates the starting time of a moment and the other indicates the end time. This 2D temporal map can cover diverse video moments with different lengths, while representing their adjacent relations. Based on the 2D map, we propose a Temporal Adjacent Network (2D-TAN), a single-shot framework for moment localization. It is capable of encoding the adjacent temporal relation, while learning discriminative features for matching video moments with referring expressions. We evaluate the proposed 2D-TAN on three challenging benchmarks, i.e., Charades-STA, ActivityNet Captions, and TACoS, where our 2D-TAN outperforms the state-of-the-art.}
}