@article{Jing_Si_Wang_Wang_Wang_Tan_2020,
  author   = {Jing, Ya and Si, Chenyang and Wang, Junbo and Wang, Wei and Wang, Liang and Tan, Tieniu},
  title    = {Pose-Guided Multi-Granularity Attention Network for Text-Based Person Search},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {34},
  number   = {7},
  pages    = {11189--11196},
  year     = {2020},
  month    = apr,
  doi      = {10.1609/aaai.v34i07.6777},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/6777},
  abstract = {Text-based person search aims to retrieve the corresponding person images in an image database by virtue of a describing sentence about the person, which poses great potential for various applications such as video surveillance. Extracting visual contents corresponding to the human description is the key to this cross-modal matching problem. Moreover, correlated images and descriptions involve different granularities of semantic relevance, which is usually ignored in previous methods. To exploit the multilevel corresponding visual contents, we propose a pose-guided multi-granularity attention network (PMA). Firstly, we propose a coarse alignment network (CA) to select the related image regions to the global description by a similarity-based attention. To further capture the phrase-related visual body part, a fine-grained alignment network (FA) is proposed, which employs pose information to learn latent semantic alignment between visual body part and textual noun phrase. To verify the effectiveness of our model, we perform extensive experiments on the CUHK Person Description Dataset (CUHK-PEDES) which is currently the only available dataset for text-based person search. Experimental results show that our approach outperforms the state-of-the-art methods by 15 \% in terms of the top-1 metric.},
}