@article{Zhang_Wei_Li_Wu_Zhu_Zhou_2021,
  title        = {Multi-modal Graph Fusion for Named Entity Recognition with Targeted Visual Guidance},
  author       = {Zhang, Dong and Wei, Suzhong and Li, Shoushan and Wu, Hanqian and Zhu, Qiaoming and Zhou, Guodong},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {16},
  pages        = {14347--14355},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/17687},
  DOI          = {10.1609/aaai.v35i16.17687},
  abstractNote = {Multi-modal named entity recognition (MNER) aims to discover named entities in free text and classify them into pre-defined types with images. However, dominant MNER models do not fully exploit fine-grained semantic correspondences between semantic units of different modalities, which have the potential to refine multi-modal representation learning. To deal with this issue, we propose a unified multi-modal graph fusion (UMGF) approach for MNER. Specifically, we first represent the input sentence and image using a unified multi-modal graph, which captures various semantic relationships between multi-modal semantic units (words and visual objects). Then, we stack multiple graph-based multi-modal fusion layers that iteratively perform semantic interactions to learn node representations. Finally, we achieve an attention-based multi-modal representation for each word and perform entity labeling with a CRF decoder. Experimentation on the two benchmark datasets demonstrates the superiority of our MNER model.}
}