@article{Tanaka_Nishida_Yoshida_2021,
  title    = {{VisualMRC}: Machine Reading Comprehension on Document Images},
  author   = {Tanaka, Ryota and Nishida, Kyosuke and Yoshida, Sen},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {35},
  number   = {15},
  pages    = {13878--13888},
  year     = {2021},
  month    = {May},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/17635},
  doi      = {10.1609/aaai.v35i15.17635},
  abstract = {Recent studies on machine reading comprehension have focused on text-level understanding but have not yet reached the level of human understanding of the visual layout and content of real-world documents. In this study, we introduce a new visual machine reading comprehension dataset, named VisualMRC, wherein given a question and a document image, a machine reads and comprehends texts in the image to answer the question in natural language. Compared with existing visual question answering datasets that contain texts in images, VisualMRC focuses more on developing natural language understanding and generation abilities. It contains 30,000+ pairs of a question and an abstractive answer for 10,000+ document images sourced from multiple domains of webpages. We also introduce a new model that extends existing sequence-to-sequence models, pre-trained with large-scale text corpora, to take into account the visual layout and content of documents. Experiments with VisualMRC show that this model outperformed the base sequence-to-sequence models and a state-of-the-art VQA model. However, its performance is still below that of humans on most automatic evaluation metrics. The dataset will facilitate research aimed at connecting vision and language understanding.}
}