@article{Fu_Zhang_2019,
  author   = {Fu, Chengzhen and Zhang, Yan},
  title    = {{EA} Reader: Enhance Attentive Reader for Cloze-Style Question Answering via Multi-Space Context Fusion},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {1},
  pages    = {6375--6382},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.33016375},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/4600},
  abstract = {Query-document semantic interactions are essential for the success of many cloze-style question answering models. Recently, researchers have proposed several attention-based methods to predict the answer by focusing on appropriate subparts of the context document. In this paper, we design a novel module to produce the query-aware context vector, named Multi-Space based Context Fusion (MSCF), with the following considerations: (1) interactions are applied across multiple latent semantic spaces; (2) attention is measured at bit level, not at token level. Moreover, we extend MSCF to the multi-hop architecture. This unified model is called Enhanced Attentive Reader (EA Reader). During the iterative inference process, the reader is equipped with a novel memory update rule and maintains the understanding of documents through \emph{read}, \emph{update} and \emph{write} operations. We conduct extensive experiments on four real-world datasets. Our results demonstrate that EA Reader outperforms state-of-the-art models.},
}