@article{Zhang_Wu_Liu_Li_Zhou_Xu_2019,
  title        = {Regularizing Neural Machine Translation by Target-Bidirectional Agreement},
  author       = {Zhang, Zhirui and Wu, Shuangzhi and Liu, Shujie and Li, Mu and Zhou, Ming and Xu, Tong},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {33},
  number       = {01},
  pages        = {443--450},
  year         = {2019},
  month        = jul,
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/3816},
  doi          = {10.1609/aaai.v33i01.3301443},
  abstractNote = {Although Neural Machine Translation (NMT) has achieved remarkable progress in the past several years, most NMT systems still suffer from a fundamental shortcoming shared with other sequence generation tasks: errors made early in the generation process are fed back as inputs to the model and can be quickly amplified, harming subsequent sequence generation. To address this issue, we propose a novel model regularization method for NMT training that aims to improve the agreement between translations generated by left-to-right (L2R) and right-to-left (R2L) NMT decoders. This goal is achieved by introducing two Kullback-Leibler divergence regularization terms into the NMT training objective to reduce the mismatch between the output probabilities of the L2R and R2L models. In addition, we employ a joint training strategy that allows the L2R and R2L models to improve each other in an interactive update process. Experimental results show that our proposed method significantly outperforms state-of-the-art baselines on Chinese-English and English-German translation tasks.}
}