@article{Hsu_Chen_Hsu_Li_Lin_Huang_Ku_2020,
  title        = {Knowledge-Enriched Visual Storytelling},
  volume       = {34},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6303},
  DOI          = {10.1609/aaai.v34i05.6303},
  abstractNote = {Stories are diverse and highly personalized, resulting in a large possible output space for story generation. Existing end-to-end approaches produce monotonous stories because they are limited to the vocabulary and knowledge in a single training dataset. This paper introduces KG-Story, a three-stage framework that allows the story generation model to take advantage of external Knowledge Graphs to produce interesting stories. KG-Story distills a set of representative words from the input prompts, enriches the word set by using external knowledge graphs, and finally generates stories based on the enriched word set. This distill-enrich-generate framework allows the use of external resources not only for the enrichment phase, but also for the distillation and generation phases. In this paper, we show the superiority of KG-Story for visual storytelling, where the input prompt is a sequence of five photos and the output is a short story. Per the human ranking evaluation, stories generated by KG-Story are on average ranked better than those of the state-of-the-art systems. Our code and output stories are available at https://github.com/zychen423/KE-VIST.},
  number       = {05},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Hsu, Chao-Chun and Chen, Zi-Yuan and Hsu, Chi-Yang and Li, Chih-Chia and Lin, Tzu-Yuan and Huang, Ting-Hao and Ku, Lun-Wei},
  year         = {2020},
  month        = {Apr.},
  pages        = {7952-7960}
}