@article{Li_Min_Shen_Carlson_Carin_2018,
  title        = {Video Generation From Text},
  author       = {Li, Yitong and Min, Martin and Shen, Dinghan and Carlson, David and Carin, Lawrence},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {32},
  number       = {1},
  year         = {2018},
  month        = apr,
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/12233},
  doi          = {10.1609/aaai.v32i1.12233},
  abstractNote = {Generating videos from text has proven to be a significant challenge for existing generative models. We tackle this problem by training a conditional generative model to extract both static and dynamic information from text. This is manifested in a hybrid framework, employing a Variational Autoencoder (VAE) and a Generative Adversarial Network (GAN). The static features, called "gist," are used to sketch text-conditioned background color and object layout structure. Dynamic features are considered by transforming input text into an image filter. To obtain a large amount of data for training the deep-learning model, we develop a method to automatically create a matched text-video corpus from publicly available online videos. Experimental results show that the proposed framework generates plausible and diverse short-duration smooth videos, while accurately reflecting the input text information. It significantly outperforms baseline models that directly adapt text-to-image generation procedures to produce videos. Performance is evaluated both visually and by adapting the inception score used to evaluate image generation in GANs.}
}