@article{Wang_Xu_Zhang_Wang_Liu_2020,
  title={Consistent Video Style Transfer via Compound Regularization},
  volume={34},
  number={07},
  url={https://ojs.aaai.org/index.php/AAAI/article/view/6905},
  DOI={10.1609/aaai.v34i07.6905},
  abstractNote={Recently, neural style transfer has drawn much attention and significant progress has been made, especially for image style transfer. However, flexible and consistent style transfer for videos remains a challenging problem. Existing training strategies, which either use a significant amount of video data with optical flow or introduce single-frame regularizers, have limited performance on real videos. In this paper, we propose a novel interpretation of temporal consistency, based on which we analyze the drawbacks of existing training strategies and derive a new compound regularization. Experimental results show that the proposed regularization better balances spatial and temporal performance, which supports our modeling. Combined with the new cost formula, we design a zero-shot video style transfer framework. Moreover, for better feature migration, we introduce a new module that dynamically adjusts inter-channel distributions. Quantitative and qualitative results demonstrate the superiority of our method over other state-of-the-art style transfer methods. Our project is publicly available at: https://daooshee.github.io/CompoundVST/.},
  journal={Proceedings of the AAAI Conference on Artificial Intelligence},
  author={Wang, Wenjing and Xu, Jizheng and Zhang, Li and Wang, Yue and Liu, Jiaying},
  year={2020},
  month={Apr.},
  pages={12233--12240}
}