@article{Xu_Li_Xu_He_2020,
  title        = {GDFace: Gated Deformation for Multi-View Face Image Synthesis},
  author       = {Xu, Xuemiao and Li, Keke and Xu, Cheng and He, Shengfeng},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {34},
  number       = {07},
  pages        = {12532--12540},
  year         = {2020},
  month        = {Apr.},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6942},
  doi          = {10.1609/aaai.v34i07.6942},
  abstractNote = {Photorealistic multi-view face synthesis from a single image is an important but challenging problem. Existing methods mainly learn a texture mapping model from the source face to the target face. However, they fail to consider the internal deformation caused by the change of poses, leading to the unsatisfactory synthesized results for large pose variations. In this paper, we propose a Gated Deformable Face Synthesis Network to model the deformation of faces that aids the synthesis of the target face image. Specifically, we propose a dual network that consists of two modules. The first module estimates the deformation of two views in the form of convolution offsets according to the input and target poses. The second one, on the other hand, leverages the predicted deformation offsets to create the target face image. In this way, pose changes are explicitly modeled in the face generator to cope with geometric transformation, by adaptively focusing on pertinent regions of the source image. To compensate offset estimation errors, we introduce a soft-gating mechanism that enables adaptive fusion between deformable features and primitive features. Extensive experimental results on five widely-used benchmarks show that our approach performs favorably against the state-of-the-arts on multi-view face synthesis, especially for large pose changes.}
}