@article{Ma_Su_Wang_Lu_2020,
  title        = {FPETS: Fully Parallel End-to-End Text-to-Speech System},
  volume       = {34},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6365},
  DOI          = {10.1609/aaai.v34i05.6365},
  abstractNote = {End-to-end text-to-speech (TTS) systems can greatly improve the quality of synthesised speech, but they usually suffer from high latency due to their auto-regressive structure, and the synthesised speech may also exhibit error modes such as repeated words, mispronunciations, and skipped words. In this paper, we propose a novel non-autoregressive, fully parallel end-to-end TTS system (FPETS). It utilizes a new alignment model and the recently proposed U-shape convolutional structure, UFANS. Unlike an RNN, UFANS can capture long-term information in a fully parallel manner. Trainable position encoding and a two-step training strategy are used to learn better alignments. Experimental results show that FPETS exploits the power of parallel computation and achieves a significant inference speed-up compared with state-of-the-art end-to-end TTS systems. More specifically, FPETS is 600X faster than Tacotron2, 50X faster than DCTTS, and 10X faster than Deep Voice3, and it generates audio with equal or better quality and fewer errors than other systems. As far as we know, FPETS is the first end-to-end TTS system that is fully parallel.},
  number       = {05},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Ma, Dabiao and Su, Zhiba and Wang, Wenxuan and Lu, Yuhao},
  year         = {2020},
  month        = {Apr.},
  pages        = {8457--8463}
}