@article{Kartal_Hernandez-Leal_Taylor_2019,
  title        = {Terminal Prediction as an Auxiliary Task for Deep Reinforcement Learning},
  author       = {Kartal, Bilal and Hernandez-Leal, Pablo and Taylor, Matthew E.},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence and Interactive Digital Entertainment},
  volume       = {15},
  number       = {1},
  pages        = {38--44},
  year         = {2019},
  month        = oct,
  url          = {https://ojs.aaai.org/index.php/AIIDE/article/view/5222},
  doi          = {10.1609/aiide.v15i1.5222},
  abstractNote = {Deep reinforcement learning has achieved great successes in recent years, but there are still open challenges, such as convergence to locally optimal policies and sample inefficiency. In this paper, we contribute a novel self-supervised auxiliary task, i.e., Terminal Prediction (TP), estimating temporal closeness to terminal states for episodic tasks. The intuition is to help representation learning by letting the agent predict how close it is to a terminal state, while learning its control policy. Although TP could be integrated with multiple algorithms, this paper focuses on Asynchronous Advantage Actor-Critic (A3C) and demonstrating the advantages of A3C-TP. Our extensive evaluation includes: a set of Atari games, the BipedalWalker domain, and a mini version of the recently proposed multi-agent Pommerman game. Our results on Atari games and the BipedalWalker domain suggest that A3C-TP outperforms standard A3C in most of the tested domains and in others it has similar performance. In Pommerman, our proposed method provides significant improvement both in learning efficiency and converging to better policies against different opponents.}
}