@article{Topin_Veloso_2019,
  title={Generation of Policy-Level Explanations for Reinforcement Learning},
  volume={33},
  url={https://ojs.aaai.org/index.php/AAAI/article/view/4097},
  DOI={10.1609/aaai.v33i01.33012514},
  abstractNote={Though reinforcement learning has greatly benefited from the incorporation of neural networks, the inability to verify the correctness of such systems limits their use. Current work in explainable deep learning focuses on explaining only a single decision in terms of input features, making it unsuitable for explaining a sequence of decisions. To address this need, we introduce Abstracted Policy Graphs, which are Markov chains of abstract states. This representation concisely summarizes a policy so that individual decisions can be explained in the context of expected future transitions. Additionally, we propose a method to generate these Abstracted Policy Graphs for deterministic policies given a learned value function and a set of observed transitions, potentially off-policy transitions used during training. Since no restrictions are placed on how the value function is generated, our method is compatible with many existing reinforcement learning methods. We prove that the worst-case time complexity of our method is quadratic in the number of features and linear in the number of provided transitions, $O(|F|^2 |\mathit{tr\,samples}|)$. By applying our method to a family of domains, we show that our method scales well in practice and produces Abstracted Policy Graphs which reliably capture relationships within these domains.},
  number={01},
  journal={Proceedings of the AAAI Conference on Artificial Intelligence},
  author={Topin, Nicholay and Veloso, Manuela},
  year={2019},
  month=jul,
  pages={2514--2521}
}