@article{Lyu_Yang_Liu_Yoon_2019,
  title    = {Logic-Based Sequential Decision-Making},
  author   = {Lyu, Daoming and Yang, Fangkai and Liu, Bo and Yoon, Daesub},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {9995--9996},
  year     = {2019},
  month    = jul,
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/5134},
  doi      = {10.1609/aaai.v33i01.33019995},
  abstract = {Deep reinforcement learning (DRL) has gained great success by learning directly from high-dimensional sensory inputs, yet is notorious for the lack of interpretability. Interpretability of the subtasks is critical in hierarchical decision-making as it increases the transparency of black-box-style DRL approach and helps the RL practitioners to understand the high-level behavior of the system better. In this paper, we introduce symbolic planning into DRL and propose a framework of Symbolic Deep Reinforcement Learning (SDRL) that can handle both high-dimensional sensory inputs and symbolic planning. The task-level interpretability is enabled by relating symbolic actions to options. This framework features a planner--controller--meta-controller architecture, which takes charge of subtask scheduling, data-driven subtask learning, and subtask evaluation, respectively. The three components cross-fertilize each other and eventually converge to an optimal symbolic plan along with the learned subtasks, bringing together the advantages of long-term planning capability with symbolic knowledge and end-to-end reinforcement learning directly from a high-dimensional sensory input. Experimental results validate the interpretability of subtasks, along with improved data efficiency compared with state-of-the-art approaches.}
}