@article{Abel_2019,
  author   = {Abel, David},
  title    = {A Theory of State Abstraction for Reinforcement Learning},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {9876--9877},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.33019876},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/5075},
  abstract = {Reinforcement learning presents a challenging problem: agents must generalize experiences, efficiently explore the world, and learn from feedback that is delayed and often sparse, all while making use of a limited computational budget. Abstraction is essential to all of these endeavors. Through abstraction, agents can form concise models of both their surroundings and behavior, supporting effective decision making in diverse and complex environments. To this end, the goal of my doctoral research is to characterize the role abstraction plays in reinforcement learning, with a focus on state abstraction. I offer three desiderata articulating what it means for a state abstraction to be useful, and introduce classes of state abstractions that provide a partial path toward satisfying these desiderata. Collectively, I develop theory for state abstractions that can 1) preserve near-optimal behavior, 2) be learned and computed efficiently, and 3) can lower the time or data needed to make effective decisions. I close by discussing extensions of these results to an information theoretic paradigm of abstraction, and an extension to hierarchical abstraction that enjoys the same desirable properties.},
}