@article{Brafman_DeGiacomo_Patrizi_2018,
  title    = {{LTLf/LDLf} Non-Markovian Rewards},
  author   = {Brafman, Ronen and De Giacomo, Giuseppe and Patrizi, Fabio},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {32},
  number   = {1},
  year     = {2018},
  month    = apr,
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/11572},
  doi      = {10.1609/aaai.v32i1.11572},
  abstract = {In Markov Decision Processes (MDPs), the reward obtained in a state is Markovian, i.e., depends on the last state and action. This dependency makes it difficult to reward more interesting long-term behaviors, such as always closing a door after it has been opened, or providing coffee only following a request. Extending MDPs to handle non-Markovian reward functions was the subject of two previous lines of work. Both use LTL variants to specify the reward function and then compile the new model back into a Markovian model. Building on recent progress in temporal logics over finite traces, we adopt LDLf for specifying non-Markovian rewards and provide an elegant automata construction for building a Markovian model, which extends that of previous work and offers strong minimality and compositionality guarantees.}
}