@article{Wright_Qiao_Loscalzo_Yu_2015,
  title={Improving Approximate Value Iteration with Complex Returns by Bounding},
  volume={29},
  url={https://ojs.aaai.org/index.php/AAAI/article/view/9568},
  DOI={10.1609/aaai.v29i1.9568},
  abstractNote={Approximate value iteration (AVI) is a widely used technique in reinforcement learning. Most AVI methods do not take full advantage of the sequential relationship between samples within a trajectory in deriving value estimates, due to the challenges in dealing with the inherent bias and variance in the $n$-step returns. We propose a bounding method which uses a negatively biased but relatively low variance estimator generated from a complex return to provide a lower bound on the observed value of a traditional one-step return estimator. In addition, we develop a new Bounded FQI algorithm, which efficiently incorporates the bounding method into an AVI framework. Experiments show that our method produces more accurate value estimates than existing approaches, resulting in improved policies.},
  number={1},
  journal={Proceedings of the AAAI Conference on Artificial Intelligence},
  author={Wright, Robert and Qiao, Xingye and Loscalzo, Steven and Yu, Lei},
  year={2015},
  month={Feb.}
}