@article{Wu_Kong_Yang_Kong_Zhang_Yu_Li_Liu_2021,
  title        = {LB-DESPOT: Efficient Online POMDP Planning Considering Lower Bound in Action Selection (Student Abstract)},
  author       = {Wu, Chenyang and Kong, Rui and Yang, Guoyu and Kong, Xianghan and Zhang, Zongzhang and Yu, Yang and Li, Dong and Liu, Wulong},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {18},
  pages        = {15927--15928},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/17960},
  doi          = {10.1609/aaai.v35i18.17960},
  abstractNote = {A partially observable Markov decision process (POMDP) is an extension of the MDP that handles state uncertainty by specifying the probability of receiving a particular observation given the current state. DESPOT is one of the most popular scalable online planning algorithms for POMDPs; it significantly reduces the size of the decision tree while deriving a near-optimal policy by considering only $K$ sampled scenarios. Nevertheless, DESPOT has a gap in its action-selection criterion between planning and execution: during planning, it repeatedly expands the action with the highest upper bound, whereas once planning ends, the action with the highest lower bound is chosen for execution. We propose LB-DESPOT to alleviate this issue by utilizing the lower bound when selecting an action branch to expand. Empirically, our method attains better performance than DESPOT and POMCP, another state-of-the-art online planner, on several challenging POMDP benchmark tasks.}
}