@article{Liu_Xu_Cao_Chen_Kang_2019,
  author   = {Liu, Xiaoming and Xu, Zhixiong and Cao, Lei and Chen, Xiliang and Kang, Kai},
  title    = {Deep Reinforcement Learning via Past-Success Directed Exploration},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {1},
  pages    = {9979--9980},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.33019979},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/5126},
  abstract = {The balance between exploration and exploitation has always been a core challenge in reinforcement learning. This paper proposes ``past-success exploration strategy combined with Softmax action selection'' (PSE-Softmax) as an adaptive control method for taking advantage of the characteristics of the online learning process of the agent to adapt exploration parameters dynamically. The proposed strategy is tested on OpenAI Gym with discrete and continuous control tasks, and the experimental results show that PSE-Softmax strategy delivers better performance than deep reinforcement learning algorithms with basic exploration strategies.},
}