@article{Kang_Zhao_Li_Xing_2021,
  title        = {Exploration via State Influence Modeling},
  author       = {Kang, Yongxin and Zhao, Enmin and Li, Kai and Xing, Junliang},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {9},
  pages        = {8047--8054},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/16981},
  doi          = {10.1609/aaai.v35i9.16981},
  abstractNote = {This paper studies the challenging problem of reinforcement learning (RL) in hard exploration tasks with sparse rewards. It focuses on the exploration stage before the agent receives its first positive reward, a stage in which traditional RL algorithms with simple exploration strategies often perform poorly. Unlike previous methods that use an attribute of a single state as the intrinsic reward to encourage exploration, this work leverages the social influence between different states to enable more efficient exploration. It introduces a general intrinsic reward construction method to evaluate the social influence of states dynamically. Three kinds of social influence are introduced for a state: conformity, power, and authority. By measuring a state's social influence, agents quickly find the focus state during the exploration process. The proposed RL framework with state social influence evaluation works well in hard exploration tasks. Extensive experimental analyses and comparisons in Grid Maze and many hard exploration Atari 2600 games demonstrate its high exploration efficiency.}
}