@article{Zhang_Yang_An_Zhang_2021, title={Coordination Between Individual Agents in Multi-Agent Reinforcement Learning}, volume={35}, url={https://ojs.aaai.org/index.php/AAAI/article/view/17357}, DOI={10.1609/aaai.v35i13.17357}, abstractNote={Existing multi-agent reinforcement learning (MARL) methods for determining coordination between agents focus on either global-level or neighborhood-level coordination. However, the problem of coordination between individual agents remains to be solved. Analyzing agents' roles and the correlation between individual agents is crucial for learning an optimal coordinated policy in unknown multi-agent environments. To this end, in this paper we propose an agent-level coordination based MARL method. Specifically, our method includes two parts: the first is correlation analysis between individual agents based on the Pearson, Spearman, and Kendall correlation coefficients; the second is an agent-level coordinated training framework in which communication messages between weakly correlated agents are dropped out and a correlation-based reward function is built. The proposed method is verified in four mixed cooperative-competitive environments. The experimental results show that the proposed method outperforms state-of-the-art MARL methods and can measure the correlation between individual agents accurately.}, number={13}, journal={Proceedings of the AAAI Conference on Artificial Intelligence}, author={Zhang, Yang and Yang, Qingyu and An, Dou and Zhang, Chengwei}, year={2021}, month={May}, pages={11387-11394} }