@article{Kim_Cho_Sung_2019,
  title    = {Message-Dropout: An Efficient Training Method for Multi-Agent Deep Reinforcement Learning},
  author   = {Kim, Woojun and Cho, Myungsik and Sung, Youngchul},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {6079--6086},
  year     = {2019},
  month    = {Jul.},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/4564},
  doi      = {10.1609/aaai.v33i01.33016079},
  abstract = {In this paper, we propose a new learning technique named \emph{message-dropout} to improve the performance of multi-agent deep reinforcement learning under two application scenarios: 1) classical multi-agent reinforcement learning with direct message communication among agents and 2) centralized training with decentralized execution. In the first application scenario, in which direct message communication among agents is allowed, the message-dropout technique drops out the received messages from other agents in a block-wise manner with a certain probability in the training phase and compensates for this effect by multiplying the weights of the dropped-out block units with a correction probability. The applied message-dropout technique effectively handles the increased input dimension in multi-agent reinforcement learning with communication and makes learning robust against communication errors in the execution phase. In the second application scenario of centralized training with decentralized execution, we particularly consider the application of the proposed message-dropout to Multi-Agent Deep Deterministic Policy Gradient (MADDPG), which uses a centralized critic to train a decentralized actor for each agent. We evaluate the proposed message-dropout technique on several games, and numerical results show that message-dropout with a proper dropout rate significantly improves reinforcement learning performance in terms of both training speed and steady-state performance in the execution phase.}
}