@article{Yang_Vereshchaka_Zhou_Chen_Dong_2020,
  title        = {Variational Adversarial Kernel Learned Imitation Learning},
  author       = {Yang, Fan and Vereshchaka, Alina and Zhou, Yufan and Chen, Changyou and Dong, Wen},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {34},
  number       = {04},
  pages        = {6599--6606},
  year         = {2020},
  month        = apr,
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6135},
  doi          = {10.1609/aaai.v34i04.6135},
  abstractNote = {Imitation learning refers to the problem where an agent learns to perform a task through observing and mimicking expert demonstrations, without knowledge of the cost function. State-of-the-art imitation learning algorithms reduce imitation learning to distribution-matching problems by minimizing some distance measures. However, the distance measure may not always provide informative signals for a policy update. To this end, we propose the variational adversarial kernel learned imitation learning (VAKLIL), which measures the distance using the maximum mean discrepancy with variational kernel learning. Our method optimizes over a large cost-function space and is sample efficient and robust to overfitting. We demonstrate the performance of our algorithm through benchmarking with four state-of-the-art imitation learning algorithms over five high-dimensional control tasks, and a complex transportation control task. Experimental results indicate that our algorithm significantly outperforms related algorithms in all scenarios.}
}