@article{Zhou_Small_2021,
  author   = {Zhou, Li and Small, Kevin},
  title    = {Inverse Reinforcement Learning with Natural Language Goals},
  journal  = {Proceedings of the {AAAI} Conference on Artificial Intelligence},
  volume   = {35},
  number   = {12},
  pages    = {11116--11124},
  year     = {2021},
  month    = may,
  doi      = {10.1609/aaai.v35i12.17326},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/17326},
  abstract = {Humans generally use natural language to communicate task requirements to each other. Ideally, natural language should also be usable for communicating goals to autonomous machines (e.g., robots) to minimize friction in task specification. However, understanding and mapping natural language goals to sequences of states and actions is challenging. Specifically, existing work along these lines has encountered difficulty in generalizing learned policies to new natural language goals and environments. In this paper, we propose a novel adversarial inverse reinforcement learning algorithm to learn a language-conditioned policy and reward function. To improve generalization of the learned policy and reward function, we use a variational goal generator to relabel trajectories and sample diverse goals during training. Our algorithm outperforms multiple baselines by a large margin on a vision-based natural language instruction following dataset (Room-2-Room), demonstrating a promising advance in enabling the use of natural language instructions in specifying agent goals.},
}