@article{Bansal_Nushi_Kamar_Lasecki_Weld_Horvitz_2019,
  author   = {Bansal, Gagan and Nushi, Besmira and Kamar, Ece and Lasecki, Walter S. and Weld, Daniel S. and Horvitz, Eric},
  title    = {Beyond Accuracy: The Role of Mental Models in {Human-AI} Team Performance},
  journal  = {Proceedings of the {AAAI} Conference on Human Computation and Crowdsourcing},
  volume   = {7},
  number   = {1},
  pages    = {2--11},
  year     = {2019},
  month    = oct,
  doi      = {10.1609/hcomp.v7i1.5285},
  url      = {https://ojs.aaai.org/index.php/HCOMP/article/view/5285},
  abstract = {Decisions made by human-AI teams (\emph{e.g.}, AI-advised humans) are increasingly common in high-stakes domains such as healthcare, criminal justice, and finance. Achieving high \emph{team} performance depends on more than just the accuracy of the AI system: Since the human and the AI may have different expertise, the highest team performance is often reached when they both know how and when to complement one another. We focus on a factor that is crucial to supporting such complementary: the human's mental model of the AI capabilities, specifically the AI system's \emph{error boundary} (\emph{i.e.} knowing ``When does the AI err?''). Awareness of this lets the human decide when to accept or override the AI's recommendation. We highlight two key properties of an AI's error boundary, \emph{parsimony} and \emph{stochasticity}, and a property of the task, \emph{dimensionality}. We show experimentally how these properties affect humans' mental models of AI capabilities and the resulting team performance. We connect our evaluations to related work and propose goals, beyond accuracy, that merit consideration during model selection and optimization to improve overall human-AI team performance.},
}