@article{Arous_Dolamic_Yang_Bhardwaj_Cuccu_Cudré-Mauroux_2021,
  title    = {MARTA: Leveraging Human Rationales for Explainable Text Classification},
  author   = {Arous, Ines and Dolamic, Ljiljana and Yang, Jie and Bhardwaj, Akansha and Cuccu, Giuseppe and Cudré-Mauroux, Philippe},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {35},
  number   = {7},
  pages    = {5868--5876},
  year     = {2021},
  month    = {May},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/16734},
  doi      = {10.1609/aaai.v35i7.16734},
  abstract = {Explainability is a key requirement for text classification in many application domains ranging from sentiment analysis to medical diagnosis or legal reviews. Existing methods often rely on ``attention'' mechanisms for explaining classification results by estimating the relative importance of input units. However, recent studies have shown that such mechanisms tend to mis-identify irrelevant input units in their explanation. In this work, we propose a hybrid human-AI approach that incorporates human rationales into attention-based text classification models to improve the explainability of classification results. Specifically, we ask workers to provide rationales for their annotation by selecting relevant pieces of text. We introduce MARTA, a Bayesian framework that jointly learns an attention-based model and the reliability of workers while injecting human rationales into model training. We derive a principled optimization algorithm based on variational inference with efficient updating rules for learning MARTA parameters. Extensive validation on real-world datasets shows that our framework significantly improves the state of the art both in terms of classification explainability and accuracy.}
}