@article{Carreira-Perpiñán_Hada_2021,
  title        = {Counterfactual Explanations for Oblique Decision Trees: Exact, Efficient Algorithms},
  author       = {Carreira-Perpiñán, Miguel Á. and Hada, Suryabhan Singh},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {8},
  pages        = {6903--6911},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/16851},
  doi          = {10.1609/aaai.v35i8.16851},
  abstractNote = {We consider counterfactual explanations, the problem of minimally adjusting features in a source input instance so that it is classified as a target class under a given classifier. This has become a topic of recent interest as a way to query a trained model and suggest possible actions to overturn its decision. Mathematically, the problem is formally equivalent to that of finding adversarial examples, which also has attracted significant attention recently. Most work on either counterfactual explanations or adversarial examples has focused on differentiable classifiers, such as neural nets. We focus on classification trees, both axis-aligned and oblique (having hyperplane splits). Although here the counterfactual optimization problem is nonconvex and nondifferentiable, we show that an exact solution can be computed very efficiently, even with high-dimensional feature vectors and with both continuous and categorical features, and demonstrate it in different datasets and settings. The results are particularly relevant for finance, medicine or legal applications, where interpretability and counterfactual explanations are particularly important.}
}