@article{Yuan_Zhang_Bojun_Liang_2021,
  title        = {Simpson’s Bias in NLP Training},
  author       = {Yuan, Fei and Zhang, Longtu and Huang, Bojun and Liang, Yaobo},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {16},
  pages        = {14276--14283},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/17679},
  doi          = {10.1609/aaai.v35i16.17679},
  abstractNote = {In most machine learning tasks, we evaluate a model M on a given data population S by measuring a population-level metric F(S; M). Examples of such evaluation metrics F include precision/recall for (binary) recognition, the F1 score for multi-class classification, and the BLEU metric for language generation. On the other hand, the model M is trained by optimizing a sample-level loss G(S_t; M) at each learning step t, where S_t is a subset of S (a.k.a. the mini-batch). Popular choices of G include the cross-entropy loss, the Dice loss, and sentence-level BLEU scores. A fundamental assumption behind this paradigm is that the mean value of the sample-level loss G, averaged over all possible samples, effectively represents the population-level metric F of the task, i.e., that E[G(S_t; M)] ≈ F(S; M). In this paper, we systematically investigate this assumption in several NLP tasks. We show, both theoretically and experimentally, that some popular designs of the sample-level loss G may be inconsistent with the true population-level metric F of the task, so that models trained to optimize the former can be substantially sub-optimal with respect to the latter, a phenomenon we call Simpson’s bias, due to its deep connections with the classic paradox known as Simpson’s reversal paradox in statistics and the social sciences.}
}