% AAAI-22 paper on fairness measures/debiasing under right-censored labels.
@article{Zhang_Weiss_2022,
  author   = {Zhang, Wenbin and Weiss, Jeremy C.},
  title    = {Longitudinal Fairness with Censorship},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {36},
  number   = {11},
  pages    = {12235--12243},
  year     = {2022},
  month    = jun,
  doi      = {10.1609/aaai.v36i11.21484},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/21484},
  abstract = {Recent works in artificial intelligence fairness attempt to mitigate discrimination by proposing constrained optimization programs that achieve parity for some fairness statistic. Most assume availability of the class label, which is impractical in many real-world applications such as precision medicine, actuarial analysis and recidivism prediction. Here we consider fairness in longitudinal right-censored environments, where the time to event might be unknown, resulting in censorship of the class label and inapplicability of existing fairness studies. We devise applicable fairness measures, propose a debiasing algorithm, and provide necessary theoretical constructs to bridge fairness with and without censorship for these important and socially-sensitive tasks. Our experiments on four censored datasets confirm the utility of our approach.},
}