@article{Patil_Ghalme_Nair_Narahari_2020,
  title        = {Achieving Fairness in the Stochastic Multi-Armed Bandit Problem},
  volume       = {34},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/5986},
  DOI          = {10.1609/aaai.v34i04.5986},
  abstractNote = {We study an interesting variant of the stochastic multi-armed bandit problem, which we call the F\textsc{air}-MAB problem, where, in addition to the objective of maximizing the sum of expected rewards, the algorithm also needs to ensure that at any time, each arm is pulled at least a pre-specified fraction of times. We investigate the interplay between \emph{learning} and \emph{fairness} in terms of a pre-specified vector denoting the fractions of guaranteed pulls. We define a \emph{fairness-aware regret}, which we call \emph{r}-Regret, that takes into account the above fairness constraints and extends the conventional notion of regret in a natural way. Our primary contribution is to obtain a complete characterization of a class of F\textsc{air}-MAB algorithms via two parameters: the unfairness tolerance and the learning algorithm used as a black-box. For this class of algorithms, we provide a fairness guarantee that holds uniformly over time, irrespective of the choice of the learning algorithm. Further, when the learning algorithm is UCB1, we show that our algorithm achieves constant \emph{r}-Regret for a large enough time horizon. Finally, we analyze the \emph{cost of fairness} in terms of the conventional notion of regret. We conclude by experimentally validating our theoretical results.},
  number       = {04},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Patil, Vishakha and Ghalme, Ganesh and Nair, Vineet and Narahari, Y.},
  year         = {2020},
  month        = {Apr.},
  pages        = {5379--5386}
}