@article{Balakrishnan_Bouneffouf_Mattei_Rossi_2019,
  author   = {Balakrishnan, Avinash and Bouneffouf, Djallel and Mattei, Nicholas and Rossi, Francesca},
  title    = {Incorporating Behavioral Constraints in Online {AI} Systems},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {3--11},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.33013},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/3762},
  abstract = {AI systems that learn through reward feedback about the actions they take are increasingly deployed in domains that have significant impact on our daily life. However, in many cases the online rewards should not be the only guiding criteria, as there are additional constraints and/or priorities imposed by regulations, values, preferences, or ethical principles. We detail a novel online agent that learns a set of behavioral constraints by observation and uses these learned constraints as a guide when making decisions in an online setting while still being reactive to reward feedback. To define this agent, we propose to adopt a novel extension to the classical contextual multi-armed bandit setting and we provide a new algorithm called Behavior Constrained Thompson Sampling (BCTS) that allows for online learning while obeying exogenous constraints. Our agent learns a constrained policy that implements the observed behavioral constraints demonstrated by a teacher agent, and then uses this constrained policy to guide the reward-based online exploration and exploitation. We characterize the upper bound on the expected regret of the contextual bandit algorithm that underlies our agent and provide a case study with real world data in two application domains. Our experiments show that the designed agent is able to act within the set of behavior constraints without significantly degrading its overall reward performance.},
}