@article{Wang_Wang_Huang_2021,
  title        = {Adaptive Algorithms for Multi-armed Bandit with Composite and Anonymous Feedback},
  author       = {Wang, Siwei and Wang, Haoyun and Huang, Longbo},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {11},
  pages        = {10210--10217},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/17224},
  doi          = {10.1609/aaai.v35i11.17224},
  abstractNote = {We study the multi-armed bandit (MAB) problem with composite and anonymous feedback. In this model, the reward of pulling an arm spreads over a period of time (we call this period the reward interval), and the player successively receives partial rewards of the action, combined with rewards from pulling other arms. Existing results on this model require prior knowledge of the reward interval size as an input to their algorithms. In this paper, we propose adaptive algorithms for both the stochastic and the adversarial cases, without requiring any prior information about the reward interval. For the stochastic case, we prove that our algorithm guarantees a regret that matches the lower bound (in order). For the adversarial case, we propose the first algorithm to jointly handle a non-oblivious adversary and an unknown reward interval size. We also conduct simulations based on a real-world dataset. The results show that our algorithms outperform existing benchmarks.}
}