@article{Zhang_Jiang_Cheng_Wu_Yu_Li_Guo_Zheng_Zheng_Sun_2021,
  title        = {One for More: Selecting Generalizable Samples for Generalizable {ReID} Model},
  author       = {Zhang, Enwei and Jiang, Xinyang and Cheng, Hao and Wu, Ancong and Yu, Fufu and Li, Ke and Guo, Xiaowei and Zheng, Feng and Zheng, Weishi and Sun, Xing},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {4},
  pages        = {3324--3332},
  year         = {2021},
  month        = may,
  doi          = {10.1609/aaai.v35i4.16444},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/16444},
  abstractNote = {Current training objectives of existing person Re-IDentification (ReID) models only ensure that the loss of the model decreases on selected training batch, with no regards to the performance on samples outside the batch. It will inevitably cause the model to over-fit the data in the dominant position (e.g., head data in imbalanced class, easy samples or noisy samples). The latest resampling methods address the issue by designing specific criterion to select specific samples that trains the model generalize more on certain type of data (e.g., hard samples, tail data), which is not adaptive to the inconsistent real world ReID data distributions. Therefore, instead of simply presuming on what samples are generalizable, this paper proposes a one-for-more training objective that directly takes the generalization ability of selected samples as a loss function and learn a sampler to automatically select generalizable samples. More importantly, our proposed one-for-more based sampler can be seamlessly integrated into the ReID training framework which is able to simultaneously train ReID models and the sampler in an end-to-end fashion. The experimental results show that our method can effectively improve the ReID model training and boost the performance of ReID models.},
}