@article{Wan_Li_Yang_Zhang_2019,
  author   = {Wan, Ziyu and Li, Yan and Yang, Min and Zhang, Junge},
  title    = {Transductive {Zero-Shot} Learning via Visual Center Adaptation},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {10059--10060},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.330110059},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/5166},
  abstract = {In this paper, we propose a Visual Center Adaptation Method (VCAM) to address the \emph{domain shift} problem in zero-shot learning. For the seen classes in the training data, VCAM builds an embedding space by learning the mapping from semantic space to some visual centers. While for unseen classes in the test data, the construction of embedding space is constrained by a symmetric Chamfer-distance term, aiming to adapt the distribution of the synthetic visual centers to that of the \emph{real} cluster centers. Therefore the learned embedding space can generalize the unseen classes well. Experiments on two widely used datasets demonstrate that our model significantly outperforms state-of-the-art methods.},
}