@article{Zhao_Zhang_Chen_2019,
  title        = {Self-Adversarially Learned Bayesian Sampling},
  volume       = {33},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/4539},
  DOI          = {10.1609/aaai.v33i01.33015893},
  abstractNote = {Scalable Bayesian sampling plays an important role in modern machine learning, especially in fast-developing unsupervised (deep) learning models. While tremendous progress has been achieved via scalable Bayesian sampling methods such as stochastic gradient MCMC (SG-MCMC) and Stein variational gradient descent (SVGD), the generated samples are typically highly correlated. Moreover, their sample-generation processes are often criticized as inefficient. In this paper, we propose a novel self-adversarial learning framework that automatically learns a conditional generator to mimic the behavior of a Markov kernel (transition kernel). High-quality samples can be generated efficiently by direct forward passes through a learned generator. Most importantly, the learning process adopts a self-learning paradigm, requiring no information on existing Markov kernels, \emph{e.g.}, knowledge of how to draw samples from them. Specifically, our framework learns to use current samples, either from the generator or from pre-provided training data, to update the generator such that the generated samples progressively approach a target distribution; hence it is called self-learning. Experiments on both synthetic and real datasets verify the advantages of our framework, which outperforms related methods in terms of both sampling efficiency and sample quality.},
  number       = {01},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Zhao, Yang and Zhang, Jianyi and Chen, Changyou},
  year         = {2019},
  month        = jul,
  pages        = {5893--5900}
}