@article{Tian_Xu_Guan_Zhou_2020,
  title        = {Network as Regularization for Training Deep Neural Networks: Framework, Model and Performance},
  author       = {Tian, Kai and Xu, Yi and Guan, Jihong and Zhou, Shuigeng},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {34},
  number       = {04},
  pages        = {6013--6020},
  year         = {2020},
  month        = {Apr.},
  DOI          = {10.1609/aaai.v34i04.6063},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/6063},
  abstractNote = {Despite their powerful representation ability, deep neural networks (DNNs) are prone to over-fitting because of over-parametrization. Existing works have explored various regularization techniques to tackle the over-fitting problem. Some of them employ soft targets rather than one-hot labels to guide network training (\emph{e.g.}, label smoothing in classification tasks); these are called target-based regularization approaches in this paper. To alleviate the over-fitting problem, here we propose a new and general regularization framework that introduces an auxiliary network to dynamically incorporate guided semantic disturbance into the labels. We call it \emph{N}etwork \emph{a}s \emph{R}egularization (\emph{NaR} for short). During training, the disturbance is constructed as a convex combination of the predictions of the target network and the auxiliary network. The two networks are initialized separately, and the auxiliary network is trained independently of the target network while progressively providing instance-level and class-level semantic information to the latter. We conduct extensive experiments to validate the effectiveness of the proposed method. Experimental results show that NaR outperforms many state-of-the-art target-based regularization methods, and that other regularization approaches (\emph{e.g.}, \emph{mixup}) can also benefit from being combined with NaR.}
}