@article{Kim_Kim_Bengio_2021,
  title        = {Visual Concept Reasoning Networks},
  volume       = {35},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/16995},
  DOI          = {10.1609/aaai.v35i9.16995},
  abstractNote = {A split-transform-merge strategy has been broadly used as an architectural constraint in convolutional neural networks for visual recognition tasks. It approximates sparsely connected networks by explicitly defining multiple branches to simultaneously learn representations with different visual concepts or properties. Dependencies or interactions between these representations are typically defined by dense and local operations, however, without any adaptiveness or high-level reasoning. In this work, we propose to exploit this strategy and combine it with our Visual Concept Reasoning Networks (VCRNet) to enable reasoning between high-level visual concepts. We associate each branch with a visual concept and derive a compact concept state by selecting a few local descriptors through an attention module. These concept states are then updated by graph-based interaction and used to adaptively modulate the local descriptors. We describe our proposed model by split-transform-attend-interact-modulate-merge stages, which are implemented by opting for a highly modularized architecture. Extensive experiments on visual recognition tasks such as image classification, semantic segmentation, object detection, scene recognition, and action recognition show that our proposed model, VCRNet, consistently improves the performance by increasing the number of parameters by less than 1%.},
  number       = {9},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Kim, Taesup and Kim, Sungwoong and Bengio, Yoshua},
  year         = {2021},
  month        = {May},
  pages        = {8172--8180}
}