@article{Zhen_Wang_Zhou_Fang_Quan_2019,
  author   = {Zhen, Mingmin and Wang, Jinglu and Zhou, Lei and Fang, Tian and Quan, Long},
  title    = {Learning Fully Dense Neural Networks for Image Semantic Segmentation},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {9283--9290},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.33019283},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/4965},
  abstract = {Semantic segmentation is pixel-wise classification which retains critical spatial information. The ``feature map reuse'' has been commonly adopted in CNN based approaches to take advantage of feature maps in the early layers for the later spatial reconstruction. Along this direction, we go a step further by proposing a fully dense neural network with an encoder-decoder structure that we abbreviate as FDNet. For each stage in the decoder module, feature maps of all the previous blocks are adaptively aggregated to feedforward as input. On the one hand, it reconstructs the spatial boundaries accurately. On the other hand, it learns more efficiently with the more efficient gradient backpropagation. In addition, we propose the boundary-aware loss function to focus more attention on the pixels near the boundary, which boosts the ``hard examples'' labeling. We have demonstrated the best performance of the FDNet on the two benchmark datasets: PASCAL VOC 2012, NYUDv2 over previous works when not considering training on other datasets.},
}