@article{Li_Zhang_Huang_2019,
  title        = {Multi-Scale 3D Convolution Network for Video Based Person Re-Identification},
  volume       = {33},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/4882},
  DOI          = {10.1609/aaai.v33i01.33018618},
  abstractNote = {This paper proposes a two-stream convolution network to extract spatial and temporal cues for video-based person Re-Identification (ReID). The temporal stream in this network is constructed by inserting several Multi-scale 3D (M3D) convolution layers into a 2D CNN. The resulting M3D convolution network adds only a fraction of parameters to the 2D CNN, yet gains the ability of multi-scale temporal feature learning. With this compact architecture, the M3D convolution network is also more efficient and easier to optimize than existing 3D convolution networks. The temporal stream further involves Residual Attention Layers (RAL) to refine the temporal features. By jointly learning spatial-temporal attention masks in a residual manner, RAL identifies the discriminative spatial regions and temporal cues. The other stream in our network is implemented with a 2D CNN for spatial feature extraction. The spatial and temporal features from the two streams are finally fused for video-based person ReID. Evaluations on three widely used benchmark datasets, \emph{i.e.}, \emph{MARS}, \emph{PRID2011}, and \emph{iLIDS-VID}, demonstrate the substantial advantages of our method over existing 3D convolution networks and state-of-the-art methods.},
  number       = {01},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  author       = {Li, Jianing and Zhang, Shiliang and Huang, Tiejun},
  year         = {2019},
  month        = {Jul.},
  pages        = {8618--8625}
}