@article{Hemamou_Felhi_Vandenbussche_Martin_Clavel_2019,
  author   = {Hemamou, L{\'e}o and Felhi, Ghazi and Vandenbussche, Vincent and Martin, Jean-Claude and Clavel, Chlo{\'e}},
  title    = {{HireNet}: A Hierarchical Attention Model for the Automatic Analysis of Asynchronous Video Job Interviews},
  journal  = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume   = {33},
  number   = {01},
  pages    = {573--581},
  year     = {2019},
  month    = jul,
  doi      = {10.1609/aaai.v33i01.3301573},
  url      = {https://ojs.aaai.org/index.php/AAAI/article/view/3832},
  abstract = {New technologies drastically change recruitment techniques. Some research projects aim at designing interactive systems that help candidates practice job interviews. Other studies aim at the automatic detection of social signals (\emph{e.g.} smile, turn of speech, etc...) in videos of job interviews. These studies are limited with respect to the number of interviews they process, but also by the fact that they only analyze simulated job interviews (\emph{e.g.} students pretending to apply for a fake position). Asynchronous video interviewing tools have become mature products on the human resources market, and thus, a popular step in the recruitment process. As part of a project to help recruiters, we collected a corpus of more than 7000 candidates having asynchronous video job interviews for real positions and recording videos of themselves answering a set of questions. We propose a new hierarchical attention model called HireNet that aims at predicting the hirability of the candidates as evaluated by recruiters. In HireNet, an interview is considered as a sequence of questions and answers containing salient socials signals. Two contextual sources of information are modeled in HireNet: the words contained in the question and in the job position. Our model achieves better F1-scores than previous approaches for each modality (verbal content, audio and video). Results from early and late multimodal fusion suggest that more sophisticated fusion schemes are needed to improve on the monomodal results. Finally, some examples of moments captured by the attention mechanisms suggest our model could potentially be used to help finding key moments in an asynchronous job interview.},
}