@article{Pramanick_Akhtar_Chakraborty_2021,
  author   = {Pramanick, Shraman and Akhtar, Md Shad and Chakraborty, Tanmoy},
  title    = {Exercise? {I} thought you said `Extra Fries': Leveraging Sentence Demarcations and Multi-hop Attention for Meme Affect Analysis},
  journal  = {Proceedings of the International {AAAI} Conference on Web and Social Media},
  volume   = {15},
  number   = {1},
  pages    = {513--524},
  year     = {2021},
  month    = may,
  doi      = {10.1609/icwsm.v15i1.18080},
  url      = {https://ojs.aaai.org/index.php/ICWSM/article/view/18080},
  abstract = {Today's Internet is awash in memes as they are humorous, satirical, or ironic which make people laugh. According to a survey, 33\% of social media users in age bracket [13-35] send memes every day, whereas more than 50\% send every week. Some of these memes spread rapidly within a very short time-frame, and their virality depends on the novelty of their (textual and visual) content. A few of them convey positive messages, such as funny or motivational quotes; while others are meant to mock/hurt someone's feelings through sarcastic or offensive messages. Despite the appealing nature of memes and their rapid emergence on social media, effective analysis of memes has not been adequately attempted to the extent it deserves. Recently, in SemEval'20, a pioneering attempt has been made in this direction by organizing a shared task on `Memotion Analysis' (meme emotion analysis). As expected, the competition attracted more than 500 participants with the final submission of [23-32] systems across three sub-tasks. In this paper, we attempt to solve the same set of tasks suggested in the SemEval'20 - Memotion Analysis competition. We propose a multi-hop attention-based deep neural network framework, called MHA-Meme, whose prime objective is to leverage the spatial-domain correspondence between the visual modality (an image) and various textual segments to extract fine-grained feature representations for classification. We evaluate MHA-Meme on the `Memotion Analysis' dataset for all three sub-tasks - sentiment classification, affect classification, and affect class quantification. Our comparative study shows state-of-the-art performances of MHA-Meme for all three tasks compared to the top systems that participated in the competition. Unlike all the baselines which perform inconsistently across all three tasks, MHA-Meme outperforms baselines in all the tasks on average. Moreover, we validate the generalization of MHA-Meme on another set of manually annotated test samples and observe it to be consistent. Finally, we establish the interpretability of MHA-Meme.},
}