@article{Potyka_2021,
  title        = {Interpreting Neural Networks as Quantitative Argumentation Frameworks},
  author       = {Potyka, Nico},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {7},
  pages        = {6463--6470},
  year         = {2021},
  month        = {May},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/16801},
  doi          = {10.1609/aaai.v35i7.16801},
  abstractNote = {We show that an interesting class of feed-forward neural networks can be understood as quantitative argumentation frameworks. This connection creates a bridge between research in Formal Argumentation and Machine Learning. We generalize the semantics of feed-forward neural networks to acyclic graphs and study the resulting computational and semantical properties in argumentation graphs. As it turns out, the semantics gives stronger guarantees than existing semantics that have been tailor-made for the argumentation setting. From a machine-learning perspective, the connection does not seem immediately helpful. While it gives intuitive meaning to some feed-forward neural networks, they remain difficult to understand due to their size and density. However, the connection seems helpful for combining background knowledge in the form of sparse argumentation networks with dense neural networks that have been trained for complementary purposes, and for learning the parameters of quantitative argumentation frameworks in an end-to-end fashion from data.}
}