@incollection{cf64791aede74f50a8290e126ed24474,
  author    = {Arras, Leila and Arjona-Medina, Jos{\'e} and Widrich, Michael and Montavon, Gr{\'e}goire and Gillhofer, Michael and M{\"u}ller, Klaus-Robert and Hochreiter, Sepp and Samek, Wojciech},
  title     = {Explaining and Interpreting {LSTMs}},
  booktitle = {Explainable {AI}: Interpreting, Explaining and Visualizing Deep Learning},
  editor    = {Samek, Wojciech and Montavon, Gr{\'e}goire and Vedaldi, Andrea and Hansen, Lars Kai and M{\"u}ller, Klaus-Robert},
  series    = {Lecture Notes in Computer Science},
  volume    = {11700},
  pages     = {211--238},
  publisher = {Springer},
  address   = {Cham},
  year      = {2019},
  doi       = {10.1007/978-3-030-28954-6_11},
  isbn      = {978-3-030-28954-6},
  language  = {English},
  abstract  = {While neural networks have acted as a strong unifying force in the design of modern AI systems, the neural network architectures themselves remain highly heterogeneous due to the variety of tasks to be solved. In this chapter, we explore how to adapt the Layer-wise Relevance Propagation (LRP) technique used for explaining the predictions of feed-forward networks to the LSTM architecture used for sequential data modeling and forecasting. The special accumulators and gated interactions present in the LSTM require both a new propagation scheme and an extension of the underlying theoretical framework to deliver faithful explanations.},
  keywords  = {Explainable artificial intelligence, Interpretability, LSTM, Model transparency, Recurrent neural networks},
  note      = {Funding Information: This work was supported by the German Ministry for Education and Research as Berlin Big Data Centre (01IS14013A), Berlin Center for Machine Learning (01IS18037I) and TraMeExCo (01IS18056A). Partial funding by DFG is acknowledged (EXC 2046/1, project-ID: 390685689). This work was also supported by the Institute for Information & Communications Technology Planning & Evaluation (IITP) grant funded by the Korea government (No. 2017-0-00451, No. 2017-0-01779). Publisher Copyright: {\textcopyright} Springer Nature Switzerland AG 2019.},
}