@inproceedings{8233e0fbebb544ff8cb01e0227349ffb,
title = "Freudian Slips: Analysing the Internal Representations of a Neural Network from Its Mistakes",
abstract = "The use of deep networks has improved the state of the art in various domains of AI, making practical applications possible. At the same time, there are increasing calls to make learning systems more transparent and explainable, due to concerns that they might develop biases in their internal representations that might lead to unintended discrimination, when applied to sensitive personal decisions. The use of vast subsymbolic distributed representations has made this task very difficult. We suggest that we can learn a lot about the biases and the internal representations of a deep network without having to unravel its connections, but by adopting the old psychological approach of analysing its “slips of the tongue”. We demonstrate in a practical example that an analysis of the confusion matrix can reveal that a CNN has represented a biological task in a way that reflects our understanding of taxonomy, inferring more structure than it was requested to by the training algorithm. In particular, we show how a CNN trained to recognise animal families, contains also higher order information about taxa such as the superfamily, parvorder, suborder and order for example. We speculate that various forms of psycho-metric testing for neural networks might provide us insight about their inner workings.",
keywords = "deep learning, taxonomy, computer vision, explainable AI, black-box testing",
author = "Sen Jia and Tom Lansdall-Welfare and Nello Cristianini",
year = "2017",
month = oct,
day = "4",
doi = "10.1007/978-3-319-68765-0_12",
language = "English",
isbn = "9783319687643",
series = "Lecture Notes in Computer Science",
publisher = "Springer",
pages = "138--148",
editor = "N Adams and A Tucker and D Weston",
booktitle = "Advances in Intelligent Data Analysis XVI",
}