@article{Leibig084210,
  author       = {Christian Leibig and Vaneeda Allken and Philipp Berens and Siegfried Wahl},
  title        = {Leveraging uncertainty information from deep neural networks for disease detection},
  elocation-id = {084210},
  year         = {2016},
  doi          = {10.1101/084210},
  publisher    = {Cold Spring Harbor Laboratory},
  abstract     = {In recent years, deep neural networks (DNNs) have revolutionized the field of computer vision and image processing. In medical imaging, algorithmic solutions based on DNNs have been shown to achieve high performance on tasks that previously required medical experts. So far DNN-based solutions for disease detection have been proposed without quantifying their uncertainty in a decision. In contrast, a physician knows whether she is uncertain about a case and will consult more experienced colleagues if needed. Here we propose to estimate the uncertainty of DNNs in medical diagnosis based on a recent theoretical insight on the link between dropout networks and approximate Bayesian inference. Using the example of detecting diabetic retinopathy (DR) from fundus photographs, we show that uncertainty informed decision referral improves diagnostic performance. Experiments across different networks, tasks and datasets showed robust generalization. Depending on network capacity and task/dataset difficulty, we surpass 85\% sensitivity and 80\% specificity as recommended by the NHS when referring 0\%--20\% of the most uncertain decisions for further inspection. We analyse causes of uncertainty by relating intuitions from 2D visualizations to the high-dimensional image space, showing that it is in particular the difficult decisions that the networks consider uncertain.},
  URL          = {https://www.biorxiv.org/content/early/2016/10/28/084210},
  eprint       = {https://www.biorxiv.org/content/early/2016/10/28/084210.full.pdf},
  journal      = {bioRxiv}
}
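The abstract describes Monte Carlo dropout (dropout kept active at test time, interpreted as approximate Bayesian inference) combined with uncertainty-informed decision referral. A minimal sketch of that idea follows, in PyTorch; the toy network, the number of forward passes T, and the 20\% referral fraction are illustrative assumptions, not the authors' actual models or configuration.

```python
import torch
import torch.nn as nn

# Toy binary classifier with dropout; the architecture is a stand-in,
# not the networks evaluated in the paper.
class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(512, 128)
        self.drop = nn.Dropout(p=0.5)
        self.fc2 = nn.Linear(128, 1)

    def forward(self, x):
        return self.fc2(self.drop(torch.relu(self.fc1(x))))

def mc_dropout_predict(model, x, T=100):
    """T stochastic forward passes with dropout active (MC dropout).

    Returns the predictive mean and standard deviation of the sigmoid
    output; the standard deviation serves as the uncertainty estimate.
    """
    model.train()  # keep dropout stochastic at test time
    with torch.no_grad():
        probs = torch.stack(
            [torch.sigmoid(model(x)).squeeze(-1) for _ in range(T)]
        )
    return probs.mean(0), probs.std(0)

# Uncertainty-informed decision referral: hand the most uncertain
# fraction of cases to a human grader, decide the rest automatically.
model = Net()
x = torch.randn(1000, 512)            # stand-in for image features
mean, sigma = mc_dropout_predict(model, x)

refer_frac = 0.2                      # refer the 20% most uncertain cases
k = int(refer_frac * len(sigma))
referred = sigma.argsort(descending=True)[:k]
automated = torch.ones(len(sigma), dtype=torch.bool)
automated[referred] = False
decisions = mean[automated] > 0.5     # automatic decisions on the rest
```

Under this scheme, sensitivity and specificity are computed only on the automatically decided cases, so sweeping `refer_frac` from 0 to 0.2 reproduces the kind of referral curve the abstract reports against the NHS thresholds.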