@article{Isomura654467,
  author = {Takuya Isomura and Karl Friston},
  title = {Reverse engineering neural networks to characterise their cost functions},
  elocation-id = {654467},
  year = {2019},
  doi = {10.1101/654467},
  publisher = {Cold Spring Harbor Laboratory},
  abstract = {This work considers a class of biologically plausible cost functions for neural networks, where the same cost function is minimised by both neural activity and plasticity. We show that such cost functions can be cast as a variational bound on model evidence under an implicit generative model. Using generative models based on Markov decision processes (MDP), we show, analytically, that neural activity and plasticity perform Bayesian inference and learning, respectively, by maximising model evidence. Using mathematical and numerical analyses, we then confirm that biologically plausible cost functions{\textemdash}used in neural networks{\textemdash}correspond to variational free energy under some prior beliefs about the prevalence of latent states that generate inputs. These prior beliefs are determined by particular constants (i.e., thresholds) that define the cost function. This means that the Bayes optimal encoding of latent or hidden states is achieved when, and only when, the network{\textquoteright}s implicit priors match the process that generates the inputs. Our results suggest that when a neural network minimises its cost function, it is implicitly minimising variational free energy under optimal or sub-optimal prior beliefs. This insight is potentially important because it suggests that any free parameter of a neural network{\textquoteright}s cost function can itself be optimised{\textemdash}by minimisation with respect to variational free energy.},
  URL = {https://www.biorxiv.org/content/early/2019/10/31/654467},
  eprint = {https://www.biorxiv.org/content/early/2019/10/31/654467.full.pdf},
  journal = {bioRxiv}
}