% bioRxiv preprint; entered as @article with journal = {bioRxiv} per the
% original export. Citation key kept byte-identical so existing \cite calls
% still resolve. The nonstandard `elocation-id` field is harmless (ignored
% by BibTeX/Biber) and retained for provenance.
@article{Farrell2020.03.04.977702,
  author       = {Matthew Farrell and Stefano Recanatesi and R. Clay Reid and Stefan Mihalas and Eric Shea-Brown},
  title        = {Autoencoder networks extract latent variables and encode these variables in their connectomes},
  journal      = {bioRxiv},
  year         = {2020},
  publisher    = {Cold Spring Harbor Laboratory},
  doi          = {10.1101/2020.03.04.977702},
  elocation-id = {2020.03.04.977702},
  url          = {https://www.biorxiv.org/content/early/2020/03/05/2020.03.04.977702},
  eprint       = {https://www.biorxiv.org/content/early/2020/03/05/2020.03.04.977702.full.pdf},
  abstract     = {Spectacular advances in imaging and data processing techniques are revealing a wealth of information about brain connectomes. This raises an exciting scientific opportunity: to infer the underlying circuit function from the structure of its connectivity. A potential roadblock, however, is that {\textendash} even with well constrained neural dynamics {\textendash} there are in principle many different connectomes that could support a given computation. Here, we define a tractable setting in which the problem of inferring circuit function from circuit connectivity can be analyzed in detail: the function of input compression and reconstruction, in an autoencoder network with a single hidden layer. Here, in general there is substantial ambiguity in the weights that can produce the same circuit function, because largely arbitrary changes to {\textquotedblleft}input{\textquotedblright} weights can be undone by applying the inverse modifications to the {\textquotedblleft}output{\textquotedblright} weights. However, we use mathematical arguments and simulations to show that adding simple, biologically motivated regularization of connectivity resolves this ambiguity in an interesting way: weights are constrained such that the latent variable structure underlying the inputs can be extracted from the weights by using nonlinear dimensionality reduction methods.},
}