@comment{bioRxiv preprint; key kept as exported so existing \cite commands still resolve.}
@article{Driscoll2022.08.15.503870,
  author       = {Driscoll, Laura and Shenoy, Krishna and Sussillo, David},
  title        = {Flexible Multitask Computation in Recurrent Networks Utilizes Shared Dynamical Motifs},
  journal      = {bioRxiv},
  elocation-id = {2022.08.15.503870},
  year         = {2022},
  doi          = {10.1101/2022.08.15.503870},
  publisher    = {Cold Spring Harbor Laboratory},
  url          = {https://www.biorxiv.org/content/early/2022/08/15/2022.08.15.503870},
  eprint       = {https://www.biorxiv.org/content/early/2022/08/15/2022.08.15.503870.full.pdf},
  abstract     = {Flexible computation is a hallmark of intelligent behavior. Yet, little is known about how neural networks contextually reconfigure for different computations. Humans are able to perform a new task without extensive training, presumably through the composition of elementary processes that were previously learned. Cognitive scientists have long hypothesized the possibility of a compositional neural code, where complex neural computations are made up of constituent components; however, the neural substrate underlying this structure remains elusive in biological and artificial neural networks. Here we identified an algorithmic neural substrate for compositional computation through the study of multitasking artificial recurrent neural networks. Dynamical systems analyses of networks revealed learned computational strategies that mirrored the modular subtask structure of the task-set used for training. Dynamical motifs such as attractors, decision boundaries and rotations were reused across different task computations. For example, tasks that required memory of a continuous circular variable repurposed the same ring attractor. We show that dynamical motifs are implemented by clusters of units and are reused across different contexts, allowing for flexibility and generalization of previously learned computation. Lesioning these clusters resulted in modular effects on network performance: a lesion that destroyed one dynamical motif only minimally perturbed the structure of other dynamical motifs. Finally, modular dynamical motifs could be reconfigured for fast transfer learning. After slow initial learning of dynamical motifs, a subsequent faster stage of learning reconfigured motifs to perform novel tasks. This work contributes to a more fundamental understanding of compositional computation underlying flexible general intelligence in neural systems. We present a conceptual framework that establishes dynamical motifs as a fundamental unit of computation, intermediate between the neuron and the network. As more whole brain imaging studies record neural activity from multiple specialized systems simultaneously, the framework of dynamical motifs will guide questions about specialization and generalization across brain regions. Competing Interest Statement: KS serves on the Scientific Advisory Boards (SABs) of MIND-X Inc. (acquired by Blackrock Neurotech, Spring 2022), Inscopix Inc. and Heal Inc. He also serves as a consultant / advisor (and was on founding SAB) for CTRL-Labs (acquired by Facebook Reality Labs in Fall 2019, and is now a part of Meta Platform{\textquoteright}s Reality Labs) and serves as a consultant / advisor (and is a co-founder, 2016) for Neuralink. DS works for Meta Platform{\textquoteright}s Reality Labs, but the work presented here was done entirely at Stanford. LD has no competing interests.},
}