@article{Guest626374,
  author        = {Guest, Olivia and Love, Bradley C.},
  title         = {Levels of Representation in a Deep Learning Model of Categorization},
  journal       = {bioRxiv},
  year          = {2019},
  elocation-id  = {626374},
  doi           = {10.1101/626374},
  publisher     = {Cold Spring Harbor Laboratory},
  eprinttype    = {bioRxiv},
  url           = {https://www.biorxiv.org/content/early/2019/05/03/626374},
  eprint        = {https://www.biorxiv.org/content/early/2019/05/03/626374.full.pdf},
  abstract      = {Deep convolutional neural networks (DCNNs) rival humans in object recognition. The layers (or levels of representation) in DCNNs have been successfully aligned with processing stages along the ventral stream for visual processing. Here, we propose a model of concept learning that uses visual representations from these networks to build memory representations of novel categories, which may rely on the medial temporal lobe (MTL) and medial prefrontal cortex (mPFC). Our approach opens up two possibilities: a) formal investigations can involve photographic stimuli as opposed to stimuli handcrafted and coded by the experimenter; b) model comparison can determine which level of representation within a DCNN a learner is using during categorization decisions. Pursuing the latter point, DCNNs suggest that the shape bias in children relies on representations at more advanced network layers whereas a learner that relied on lower network layers would display a color bias. These results confirm the role of natural statistics in the shape bias (i.e., shape is predictive of category membership) while highlighting that the type of statistics matter, i.e., those from lower or higher levels of representation. We use the same approach to provide evidence that pigeons performing seemingly sophisticated categorization of complex imagery may in fact be relying on representations that are very low-level (i.e., retinotopic). Although complex features, such as shape, relatively predominate at more advanced network layers, even simple features, such as spatial frequency and orientation, are better represented at the more advanced layers, contrary to a standard hierarchical view.},
  internal-note = {Auto-export from bioRxiv: the original abstract field also contained a pasted-in "Author Note" paragraph (funding, correspondence with an obfuscated {at} email, code link https://osf.io/jxavn/); moved out of the abstract. NOTE(review): if/when this appears in a journal, replace journal/eprint fields with the published venue and DOI.},
}