@inproceedings{Savin1824_2016,
  year = {2016},
  author = {Savin, Cristina and Monk, Travis and Lücke, Jörg},
  title = {Intrinsic plasticity for optimal learning of variable stimulus intensities},
  booktitle = {Int. Conf. Computational Systems Neuroscience (COSYNE)},
  abstract = {In many situations the meaning of a stimulus is the same despite fluctuations in its overall strength. A visual scene’s content does not depend on light intensity, and a word utterance should be recognised irrespective of its loudness. Nonetheless, gain fluctuations are an integral part of the input statistics, and they can help differentiate between stimuli. In the visual domain, for instance, objects of the same class are likely to have similar surface properties, resulting in a distinct distribution of light intensities; light intensities can therefore help identify objects. The neural underpinnings of such a computation are unclear. Existing models discard gain information through ad hoc preprocessing (Nessler, 2009; Keck, 2012) or divisive normalisation (Schwarz, 2001) before learning the input statistics from the normalised data. Overall, it is unknown how neural circuits can robustly extract statistical regularities in their inputs when the overall intensity of stimuli is variable. Here we develop a principled account of unsupervised learning in the face of gain variations. We introduce a novel generative mixture model (Product-Poisson-Gamma) that explicitly models the statistics of stimulus intensity, and we derive a biologically plausible neural circuit implementation for inference and learning in this model. We find that explicitly taking gain variations into account improves the robustness of unsupervised learning, as differences in input strength help distinguish between classes with similar features but different gain statistics. From a biological perspective, the derived neural circuit, in which feature-sensitive neurons are equipped with a specific form of intrinsic plasticity (IP), provides novel insights into the interaction between Hebbian plasticity and IP during learning. Furthermore, our results imply that neural excitability reflects nontrivial input statistics, in particular the intensity of the features to which a neuron is sensitive.}
}