@inproceedings{Kollmeier1719_2016,
  author    = {Kollmeier, Birger and Lenarz, Thomas and Warzybok, Anna and Schädler, Marc and Haumann, Sabine and Brand, Thomas and Lücke, Jörg},
  title     = {Auditory Profile and Common Audiological Functional Parameters ({CAFPAs}): From Diagnostics to Machine-Learning-based Evidence},
  booktitle = {Assoc. Res. Otolaryng. MidWinter Meeting ({ARO})},
  year      = {2016},
  url       = {http://c.ymcdn.com/sites/www.aro.org/resource/resmgr/Abstract_Archives/UPDATED_2016_ARO_Abstract_Bo.pdf},
  abstract  = {# Introduction # How well should the various audiological findings best be represented and how can this information be used to characterize the individual hearing problem of each patient – preferably in a way which is independent from his or her native language? This contribution reviews the approach and models developed in the Cluster of Excellence Hearing4All (Oldenburg/Hannover) to unite the diverse audiological databases, e.g. from the hearing research and clinical institutions in Oldenburg and in Hannover in a more abstract, comprehensive way than the previously defined “auditory profiles”. # Method # A set of “common audiological functional parameters” (CAFPAs) has been defined that serves as an abstract representation of the most important audiological characteristics of each patient. The CAFPAs include, e.g., sensitivity loss in different frequency regions, distortion component and compression loss at low and high frequencies, central and binaural loss, cognitive and socio-economic component of the hearing loss. These CAFPAs have been defined in order to make non-consistent and non-complete audiological data accessible for methods of machine learning, such as, e.g. Bayesian nets. Since speech recognition tests in noise are the most crucial outcome parameter, the multilingual matrix test (Kollmeier et al., 2015 Int. J. Audiol. online first) is used which is suitable not only for comparisons across different clinics but also across languages. The individual Speech Recognition Thresholds (SRT) in stationary and in fluctuating noise were predicted using the audiogram and an estimate of the internal level uncertainty as parameters following the Automatic Speech Recognition (ASR) approach by Schädler et al. (2015, Int. J. Audiol. Online first) # Results # Using such statistical methods, a data-driven audiological classification for different classes of hearing loss becomes possible. Estimates of the “typical” hearing loss and suprathreshold distortion components in combination with ASR-based speech recognition prediction allow to predict the individual performance for the closed-set Matrix sentence recognition test in different languages. # Conclusions # A consistency check across the different audiological input and outcome measures becomes possible by using auditory models adapted to the individual hearing impairment. The concept of a more abstract representation of audiological diagnostical information in combination with speech recognition prediction methods and other machine learning approaches appears to be promising for further research in diagnostical and rehabilitative audiology. # Funding# DFG, EXC 1077 Hearing4All},
}