@article{Mirkovic1508_2015,
  author   = {Mirkovic, Bojana and Debener, Stefan and Jaeger, Manuela and De Vos, Maarten},
  title    = {Decoding the attended speech stream with multi-channel {EEG}: implications for online, daily-life applications},
  journal  = {J. Neural Eng.},
  year     = {2015},
  volume   = {12},
  number   = {4},
  doi      = {10.1088/1741-2560/12/4/046007},
  url      = {http://iopscience.iop.org/1741-2552/12/4/046007/article},
  abstract = {Objective. Recent studies have provided evidence that temporal-envelope-driven speech decoding from high-density electroencephalography (EEG) and magnetoencephalography recordings can identify the attended speech stream in a multi-speaker scenario. The present work replicated the previous high-density EEG study and investigated the technical requirements for practical attended speech decoding with EEG. Approach. Twelve normal-hearing participants attended to one of two simultaneously presented audiobook stories while high-density EEG was recorded. An offline iterative procedure that eliminated the channels contributing least to decoding provided insight into the necessary channel number and the optimal cross-subject channel configuration. Aiming towards the future goal of near real-time classification with an individually trained decoder, the minimum duration of training data necessary for successful classification was determined using a chronological cross-validation approach. Main results. Close replication of the previously reported results confirmed the robustness of the method. Decoder performance remained stable from 96 channels down to 25. Furthermore, with less than 15 min of training data, the subject-independent (pre-trained) decoder performed better than an individually trained decoder. Significance. Our study complements previous research and provides information suggesting that efficient low-density EEG online decoding is within reach.}
}