From 555ba578a19b285912aa5f718e5715f41db24609 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Thu, 5 Apr 2018 13:05:55 +0200 Subject: [PATCH 01/21] Commit 1: weibo 2014 and zhou 2016 added Many changes also detailed in pull request --- moabb/datasets/Weibo2014.py | 127 +++++++++++++++++++++++++++++++ moabb/datasets/Zhou2016.py | 84 ++++++++++++++++++++ moabb/datasets/bbci_eeg_fnirs.py | 2 +- moabb/datasets/bnci.py | 10 +-- moabb/datasets/gigadb.py | 4 +- moabb/datasets/physionet_mi.py | 5 +- moabb/datasets/upper_limb.py | 2 +- moabb/tests/datasets.py | 16 ++++ 8 files changed, 239 insertions(+), 11 deletions(-) create mode 100644 moabb/datasets/Weibo2014.py create mode 100644 moabb/datasets/Zhou2016.py diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py new file mode 100644 index 000000000..9dd27e40b --- /dev/null +++ b/moabb/datasets/Weibo2014.py @@ -0,0 +1,127 @@ +''' +Simple and compound motor imagery +https://doi.org/10.1371/journal.pone.0114853 +''' + +from .base import BaseDataset +import zipfile as z +from scipy.io import loadmat +from mne.datasets.utils import _get_path, _do_path_update +from mne.utils import _fetch_file +import mne +import numpy as np +import os +import shutil + +FILES = [] +FILES.append('https://dataverse.harvard.edu/api/access/datafile/2499178') +FILES.append('https://dataverse.harvard.edu/api/access/datafile/2499182') +FILES.append('https://dataverse.harvard.edu/api/access/datafile/2499179') + + +def eeg_data_path(base_path, subject): + file1_subj = ['cl', 'cyy', 'kyf', 'lnn'] + file2_subj = ['ls', 'ry', 'wcf'] + file3_subj = ['wx', 'yyx', 'zd'] + if not os.path.isfile(os.path.join(base_path, 'subject_{}.mat'.format(subject))): + if subject in range(1, 5): + if not os.path.isfile(os.path.join(base_path, 'data1.zip')): + _fetch_file(FILES[0], os.path.join( + base_path, 'data1.zip'), print_destination=False) + with z.ZipFile(os.path.join(base_path, 'data1.zip'), 'r') as f: + os.makedirs(os.path.join(base_path, 
'data1'), exist_ok=True) + f.extractall(os.path.join(base_path, 'data1')) + for fname in os.listdir(os.path.join(base_path, 'data1')): + for ind, prefix in zip(range(1, 5), file1_subj): + if fname.beginswith(prefix): + os.rename(os.path.join(base_path, 'data1', fname), + os.path.join(base_path, + 'subject_{}.mat'.format(prefix))) + os.remove(os.path.join(base_path, 'data.zip')) + shutil.rmtree(os.path.join(base_path, 'data1')) + elif subject in range(5, 8): + if not os.path.isfile(os.path.join(base_path, 'data2.zip')): + _fetch_file(FILES[1], os.path.join( + base_path, 'data2.zip'), print_destination=False) + with z.ZipFile(os.path.join(base_path, 'data2.zip'), 'r') as f: + os.makedirs(os.path.join(base_path, 'data2'), exist_ok=True) + f.extractall(os.path.join(base_path, 'data2')) + for fname in os.listdir(os.path.join(base_path, 'data2')): + for ind, prefix in zip(range(5, 8), file2_subj): + if fname.startswith(prefix): + os.rename(os.path.join(base_path, 'data2', fname), + os.path.join(base_path, + 'subject_{}.mat'.format(ind))) + os.remove(os.path.join(base_path, 'data2.zip')) + shutil.rmtree(os.path.join(base_path, 'data2')) + elif subject in range(8, 11): + if not os.path.isfile(os.path.join(base_path, 'data3.zip')): + _fetch_file(FILES[2], os.path.join( + base_path, 'data3.zip'), print_destination=False) + with z.ZipFile(os.path.join(base_path, 'data3.zip'), 'r') as f: + os.makedirs(os.path.join(base_path, 'data3'), exist_ok=True) + f.extractall(os.path.join(base_path, 'data3')) + for fname in os.listdir(os.path.join(base_path, 'data3')): + for ind, prefix in zip(range(8, 11), file3_subj): + if fname.startswith(prefix): + os.rename(os.path.join(base_path, 'data3', fname), + os.path.join(base_path, + 'subject_{}.mat'.format(ind))) + os.remove(os.path.join(base_path, 'data3.zip')) + shutil.rmtree(os.path.join(base_path, 'data3')) + return os.path.join(base_path, 'subject_{}.mat'.format(subject)) + + +class Weibo2014(BaseDataset): + """Weibo 2014 Motor Imagery 
dataset""" + + def __init__(self): + super().__init__( + subjects=list(range(1, 11)), + sessions_per_subject=1, + events=dict(left_hand=1, right_hand=2, + hands=3, feet=4, left_hand_right_foot=5, + right_hand_left_foot=6, rest=7), + code='Weibo 2014', + # Full trial is 0-8 but with trialwise bandpass this reduces + # boundary effects + interval=[1, 7], + paradigm='imagery', + doi='10.7910/DVN/27306') + + def _get_single_subject_data(self, subject): + """return data for a single subject""" + fname = self.data_path(subject) + # TODO: add 1s 0 buffer between trials and make continuous + data = loadmat(fname, squeeze_me=True, struct_as_record=False, + verify_compressed_data_integrity=False) + montage = mne.channels.read_montage('standard_1020') + info = mne.create_info(ch_names=['EEG{}'.format(i) for i in range(1,65)]+['STIM014'], + ch_types=['eeg']*64+['stim'], + sfreq=200, montage=None) # until we get the channel names + event_ids = data['label'].ravel() + raw_data = np.transpose(data['data'], axes=[2, 0, 1]) + # de-mean each trial + raw_data = raw_data - np.mean(raw_data, axis=2, keepdims=True) + raw_events = np.zeros((raw_data.shape[0], 1, raw_data.shape[2])) + raw_events[:, 0, 0] = event_ids + data = np.concatenate([raw_data, raw_events], axis=1) + # add buffer in between trials + zeroshape = (data.shape[0], data.shape[1], 10) + data = np.concatenate([np.zeros(zeroshape), data, + np.zeros(zeroshape)], axis=2) + raw = mne.io.RawArray(data=np.concatenate(list(data),axis=1), + info=info, verbose=False) + return {'session_0': {'run_0': raw}} + + def data_path(self, subject, path=None, force_update=False, + update_path=None, verbose=None): + if subject not in self.subject_list: + raise(ValueError("Invalid subject number")) + key = 'MNE_DATASETS_WEIBO2014_PATH' + path = _get_path(path, key, "Weibo 2014") + _do_path_update(path, True, key, "Weibo 2014") + basepath = os.path.join(path, "MNE-weibo-2014") + if not os.path.isdir(basepath): + os.makedirs(basepath) + return 
eeg_data_path(basepath, subject) diff --git a/moabb/datasets/Zhou2016.py b/moabb/datasets/Zhou2016.py new file mode 100644 index 000000000..14342d132 --- /dev/null +++ b/moabb/datasets/Zhou2016.py @@ -0,0 +1,84 @@ +''' +Simple and compound motor imagery +https://doi.org/10.1371/journal.pone.0114853 +''' + +from .base import BaseDataset +import zipfile as z +from scipy.io import loadmat +from mne.datasets.utils import _get_path, _do_path_update +from mne.utils import _fetch_file +import mne +import numpy as np +import os +import shutil + +DATA_PATH = 'https://ndownloader.figshare.com/files/3662952' + + +def local_data_path(base_path, subject): + if not os.path.isdir(os.path.join(base_path, + 'subject_{}'.format(subject))): + if not os.path.isdir(os.path.join(base_path, 'data')): + _fetch_file(DATA_PATH, os.path.join(base_path, 'data.zip'), + print_destination=False) + with z.ZipFile(os.path.join(base_path, 'data.zip'), 'r') as f: + f.extractall(base_path) + os.remove(os.path.join(base_path, 'data.zip')) + datapath = os.path.join(base_path, 'data') + for i in range(1, 5): + os.makedirs(os.path.join(base_path, 'subject_{}'.format(i))) + for session in range(1,4): + for run in ['A','B']: + os.rename(os.path.join(datapath, 'S{}_{}{}.cnt'.format(i,session, run)), + os.path.join(base_path, + 'subject_{}'.format(i), + '{}{}.cnt'.format(session,run))) + shutil.rmtree(os.path.join(base_path, 'data')) + subjpath = os.path.join(base_path, 'subject_{}'.format(subject)) + return [[os.path.join(subjpath, + '{}{}.cnt'.format(y, x)) for x in ['A', 'B']] for y in ['1', '2', '3']] + + +class Zhou2016(BaseDataset): + """Zhou 2016 Imagery dataset""" + + def __init__(self): + super().__init__( + subjects=list(range(1, 5)), + sessions_per_subject=3, + events=dict(left_hand=1, right_hand=2, + feet=3), + code='Zhou 2016', + # MI 1-6s, prepare 0-1, break 6-10 + # boundary effects + interval=[1, 6], + paradigm='imagery', + doi='10.1371/journal.pone.0162657') + + def 
_get_single_subject_data(self, subject): + """return data for a single subject""" + files = self.data_path(subject) + + out = {} + for sess_ind, runlist in enumerate(files): + sess_key = 'session_{}'.format(sess_ind) + out[sess_key] = {} + for run_ind, fname in enumerate(runlist): + run_key = 'run_{}'.format(run_ind) + out[sess_key][run_key] = mne.io.read_raw_cnt(fname, + preload=True, + montage='standard_1020') + return out + + def data_path(self, subject, path=None, force_update=False, + update_path=None, verbose=None): + if subject not in self.subject_list: + raise(ValueError("Invalid subject number")) + key = 'MNE_DATASETS_ZHOU2016_PATH' + path = _get_path(path, key, "Zhou 2016") + _do_path_update(path, True, key, "Zhou 2016") + basepath = os.path.join(path, "MNE-zhou-2016") + if not os.path.isdir(basepath): + os.makedirs(basepath) + return local_data_path(basepath, subject) diff --git a/moabb/datasets/bbci_eeg_fnirs.py b/moabb/datasets/bbci_eeg_fnirs.py index a237b988f..341f24601 100644 --- a/moabb/datasets/bbci_eeg_fnirs.py +++ b/moabb/datasets/bbci_eeg_fnirs.py @@ -85,7 +85,7 @@ def __init__(self, fnirs=False, motor_imagery=True, sessions_per_subject=n_sessions, events=events, code='BBCI EEG fNIRS', - interval=[3.5, 10], + interval=[0, 10], # marker is for *task* start not cue start paradigm=('/').join(paradigms), doi='10.1109/TNSRE.2016.2628057') diff --git a/moabb/datasets/bnci.py b/moabb/datasets/bnci.py index b9a35b0e3..916a3889d 100644 --- a/moabb/datasets/bnci.py +++ b/moabb/datasets/bnci.py @@ -642,7 +642,7 @@ def data_path(self, subject, path=None, force_update=False, class BNCI2014001(MNEBNCI): """BNCI 2014-001 Motor Imagery dataset""" - def __init__(self, tmin=3.5, tmax=5.5): + def __init__(self, tmin=3.5, tmax=6): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=2, @@ -656,7 +656,7 @@ def __init__(self, tmin=3.5, tmax=5.5): class BNCI2014002(MNEBNCI): """BNCI 2014-002 Motor Imagery dataset""" - def __init__(self, tmin=3.5, 
tmax=5.5): + def __init__(self, tmin=3.5, tmax=8): super().__init__( subjects=list(range(1, 15)), sessions_per_subject=1, @@ -670,7 +670,7 @@ def __init__(self, tmin=3.5, tmax=5.5): class BNCI2014004(MNEBNCI): """BNCI 2014-004 Motor Imagery dataset""" - def __init__(self, tmin=4.5, tmax=6.5): + def __init__(self, tmin=3, tmax=7.5): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=5, @@ -684,7 +684,7 @@ def __init__(self, tmin=4.5, tmax=6.5): class BNCI2015001(MNEBNCI): """BNCI 2015-001 Motor Imagery dataset""" - def __init__(self, tmin=4, tmax=7.5): + def __init__(self, tmin=3, tmax=8): # FIXME: some participant have 3 sessions super().__init__( subjects=list(range(1, 13)), @@ -699,7 +699,7 @@ def __init__(self, tmin=4, tmax=7.5): class BNCI2015004(MNEBNCI): """BNCI 2015-004 Motor Imagery dataset""" - def __init__(self, tmin=4.25, tmax=10): + def __init__(self, tmin=3, tmax=10): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=2, diff --git a/moabb/datasets/gigadb.py b/moabb/datasets/gigadb.py index 95e4289d7..8ac78a7be 100644 --- a/moabb/datasets/gigadb.py +++ b/moabb/datasets/gigadb.py @@ -24,7 +24,7 @@ def __init__(self): sessions_per_subject=1, events=dict(left_hand=1, right_hand=2), code='GigaDb Motor Imagery', - interval=[1, 3], + interval=[0.5, 2.5], # full trial is 0-3s, this drops edge effects paradigm='imagery', doi='10.5524/100295') for ii in [32, 46, 49]: @@ -49,7 +49,7 @@ def _get_single_subject_data(self, subject): ch_names = eeg_ch_names + emg_ch_names + ['Stim'] ch_types = ['eeg'] * 64 + ['emg'] * 4 + ['stim'] montage = read_montage('standard_1005') - + eeg_data_l = np.vstack([data.imagery_left * 1e-6, data.imagery_event]) eeg_data_r = np.vstack([data.imagery_right * 1e-6, data.imagery_event * 2]) diff --git a/moabb/datasets/physionet_mi.py b/moabb/datasets/physionet_mi.py index c0cede1c6..2b635f862 100644 --- a/moabb/datasets/physionet_mi.py +++ b/moabb/datasets/physionet_mi.py @@ -19,8 +19,9 @@ def 
__init__(self, imagined=True): sessions_per_subject=1, events=dict(left_hand=2, right_hand=3, feet=5, hands=4, rest=1), code='Physionet Motor Imagery', - interval=[1, 3], - paradigm='imagery' + interval=[0, 3], # website does not specify how long the trials are... + paradigm='imagery', + doi='10.1109/TBME.2004.827072' ) if imagined: diff --git a/moabb/datasets/upper_limb.py b/moabb/datasets/upper_limb.py index 6c69d1fb2..2c3447311 100644 --- a/moabb/datasets/upper_limb.py +++ b/moabb/datasets/upper_limb.py @@ -37,7 +37,7 @@ def __init__(self, imagined=True, executed=False): sessions_per_subject=n_sessions, events=event_id, code='Upper Limb Imagery', - interval=[2.5, 5], + interval=[2.5, 5], # according to paper 2-5 paradigm='imagery', doi='10.1371/journal.pone.0182578') diff --git a/moabb/tests/datasets.py b/moabb/tests/datasets.py index b1ff9c1f3..864ca85c8 100644 --- a/moabb/tests/datasets.py +++ b/moabb/tests/datasets.py @@ -4,6 +4,22 @@ from moabb.datasets.fake import FakeDataset +def _run_tests_on_dataset(d): + for s in d.subject_list: + data = d.get_data(subjects=[s]) + + # we should get a dict + assert isinstance(data, dict) + + # We should get a raw array at the end + rawdata = data[s]['session_0']['run_0'] + assert issubclass(type(rawdata), mne.io.BaseRaw), type(rawdata) + + # print events + print(mne.find_events(rawdata)) + print(d.event_id) + + class Test_Datasets(unittest.TestCase): def test_fake_dataset(self): From cba8318baf97f6308b6fa306302707ed97ab9315 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Thu, 5 Apr 2018 13:41:34 +0200 Subject: [PATCH 02/21] Fix in Weibo downloading code --- moabb/datasets/Weibo2014.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index 9dd27e40b..be135e103 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -33,11 +33,11 @@ def eeg_data_path(base_path, subject): f.extractall(os.path.join(base_path, 'data1')) 
for fname in os.listdir(os.path.join(base_path, 'data1')): for ind, prefix in zip(range(1, 5), file1_subj): - if fname.beginswith(prefix): + if fname.startswith(prefix): os.rename(os.path.join(base_path, 'data1', fname), os.path.join(base_path, - 'subject_{}.mat'.format(prefix))) - os.remove(os.path.join(base_path, 'data.zip')) + 'subject_{}.mat'.format(ind))) + os.remove(os.path.join(base_path, 'data1.zip')) shutil.rmtree(os.path.join(base_path, 'data1')) elif subject in range(5, 8): if not os.path.isfile(os.path.join(base_path, 'data2.zip')): From 252a03ef383d111cbc5a00789297c3f92f260eed Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Thu, 5 Apr 2018 15:26:35 +0200 Subject: [PATCH 03/21] Major changes 1. Added task_interval to all datasets so we know how imagination time and epoching interval are related 2. Added docs to BaseDataset so it's less confusing 3. Re-added ImageryNClass since LvR is too restrictive for our current datasets 4. Made paradigms look at the selected_events and not event_id parameters since we may only care about e.g. motor imageries or cognitive imagery 5. 
Separated filter bank and single-filter paradigms --- moabb/datasets/Weibo2014.py | 8 ++- moabb/datasets/Zhou2016.py | 3 +- moabb/datasets/__init__.py | 2 + moabb/datasets/base.py | 50 +++++++++++++++- moabb/datasets/bnci.py | 15 +++-- moabb/datasets/fake.py | 2 +- moabb/datasets/gigadb.py | 5 ++ moabb/datasets/upper_limb.py | 3 +- moabb/datasets/utils.py | 2 + moabb/paradigms/motor_imagery.py | 99 +++++++++++++++++++++++++++++++- 10 files changed, 174 insertions(+), 15 deletions(-) diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index be135e103..d7af37e2b 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -13,6 +13,9 @@ import os import shutil +import logging +log = logging.getLogger() + FILES = [] FILES.append('https://dataverse.harvard.edu/api/access/datafile/2499178') FILES.append('https://dataverse.harvard.edu/api/access/datafile/2499182') @@ -85,7 +88,7 @@ def __init__(self): code='Weibo 2014', # Full trial is 0-8 but with trialwise bandpass this reduces # boundary effects - interval=[1, 7], + interval=[0, 8], paradigm='imagery', doi='10.7910/DVN/27306') @@ -107,7 +110,8 @@ def _get_single_subject_data(self, subject): raw_events[:, 0, 0] = event_ids data = np.concatenate([raw_data, raw_events], axis=1) # add buffer in between trials - zeroshape = (data.shape[0], data.shape[1], 10) + log.warning('Trial data de-meaned and concatenated with a buffer to create cont data') + zeroshape = (data.shape[0], data.shape[1], 50) data = np.concatenate([np.zeros(zeroshape), data, np.zeros(zeroshape)], axis=2) raw = mne.io.RawArray(data=np.concatenate(list(data),axis=1), diff --git a/moabb/datasets/Zhou2016.py b/moabb/datasets/Zhou2016.py index 14342d132..85d86d73e 100644 --- a/moabb/datasets/Zhou2016.py +++ b/moabb/datasets/Zhou2016.py @@ -52,7 +52,8 @@ def __init__(self): code='Zhou 2016', # MI 1-6s, prepare 0-1, break 6-10 # boundary effects - interval=[1, 6], + interval=[0, 5], + task_interval=[1,6], paradigm='imagery', 
doi='10.1371/journal.pone.0162657') diff --git a/moabb/datasets/__init__.py b/moabb/datasets/__init__.py index 85979b153..5171eebff 100644 --- a/moabb/datasets/__init__.py +++ b/moabb/datasets/__init__.py @@ -11,3 +11,5 @@ from .openvibe_mi import OpenvibeMI from .bbci_eeg_fnirs import BBCIEEGfNIRS from .upper_limb import UpperLimb +from .Weibo2014 import Weibo2014 +from .Zhou2016 import Zhou2016 diff --git a/moabb/datasets/base.py b/moabb/datasets/base.py index de1d577bd..2cc442055 100644 --- a/moabb/datasets/base.py +++ b/moabb/datasets/base.py @@ -7,17 +7,63 @@ class BaseDataset(metaclass=abc.ABCMeta): """Base dataset""" - def __init__(self, subjects, sessions_per_subject, events, code, interval, - paradigm, doi=None): + def __init__(self, subjects, sessions_per_subject, events, + code, interval, paradigm, task_interval=None, doi=None): + """ + Parameters required for all datasets + parameters + ---------- + subjects: List of int + List of subject number # TODO: make identifiers more general + + sessions_per_subject: int + Number of sessions per subject + + events: dict of string: int + String codes for events matched with labels in the stim channel. Currently imagery codes codes can include: + - left_hand + - right_hand + - hands + - feet + - rest + - left_hand_right_foot + - right_hand_left_foot + - tongue + - navigation + - subtraction + - word_ass (for word association) + + code: string + Unique identifier for dataset, used in all plots + + interval: list with 2 entries + Interval relative to trial start for imagery + + paradigm: ['p300','imagery'] + Defines what sort of dataset this is (currently only imagery is implemented) + + task_interval: list of 2 entries or None + Defines the start and end of the imagery *relative to event marker.* If not specified, defaults to interval. 
+ + doi: DOI for dataset, optional (for now) + """ if not isinstance(subjects, list): raise(ValueError("subjects must be a list")) self.subject_list = subjects self.n_sessions = sessions_per_subject self.event_id = events + self.selected_events = events.copy() self.code = code self.interval = interval + if task_interval is None: + assert interval[0]==0, 'Interval does not start at 0 so task onset is necessary' + self.task_interval = list(interval) + else: + if interval[1]-interval[0] >= task_interval[1]-task_interval[0]: + raise Warning('Given interval extends outside of imagery period') + self.task_interval = task_interval self.paradigm = paradigm self.doi = doi diff --git a/moabb/datasets/bnci.py b/moabb/datasets/bnci.py index 916a3889d..cd63708a5 100644 --- a/moabb/datasets/bnci.py +++ b/moabb/datasets/bnci.py @@ -642,7 +642,7 @@ def data_path(self, subject, path=None, force_update=False, class BNCI2014001(MNEBNCI): """BNCI 2014-001 Motor Imagery dataset""" - def __init__(self, tmin=3.5, tmax=6): + def __init__(self, tmin=0.5, tmax=4): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=2, @@ -650,13 +650,14 @@ def __init__(self, tmin=3.5, tmax=6): code='001-2014', interval=[tmin, tmax], paradigm='imagery', + task_interval=[0, 4], doi='10.3389/fnins.2012.00055') class BNCI2014002(MNEBNCI): """BNCI 2014-002 Motor Imagery dataset""" - def __init__(self, tmin=3.5, tmax=8): + def __init__(self, tmin=0, tmax=5): super().__init__( subjects=list(range(1, 15)), sessions_per_subject=1, @@ -664,13 +665,14 @@ def __init__(self, tmin=3.5, tmax=8): code='002-2014', interval=[tmin, tmax], paradigm='imagery', + task_interval=[3, 8] doi='10.1515/bmt-2014-0117') class BNCI2014004(MNEBNCI): """BNCI 2014-004 Motor Imagery dataset""" - def __init__(self, tmin=3, tmax=7.5): + def __init__(self, tmin=0, tmax=4.5): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=5, @@ -678,13 +680,14 @@ def __init__(self, tmin=3, tmax=7.5): code='004-2014', 
interval=[tmin, tmax], paradigm='imagery', + task_interval=[3, 7.5], doi='10.1109/TNSRE.2007.906956') class BNCI2015001(MNEBNCI): """BNCI 2015-001 Motor Imagery dataset""" - def __init__(self, tmin=3, tmax=8): + def __init__(self, tmin=0, tmax=5): # FIXME: some participant have 3 sessions super().__init__( subjects=list(range(1, 13)), @@ -693,13 +696,14 @@ def __init__(self, tmin=3, tmax=8): code='001-2015', interval=[tmin, tmax], paradigm='imagery', + task_interval=[3, 8], doi='10.1109/tnsre.2012.2189584') class BNCI2015004(MNEBNCI): """BNCI 2015-004 Motor Imagery dataset""" - def __init__(self, tmin=3, tmax=10): + def __init__(self, tmin=0, tmax=7): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=2, @@ -708,4 +712,5 @@ def __init__(self, tmin=3, tmax=10): code='004-2015', interval=[tmin, tmax], paradigm='imagery', + task_interval=[3,10], doi='10.1371/journal.pone.0123727') diff --git a/moabb/datasets/fake.py b/moabb/datasets/fake.py index 8dce01ca1..6c33b154b 100644 --- a/moabb/datasets/fake.py +++ b/moabb/datasets/fake.py @@ -18,7 +18,7 @@ def __init__(self, event_list=['fake_c1', 'fake_c2', 'fake_c3'], self.n_runs = n_runs event_id = {ev: ii + 1 for ii, ev in enumerate(event_list)} super().__init__(list(range(1, n_subjects + 1)), n_sessions, event_id, - 'FakeDataset', [1, 3], 'imagery') + 'FakeDataset', [0, 3], 'imagery') def _get_single_subject_data(self, subject): diff --git a/moabb/datasets/gigadb.py b/moabb/datasets/gigadb.py index 8ac78a7be..eecabc038 100644 --- a/moabb/datasets/gigadb.py +++ b/moabb/datasets/gigadb.py @@ -11,6 +11,9 @@ from mne.io import RawArray from mne.channels import read_montage from . 
import download as dl +import logging + +log = logging.getLogger() GIGA_URL = 'ftp://penguin.genomics.cn/pub/10.5524/100001_101000/100295/mat_data/' @@ -26,6 +29,7 @@ def __init__(self): code='GigaDb Motor Imagery', interval=[0.5, 2.5], # full trial is 0-3s, this drops edge effects paradigm='imagery', + task_interval=[0,3], doi='10.5524/100295') for ii in [32, 46, 49]: self.subject_list.remove(ii) @@ -57,6 +61,7 @@ def _get_single_subject_data(self, subject): # trials are already non continuous. edge artifact can appears but # are likely to be present during rest / inter-trial activity eeg_data = np.hstack([eeg_data_l, eeg_data_r]) + log.warning('Trials stacked to create continuous data -- edge effects present') info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=data.srate, montage=montage) diff --git a/moabb/datasets/upper_limb.py b/moabb/datasets/upper_limb.py index 2c3447311..473fa32d5 100644 --- a/moabb/datasets/upper_limb.py +++ b/moabb/datasets/upper_limb.py @@ -37,8 +37,9 @@ def __init__(self, imagined=True, executed=False): sessions_per_subject=n_sessions, events=event_id, code='Upper Limb Imagery', - interval=[2.5, 5], # according to paper 2-5 + interval=[0.5, 3], # according to paper 2-5 paradigm='imagery', + task_interval=[2,5], doi='10.1371/journal.pone.0182578') def _get_single_subject_data(self, subject): diff --git a/moabb/datasets/utils.py b/moabb/datasets/utils.py index 07a5e8ede..0158bc214 100644 --- a/moabb/datasets/utils.py +++ b/moabb/datasets/utils.py @@ -30,6 +30,8 @@ def dataset_search(paradigm, multi_session=False, events=None, min_subjects: int, minimum subjects in dataset channels: list or set of channels + If given events, modifies selected_events attribute of each dataset to only include events on the list. 
+ If given total classes, randomly selects n out of the full event dictionary ''' channels = set(channels) out_data = [] diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 5792ccf56..b53c8b364 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -13,7 +13,7 @@ class BaseMotorImagery(BaseParadigm): - """Base Imagery paradigm Context. + """Base Motor imagery paradigm. Please use one of the child classes Parameters ---------- @@ -69,14 +69,14 @@ def process_raw(self, raw, dataset): # get event id if self.events is None: - event_id = dataset.event_id + event_id = dataset.selected_events else: event_id = {ev: dataset.event_id[ev] for ev in self.events} # pick events, based on event_id try: events = mne.pick_events(events, include=list(event_id.values())) - except RuntimeError: + except RuntimeError as r: # skip raw if no event found return @@ -84,6 +84,7 @@ def process_raw(self, raw, dataset): if self.interval is None: tmin, tmax = dataset.interval else: + # TODO: make this work with varying offsets since not all data starts at 0 tmin, tmax = self.interval if self.resample is not None: @@ -125,8 +126,20 @@ def scoring(self): return 'accuracy' +class MotorImagery(BaseMotorImagery): + + def __init__(self, bp_low=8, bp_high=32, **kwargs): + """ + Single-pass MI. 
Takes arguments bp_low and bp_high and not filters + """ + if 'filters' in kwargs.keys(): + raise(ValueError('MotorImagery does not take argument \'filters\'')) + super().__init__(filters=[[bp_low, bp_high]], **kwargs) + + class FilterBankMotorImagery(BaseMotorImagery): """Filter Bank MI.""" + def __init__(self, filters=[[8, 12], [12, 16], [16, 20], [20, 24], [24, 28], [28, 32]], **kwargs): """init""" @@ -139,6 +152,7 @@ class LeftRightImagery(BaseMotorImagery): Metric is 'roc_auc' """ + def __init__(self, **kwargs): if 'events' in kwargs.keys(): raise(ValueError('LeftRightImagery dont accept events')) @@ -155,6 +169,7 @@ class FilterBankLeftRightImagery(FilterBankMotorImagery): Metric is 'roc_auc' """ + def __init__(self, **kwargs): if 'events' in kwargs.keys(): raise(ValueError('LeftRightImagery dont accept events')) @@ -165,6 +180,84 @@ def scoring(self): return 'roc_auc' +def FilterBankImageryNClass(FilterBankMotorImagery): + """ + Filter bank n-class imagery. + + Metric is 'roc-auc' if 2 classes and 'accuracy' if more + + Parameters + ----------- + + events: List of event labels, used to filter datasets (e.g. if only motor imagery is desired) + + n_classes: int, number of classes each dataset must have. If events is given, requires all imagery sorts to be within the events list + + """ + + def __init__(self, n_classes=2, **kwargs): + "docstring" + super().__init__(**kwargs) + self.n_classes = n_classes + self.possible_events = self.events + assert n_classes <= len( + self.possible_events), 'More classes than events specified' + self.events = None + + @property + def datasets(self): + return utils.dataset_search(paradigm='imagery', + events=self.possible_events, + total_classes=self.n_classes, + has_all_events=False) + + @property + def scoring(self): + if self.n_classes == 2: + return 'roc-auc' + else: + return 'accuracy' + + +def ImageryNClass(MotorImagery): + """ + N-class imagery. 
+ + Metric is 'roc-auc' if 2 classes and 'accuracy' if more + + Parameters + ----------- + + events: List of event labels, used to filter datasets (e.g. if only motor imagery is desired) + + n_classes: int, number of classes each dataset must have. If events is given, requires all imagery sorts to be within the events list + + """ + + def __init__(self, n_classes=2, **kwargs): + "docstring" + super().__init__(**kwargs) + self.n_classes = n_classes + self.possible_events = self.events + assert n_classes <= len( + self.possible_events), 'More classes than events specified' + self.events = None + + @property + def datasets(self): + return utils.dataset_search(paradigm='imagery', + events=self.possible_events, + total_classes=self.n_classes, + has_all_events=False) + + @property + def scoring(self): + if self.n_classes == 2: + return 'roc-auc' + else: + return 'accuracy' + + class FakeImageryParadigm(LeftRightImagery): """fake Imagery for left hand/right hand classification """ From f4339bf504fa822117e415403ab69e79a92c30da Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Thu, 5 Apr 2018 16:34:38 +0200 Subject: [PATCH 04/21] Working version, all changes not yet done --- moabb/datasets/base.py | 7 +++++-- moabb/datasets/bnci.py | 4 ++-- moabb/paradigms/motor_imagery.py | 22 ++++++++++++---------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/moabb/datasets/base.py b/moabb/datasets/base.py index 2cc442055..16fdbfeaf 100644 --- a/moabb/datasets/base.py +++ b/moabb/datasets/base.py @@ -2,6 +2,9 @@ Base class for a dataset """ import abc +import logging + +log = logging.getLogger() class BaseDataset(metaclass=abc.ABCMeta): @@ -61,8 +64,8 @@ def __init__(self, subjects, sessions_per_subject, events, assert interval[0]==0, 'Interval does not start at 0 so task onset is necessary' self.task_interval = list(interval) else: - if interval[1]-interval[0] >= task_interval[1]-task_interval[0]: - raise Warning('Given interval extends outside of imagery period') + 
if interval[1]-interval[0] > task_interval[1]-task_interval[0]: + log.warning('Given interval extends outside of imagery period') self.task_interval = task_interval self.paradigm = paradigm self.doi = doi diff --git a/moabb/datasets/bnci.py b/moabb/datasets/bnci.py index cd63708a5..c5155981c 100644 --- a/moabb/datasets/bnci.py +++ b/moabb/datasets/bnci.py @@ -665,7 +665,7 @@ def __init__(self, tmin=0, tmax=5): code='002-2014', interval=[tmin, tmax], paradigm='imagery', - task_interval=[3, 8] + task_interval=[3, 8], doi='10.1515/bmt-2014-0117') @@ -712,5 +712,5 @@ def __init__(self, tmin=0, tmax=7): code='004-2015', interval=[tmin, tmax], paradigm='imagery', - task_interval=[3,10], + task_interval=[3, 10], doi='10.1371/journal.pone.0123727') diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index b53c8b364..7d10fd027 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -76,6 +76,9 @@ def process_raw(self, raw, dataset): # pick events, based on event_id try: events = mne.pick_events(events, include=list(event_id.values())) + # not all runs have all events. 
Reduce event dict to only occurring events + events_in_run = np.unique(events[:,2]) + run_event_dict = {k: v for k, v in event_id.items() if v in events_in_run} except RuntimeError as r: # skip raw if no event found return @@ -84,7 +87,6 @@ def process_raw(self, raw, dataset): if self.interval is None: tmin, tmax = dataset.interval else: - # TODO: make this work with varying offsets since not all data starts at 0 tmin, tmax = self.interval if self.resample is not None: @@ -97,13 +99,13 @@ def process_raw(self, raw, dataset): raw_f = raw.copy().filter(fmin, fmax, method='iir', picks=picks, verbose=False) # epoch data - epochs = mne.Epochs(raw_f, events, event_id=event_id, + epochs = mne.Epochs(raw_f, events, event_id=run_event_dict, tmin=tmin, tmax=tmax, proj=False, baseline=None, preload=True, verbose=False, picks=picks) X.append(epochs.get_data()) - inv_events = {k: v for v, k in event_id.items()} + inv_events = {k: v for v, k in run_event_dict.items()} labels = np.array([inv_events[e] for e in epochs.events[:, -1]]) # if only one band, return a 3D array, otherwise return a 4D @@ -128,13 +130,13 @@ def scoring(self): class MotorImagery(BaseMotorImagery): - def __init__(self, bp_low=8, bp_high=32, **kwargs): + def __init__(self, fmin=8, fmax=32, **kwargs): """ - Single-pass MI. Takes arguments bp_low and bp_high and not filters + Single-pass MI. Takes arguments fmin and fmax and not filters """ if 'filters' in kwargs.keys(): raise(ValueError('MotorImagery does not take argument \'filters\'')) - super().__init__(filters=[[bp_low, bp_high]], **kwargs) + super().__init__(filters=[[fmin, fmax]], **kwargs) class FilterBankMotorImagery(BaseMotorImagery): @@ -180,7 +182,7 @@ def scoring(self): return 'roc_auc' -def FilterBankImageryNClass(FilterBankMotorImagery): +class FilterBankImageryNClass(FilterBankMotorImagery): """ Filter bank n-class imagery. 
@@ -214,12 +216,12 @@ def datasets(self): @property def scoring(self): if self.n_classes == 2: - return 'roc-auc' + return 'roc_auc' else: return 'accuracy' -def ImageryNClass(MotorImagery): +class ImageryNClass(MotorImagery): """ N-class imagery. @@ -253,7 +255,7 @@ def datasets(self): @property def scoring(self): if self.n_classes == 2: - return 'roc-auc' + return 'roc_auc' else: return 'accuracy' From 1e2d8e32e5324e38be4aa3dff64794af5ee5a7a2 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Thu, 5 Apr 2018 17:09:46 +0200 Subject: [PATCH 05/21] Fixes 1. Use new function used_events to choose events for a paradigm within the paradigm 2. Fixed bug in Physionet where hand and feet imagery couldn't be used together 3. Added documentation to new datasets and put them in .rst file 4. Simplified download code in Weibo2014 --- docs/source/datasets.rst | 4 +- moabb/datasets/Weibo2014.py | 86 ++++++++++++++----------------- moabb/datasets/Zhou2016.py | 36 ++++++++++++- moabb/datasets/base.py | 1 - moabb/datasets/utils.py | 3 -- moabb/paradigms/motor_imagery.py | 87 +++++++++++++++++++++++++++----- 6 files changed, 149 insertions(+), 68 deletions(-) diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst index d4715f8a1..de19b0bf8 100644 --- a/docs/source/datasets.rst +++ b/docs/source/datasets.rst @@ -25,7 +25,9 @@ Motor Imagery Datasets BBCIEEGfNIRS OpenvibeMI PhysionetMI - UpperLimb + UpperLimb + Zhou2016 + Weibo2014 ------------ ERP Datasets diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index d7af37e2b..d8f3dd558 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -26,57 +26,45 @@ def eeg_data_path(base_path, subject): file1_subj = ['cl', 'cyy', 'kyf', 'lnn'] file2_subj = ['ls', 'ry', 'wcf'] file3_subj = ['wx', 'yyx', 'zd'] - if not os.path.isfile(os.path.join(base_path, 'subject_{}.mat'.format(subject))): + + def get_subjects(sub_inds, sub_names, ind): + dataname = 'data{}'.format(ind) + if not 
os.path.isfile(os.path.join(base_path, dataname+'.zip')): + _fetch_file(FILES[ind], os.path.join( + base_path, dataname + '.zip'), print_destination=False) + with z.ZipFile(os.path.join(base_path, dataname + '.zip'), 'r') as f: + os.makedirs(os.path.join(base_path, dataname), exist_ok=True) + f.extractall(os.path.join(base_path, dataname)) + for fname in os.listdir(os.path.join(base_path, dataname)): + for ind, prefix in zip(range(1, 5), file1_subj): + if fname.startswith(prefix): + os.rename(os.path.join(base_path, dataname, fname), + os.path.join(base_path, + 'subject_{}.mat'.format(ind))) + os.remove(os.path.join(base_path, dataname + '.zip')) + shutil.rmtree(os.path.join(base_path, dataname)) + + if not os.path.isfile(os.path.join(base_path, + 'subject_{}.mat'.format(subject))): if subject in range(1, 5): - if not os.path.isfile(os.path.join(base_path, 'data1.zip')): - _fetch_file(FILES[0], os.path.join( - base_path, 'data1.zip'), print_destination=False) - with z.ZipFile(os.path.join(base_path, 'data1.zip'), 'r') as f: - os.makedirs(os.path.join(base_path, 'data1'), exist_ok=True) - f.extractall(os.path.join(base_path, 'data1')) - for fname in os.listdir(os.path.join(base_path, 'data1')): - for ind, prefix in zip(range(1, 5), file1_subj): - if fname.startswith(prefix): - os.rename(os.path.join(base_path, 'data1', fname), - os.path.join(base_path, - 'subject_{}.mat'.format(ind))) - os.remove(os.path.join(base_path, 'data1.zip')) - shutil.rmtree(os.path.join(base_path, 'data1')) + get_subjects(list(range(1, 5)), file1_subj, 0) elif subject in range(5, 8): - if not os.path.isfile(os.path.join(base_path, 'data2.zip')): - _fetch_file(FILES[1], os.path.join( - base_path, 'data2.zip'), print_destination=False) - with z.ZipFile(os.path.join(base_path, 'data2.zip'), 'r') as f: - os.makedirs(os.path.join(base_path, 'data2'), exist_ok=True) - f.extractall(os.path.join(base_path, 'data2')) - for fname in os.listdir(os.path.join(base_path, 'data2')): - for ind, prefix in 
zip(range(5, 8), file2_subj): - if fname.startswith(prefix): - os.rename(os.path.join(base_path, 'data2', fname), - os.path.join(base_path, - 'subject_{}.mat'.format(ind))) - os.remove(os.path.join(base_path, 'data2.zip')) - shutil.rmtree(os.path.join(base_path, 'data2')) + get_subjects(list(range(5, 8)), file2_subj, 1) elif subject in range(8, 11): - if not os.path.isfile(os.path.join(base_path, 'data3.zip')): - _fetch_file(FILES[2], os.path.join( - base_path, 'data3.zip'), print_destination=False) - with z.ZipFile(os.path.join(base_path, 'data3.zip'), 'r') as f: - os.makedirs(os.path.join(base_path, 'data3'), exist_ok=True) - f.extractall(os.path.join(base_path, 'data3')) - for fname in os.listdir(os.path.join(base_path, 'data3')): - for ind, prefix in zip(range(8, 11), file3_subj): - if fname.startswith(prefix): - os.rename(os.path.join(base_path, 'data3', fname), - os.path.join(base_path, - 'subject_{}.mat'.format(ind))) - os.remove(os.path.join(base_path, 'data3.zip')) - shutil.rmtree(os.path.join(base_path, 'data3')) + get_subjects(list(range(8, 11)), file3_subj, 2) return os.path.join(base_path, 'subject_{}.mat'.format(subject)) class Weibo2014(BaseDataset): - """Weibo 2014 Motor Imagery dataset""" + """Weibo 2014 Motor Imagery dataset [1] + + References ----------- + Yi Weibo, 2014, "EEG data of simple and compound limb + motor imagery", https://doi.org/10.7910/DVN/27306, Harvard Dataverse, V1 + + """ + + def __init__(self): super().__init__( @@ -99,9 +87,10 @@ def _get_single_subject_data(self, subject): data = loadmat(fname, squeeze_me=True, struct_as_record=False, verify_compressed_data_integrity=False) montage = mne.channels.read_montage('standard_1020') - info = mne.create_info(ch_names=['EEG{}'.format(i) for i in range(1,65)]+['STIM014'], + info = mne.create_info(ch_names=['EEG{}'.format(i) for i in range(1, 65)]+['STIM014'], ch_types=['eeg']*64+['stim'], - sfreq=200, montage=None) # until we get the channel names + sfreq=200, montage=None) + # until 
we get the channel names montage is None event_ids = data['label'].ravel() raw_data = np.transpose(data['data'], axes=[2, 0, 1]) # de-mean each trial @@ -110,11 +99,12 @@ def _get_single_subject_data(self, subject): raw_events[:, 0, 0] = event_ids data = np.concatenate([raw_data, raw_events], axis=1) # add buffer in between trials - log.warning('Trial data de-meaned and concatenated with a buffer to create cont data') + log.warning( + 'Trial data de-meaned and concatenated with a buffer to create cont data') zeroshape = (data.shape[0], data.shape[1], 50) data = np.concatenate([np.zeros(zeroshape), data, np.zeros(zeroshape)], axis=2) - raw = mne.io.RawArray(data=np.concatenate(list(data),axis=1), + raw = mne.io.RawArray(data=np.concatenate(list(data), axis=1), info=info, verbose=False) return {'session_0': {'run_0': raw}} diff --git a/moabb/datasets/Zhou2016.py b/moabb/datasets/Zhou2016.py index 85d86d73e..39d6a7449 100644 --- a/moabb/datasets/Zhou2016.py +++ b/moabb/datasets/Zhou2016.py @@ -41,7 +41,41 @@ def local_data_path(base_path, subject): class Zhou2016(BaseDataset): - """Zhou 2016 Imagery dataset""" + """Dataset from Zhou et al. 2016 [1] + + Abstract + ------------ + + Independent component analysis (ICA) as a promising spatial filtering method + can separate motor-related independent components (MRICs) from the + multichannel electroencephalogram (EEG) signals. However, the unpredictable + burst interferences may significantly degrade the performance of ICA-based + brain-computer interface (BCI) system. In this study, we proposed a new + algorithm frame to address this issue by combining the single-trial-based + ICA filter with zero-training classifier. We developed a two-round data + selection method to identify automatically the badly corrupted EEG trials in + the training set. The “high quality” training trials were utilized to + optimize the ICA filter. 
In addition, we proposed an accuracy-matrix method + to locate the artifact data segments within a single trial and investigated + which types of artifacts can influence the performance of the ICA-based + MIBCIs. Twenty-six EEG datasets of three-class motor imagery were used to + validate the proposed methods, and the classification accuracies were + compared with that obtained by frequently used common spatial pattern (CSP) + spatial filtering algorithm. The experimental results demonstrated that the + proposed optimizing strategy could effectively improve the stability, + practicality and classification performance of ICA-based MIBCI. The study + revealed that rational use of ICA method may be crucial in building a + practical ICA-based MIBCI system. + + References + ------------ + + [1] Zhou B, Wu X, Lv Z, Zhang L, Guo X (2016) A Fully Automated Trial + Selection Method for Optimization of Motor Imagery Based Brain-Computer + Interface. PLoS ONE 11(9): + e0162657. https://doi.org/10.1371/journal.pone.0162657 + + """ def __init__(self): super().__init__( diff --git a/moabb/datasets/base.py b/moabb/datasets/base.py index 16fdbfeaf..e98d53a24 100644 --- a/moabb/datasets/base.py +++ b/moabb/datasets/base.py @@ -57,7 +57,6 @@ def __init__(self, subjects, sessions_per_subject, events, self.subject_list = subjects self.n_sessions = sessions_per_subject self.event_id = events - self.selected_events = events.copy() self.code = code self.interval = interval if task_interval is None: diff --git a/moabb/datasets/utils.py b/moabb/datasets/utils.py index 0158bc214..da090e6a4 100644 --- a/moabb/datasets/utils.py +++ b/moabb/datasets/utils.py @@ -30,8 +30,6 @@ def dataset_search(paradigm, multi_session=False, events=None, min_subjects: int, minimum subjects in dataset channels: list or set of channels - If given events, modifies selected_events attribute of each dataset to only include events on the list. 
- If given total classes, randomly selects n out of the full event dictionary ''' channels = set(channels) out_data = [] @@ -79,7 +77,6 @@ def dataset_search(paradigm, multi_session=False, events=None, if n_events < n_classes: skip_dataset = True if keep_event_dict and not skip_dataset: - d.selected_events = keep_event_dict if len(channels) > 0: s1 = d.get_data([1], False)[0][0][0] if channels <= set(s1.info['ch_names']): diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 7d10fd027..91a199514 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -58,6 +58,13 @@ def verify(self, dataset): # we should verify list of channels, somehow + def used_events(self, dataset): + if self.events is None: + event_id = dataset.event_id + else: + event_id = {ev: dataset.event_id[ev] for ev in self.events} + return event_id + def process_raw(self, raw, dataset): # find the events events = mne.find_events(raw, shortest_event=0, verbose=False) @@ -68,17 +75,15 @@ def process_raw(self, raw, dataset): include=channels) # get event id - if self.events is None: - event_id = dataset.selected_events - else: - event_id = {ev: dataset.event_id[ev] for ev in self.events} + event_id = self.used_events(dataset) # pick events, based on event_id try: events = mne.pick_events(events, include=list(event_id.values())) # not all runs have all events. 
Reduce event dict to only occurring events - events_in_run = np.unique(events[:,2]) - run_event_dict = {k: v for k, v in event_id.items() if v in events_in_run} + events_in_run = np.unique(events[:, 2]) + run_event_dict = {k: v for k, + v in event_id.items() if v in events_in_run} except RuntimeError as r: # skip raw if no event found return @@ -201,15 +206,42 @@ def __init__(self, n_classes=2, **kwargs): "docstring" super().__init__(**kwargs) self.n_classes = n_classes - self.possible_events = self.events assert n_classes <= len( - self.possible_events), 'More classes than events specified' - self.events = None + self.events), 'More classes than events specified' + + if self.events is None: + log.warning("Choosing from all possible events") + + def verify(self, dataset): + assert dataset.paradigm == 'imagery' + if self.events is None: + assert len(dataset.event_id) >= self.n_classes + else: + overlap = len(set(self.events) & set(dataset.event_id.keys())) + assert overlap >= self.n_classes + + def used_events(self, dataset): + out = {} + if self.events is None: + for k, v in dataset.event_id.items(): + out[k] = v + if len(out) == self.n_classes: + break + else: + for event in self.events: + if event in dataset.event_id.keys(): + out[event] = dataset.event_id[event] + if len(out) == self.n_classes: + break + if len(out) < self.n_classes: + raise ValueError("Dataset {} did not have enough events in {} to run analysis".format( + dataset.code, self.events)) + return out @property def datasets(self): return utils.dataset_search(paradigm='imagery', - events=self.possible_events, + events=self.events, total_classes=self.n_classes, has_all_events=False) @@ -240,15 +272,42 @@ def __init__(self, n_classes=2, **kwargs): "docstring" super().__init__(**kwargs) self.n_classes = n_classes - self.possible_events = self.events assert n_classes <= len( - self.possible_events), 'More classes than events specified' - self.events = None + self.events), 'More classes than events 
specified' + + if self.events is None: + log.warning("Choosing from all possible events") + + def verify(self, dataset): + assert dataset.paradigm == 'imagery' + if self.events is None: + assert len(dataset.event_id) >= self.n_classes + else: + overlap = len(set(self.events) & set(dataset.event_id.keys())) + assert overlap >= self.n_classes + + def used_events(self, dataset): + out = {} + if self.events is None: + for k, v in dataset.event_id.items(): + out[k] = v + if len(out) == self.n_classes: + break + else: + for event in self.events: + if event in dataset.event_id.keys(): + out[event] = dataset.event_id[event] + if len(out) == self.n_classes: + break + if len(out) < self.n_classes: + raise ValueError("Dataset {} did not have enough events in {} to run analysis".format( + dataset.code, self.events)) + return out @property def datasets(self): return utils.dataset_search(paradigm='imagery', - events=self.possible_events, + events=self.events, total_classes=self.n_classes, has_all_events=False) From 63722f510ab0ff0806cd4d093aac544c080677f6 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Thu, 5 Apr 2018 17:12:35 +0200 Subject: [PATCH 06/21] cosmetic fix --- moabb/datasets/Weibo2014.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index d8f3dd558..c4a3081b7 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -58,7 +58,8 @@ def get_subjects(sub_inds, sub_names, ind): class Weibo2014(BaseDataset): """Weibo 2014 Motor Imagery dataset [1] - References ----------- + References + ----------- Yi Weibo, 2014, "EEG data of simple and compound limb motor imagery", https://doi.org/10.7910/DVN/27306, Harvard Dataverse, V1 From f08009a2ed6624e845853b6b4ef33bcfe3741204 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Thu, 5 Apr 2018 17:32:31 +0200 Subject: [PATCH 07/21] fixed error in new weibo dl code --- moabb/datasets/Weibo2014.py | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index c4a3081b7..01fb5dd30 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -36,7 +36,7 @@ def get_subjects(sub_inds, sub_names, ind): os.makedirs(os.path.join(base_path, dataname), exist_ok=True) f.extractall(os.path.join(base_path, dataname)) for fname in os.listdir(os.path.join(base_path, dataname)): - for ind, prefix in zip(range(1, 5), file1_subj): + for ind, prefix in zip(sub_inds, sub_names): if fname.startswith(prefix): os.rename(os.path.join(base_path, dataname, fname), os.path.join(base_path, From 1a3aab66c9512d3d061cf3bfb43941b2fc618bac Mon Sep 17 00:00:00 2001 From: Alexandre Barachant Date: Thu, 5 Apr 2018 22:32:17 -0400 Subject: [PATCH 08/21] replace BaseMotorImagery from tutorial --- moabb/paradigms/base.py | 4 ++-- moabb/paradigms/motor_imagery.py | 22 +++++++++++++++------- tutorials/plot_explore_paradigm.py | 10 +++++----- 3 files changed, 22 insertions(+), 14 deletions(-) diff --git a/moabb/paradigms/base.py b/moabb/paradigms/base.py index 8d75b798a..ee41397c8 100644 --- a/moabb/paradigms/base.py +++ b/moabb/paradigms/base.py @@ -1,9 +1,9 @@ -from abc import ABC, abstractproperty, abstractmethod +from abc import ABCMeta, abstractproperty, abstractmethod import numpy as np import pandas as pd -class BaseParadigm(ABC): +class BaseParadigm(metaclass=ABCMeta): """Base Paradigm. """ diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 91a199514..ba45994e0 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -1,5 +1,6 @@ -"""Motor Imagery contexts""" +"""Motor Imagery Paradigms""" +import abc import mne import numpy as np import pandas as pd @@ -12,7 +13,7 @@ log = logging.getLogger() -class BaseMotorImagery(BaseParadigm): +class BaseMotorImagery(BaseParadigm, metaclass=abc.ABCMeta): """Base Motor imagery paradigm. 
Please use one of the child classes Parameters @@ -196,10 +197,13 @@ class FilterBankImageryNClass(FilterBankMotorImagery): Parameters ----------- - events: List of event labels, used to filter datasets (e.g. if only motor imagery is desired) - - n_classes: int, number of classes each dataset must have. If events is given, requires all imagery sorts to be within the events list + events: List of str + event labels used to filter datasets (e.g. if only motor imagery is + desired). + n_classes: int, + number of classes each dataset must have. If events is given, + requires all imagery sorts to be within the events list. """ def __init__(self, n_classes=2, **kwargs): @@ -262,9 +266,13 @@ class ImageryNClass(MotorImagery): Parameters ----------- - events: List of event labels, used to filter datasets (e.g. if only motor imagery is desired) + events: List of str + event labels used to filter datasets (e.g. if only motor imagery is + desired). - n_classes: int, number of classes each dataset must have. If events is given, requires all imagery sorts to be within the events list + n_classes: int, + number of classes each dataset must have. If events is given, + requires all imagery sorts to be within the events list. 
""" diff --git a/tutorials/plot_explore_paradigm.py b/tutorials/plot_explore_paradigm.py index e0809ea53..12d0f4eb1 100644 --- a/tutorials/plot_explore_paradigm.py +++ b/tutorials/plot_explore_paradigm.py @@ -15,7 +15,7 @@ This tutorial explore the paradigm object, with 3 examples of paradigm : - - BaseMotorImagery + - MotorImagery - FilterBankMotorImagery - LeftRightImagery """ @@ -25,18 +25,18 @@ import numpy as np from moabb.datasets import BNCI2014001 -from moabb.paradigms import (LeftRightImagery, BaseMotorImagery, +from moabb.paradigms import (LeftRightImagery, MotorImagery, FilterBankMotorImagery) print(__doc__) ############################################################################### -# Base MotorImagery +# MotorImagery # ----------------- # -# First, lets take a example of the BaseMotorImagery paradigm. +# First, lets take a example of the MotorImagery paradigm. -paradigm = BaseMotorImagery() +paradigm = MotorImagery() print(paradigm.__doc__) From 650416ba49ed33aa4142219d48c2d357e12a1e03 Mon Sep 17 00:00:00 2001 From: Alexandre Barachant Date: Fri, 6 Apr 2018 01:07:18 -0400 Subject: [PATCH 09/21] better doc and minor fix --- moabb/datasets/Weibo2014.py | 74 ++++++++++++++++++++++++-------- moabb/datasets/Zhou2016.py | 52 ++++++++++------------ moabb/paradigms/motor_imagery.py | 5 ++- 3 files changed, 82 insertions(+), 49 deletions(-) diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index 01fb5dd30..4355d2e2b 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -56,17 +56,41 @@ def get_subjects(sub_inds, sub_names, ind): class Weibo2014(BaseDataset): - """Weibo 2014 Motor Imagery dataset [1] - - References - ----------- - Yi Weibo, 2014, "EEG data of simple and compound limb - motor imagery", https://doi.org/10.7910/DVN/27306, Harvard Dataverse, V1 - + """Weibo 2014 Motor Imagery dataset. 
+ + Dataset from the article *Evaluation of EEG oscillatory patterns and + cognitive process during simple and compound limb motor imagery* [1]_. + + It contains data recorded on 10 subjects, with 60 electrodes. + + This dataset was used to investigate the differences of the EEG patterns + between simple limb motor imagery and compound limb motor + imagery. Seven kinds of mental tasks have been designed, involving three + tasks of simple limb motor imagery (left hand, right hand, feet), three + tasks of compound limb motor imagery combining hand with hand/foot + (both hands, left hand combined with right foot, right hand combined with + left foot) and rest state. + + At the beginning of each trial (8 seconds), a white circle appeared at the + center of the monitor. After 2 seconds, a red circle (preparation cue) + appeared for 1 second to remind the subjects of paying attention to the + character indication next. Then red circle disappeared and character + indication (‘Left Hand’, ‘Left Hand & Right Foot’, et al) was presented on + the screen for 4 seconds, during which the participants were asked to + perform kinesthetic motor imagery rather than a visual type of imagery + while avoiding any muscle movement. After 7 seconds, ‘Rest’ was presented + for 1 second before next trial (Fig. 1(a)). The experiments were divided + into 9 sections, involving 8 sections consisting of 60 trials each for six + kinds of MI tasks (10 trials for each MI task in one section) and one + section consisting of 80 trials for rest state. The sequence of six MI + tasks was randomized. Intersection break was about 5 to 10 minutes. + + References + ----------- + .. [1] Yi, Weibo, et al. "Evaluation of EEG oscillatory patterns and + cognitive process during simple and compound limb motor imagery." + PloS one 9.12 (2014). 
https://doi.org/10.1371/journal.pone.0114853 """ - - - def __init__(self): super().__init__( subjects=list(range(1, 11)), @@ -77,9 +101,10 @@ def __init__(self): code='Weibo 2014', # Full trial is 0-8 but with trialwise bandpass this reduces # boundary effects - interval=[0, 8], + interval=[3, 7], + task_interval=[0, 8], paradigm='imagery', - doi='10.7910/DVN/27306') + doi='10.1371/journal.pone.0114853') def _get_single_subject_data(self, subject): """return data for a single subject""" @@ -87,9 +112,22 @@ def _get_single_subject_data(self, subject): # TODO: add 1s 0 buffer between trials and make continuous data = loadmat(fname, squeeze_me=True, struct_as_record=False, verify_compressed_data_integrity=False) - montage = mne.channels.read_montage('standard_1020') - info = mne.create_info(ch_names=['EEG{}'.format(i) for i in range(1, 65)]+['STIM014'], - ch_types=['eeg']*64+['stim'], + montage = mne.channels.read_montage('standard_1005') + ch_names = ['Fp1', 'Fpz', 'Fp2', 'AF3', 'AF4', 'F7', 'F5', 'F3', 'F1', + 'Fz', 'F2', 'F4', 'F6', 'F8', 'FT7', 'FC5', 'FC3', 'FC1', + 'FCz', 'FC2', 'FC4', 'FC6', 'FT8', 'T7', 'C5', 'C3', 'C1', + 'Cz', 'C2', 'C4', 'C6', 'T8', 'TP7', 'CP5', 'CP3', 'CP1', + 'CPz', 'CP2', 'CP4', 'CP6', 'TP8', 'P7', 'P5', 'P3', 'P1', + 'Pz', 'P2', 'P4', 'P6', 'P8', 'PO7', 'PO5', 'PO3', 'POz', + 'PO4', 'PO6', 'PO8', 'CB1', 'O1', 'Oz', 'O2', 'CB2', 'VEO', + 'HEO'] + + ch_types = ['eeg'] * 62 + ['eog'] * 2 + # FIXME not sure what are those CB1 / CB2 + ch_types[57] = 'misc' + ch_types[61] = 'misc' + info = mne.create_info(ch_names=ch_names + ['STIM014'], + ch_types=ch_types + ['stim'], sfreq=200, montage=None) # until we get the channel names montage is None event_ids = data['label'].ravel() @@ -98,15 +136,17 @@ def _get_single_subject_data(self, subject): raw_data = raw_data - np.mean(raw_data, axis=2, keepdims=True) raw_events = np.zeros((raw_data.shape[0], 1, raw_data.shape[2])) raw_events[:, 0, 0] = event_ids - data = np.concatenate([raw_data, 
raw_events], axis=1) + data = np.concatenate([1e-6 * raw_data, raw_events], axis=1) # add buffer in between trials log.warning( - 'Trial data de-meaned and concatenated with a buffer to create cont data') + "Trial data de-meaned and concatenated with a buffer to create " + "cont data") zeroshape = (data.shape[0], data.shape[1], 50) data = np.concatenate([np.zeros(zeroshape), data, np.zeros(zeroshape)], axis=2) raw = mne.io.RawArray(data=np.concatenate(list(data), axis=1), info=info, verbose=False) + raw.set_montage(montage) return {'session_0': {'run_0': raw}} def data_path(self, subject, path=None, force_update=False, diff --git a/moabb/datasets/Zhou2016.py b/moabb/datasets/Zhou2016.py index 39d6a7449..779e1f691 100644 --- a/moabb/datasets/Zhou2016.py +++ b/moabb/datasets/Zhou2016.py @@ -41,40 +41,32 @@ def local_data_path(base_path, subject): class Zhou2016(BaseDataset): - """Dataset from Zhou et al. 2016 [1] + """Dataset from Zhou et al. 2016. - Abstract - ------------ + Dataset from the article *A Fully Automated Trial Selection Method for + Optimization of Motor Imagery Based Brain-Computer Interface* [1]_. + This dataset contains data recorded on 4 subjects performing 3 type of + motor imagery: left hand, right hand and feet. - Independent component analysis (ICA) as a promising spatial filtering method - can separate motor-related independent components (MRICs) from the - multichannel electroencephalogram (EEG) signals. However, the unpredictable - burst interferences may significantly degrade the performance of ICA-based - brain-computer interface (BCI) system. In this study, we proposed a new - algorithm frame to address this issue by combining the single-trial-based - ICA filter with zero-training classifier. We developed a two-round data - selection method to identify automatically the badly corrupted EEG trials in - the training set. The “high quality” training trials were utilized to - optimize the ICA filter. 
In addition, we proposed an accuracy-matrix method - to locate the artifact data segments within a single trial and investigated - which types of artifacts can influence the performance of the ICA-based - MIBCIs. Twenty-six EEG datasets of three-class motor imagery were used to - validate the proposed methods, and the classification accuracies were - compared with that obtained by frequently used common spatial pattern (CSP) - spatial filtering algorithm. The experimental results demonstrated that the - proposed optimizing strategy could effectively improve the stability, - practicality and classification performance of ICA-based MIBCI. The study - revealed that rational use of ICA method may be crucial in building a - practical ICA-based MIBCI system. + Every subject went through three sessions, each of which contained two + consecutive runs with several minutes inter-run breaks, and each run + comprised 75 trials (25 trials per class). The intervals between two + sessions varied from several days to several months. - References - ------------ + A trial started by a short beep indicating 1 s preparation time, + and followed by a red arrow pointing randomly to three directions (left, + right, or bottom) lasting for 5 s and then presented a black screen for + 4 s. The subject was instructed to immediately perform the imagination + tasks of the left hand, right hand or foot movement respectively according + to the cue direction, and try to relax during the black screen. - [1] Zhou B, Wu X, Lv Z, Zhang L, Guo X (2016) A Fully Automated Trial - Selection Method for Optimization of Motor Imagery Based Brain-Computer - Interface. PLoS ONE 11(9): - e0162657. https://doi.org/10.1371/journal.pone.0162657 + References + ---------- + .. [1] Zhou B, Wu X, Lv Z, Zhang L, Guo X (2016) A Fully Automated + Trial Selection Method for Optimization of Motor Imagery Based + Brain-Computer Interface. PLoS ONE 11(9). 
+ https://doi.org/10.1371/journal.pone.0162657 """ def __init__(self): @@ -87,7 +79,7 @@ def __init__(self): # MI 1-6s, prepare 0-1, break 6-10 # boundary effects interval=[0, 5], - task_interval=[1,6], + task_interval=[1, 6], paradigm='imagery', doi='10.1371/journal.pone.0162657') diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index ba45994e0..9db09891d 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -13,7 +13,7 @@ log = logging.getLogger() -class BaseMotorImagery(BaseParadigm, metaclass=abc.ABCMeta): +class BaseMotorImagery(BaseParadigm): """Base Motor imagery paradigm. Please use one of the child classes Parameters @@ -109,7 +109,8 @@ def process_raw(self, raw, dataset): tmin=tmin, tmax=tmax, proj=False, baseline=None, preload=True, verbose=False, picks=picks) - X.append(epochs.get_data()) + # MNE is in V, rescale to have uV + X.append(1e6 * epochs.get_data()) inv_events = {k: v for v, k in run_event_dict.items()} labels = np.array([inv_events[e] for e in epochs.events[:, -1]]) From a9c120559c3611532976c999289c31c5f0e7a98e Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Fri, 6 Apr 2018 12:56:14 +0200 Subject: [PATCH 10/21] Added raw documentation to all datasets and removed trial_interval Needs to be prettied up at some point... 
--- moabb/datasets/Weibo2014.py | 5 +- moabb/datasets/Zhou2016.py | 3 +- moabb/datasets/alex_mi.py | 16 ++- moabb/datasets/base.py | 14 +-- moabb/datasets/bbci_eeg_fnirs.py | 28 ++++- moabb/datasets/bnci.py | 198 +++++++++++++++++++++++++++---- moabb/datasets/gigadb.py | 28 +++-- moabb/datasets/openvibe_mi.py | 4 +- moabb/datasets/physionet_mi.py | 5 +- moabb/datasets/upper_limb.py | 3 +- moabb/paradigms/motor_imagery.py | 13 +- 11 files changed, 260 insertions(+), 57 deletions(-) diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index 4355d2e2b..718f3959e 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -91,6 +91,7 @@ class Weibo2014(BaseDataset): cognitive process during simple and compound limb motor imagery." PloS one 9.12 (2014). https://doi.org/10.1371/journal.pone.0114853 """ + def __init__(self): super().__init__( subjects=list(range(1, 11)), @@ -99,10 +100,8 @@ def __init__(self): hands=3, feet=4, left_hand_right_foot=5, right_hand_left_foot=6, rest=7), code='Weibo 2014', - # Full trial is 0-8 but with trialwise bandpass this reduces - # boundary effects + # Full trial w/ rest is 0-8 interval=[3, 7], - task_interval=[0, 8], paradigm='imagery', doi='10.1371/journal.pone.0114853') diff --git a/moabb/datasets/Zhou2016.py b/moabb/datasets/Zhou2016.py index 779e1f691..175821b48 100644 --- a/moabb/datasets/Zhou2016.py +++ b/moabb/datasets/Zhou2016.py @@ -78,8 +78,7 @@ def __init__(self): code='Zhou 2016', # MI 1-6s, prepare 0-1, break 6-10 # boundary effects - interval=[0, 5], - task_interval=[1, 6], + interval=[1, 6], paradigm='imagery', doi='10.1371/journal.pone.0162657') diff --git a/moabb/datasets/alex_mi.py b/moabb/datasets/alex_mi.py index c834646e9..60a7a17e6 100644 --- a/moabb/datasets/alex_mi.py +++ b/moabb/datasets/alex_mi.py @@ -11,7 +11,21 @@ class AlexMI(BaseDataset): - """Alex Motor Imagery dataset""" + """Alex Motor Imagery dataset + This Dataset contains EEG recordings from 8 subjects, performing 
2 task of motor + imagination (right hand, feet or rest). Data have been recorded at 512Hz with 16 + wet electrodes (Fpz, F7, F3, Fz, F4, F8, T7, C3, Cz, C4, T8, P7, P3, Pz, P4, P8) + with a g.tec g.USBamp EEG amplifier. + + File are provided in MNE raw file format. A stimulation channel encoding the + timing of the motor imagination. The start of a trial is encoded as 1, then the + actual start of the motor imagination is encoded with 2 for imagination of a + right hand movement, 3 for imagination of both feet movement and 4 with a rest + trial. + + The duration of each trial is 3 second. There is 20 trial of each class. + + """ def __init__(self): super().__init__( diff --git a/moabb/datasets/base.py b/moabb/datasets/base.py index e98d53a24..cdca2b327 100644 --- a/moabb/datasets/base.py +++ b/moabb/datasets/base.py @@ -11,7 +11,7 @@ class BaseDataset(metaclass=abc.ABCMeta): """Base dataset""" def __init__(self, subjects, sessions_per_subject, events, - code, interval, paradigm, task_interval=None, doi=None): + code, interval, paradigm, doi=None): """ Parameters required for all datasets @@ -41,14 +41,11 @@ def __init__(self, subjects, sessions_per_subject, events, Unique identifier for dataset, used in all plots interval: list with 2 entries - Interval relative to trial start for imagery + Imagery interval as defined in the dataset description paradigm: ['p300','imagery'] Defines what sort of dataset this is (currently only imagery is implemented) - task_interval: list of 2 entries or None - Defines the start and end of the imagery *relative to event marker.* If not specified, defaults to interval. 
- doi: DOI for dataset, optional (for now) """ if not isinstance(subjects, list): @@ -59,13 +56,6 @@ def __init__(self, subjects, sessions_per_subject, events, self.event_id = events self.code = code self.interval = interval - if task_interval is None: - assert interval[0]==0, 'Interval does not start at 0 so task onset is necessary' - self.task_interval = list(interval) - else: - if interval[1]-interval[0] > task_interval[1]-task_interval[0]: - log.warning('Given interval extends outside of imagery period') - self.task_interval = task_interval self.paradigm = paradigm self.doi = doi diff --git a/moabb/datasets/bbci_eeg_fnirs.py b/moabb/datasets/bbci_eeg_fnirs.py index 341f24601..74b82d34a 100644 --- a/moabb/datasets/bbci_eeg_fnirs.py +++ b/moabb/datasets/bbci_eeg_fnirs.py @@ -58,7 +58,33 @@ def fnirs_data_path(path, subject): class BBCIEEGfNIRS(BaseDataset): - """BBCI EEG fNIRS Motor Imagery dataset""" + """BBCI EEG fNIRS Motor Imagery dataset + + Data Acquisition + ---------------------------------------- + EEG and NIRS data was collected in an ordinary bright room. EEG data was recorded by a multichannel BrainAmp EEG amplifier with thirty active electrodes (Brain Products GmbH, Gilching, Germany) with linked mastoids reference at 1000 Hz sampling rate. The EEG amplifier was also used to measure the electrooculogram (EOG), electrocardiogram (ECG) and respiration with a piezo based breathing belt. Thirty EEG electrodes were placed on a custom-made stretchy fabric cap (EASYCAP GmbH, Herrsching am Ammersee, Germany) and placed according to the international 10-5 system (AFp1, AFp2, AFF1h, AFF2h, AFF5h, AFF6h, F3, F4, F7, F8, FCC3h, FCC4h, FCC5h, FCC6h, T7, T8, Cz, CCP3h, CCP4h, CCP5h, CCP6h, Pz, P3, P4, P7, P8, PPO1h, PPO2h, POO1, POO2 and Fz for ground electrode). + + NIRS data was collected by NIRScout (NIRx GmbH, Berlin, Germany) at 12.5 Hz sampling rate. Each adjacent source-detector pair creates one physiological NIRS channel. 
Fourteen sources and sixteen detectors resulting in thirty-six physiological channels were placed at frontal (nine channels around Fp1, Fp2, and Fpz), motor (twelve channels around C3 and C4, respectively) and visual areas (three channels around Oz). The inter-optode distance was 30 mm. NIRS optodes were fixed on the same cap as the EEG electrodes. Ambient lights were sufficiently blocked by a firm contact between NIRS optodes and scalp and use of an opaque cap. + + EOG was recorded using two vertical (above and below left eye) and two horizontal (outer canthus of each eye) electrodes. ECG was recorded based on Einthoven triangle derivations I and II, and respiration was measured using a respiration belt on the lower chest. EOG, ECG and respiration were sampled at the same sampling rate of the EEG. ECG and respiration data were not analyzed in this study, but are provided along with the other signals. + + Experimental Procedure + ---------------------------------------- + + The subjects sat on a comfortable armchair in front of a 50-inch white screen. The distance between their heads and the screen was 1.6 m. They were asked not to move any part of the body during the data recording. The experiment consisted of three sessions of left and right hand MI (dataset A) and MA and baseline tasks (taking a rest without any thought) (dataset B) each. Each session comprised a 1 min pre-experiment resting period, 20 repetitions of the given task and a 1 min post-experiment resting period. The task started with 2 s of a visual introduction of the task, followed by 10 s of a task period and resting period which was given randomly from 15 to 17 s. At the beginning and end of the task period, a short beep (250 ms) was played. All instructions were displayed on the white screen by a video projector. MI and MA tasks were performed in separate sessions but in alternating order (i.e., sessions 1, 3 and 5 for MI (dataset A) and sessions 2, 4 and 6 for MA (dataset B)). Fig. 
2 shows the schematic diagram of the experimental paradigm. Five sorts of motion artifacts induced by eye and head movements (dataset C) were measured. The motion artifacts were recorded after all MI and MA task recordings. The experiment did not include the pre- and post-experiment resting state periods. + + Motor Imagery (Dataset A) + ---------------------------- + + For motor imagery, subjects were instructed to perform haptic motor imagery (i.e. to imagine the feeling of opening and closing their hands as they were grabbing a ball) to ensure that actual motor imagery, not visual imagery, was performed. All subjects were naive to the MI experiment. For the visual instruction, a black arrow pointing to either the left or right side appeared at the center of the screen for 2 s. The arrow disappeared with a short beep sound and then a black fixation cross was displayed during the task period. The subjects were asked to imagine hand gripping (opening and closing their hands) in a 1 Hz pace. This pace was shown to and repeated by the subjects by performing real hand gripping before the experiment. Motor imagery was performed continuously over the task period. The task period was finished with a short beep sound and a 'STOP' displayed for 1s on the screen. The fixation cross was displayed again during the rest period and the subjects were asked to gaze at it to minimize their eye movements. This process was repeated twenty times in a single session (ten trials per condition in a single session; thirty trials in the whole sessions). In a single session, motor imagery tasks were performed on the basis of ten subsequent blocks randomly consisting of one of two conditions: Either first left and then right hand motor imagery or vice versa. 
+ + Mental Arithmetic (Dataset B) + ---------------------------------------- + + For the visual instruction of the MA task, an initial subtraction such as 'three-digit number minus one-digit number' (e.g., 384-8) appeared at the center of the screen for 2 s. The subjects were instructed to memorize the numbers while the initial subtraction was displayed on the screen. The initial subtraction disappeared with a short beep sound and a black fixation cross was displayed during the task period in which the subjects were asked to repeatedly perform to subtract the one-digit number from the result of the previous subtraction. For the baseline task, no specific sign but the black fixation cross was displayed on the screen, and the subjects were instructed to take a rest. Note that there were other rest periods between the MA and baseline task periods, as same with the MI paradigm. Both task periods were finished with a short beep sound and a 'STOP' displayed for 1 s on the screen. The fixation cross was displayed again during the rest period. MA and baseline trials were randomized in the same way as MI. + + + """ def __init__(self, fnirs=False, motor_imagery=True, mental_arithmetic=False): diff --git a/moabb/datasets/bnci.py b/moabb/datasets/bnci.py index c5155981c..671ac15fa 100644 --- a/moabb/datasets/bnci.py +++ b/moabb/datasets/bnci.py @@ -640,77 +640,235 @@ def data_path(self, subject, path=None, force_update=False, class BNCI2014001(MNEBNCI): - """BNCI 2014-001 Motor Imagery dataset""" + """BNCI 2014-001 Motor Imagery dataset + +This data set consists of EEG data from 9 subjects. The cue-based BCI +paradigm consisted of four different motor imagery tasks, namely the imag- +ination of movement of the left hand (class 1), right hand (class 2), both +feet (class 3), and tongue (class 4). Two sessions on different days were +recorded for each subject. Each session is comprised of 6 runs separated by +short breaks. 
One run consists of 48 trials (12 for each of the four possible +classes), yielding a total of 288 trials per session. + +The subjects were sitting in a comfortable armchair in front of a com- puter +screen. At the beginning of a trial ( t = 0 s), a fixation cross appeared on +the black screen. In addition, a short acoustic warning tone was pre- sented. +After two seconds ( t = 2 s), a cue in the form of an arrow pointing either to +the left, right, down or up (corresponding to one of the four classes left hand, +right hand, foot or tongue) appeared and stayed on the screen for 1.25 s. This +prompted the subjects to perform the desired motor imagery task. No feedback +was provided. The subjects were ask to carry out the motor imagery task until +the fixation cross disappeared from the screen at t = 6 s. + +Twenty-two Ag/AgCl electrodes (with inter-electrode distances of 3.5 cm) were +used to record the EEG; the montage is shown in Figure 3 left. All signals were +recorded monopolarly with the left mastoid serving as reference and the right +mastoid as ground. The signals were sampled with 250 Hz and bandpass-filtered +between 0.5 Hz and 100 Hz. The sensitivity of the amplifier was set to 100 μV +. An additional 50 Hz notch filter was enabled to suppress line noise +""" - def __init__(self, tmin=0.5, tmax=4): + def __init__(self): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=2, events={'left_hand': 1, 'right_hand': 2, 'feet': 3, 'tongue': 4}, code='001-2014', - interval=[tmin, tmax], + interval=[2, 6], paradigm='imagery', - task_interval=[0, 4], doi='10.3389/fnins.2012.00055') class BNCI2014002(MNEBNCI): - """BNCI 2014-002 Motor Imagery dataset""" + """BNCI 2014-002 Motor Imagery dataset + + The session consisted of eight runs, five of them for training and three + with feedback for validation. One run was composed of 20 trials. Taken + together, we recorded 50 trials per class for training and 30 trials per + class for validation. 
Participants had the task of performing sustained (5 + seconds) kinaesthetic motor imagery (MI) of the right hand and of the feet + each as instructed by the cue. At 0 s, a white colored cross appeared on + screen, 2 s later a beep sounded to catch the participant’s attention. The + cue was displayed from 3 s to 4 s. Participants were instructed to start + with MI as soon as they recognized the cue and to perform the indicated MI + until the cross disappeared at 8 s. A rest period with a random length + between 2 s and 3 s was presented between trials. Participants did not + receive feedback during training. Feedback was presented in form of a white + coloured bar-graph. The length of the bar-graph reflected the amount of + correct classifications over the last second. EEG was measured with a + biosignal amplifier and active Ag/AgCl electrodes (g.USBamp, g.LADYbird, + Guger Technologies OG, Schiedlberg, Austria) at a sampling rate of 512 Hz. + The electrodes placement was designed for obtaining three Laplacian + derivations. Center electrodes at positions C3, Cz, and C4 and four + additional electrodes around each center electrode with a distance of 2.5 + cm, 15 electrodes total. The reference electrode was mounted on the left + mastoid and the ground electrode on the right mastoid. The 13 participants + were aged between 20 and 30 years, 8 naive to the task, and had no known + medical or neurological diseases. + + """ - def __init__(self, tmin=0, tmax=5): + def __init__(self): super().__init__( subjects=list(range(1, 15)), sessions_per_subject=1, events={'right_hand': 1, 'feet': 2}, code='002-2014', - interval=[tmin, tmax], + interval=[3, 8], paradigm='imagery', - task_interval=[3, 8], doi='10.1515/bmt-2014-0117') class BNCI2014004(MNEBNCI): - """BNCI 2014-004 Motor Imagery dataset""" + """BNCI 2014-004 Motor Imagery dataset:BCI Competition 2008 – Graz data set B + + This data set consists of EEG data from 9 subjects of a study published in [1]. 
+ The subjects were right-handed, had normal or corrected-to-normal vision and + were paid for participating in the experiments. All volunteers were sitting in + an armchair, watching a flat screen monitor placed approximately 1 m away at eye + level. For each subject 5 sessions are provided, whereby the first two sessions + contain training data without feedback (screening), and the last three sessions + were recorded with feedback. + + Three bipolar recordings (C3, Cz, and C4) were recorded with a sampling + frequency of 250 Hz.They were bandpass- filtered between 0.5 Hz and 100 Hz, and + a notch filter at 50 Hz was enabled. The placement of the three bipolar + recordings (large or small distances, more anterior or posterior) were slightly + different for each subject (for more details see [1]). The electrode position + Fz served as EEG ground. In addition to the EEG channels, the electrooculogram + (EOG) was recorded with three monopolar electrodes. + + The cue-based screening paradigm consisted of two classes, + namely the motor imagery (MI) of left hand (class 1) and right hand (class 2). + Each subject participated in two screening sessions without feedback recorded on + two different days within two weeks. Each session consisted of six runs with + ten trials each and two classes of imagery. This resulted in 20 trials per run + and 120 trials per session. Data of 120 repetitions of each MI class were + available for each person in total. Prior to the first motor im- agery training + the subject executed and imagined different movements for each body part and + selected the one which they could imagine best (e. g., squeezing a ball or + pulling a brake). + + Each trial started with a fixation cross and an additional short acoustic + warning tone (1 kHz, 70 ms). Some seconds later a visual cue was presented + for 1.25 seconds. Afterwards the subjects had to imagine the corresponding + hand movement over a period of 4 seconds. 
Each trial was followed by a + short break of at least 1.5 seconds. A randomized time of up to 1 second + was added to the break to avoid adaptation + + For the three online feedback sessions four runs with smiley feedback + were recorded, whereby each run consisted of twenty trials for each type of + motor imagery. At the beginning of each trial (second 0) the feedback (a + gray smiley) was centered on the screen. At second 2, a short warning beep + (1 kHz, 70 ms) was given. The cue was presented from second 3 to 7.5. At + second 7.5 the screen went blank and a random interval between 1.0 and 2.0 + seconds was added to the trial. + + [1] R. Leeb, F. Lee, C. Keinrath, R. Scherer, H. Bischof, G. Pfurtscheller. + Brain-computer communication: motivation, aim, and impact of ex- + ploring a virtual apartment. IEEE Transactions on Neural Systems and + Rehabilitation Engineering 15, 473–482, 2007 + """ - def __init__(self, tmin=0, tmax=4.5): + def __init__(self): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=5, events={'left_hand': 1, 'right_hand': 2}, code='004-2014', - interval=[tmin, tmax], + interval=[3, 7.5], paradigm='imagery', - task_interval=[3, 7.5], doi='10.1109/TNSRE.2007.906956') class BNCI2015001(MNEBNCI): - """BNCI 2015-001 Motor Imagery dataset""" + """BNCI 2015-001 Motor Imagery dataset + We acquired the EEG from three Laplacian derivations, 3.5 cm (center-to- + center) around the electrode positions (according to International 10-20 + System of Electrode Placement) C3 (FC3, C5, CP3 and C1), Cz (FCz, C1, CPz + and C2) and C4 (FC4, C2, CP4 and C6). The acquisition hardware was a + g.GAMMAsys active electrode system along with a g.USBamp amplifier (g.tec, + Guger Tech- nologies OEG, Graz, Austria). The system sampled at 512 Hz, + with a bandpass filter between 0.5 and 100 Hz and a notch filter at 50 Hz. + The order of the channels in the data is FC3, FCz, FC4, C5, C3, C1, Cz, C2, + C4, C6, CP3, CPz, CP4. 
+ + The task for the user was to perform sustained right hand versus both feet + movement imagery starting from the cue (second 3) to the end of the cross + period (sec- ond 8). A trial started with 3 s of reference period, followed + by a brisk audible cue and a visual cue (arrow right for right hand, arrow + down for both feet) from second 3 to 4.25. The activity period, where the + users received feedback, lasted from second 4 to 8. There was a random 2 to + 3 s pause between the trials. + + + + [1] J. Faller, C. Vidaurre, T. Solis-Escalante, C. Neuper and R. + Scherer (2012) Autocalibration and recurrent adaptation: Towards a plug and + play online ERD- BCI. IEEE Transactions on Neural Systems and + Rehabilitation Engineering, 20(3), 313-319 . Doi: 10.1109/tnsre.2012.2189584. + """ - def __init__(self, tmin=0, tmax=5): + def __init__(self): # FIXME: some participant have 3 sessions super().__init__( subjects=list(range(1, 13)), sessions_per_subject=2, events={'right_hand': 1, 'feet': 2}, code='001-2015', - interval=[tmin, tmax], + interval=[3, 8], paradigm='imagery', - task_interval=[3, 8], doi='10.1109/tnsre.2012.2189584') class BNCI2015004(MNEBNCI): - """BNCI 2015-004 Motor Imagery dataset""" + """BNCI 2015-004 Motor Imagery dataset + We provide EEG data recorded from nine users with disability (spinal cord + injury and stroke) on two different days (sessions). Users performed, + follow- ing a cue-guided experimental paradigm, five distinct mental tasks + (MT). MTs include mental word association (condition WORD), mental subtrac- + tion (SUB), spatial navigation (NAV), right hand motor imagery (HAND) and + feet motor imagery (FEET). Details on the experimental paradigm are + summarized in Figure 1. The session for a single subject consisted of 8 + runs resulting in 40 trials of each class for each day. One single + experimental run consisted of 25 cues, with 5 of each mental task. Cues + were presented in random order. 
+ + EEG was recorded from 30 electrode channels placed on the scalp accord- ing + to the international 10-20 system. Electrode positions included channels + AFz, F7, F3, Fz, F4, F8, FC3, FCz, FC4, T3, C3, Cz, C4, T4, CP3, CPz,CP4, + P7, P5, P3, P1, Pz, P2, P4, P6, P8, PO3, PO4, O1, and O2. Ref- erence and + ground were placed at the left and right mastoid, respectively. The g.tec + GAMMAsys system with g.LADYbird active electrodes and two g.USBamp biosignal + amplifiers (Guger Technolgies, Graz, Austria) was used for recording. EEG + was band pass filtered 0.5-100 Hz (notch filter at 50 Hz) and sampled at a + rate of 256 Hz. + + The duration of a single imagery trials is 10 s. At t = 0 s, a cross was + presented in the middle of the screen. Participants were asked to relax and + fixate the cross to avoid eye movements. At t = 3 s, a beep was sounded to + get the participant’s attention. The cue indicating the requested imagery + task, one out of five graphical symbols, was presented from t = 3 s to t = + 4.25 s. At t = 10 s, a second beep was sounded and the fixation-cross + disappeared, which indicated the end of the trial. A variable break + (inter-trial- interval, ITI) lasting between 2.5 s and 3.5 s occurred before + the start of the next trial. Participants were asked to avoid movements + during the imagery period, and to move and blink during the + ITI. Experimental runs began and ended with a blank screen (duration 4 s) + + [1] Scherer R, Faller J, Friedrich EVC, Opisso E, Costa U, Kübler A, et + al. (2015) Individually Adapted Imagery Improves Brain-Computer Interface + Performance in End-Users with Disability. PLoS ONE 10(5): + e0123727. 
https://doi.org/10.1371/journal.pone.0123727 + """ - def __init__(self, tmin=0, tmax=7): + def __init__(self): super().__init__( subjects=list(range(1, 10)), sessions_per_subject=2, events=dict(right_hand=4, feet=5, navigation=3, subtraction=2, word_ass=1), code='004-2015', - interval=[tmin, tmax], + interval=[3, 10], paradigm='imagery', - task_interval=[3, 10], doi='10.1371/journal.pone.0123727') diff --git a/moabb/datasets/gigadb.py b/moabb/datasets/gigadb.py index eecabc038..94a3cd4c6 100644 --- a/moabb/datasets/gigadb.py +++ b/moabb/datasets/gigadb.py @@ -19,7 +19,15 @@ class GigaDbMI(BaseDataset): - """GigaDb Motor Imagery dataset""" + """GigaDb Motor Imagery dataset + We conducted a BCI experiment for motor imagery movement (MI movement) of the left and right hands with 52 subjects (19 females, mean age ± SD age = 24.8 ± 3.86 years); the experiment was approved by the Institutional Review Board of Gwangju Institute of Science and Technology. Each subject took part in the same experiment, and subject ID was denoted and indexed as s1, s2, …, s52. Subjects s20 and s33 were both-handed, and the other 50 subjects were right-handed. + + EEG data were collected using 64 Ag/AgCl active electrodes. As shown in Fig. 1, a 64-channel montage based on the international 10-10 system was used to record the EEG signals with 512 Hz sampling rates. The EEG device used in this experiment was the Biosemi ActiveTwo system. The BCI2000 system 3.0.2 was used to collect EEG data and present instructions (left hand or right hand MI). Furthermore, we recorded EMG as well as EEG simultaneously with the same system and sampling rate to check actual hand movements. Two EMG electrodes were attached to the flexor digitorum profundus and extensor digitorum on each arm. + + Subjects were asked to imagine the hand movement depending on the instruction given. Five or six runs were performed during the MI experiment. 
After each run, we calculated the classification accuracy over one run and gave the subject feedback to increase motivation. Between each run, a maximum 4-minute break was given depending on the subject's demands. + + [1] Hohyun Cho, Minkyu Ahn, Sangtae Ahn, Moonyoung Kwon, Sung Chan Jun; EEG datasets for motor imagery brain–computer interface, GigaScience, Volume 6, Issue 7, 1 July 2017, Pages 1–8, https://doi.org/10.1093/gigascience/gix034 + """ def __init__(self): super().__init__( @@ -27,9 +35,8 @@ def __init__(self): sessions_per_subject=1, events=dict(left_hand=1, right_hand=2), code='GigaDb Motor Imagery', - interval=[0.5, 2.5], # full trial is 0-3s, this drops edge effects + interval=[0, 3], # full trial is 0-3s, but edge effects paradigm='imagery', - task_interval=[0,3], doi='10.5524/100295') for ii in [32, 46, 49]: self.subject_list.remove(ii) @@ -53,15 +60,20 @@ def _get_single_subject_data(self, subject): ch_names = eeg_ch_names + emg_ch_names + ['Stim'] ch_types = ['eeg'] * 64 + ['emg'] * 4 + ['stim'] montage = read_montage('standard_1005') - - eeg_data_l = np.vstack([data.imagery_left * 1e-6, data.imagery_event]) - eeg_data_r = np.vstack([data.imagery_right * 1e-6, + imagery_left = data.imagery_left - \ + data.imagery_left.mean(axis=1, keepdims=True) + imagery_right = data.imagery_right - \ + data.imagery_right.mean(axis=1, keepdims=True) + + eeg_data_l = np.vstack([imagery_left * 1e-6, data.imagery_event]) + eeg_data_r = np.vstack([imagery_right * 1e-6, data.imagery_event * 2]) # trials are already non continuous. 
edge artifact can appears but # are likely to be present during rest / inter-trial activity - eeg_data = np.hstack([eeg_data_l, eeg_data_r]) - log.warning('Trials stacked to create continuous data -- edge effects present') + eeg_data = np.hstack([eeg_data_l, np.zeros((eeg_data_l.shape[0], 500)),eeg_data_r]) + log.warning( + 'Trials demeaned and stacked with zero buffer to create continuous data -- edge effects present') info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=data.srate, montage=montage) diff --git a/moabb/datasets/openvibe_mi.py b/moabb/datasets/openvibe_mi.py index 739afb9ca..1fd1bbbbf 100644 --- a/moabb/datasets/openvibe_mi.py +++ b/moabb/datasets/openvibe_mi.py @@ -39,13 +39,13 @@ def convert_inria_csv_to_mne(path): class OpenvibeMI(BaseDataset): """Openvibe Motor Imagery dataset""" - def __init__(self, tmin=0, tmax=3): + def __init__(self): super().__init__( subjects=[1], sessions_per_subject=14, events=dict(right_hand=1, left_hand=2), code='Openvibe Motor Imagery', - interval=[tmin, tmax], + interval=[0, 3], paradigm='imagery') def _get_single_subject_data(self, subject): diff --git a/moabb/datasets/physionet_mi.py b/moabb/datasets/physionet_mi.py index 2b635f862..248ef5a88 100644 --- a/moabb/datasets/physionet_mi.py +++ b/moabb/datasets/physionet_mi.py @@ -11,7 +11,10 @@ class PhysionetMI(BaseDataset): - """Physionet Motor Imagery dataset""" + """Physionet Motor Imagery dataset [1] + + [1]https://physionet.org/pn4/eegmmidb/ + """ def __init__(self, imagined=True): super().__init__( diff --git a/moabb/datasets/upper_limb.py b/moabb/datasets/upper_limb.py index 473fa32d5..a0c0dd48d 100644 --- a/moabb/datasets/upper_limb.py +++ b/moabb/datasets/upper_limb.py @@ -37,9 +37,8 @@ def __init__(self, imagined=True, executed=False): sessions_per_subject=n_sessions, events=event_id, code='Upper Limb Imagery', - interval=[0.5, 3], # according to paper 2-5 + interval=[2, 5], # according to paper 2-5 paradigm='imagery', - task_interval=[2,5], 
doi='10.1371/journal.pone.0182578') def _get_single_subject_data(self, subject): diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 9db09891d..f7f59fa3c 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -59,12 +59,9 @@ def verify(self, dataset): # we should verify list of channels, somehow + @abc.abstractmethod def used_events(self, dataset): - if self.events is None: - event_id = dataset.event_id - else: - event_id = {ev: dataset.event_id[ev] for ev in self.events} - return event_id + pass def process_raw(self, raw, dataset): # find the events @@ -167,6 +164,9 @@ def __init__(self, **kwargs): raise(ValueError('LeftRightImagery dont accept events')) super().__init__(events=['left_hand', 'right_hand'], **kwargs) + def used_events(self, dataset): + return {ev: dataset.event_id[ev] for ev in self.events} + @property def scoring(self): return 'roc_auc' @@ -184,6 +184,9 @@ def __init__(self, **kwargs): raise(ValueError('LeftRightImagery dont accept events')) super().__init__(events=['left_hand', 'right_hand'], **kwargs) + def used_events(self, dataset): + return {ev: dataset.event_id[ev] for ev in self.events} + @property def scoring(self): return 'roc_auc' From a0070bd76fc9a9163480c97d8a53661fbb64f1ec Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Fri, 6 Apr 2018 13:00:40 +0200 Subject: [PATCH 11/21] Updated dataset_search to include interval param --- moabb/datasets/utils.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/moabb/datasets/utils.py b/moabb/datasets/utils.py index da090e6a4..e30af833a 100644 --- a/moabb/datasets/utils.py +++ b/moabb/datasets/utils.py @@ -14,8 +14,8 @@ def dataset_search(paradigm, multi_session=False, events=None, - has_all_events=False, total_classes=None, min_subjects=1, - channels=[]): + has_all_events=False, total_classes=None, interval=None, + min_subjects=1, channels=[]): ''' Function that returns a list of datasets that match 
given criteria. Valid criteria are: @@ -27,6 +27,7 @@ def dataset_search(paradigm, multi_session=False, events=None, multi_session: bool, if True only returns datasets with more than one session per subject. If False return all paradigm: 'imagery','p300',(more to come) + interval: Length of motor imagery interval, in seconds. Only used in imagery paradigm min_subjects: int, minimum subjects in dataset channels: list or set of channels @@ -51,6 +52,9 @@ def dataset_search(paradigm, multi_session=False, events=None, continue if paradigm == d.paradigm: + if interval is not None: + if d.interval[1]-d.interval[0] < interval: + continue keep_event_dict = {} if events is None: # randomly keep n_classes events From fa0222f5559a7c2fca35ad858e2d8621c1aaccd3 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Fri, 6 Apr 2018 13:15:14 +0200 Subject: [PATCH 12/21] Updated utils to use interval, fixes - use on_missing - intervals specified by paradigm are relative to imagery start - added min interval search to utils - paradigm datasets property now use interval search --- moabb/datasets/utils.py | 3 ++- moabb/paradigms/motor_imagery.py | 33 ++++++++++++++++++++++---------- 2 files changed, 25 insertions(+), 11 deletions(-) diff --git a/moabb/datasets/utils.py b/moabb/datasets/utils.py index e30af833a..cfc97815a 100644 --- a/moabb/datasets/utils.py +++ b/moabb/datasets/utils.py @@ -53,7 +53,8 @@ def dataset_search(paradigm, multi_session=False, events=None, if paradigm == d.paradigm: if interval is not None: - if d.interval[1]-d.interval[0] < interval: + min_interval = interval[1]-interval[0] + if d.interval[1]-d.interval[0] < min_interval: continue keep_event_dict = {} if events is None: diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index f7f59fa3c..e3552fe41 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -22,8 +22,9 @@ class BaseMotorImagery(BaseParadigm): filters: List of list (defaults [[7, 35]]) bank 
of filter to apply interval: list | None, (default None) - time interval to epoch trial. If None, defaults to the dataset-defined - interval + time interval to epoch trial, shifted to imagery start (e.g. + [0,2] becomes [3,5] if imagery starts 3s after the cue). If None, + defaults to the dataset-defined interval events: List of str | None (default None) event to use for epoching. If None, default to all events defined in the dataset. @@ -78,10 +79,6 @@ def process_raw(self, raw, dataset): # pick events, based on event_id try: events = mne.pick_events(events, include=list(event_id.values())) - # not all runs have all events. Reduce event dict to only occurring events - events_in_run = np.unique(events[:, 2]) - run_event_dict = {k: v for k, - v in event_id.items() if v in events_in_run} except RuntimeError as r: # skip raw if no event found return @@ -90,7 +87,7 @@ def process_raw(self, raw, dataset): if self.interval is None: tmin, tmax = dataset.interval else: - tmin, tmax = self.interval + tmin, tmax = [t + dataset.interval[0] for t in self.interval] if self.resample is not None: raw = raw.copy().resample(self.resample) @@ -102,14 +99,15 @@ def process_raw(self, raw, dataset): raw_f = raw.copy().filter(fmin, fmax, method='iir', picks=picks, verbose=False) # epoch data - epochs = mne.Epochs(raw_f, events, event_id=run_event_dict, + epochs = mne.Epochs(raw_f, events, event_id=event_id, tmin=tmin, tmax=tmax, proj=False, baseline=None, preload=True, - verbose=False, picks=picks) + verbose=False, picks=picks, + on_missing='ignore') # MNE is in V, rescale to have uV X.append(1e6 * epochs.get_data()) - inv_events = {k: v for v, k in run_event_dict.items()} + inv_events = {k: v for v, k in event_id.items()} labels = np.array([inv_events[e] for e in epochs.events[:, -1]]) # if only one band, return a 3D array, otherwise return a 4D @@ -123,8 +121,13 @@ def process_raw(self, raw, dataset): @property def datasets(self): + if self.interval is None: + interval = None + else: 
+ interval = self.interval[1]-self.interval[0] return utils.dataset_search(paradigm='imagery', events=self.events, + interval=interval, has_all_events=True) @property @@ -248,9 +251,14 @@ def used_events(self, dataset): @property def datasets(self): + if self.interval is None: + interval = None + else: + interval = self.interval[1]-self.interval[0] return utils.dataset_search(paradigm='imagery', events=self.events, total_classes=self.n_classes, + interval=interval, has_all_events=False) @property @@ -318,9 +326,14 @@ def used_events(self, dataset): @property def datasets(self): + if self.interval is None: + interval = None + else: + interval = self.interval[1]-self.interval[0] return utils.dataset_search(paradigm='imagery', events=self.events, total_classes=self.n_classes, + interval=interval, has_all_events=False) @property From d03ab091047434990db7ba6f8dab9ad59809fa6a Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Fri, 6 Apr 2018 13:17:09 +0200 Subject: [PATCH 13/21] hotfix, interval in utils is float --- moabb/datasets/utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/moabb/datasets/utils.py b/moabb/datasets/utils.py index cfc97815a..e30af833a 100644 --- a/moabb/datasets/utils.py +++ b/moabb/datasets/utils.py @@ -53,8 +53,7 @@ def dataset_search(paradigm, multi_session=False, events=None, if paradigm == d.paradigm: if interval is not None: - min_interval = interval[1]-interval[0] - if d.interval[1]-d.interval[0] < min_interval: + if d.interval[1]-d.interval[0] < interval: continue keep_event_dict = {} if events is None: From ed0972c3f4aca5be9ad552610b0859c63cbe3a63 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Fri, 6 Apr 2018 16:19:23 +0200 Subject: [PATCH 14/21] Fixed tutorial? 
and halfhearted doc changes --- moabb/datasets/bbci_eeg_fnirs.py | 105 ++++++++++++++++++++++++----- moabb/datasets/bnci.py | 64 +++++++++++------- moabb/paradigms/motor_imagery.py | 10 +-- tutorials/plot_explore_paradigm.py | 2 +- 4 files changed, 132 insertions(+), 49 deletions(-) diff --git a/moabb/datasets/bbci_eeg_fnirs.py b/moabb/datasets/bbci_eeg_fnirs.py index 74b82d34a..540fd32dc 100644 --- a/moabb/datasets/bbci_eeg_fnirs.py +++ b/moabb/datasets/bbci_eeg_fnirs.py @@ -60,29 +60,100 @@ def fnirs_data_path(path, subject): class BBCIEEGfNIRS(BaseDataset): """BBCI EEG fNIRS Motor Imagery dataset - Data Acquisition + Data Acquisition + ---------------------------------------- + + EEG and NIRS data was collected in an ordinary bright room. EEG data was + recorded by a multichannel BrainAmp EEG amplifier with thirty active + electrodes (Brain Products GmbH, Gilching, Germany) with linked mastoids + reference at 1000 Hz sampling rate. The EEG amplifier was also used to + measure the electrooculogram (EOG), electrocardiogram (ECG) and respiration + with a piezo based breathing belt. Thirty EEG electrodes were placed on a + custom-made stretchy fabric cap (EASYCAP GmbH, Herrsching am Ammersee, + Germany) and placed according to the international 10-5 system (AFp1, AFp2, + AFF1h, AFF2h, AFF5h, AFF6h, F3, F4, F7, F8, FCC3h, FCC4h, FCC5h, FCC6h, T7, + T8, Cz, CCP3h, CCP4h, CCP5h, CCP6h, Pz, P3, P4, P7, P8, PPO1h, PPO2h, POO1, + POO2 and Fz for ground electrode). + + NIRS data was collected by NIRScout (NIRx GmbH, Berlin, Germany) at 12.5 Hz + sampling rate. Each adjacent source-detector pair creates one physiological + NIRS channel. Fourteen sources and sixteen detectors resulting in thirty-six + physiological channels were placed at frontal (nine channels around Fp1, + Fp2, and Fpz), motor (twelve channels around C3 and C4, respectively) and + visual areas (three channels around Oz). The inter-optode distance was 30 + mm. 
NIRS optodes were fixed on the same cap as the EEG electrodes. Ambient + lights were sufficiently blocked by a firm contact between NIRS optodes and + scalp and use of an opaque cap. + + EOG was recorded using two vertical (above and below left eye) and two + horizontal (outer canthus of each eye) electrodes. ECG was recorded based on + Einthoven triangle derivations I and II, and respiration was measured using + a respiration belt on the lower chest. EOG, ECG and respiration were sampled + at the same sampling rate of the EEG. ECG and respiration data were not + analyzed in this study, but are provided along with the other signals. + + Experimental Procedure ---------------------------------------- - EEG and NIRS data was collected in an ordinary bright room. EEG data was recorded by a multichannel BrainAmp EEG amplifier with thirty active electrodes (Brain Products GmbH, Gilching, Germany) with linked mastoids reference at 1000 Hz sampling rate. The EEG amplifier was also used to measure the electrooculogram (EOG), electrocardiogram (ECG) and respiration with a piezo based breathing belt. Thirty EEG electrodes were placed on a custom-made stretchy fabric cap (EASYCAP GmbH, Herrsching am Ammersee, Germany) and placed according to the international 10-5 system (AFp1, AFp2, AFF1h, AFF2h, AFF5h, AFF6h, F3, F4, F7, F8, FCC3h, FCC4h, FCC5h, FCC6h, T7, T8, Cz, CCP3h, CCP4h, CCP5h, CCP6h, Pz, P3, P4, P7, P8, PPO1h, PPO2h, POO1, POO2 and Fz for ground electrode). - NIRS data was collected by NIRScout (NIRx GmbH, Berlin, Germany) at 12.5 Hz sampling rate. Each adjacent source-detector pair creates one physiological NIRS channel. Fourteen sources and sixteen detectors resulting in thirty-six physiological channels were placed at frontal (nine channels around Fp1, Fp2, and Fpz), motor (twelve channels around C3 and C4, respectively) and visual areas (three channels around Oz). The inter-optode distance was 30 mm. NIRS optodes were fixed on the same cap as the EEG electrodes. 
Ambient lights were sufficiently blocked by a firm contact between NIRS optodes and scalp and use of an opaque cap. - - EOG was recorded using two vertical (above and below left eye) and two horizontal (outer canthus of each eye) electrodes. ECG was recorded based on Einthoven triangle derivations I and II, and respiration was measured using a respiration belt on the lower chest. EOG, ECG and respiration were sampled at the same sampling rate of the EEG. ECG and respiration data were not analyzed in this study, but are provided along with the other signals. - - Experimental Procedure - ---------------------------------------- - - The subjects sat on a comfortable armchair in front of a 50-inch white screen. The distance between their heads and the screen was 1.6 m. They were asked not to move any part of the body during the data recording. The experiment consisted of three sessions of left and right hand MI (dataset A) and MA and baseline tasks (taking a rest without any thought) (dataset B) each. Each session comprised a 1 min pre-experiment resting period, 20 repetitions of the given task and a 1 min post-experiment resting period. The task started with 2 s of a visual introduction of the task, followed by 10 s of a task period and resting period which was given randomly from 15 to 17 s. At the beginning and end of the task period, a short beep (250 ms) was played. All instructions were displayed on the white screen by a video projector. MI and MA tasks were performed in separate sessions but in alternating order (i.e., sessions 1, 3 and 5 for MI (dataset A) and sessions 2, 4 and 6 for MA (dataset B)). Fig. 2 shows the schematic diagram of the experimental paradigm. Five sorts of motion artifacts induced by eye and head movements (dataset C) were measured. The motion artifacts were recorded after all MI and MA task recordings. The experiment did not include the pre- and post-experiment resting state periods. 
- - Motor Imagery (Dataset A) + The subjects sat on a comfortable armchair in front of a 50-inch white + screen. The distance between their heads and the screen was 1.6 m. They were + asked not to move any part of the body during the data recording. The + experiment consisted of three sessions of left and right hand MI (dataset A) + and MA and baseline tasks (taking a rest without any thought) (dataset B) + each. Each session comprised a 1 min pre-experiment resting period, 20 + repetitions of the given task and a 1 min post-experiment resting + period. The task started with 2 s of a visual introduction of the task, + followed by 10 s of a task period and resting period which was given + randomly from 15 to 17 s. At the beginning and end of the task period, a + short beep (250 ms) was played. All instructions were displayed on the white + screen by a video projector. MI and MA tasks were performed in separate + sessions but in alternating order (i.e., sessions 1, 3 and 5 for MI (dataset + A) and sessions 2, 4 and 6 for MA (dataset B)). Fig. 2 shows the schematic + diagram of the experimental paradigm. Five sorts of motion artifacts induced + by eye and head movements (dataset C) were measured. The motion artifacts + were recorded after all MI and MA task recordings. The experiment did not + include the pre- and post-experiment resting state periods. + + Motor Imagery (Dataset A) ---------------------------- - For motor imagery, subjects were instructed to perform haptic motor imagery (i.e. to imagine the feeling of opening and closing their hands as they were grabbing a ball) to ensure that actual motor imagery, not visual imagery, was performed. All subjects were naive to the MI experiment. For the visual instruction, a black arrow pointing to either the left or right side appeared at the center of the screen for 2 s. The arrow disappeared with a short beep sound and then a black fixation cross was displayed during the task period. 
The subjects were asked to imagine hand gripping (opening and closing their hands) in a 1 Hz pace. This pace was shown to and repeated by the subjects by performing real hand gripping before the experiment. Motor imagery was performed continuously over the task period. The task period was finished with a short beep sound and a 'STOP' displayed for 1s on the screen. The fixation cross was displayed again during the rest period and the subjects were asked to gaze at it to minimize their eye movements. This process was repeated twenty times in a single session (ten trials per condition in a single session; thirty trials in the whole sessions). In a single session, motor imagery tasks were performed on the basis of ten subsequent blocks randomly consisting of one of two conditions: Either first left and then right hand motor imagery or vice versa. - - Mental Arithmetic (Dataset B) + For motor imagery, subjects were instructed to perform haptic motor imagery + (i.e. to imagine the feeling of opening and closing their hands as they were + grabbing a ball) to ensure that actual motor imagery, not visual imagery, + was performed. All subjects were naive to the MI experiment. For the visual + instruction, a black arrow pointing to either the left or right side + appeared at the center of the screen for 2 s. The arrow disappeared with a + short beep sound and then a black fixation cross was displayed during the + task period. The subjects were asked to imagine hand gripping (opening and + closing their hands) in a 1 Hz pace. This pace was shown to and repeated by + the subjects by performing real hand gripping before the experiment. Motor + imagery was performed continuously over the task period. The task period was + finished with a short beep sound and a 'STOP' displayed for 1s on the + screen. The fixation cross was displayed again during the rest period and + the subjects were asked to gaze at it to minimize their eye movements. 
This + process was repeated twenty times in a single session (ten trials per + condition in a single session; thirty trials in the whole sessions). In a + single session, motor imagery tasks were performed on the basis of ten + subsequent blocks randomly consisting of one of two conditions: Either first + left and then right hand motor imagery or vice versa. + + Mental Arithmetic (Dataset B) ---------------------------------------- - For the visual instruction of the MA task, an initial subtraction such as 'three-digit number minus one-digit number' (e.g., 384-8) appeared at the center of the screen for 2 s. The subjects were instructed to memorize the numbers while the initial subtraction was displayed on the screen. The initial subtraction disappeared with a short beep sound and a black fixation cross was displayed during the task period in which the subjects were asked to repeatedly perform to subtract the one-digit number from the result of the previous subtraction. For the baseline task, no specific sign but the black fixation cross was displayed on the screen, and the subjects were instructed to take a rest. Note that there were other rest periods between the MA and baseline task periods, as same with the MI paradigm. Both task periods were finished with a short beep sound and a 'STOP' displayed for 1 s on the screen. The fixation cross was displayed again during the rest period. MA and baseline trials were randomized in the same way as MI. - + For the visual instruction of the MA task, an initial subtraction such as + 'three-digit number minus one-digit number' (e.g., 384-8) appeared at the + center of the screen for 2 s. The subjects were instructed to memorize the + numbers while the initial subtraction was displayed on the screen. 
The + initial subtraction disappeared with a short beep sound and a black fixation + cross was displayed during the task period in which the subjects were asked + to repeatedly perform to subtract the one-digit number from the result of + the previous subtraction. For the baseline task, no specific sign but the + black fixation cross was displayed on the screen, and the subjects were + instructed to take a rest. Note that there were other rest periods between + the MA and baseline task periods, as same with the MI paradigm. Both task + periods were finished with a short beep sound and a 'STOP' displayed for 1 s + on the screen. The fixation cross was displayed again during the rest + period. MA and baseline trials were randomized in the same way as MI. """ diff --git a/moabb/datasets/bnci.py b/moabb/datasets/bnci.py index 671ac15fa..5657cc0e3 100644 --- a/moabb/datasets/bnci.py +++ b/moabb/datasets/bnci.py @@ -641,32 +641,39 @@ def data_path(self, subject, path=None, force_update=False, class BNCI2014001(MNEBNCI): """BNCI 2014-001 Motor Imagery dataset - -This data set consists of EEG data from 9 subjects. The cue-based BCI -paradigm consisted of four different motor imagery tasks, namely the imag- -ination of movement of the left hand (class 1), right hand (class 2), both -feet (class 3), and tongue (class 4). Two sessions on different days were -recorded for each subject. Each session is comprised of 6 runs separated by -short breaks. One run consists of 48 trials (12 for each of the four possible -classes), yielding a total of 288 trials per session. - -The subjects were sitting in a comfortable armchair in front of a com- puter -screen. At the beginning of a trial ( t = 0 s), a fixation cross appeared on -the black screen. In addition, a short acoustic warning tone was pre- sented. 
-After two seconds ( t = 2 s), a cue in the form of an arrow pointing either to -the left, right, down or up (corresponding to one of the four classes left hand, -right hand, foot or tongue) appeared and stayed on the screen for 1.25 s. This -prompted the subjects to perform the desired motor imagery task. No feedback -was provided. The subjects were ask to carry out the motor imagery task until -the fixation cross disappeared from the screen at t = 6 s. - -Twenty-two Ag/AgCl electrodes (with inter-electrode distances of 3.5 cm) were -used to record the EEG; the montage is shown in Figure 3 left. All signals were -recorded monopolarly with the left mastoid serving as reference and the right -mastoid as ground. The signals were sampled with 250 Hz and bandpass-filtered -between 0.5 Hz and 100 Hz. The sensitivity of the amplifier was set to 100 μV -. An additional 50 Hz notch filter was enabled to suppress line noise -""" + + This data set consists of EEG data from 9 subjects. The cue-based BCI + paradigm consisted of four different motor imagery tasks, namely the imag- + ination of movement of the left hand (class 1), right hand (class 2), both + feet (class 3), and tongue (class 4). Two sessions on different days were + recorded for each subject. Each session is comprised of 6 runs separated by + short breaks. One run consists of 48 trials (12 for each of the four + possible classes), yielding a total of 288 trials per session. + + The subjects were sitting in a comfortable armchair in front of a com- puter + screen. At the beginning of a trial ( t = 0 s), a fixation cross appeared + on the black screen. In addition, a short acoustic warning tone was pre- + sented. After two seconds ( t = 2 s), a cue in the form of an arrow + pointing either to the left, right, down or up (corresponding to one of the + four classes left hand, right hand, foot or tongue) appeared and stayed on + the screen for 1.25 s. 
This prompted the subjects to perform the desired + motor imagery task. No feedback was provided. The subjects were ask to + carry out the motor imagery task until the fixation cross disappeared from + the screen at t = 6 s. + + Twenty-two Ag/AgCl electrodes (with inter-electrode distances of 3.5 cm) + were used to record the EEG; the montage is shown in Figure 3 left. All + signals were recorded monopolarly with the left mastoid serving as reference + and the right mastoid as ground. The signals were sampled with 250 Hz and + bandpass-filtered between 0.5 Hz and 100 Hz. The sensitivity of the + amplifier was set to 100 μV . An additional 50 Hz notch filter was enabled + to suppress line noise + + References + ---------- + + .. [1] doi.org/10.3389/fnins.2012.00055 + """ def __init__(self): super().__init__( @@ -706,6 +713,11 @@ class for validation. Participants had the task of performing sustained (5 were aged between 20 and 30 years, 8 naive to the task, and had no known medical or neurological diseases. + References + ----------- + + .. [1] doi.org/10.1515/bmt-2014-0117 + """ def __init__(self): diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index e3552fe41..9aab4cb6d 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -135,7 +135,7 @@ def scoring(self): return 'accuracy' -class MotorImagery(BaseMotorImagery): +class SinglePass(BaseMotorImagery): def __init__(self, fmin=8, fmax=32, **kwargs): """ @@ -155,7 +155,7 @@ def __init__(self, filters=[[8, 12], [12, 16], [16, 20], [20, 24], super().__init__(filters=filters, **kwargs) -class LeftRightImagery(BaseMotorImagery): +class LeftRightImagery(SinglePass): """Motor Imagery for left hand/right hand classification Metric is 'roc_auc' @@ -195,7 +195,7 @@ def scoring(self): return 'roc_auc' -class FilterBankImageryNClass(FilterBankMotorImagery): +class FilterBankMotorImagery(FilterBankMotorImagery): """ Filter bank n-class imagery. 
@@ -269,9 +269,9 @@ def scoring(self): return 'accuracy' -class ImageryNClass(MotorImagery): +class MotorImagery(SinglePass): """ - N-class imagery. + N-class motor imagery. Metric is 'roc-auc' if 2 classes and 'accuracy' if more diff --git a/tutorials/plot_explore_paradigm.py b/tutorials/plot_explore_paradigm.py index 12d0f4eb1..2a1c3887f 100644 --- a/tutorials/plot_explore_paradigm.py +++ b/tutorials/plot_explore_paradigm.py @@ -36,7 +36,7 @@ # # First, lets take a example of the MotorImagery paradigm. -paradigm = MotorImagery() +paradigm = MotorImagery(n_classes=4) print(paradigm.__doc__) From 1be88be9d88c037c84e2f533cb53f59135db4ef9 Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Fri, 6 Apr 2018 16:31:26 +0200 Subject: [PATCH 15/21] fixed filterbank script also fixed bug in ImageryNClass init --- examples/plot_filterbank_csp_vs_csp.py | 7 ++++--- moabb/paradigms/motor_imagery.py | 13 +++++++------ 2 files changed, 11 insertions(+), 9 deletions(-) diff --git a/examples/plot_filterbank_csp_vs_csp.py b/examples/plot_filterbank_csp_vs_csp.py index 46cd5615e..f1c9a2a7e 100644 --- a/examples/plot_filterbank_csp_vs_csp.py +++ b/examples/plot_filterbank_csp_vs_csp.py @@ -67,8 +67,9 @@ overwrite = False # set to True if we want to overwrite cached results # broadband filters -filters = [[8, 35]] -paradigm = LeftRightImagery(filters=filters) +fmin=8 +fmax=35 +paradigm = LeftRightImagery(fmin=fmin, fmax=fmax) evaluation = CrossSessionEvaluation(paradigm=paradigm, datasets=datasets, suffix='examples', overwrite=overwrite) results = evaluation.process(pipelines) @@ -78,7 +79,7 @@ # bank of 6 filter, by 4 Hz increment filters = [[8, 12], [12, 16], [16, 20], [20, 24], [24, 28], [28, 35]] -paradigm = FilterBankLeftRightImagery() +paradigm = FilterBankLeftRightImagery(filters=filters) evaluation = CrossSessionEvaluation(paradigm=paradigm, datasets=datasets, suffix='examples', overwrite=overwrite) results_fb = evaluation.process(pipelines_fb) diff --git 
a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 9aab4cb6d..437ee7a1a 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -146,7 +146,7 @@ def __init__(self, fmin=8, fmax=32, **kwargs): super().__init__(filters=[[fmin, fmax]], **kwargs) -class FilterBankMotorImagery(BaseMotorImagery): +class FilterBank(BaseMotorImagery): """Filter Bank MI.""" def __init__(self, filters=[[8, 12], [12, 16], [16, 20], [20, 24], @@ -175,7 +175,7 @@ def scoring(self): return 'roc_auc' -class FilterBankLeftRightImagery(FilterBankMotorImagery): +class FilterBankLeftRightImagery(FilterBank): """Filter Bank Motor Imagery for left hand/right hand classification Metric is 'roc_auc' @@ -195,9 +195,9 @@ def scoring(self): return 'roc_auc' -class FilterBankMotorImagery(FilterBankMotorImagery): +class FilterBankMotorImagery(FilterBank): """ - Filter bank n-class imagery. + Filter bank n-class motor imagery. Metric is 'roc-auc' if 2 classes and 'accuracy' if more @@ -292,11 +292,12 @@ def __init__(self, n_classes=2, **kwargs): "docstring" super().__init__(**kwargs) self.n_classes = n_classes - assert n_classes <= len( - self.events), 'More classes than events specified' if self.events is None: log.warning("Choosing from all possible events") + else: + assert n_classes <= len( + self.events), 'More classes than events specified' def verify(self, dataset): assert dataset.paradigm == 'imagery' From 9142d54fdfab7c7992e416b585dff431e0cb19ee Mon Sep 17 00:00:00 2001 From: Vinay Jayaram Date: Fri, 6 Apr 2018 16:39:07 +0200 Subject: [PATCH 16/21] fixed failing tests BaseMotorImagery is still abstract --- moabb/paradigms/motor_imagery.py | 5 +++-- moabb/tests/paradigms.py | 15 ++++++++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 437ee7a1a..83d475802 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -217,11 
+217,12 @@ def __init__(self, n_classes=2, **kwargs): "docstring" super().__init__(**kwargs) self.n_classes = n_classes - assert n_classes <= len( - self.events), 'More classes than events specified' if self.events is None: log.warning("Choosing from all possible events") + else: + assert n_classes <= len( + self.events), 'More classes than events specified' def verify(self, dataset): assert dataset.paradigm == 'imagery' diff --git a/moabb/tests/paradigms.py b/moabb/tests/paradigms.py index 6fa7157cd..3962f64f7 100644 --- a/moabb/tests/paradigms.py +++ b/moabb/tests/paradigms.py @@ -12,11 +12,16 @@ class Test_MotorImagery(unittest.TestCase): def test_BaseImagery_paradigm(self): - self.assertRaises(ValueError, BaseMotorImagery, interval=1) - self.assertRaises(ValueError, BaseMotorImagery, interval=[0, 1, 3]) - self.assertRaises(ValueError, BaseMotorImagery, interval=[1, 0]) + class SimpleMotorImagery(BaseMotorImagery): - paradigm = BaseMotorImagery() + def used_events(self, dataset): + return dataset.event_id + + self.assertRaises(ValueError, SimpleMotorImagery, interval=1) + self.assertRaises(ValueError, SimpleMotorImagery, interval=[0, 1, 3]) + self.assertRaises(ValueError, SimpleMotorImagery, interval=[1, 0]) + + paradigm = SimpleMotorImagery() dataset = FakeDataset() X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) @@ -39,7 +44,7 @@ def test_BaseImagery_paradigm(self): self.assertEquals(len(np.unique(metadata.session)), 2) # can work with filter bank - paradigm = BaseMotorImagery(filters=[[7, 12], [12, 24]]) + paradigm = SimpleMotorImagery(filters=[[7, 12], [12, 24]]) dataset = FakeDataset() X, labels, metadata = paradigm.get_data(dataset, subjects=[1]) From ed7e11242ef886bd59fbac67881fad0aa2169826 Mon Sep 17 00:00:00 2001 From: Alexandre Barachant Date: Sat, 7 Apr 2018 14:05:21 -0400 Subject: [PATCH 17/21] change interval to tmin, tmax --- moabb/paradigms/motor_imagery.py | 58 +++++++++++++++++++------------- moabb/tests/paradigms.py | 4 +-- 2 
files changed, 36 insertions(+), 26 deletions(-) diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 83d475802..84055314b 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -14,42 +14,53 @@ class BaseMotorImagery(BaseParadigm): - """Base Motor imagery paradigm. Please use one of the child classes + """Base Motor imagery paradigm. + + Please use one of the child classes Parameters ---------- - filters: List of list (defaults [[7, 35]]) - bank of filter to apply - interval: list | None, (default None) - time interval to epoch trial, shifted to imagery start (e.g. - [0,2] becomes [3,5] if imagery starts 3s after the cue). If None, - defaults to the dataset-defined interval + filters: list of list (defaults [[7, 35]]) + bank of bandpass filter to apply. + events: List of str | None (default None) event to use for epoching. If None, default to all events defined in the dataset. + + tmin: float (default 0.0) + Start time (in second) of the epoch, relative to the dataset specific + task interval e.g. tmin = 1 would mean the epoch will start 1 second + after the begining of the task as defined by the dataset. + + tmax: float | None, (default None) + End time (in second) of the epoch, relative to the begining of the + dataset specific task interval. tmax = 5 would mean the epoch will end + 5 second after the begining of the task as defined in the dataset. If + None, use the dataset value. + + channels: list of str | None (default None) + list of channel to select. If None, use all EEG channels available in + the dataset. + + resample: float | None (default None) + If not None, resample the eeg data with the sampling rate provided. 
""" - def __init__(self, filters=[[7, 35]], channels=None, interval=None, - events=None, resample=None, **kwargs): + def __init__(self, filters=[[7, 35]], events=None, tmin=0.0, tmax=None, + channels=None, resample=None, **kwargs): super().__init__(**kwargs) self.filters = filters self.channels = channels self.events = events self.resample = resample - if interval is not None: - if not isinstance(interval, list): - raise(ValueError("interval must be a list")) - - if len(interval) != 2: - raise(ValueError("interval must be a list of 2 elements")) + if (tmax is not None): + if tmin >= tmax: + raise(ValueError("tmax must be greater than tmin")) - if interval[0] >= interval[1]: - raise(ValueError("first element of interval must be greater" - "than the second element")) - - self.interval = interval + self.tmin = tmin + self.tmax = tmax def verify(self, dataset): assert dataset.paradigm == 'imagery' @@ -84,10 +95,11 @@ def process_raw(self, raw, dataset): return # get interval - if self.interval is None: - tmin, tmax = dataset.interval + tmin = self.tmin + dataset.interval[0] + if self.tmax is None: + tmax = dataset.interval[1] else: - tmin, tmax = [t + dataset.interval[0] for t in self.interval] + tmax = self.tmax + dataset.interval[0] if self.resample is not None: raw = raw.copy().resample(self.resample) diff --git a/moabb/tests/paradigms.py b/moabb/tests/paradigms.py index 3962f64f7..d62f216ca 100644 --- a/moabb/tests/paradigms.py +++ b/moabb/tests/paradigms.py @@ -17,9 +17,7 @@ class SimpleMotorImagery(BaseMotorImagery): def used_events(self, dataset): return dataset.event_id - self.assertRaises(ValueError, SimpleMotorImagery, interval=1) - self.assertRaises(ValueError, SimpleMotorImagery, interval=[0, 1, 3]) - self.assertRaises(ValueError, SimpleMotorImagery, interval=[1, 0]) + self.assertRaises(ValueError, SimpleMotorImagery, tmin=1, tmax=0) paradigm = SimpleMotorImagery() dataset = FakeDataset() From 741fbaa7c4132cc4e8ffbbd8aeeecca875c9896f Mon Sep 17 00:00:00 
2001 From: Alexandre Barachant Date: Sat, 7 Apr 2018 14:35:13 -0400 Subject: [PATCH 18/21] wip --- docs/source/paradigms.rst | 7 ++- moabb/paradigms/base.py | 13 ++++- moabb/paradigms/motor_imagery.py | 79 +++++++++++++++++++++++++----- tutorials/plot_explore_paradigm.py | 6 +-- 4 files changed, 87 insertions(+), 18 deletions(-) diff --git a/docs/source/paradigms.rst b/docs/source/paradigms.rst index 376f32237..1e4bc4ca9 100644 --- a/docs/source/paradigms.rst +++ b/docs/source/paradigms.rst @@ -14,9 +14,13 @@ Motor Imagery Paradigms :toctree: generated/ :template: class.rst - BaseMotorImagery + SinglePass + MotorImagery LeftRightImagery + FilterBank + FilterBankMotorImagery + FilterBankLeftRightImagery ------------ Base & Utils @@ -26,4 +30,5 @@ Base & Utils :toctree: generated/ :template: class.rst + motor_imagery.BaseMotorImagery base.BaseParadigm diff --git a/moabb/paradigms/base.py b/moabb/paradigms/base.py index ee41397c8..cc92b68d9 100644 --- a/moabb/paradigms/base.py +++ b/moabb/paradigms/base.py @@ -8,7 +8,6 @@ class BaseParadigm(metaclass=ABCMeta): """ def __init__(self): - """init""" pass @abstractproperty @@ -57,12 +56,24 @@ def process_raw(self, raw, dataset): metadata is a dataframe with as many row as the length of the data and labels. + Parameters + ---------- + + raw: mne.Raw instance + the raw EEG data. + + dataset : dataset instance + The dataset corresponding to the raw file. mainly use to access + dataset specific information. 
+ returns ------- X : np.ndarray the data that will be used as features for the model + labels: np.ndarray the labels for training / evaluating the model + metadata: pd.DataFrame A dataframe containing the metadata diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 84055314b..5285809ae 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -48,8 +48,8 @@ class BaseMotorImagery(BaseParadigm): """ def __init__(self, filters=[[7, 35]], events=None, tmin=0.0, tmax=None, - channels=None, resample=None, **kwargs): - super().__init__(**kwargs) + channels=None, resample=None): + super().__init__() self.filters = filters self.channels = channels self.events = events @@ -133,10 +133,10 @@ def process_raw(self, raw, dataset): @property def datasets(self): - if self.interval is None: + if self.tmax is None: interval = None else: - interval = self.interval[1]-self.interval[0] + interval = self.tmax - self.tmin return utils.dataset_search(paradigm='imagery', events=self.events, interval=interval, @@ -148,11 +148,42 @@ def scoring(self): class SinglePass(BaseMotorImagery): + """Single Bandpass filter motot Imagery. + + Motor imagery paradigm with only one bandpass filter (default 8 to 32 Hz) + + Parameters + ---------- + fmin: float (default 8) + cutoff frequency (Hz) for the high pass filter + + fmax: float (default 32) + cutoff frequency (Hz) for the low pass filter + + events: List of str | None (default None) + event to use for epoching. If None, default to all events defined in + the dataset. + + tmin: float (default 0.0) + Start time (in second) of the epoch, relative to the dataset specific + task interval e.g. tmin = 1 would mean the epoch will start 1 second + after the begining of the task as defined by the dataset. + + tmax: float | None, (default None) + End time (in second) of the epoch, relative to the begining of the + dataset specific task interval. 
tmax = 5 would mean the epoch will end + 5 second after the begining of the task as defined in the dataset. If + None, use the dataset value. + + channels: list of str | None (default None) + list of channel to select. If None, use all EEG channels available in + the dataset. + resample: float | None (default None) + If not None, resample the eeg data with the sampling rate provided. + + """ def __init__(self, fmin=8, fmax=32, **kwargs): - """ - Single-pass MI. Takes arguments fmin and fmax and not filters - """ if 'filters' in kwargs.keys(): raise(ValueError('MotorImagery does not take argument \'filters\'')) super().__init__(filters=[[fmin, fmax]], **kwargs) @@ -264,10 +295,10 @@ def used_events(self, dataset): @property def datasets(self): - if self.interval is None: + if self.tmax is None: interval = None else: - interval = self.interval[1]-self.interval[0] + interval = self.tmax - self.tmin return utils.dataset_search(paradigm='imagery', events=self.events, total_classes=self.n_classes, @@ -299,10 +330,32 @@ class MotorImagery(SinglePass): number of classes each dataset must have. If events is given, requires all imagery sorts to be within the events list. + fmin: float (default 8) + cutoff frequency (Hz) for the high pass filter + + fmax: float (default 32) + cutoff frequency (Hz) for the low pass filter + + tmin: float (default 0.0) + Start time (in second) of the epoch, relative to the dataset specific + task interval e.g. tmin = 1 would mean the epoch will start 1 second + after the begining of the task as defined by the dataset. + + tmax: float | None, (default None) + End time (in second) of the epoch, relative to the begining of the + dataset specific task interval. tmax = 5 would mean the epoch will end + 5 second after the begining of the task as defined in the dataset. If + None, use the dataset value. + + channels: list of str | None (default None) + list of channel to select. If None, use all EEG channels available in + the dataset. 
+ + resample: float | None (default None) + If not None, resample the eeg data with the sampling rate provided. """ def __init__(self, n_classes=2, **kwargs): - "docstring" super().__init__(**kwargs) self.n_classes = n_classes @@ -340,10 +393,10 @@ def used_events(self, dataset): @property def datasets(self): - if self.interval is None: + if self.tmax is None: interval = None else: - interval = self.interval[1]-self.interval[0] + interval = self.tmax - self.tmin return utils.dataset_search(paradigm='imagery', events=self.events, total_classes=self.n_classes, @@ -359,7 +412,7 @@ def scoring(self): class FakeImageryParadigm(LeftRightImagery): - """fake Imagery for left hand/right hand classification + """Fake Imagery for left hand/right hand classification. """ @property diff --git a/tutorials/plot_explore_paradigm.py b/tutorials/plot_explore_paradigm.py index 2a1c3887f..3fb3eff18 100644 --- a/tutorials/plot_explore_paradigm.py +++ b/tutorials/plot_explore_paradigm.py @@ -14,10 +14,10 @@ coefficient for continuous paradigms. 
This tutorial explore the paradigm object, with 3 examples of paradigm : + - MotorImagery + - FilterBankMotorImagery + - LeftRightImagery - - MotorImagery - - FilterBankMotorImagery - - LeftRightImagery """ # Authors: Alexandre Barachant # From a4c9c360100bf12a05671cf3ca3edc0759094143 Mon Sep 17 00:00:00 2001 From: Alexandre Barachant Date: Sun, 8 Apr 2018 17:19:28 -0400 Subject: [PATCH 19/21] wip --- docs/source/datasets.rst | 9 +- docs/source/paradigms.rst | 4 +- docs/source/pipelines.rst | 7 +- moabb/datasets/Weibo2014.py | 2 +- moabb/datasets/Zhou2016.py | 27 ++- moabb/datasets/__init__.py | 10 +- moabb/datasets/alex_mi.py | 35 ++-- moabb/datasets/base.py | 79 +++---- moabb/datasets/bbci_eeg_fnirs.py | 341 +++++++++++++++++++++---------- moabb/datasets/bnci.py | 184 ++++++++++------- moabb/datasets/download.py | 4 +- moabb/datasets/fake.py | 2 +- moabb/datasets/gigadb.py | 62 ++++-- moabb/datasets/openvibe_mi.py | 58 +++++- moabb/datasets/physionet_mi.py | 92 +++++++-- moabb/datasets/upper_limb.py | 59 +++++- moabb/datasets/utils.py | 44 ++-- moabb/tests/download.py | 18 +- 18 files changed, 703 insertions(+), 334 deletions(-) diff --git a/docs/source/datasets.rst b/docs/source/datasets.rst index de19b0bf8..463237bae 100644 --- a/docs/source/datasets.rst +++ b/docs/source/datasets.rst @@ -21,12 +21,13 @@ Motor Imagery Datasets BNCI2015004 AlexMI - GigaDbMI - BBCIEEGfNIRS + Cho2017 + Shin2017A + Shin2017B OpenvibeMI PhysionetMI - UpperLimb - Zhou2016 + Ofner2017 + Zhou2016 Weibo2014 ------------ diff --git a/docs/source/paradigms.rst b/docs/source/paradigms.rst index 1e4bc4ca9..15915e8cb 100644 --- a/docs/source/paradigms.rst +++ b/docs/source/paradigms.rst @@ -14,11 +14,9 @@ Motor Imagery Paradigms :toctree: generated/ :template: class.rst - SinglePass MotorImagery LeftRightImagery - FilterBank FilterBankMotorImagery FilterBankLeftRightImagery @@ -31,4 +29,6 @@ Base & Utils :template: class.rst motor_imagery.BaseMotorImagery + motor_imagery.SinglePass + 
motor_imagery.FilterBank base.BaseParadigm diff --git a/docs/source/pipelines.rst b/docs/source/pipelines.rst index 5d8fafcfc..71de1c941 100644 --- a/docs/source/pipelines.rst +++ b/docs/source/pipelines.rst @@ -6,15 +6,16 @@ Pipelines .. currentmodule:: moabb.pipelines ----------------------- -Motor Imagery Datasets ----------------------- +--------- +Pipelines +--------- .. autosummary:: :toctree: generated/ :template: class.rst features.LogVariance + filter_bank.FilterBank ------------ Base & Utils diff --git a/moabb/datasets/Weibo2014.py b/moabb/datasets/Weibo2014.py index 718f3959e..cc7a913db 100644 --- a/moabb/datasets/Weibo2014.py +++ b/moabb/datasets/Weibo2014.py @@ -56,7 +56,7 @@ def get_subjects(sub_inds, sub_names, ind): class Weibo2014(BaseDataset): - """Weibo 2014 Motor Imagery dataset. + """Motor Imagery dataset from Weibo et al 2014. Dataset from the article *Evaluation of EEG oscillatory patterns and cognitive process during simple and compound limb motor imagery* [1]_. diff --git a/moabb/datasets/Zhou2016.py b/moabb/datasets/Zhou2016.py index 175821b48..35529a6f1 100644 --- a/moabb/datasets/Zhou2016.py +++ b/moabb/datasets/Zhou2016.py @@ -1,15 +1,13 @@ ''' -Simple and compound motor imagery +Simple and compound motor imagery. 
https://doi.org/10.1371/journal.pone.0114853 ''' from .base import BaseDataset import zipfile as z -from scipy.io import loadmat +from mne.io import read_raw_cnt from mne.datasets.utils import _get_path, _do_path_update from mne.utils import _fetch_file -import mne -import numpy as np import os import shutil @@ -28,20 +26,22 @@ def local_data_path(base_path, subject): datapath = os.path.join(base_path, 'data') for i in range(1, 5): os.makedirs(os.path.join(base_path, 'subject_{}'.format(i))) - for session in range(1,4): - for run in ['A','B']: - os.rename(os.path.join(datapath, 'S{}_{}{}.cnt'.format(i,session, run)), + for session in range(1, 4): + for run in ['A', 'B']: + os.rename(os.path.join(datapath, + 'S{}_{}{}.cnt'.format(i, session, + run)), os.path.join(base_path, 'subject_{}'.format(i), - '{}{}.cnt'.format(session,run))) + '{}{}.cnt'.format(session, run))) shutil.rmtree(os.path.join(base_path, 'data')) subjpath = os.path.join(base_path, 'subject_{}'.format(subject)) - return [[os.path.join(subjpath, - '{}{}.cnt'.format(y, x)) for x in ['A', 'B']] for y in ['1', '2', '3']] + return [[os.path.join(subjpath, '{}{}.cnt'.format(y, x)) + for x in ['A', 'B']] for y in ['1', '2', '3']] class Zhou2016(BaseDataset): - """Dataset from Zhou et al. 2016. + """Motor Imagery dataset from Zhou et al 2016. Dataset from the article *A Fully Automated Trial Selection Method for Optimization of Motor Imagery Based Brain-Computer Interface* [1]_. 
@@ -92,9 +92,8 @@ def _get_single_subject_data(self, subject): out[sess_key] = {} for run_ind, fname in enumerate(runlist): run_key = 'run_{}'.format(run_ind) - out[sess_key][run_key] = mne.io.read_raw_cnt(fname, - preload=True, - montage='standard_1020') + out[sess_key][run_key] = read_raw_cnt(fname, preload=True, + montage='standard_1005') return out def data_path(self, subject, path=None, force_update=False, diff --git a/moabb/datasets/__init__.py b/moabb/datasets/__init__.py index 5171eebff..79db20c84 100644 --- a/moabb/datasets/__init__.py +++ b/moabb/datasets/__init__.py @@ -4,12 +4,14 @@ and will convert them into a MNE raw object. There are options to pool all the different recording sessions per subject or to evaluate them separately. """ -from .gigadb import GigaDbMI +# flake8: noqa +from .gigadb import Cho2017 from .alex_mi import AlexMI from .physionet_mi import PhysionetMI -from .bnci import BNCI2014001, BNCI2014002, BNCI2014004, BNCI2015001, BNCI2015004 +from .bnci import (BNCI2014001, BNCI2014002, BNCI2014004, BNCI2015001, + BNCI2015004) from .openvibe_mi import OpenvibeMI -from .bbci_eeg_fnirs import BBCIEEGfNIRS -from .upper_limb import UpperLimb +from .bbci_eeg_fnirs import Shin2017A, Shin2017B +from .upper_limb import Ofner2017 from .Weibo2014 import Weibo2014 from .Zhou2016 import Zhou2016 diff --git a/moabb/datasets/alex_mi.py b/moabb/datasets/alex_mi.py index 60a7a17e6..b9864294b 100644 --- a/moabb/datasets/alex_mi.py +++ b/moabb/datasets/alex_mi.py @@ -11,20 +11,30 @@ class AlexMI(BaseDataset): - """Alex Motor Imagery dataset - This Dataset contains EEG recordings from 8 subjects, performing 2 task of motor - imagination (right hand, feet or rest). Data have been recorded at 512Hz with 16 - wet electrodes (Fpz, F7, F3, Fz, F4, F8, T7, C3, Cz, C4, T8, P7, P3, Pz, P4, P8) - with a g.tec g.USBamp EEG amplifier. - - File are provided in MNE raw file format. A stimulation channel encoding the - timing of the motor imagination. 
The start of a trial is encoded as 1, then the - actual start of the motor imagination is encoded with 2 for imagination of a - right hand movement, 3 for imagination of both feet movement and 4 with a rest - trial. + """Alex Motor Imagery dataset. + + Motor imagery dataset from the PhD dissertation of A. Barachant [1]_. + + This Dataset contains EEG recordings from 8 subjects, performing 2 task of + motor imagination (right hand, feet or rest). Data have been recorded at + 512Hz with 16 wet electrodes (Fpz, F7, F3, Fz, F4, F8, T7, C3, Cz, C4, T8, + P7, P3, Pz, P4, P8) with a g.tec g.USBamp EEG amplifier. + + File are provided in MNE raw file format. A stimulation channel encoding + the timing of the motor imagination. The start of a trial is encoded as 1, + then the actual start of the motor imagination is encoded with 2 for + imagination of a right hand movement, 3 for imagination of both feet + movement and 4 with a rest trial. The duration of each trial is 3 second. There is 20 trial of each class. + references + ---------- + .. [1] Barachant, A., 2012. Commande robuste d'un effecteur par une + interface cerveau machine EEG asynchrone (Doctoral dissertation, + Université de Grenoble). 
+ https://tel.archives-ouvertes.fr/tel-01196752 + """ def __init__(self): @@ -34,8 +44,7 @@ def __init__(self): events=dict(right_hand=2, feet=3, rest=4), code='Alexandre Motor Imagery', interval=[0, 3], - paradigm='imagery' - ) + paradigm='imagery') def _get_single_subject_data(self, subject): """return data for a single subject""" diff --git a/moabb/datasets/base.py b/moabb/datasets/base.py index cdca2b327..60914f640 100644 --- a/moabb/datasets/base.py +++ b/moabb/datasets/base.py @@ -8,46 +8,48 @@ class BaseDataset(metaclass=abc.ABCMeta): - """Base dataset""" + """BaseDataset + + Parameters required for all datasets + + parameters + ---------- + subjects: List of int + List of subject number # TODO: make identifiers more general + + sessions_per_subject: int + Number of sessions per subject + + events: dict of string: int + String codes for events matched with labels in the stim channel. + Currently imagery codes codes can include: + - left_hand + - right_hand + - hands + - feet + - rest + - left_hand_right_foot + - right_hand_left_foot + - tongue + - navigation + - subtraction + - word_ass (for word association) + + code: string + Unique identifier for dataset, used in all plots + + interval: list with 2 entries + Imagery interval as defined in the dataset description + + paradigm: ['p300','imagery', 'ssvep'] + Defines what sort of dataset this is (currently only imagery is + implemented) + + doi: DOI for dataset, optional (for now) + """ def __init__(self, subjects, sessions_per_subject, events, code, interval, paradigm, doi=None): - """ - Parameters required for all datasets - - parameters - ---------- - subjects: List of int - List of subject number # TODO: make identifiers more general - - sessions_per_subject: int - Number of sessions per subject - - events: dict of string: int - String codes for events matched with labels in the stim channel. 
Currently imagery codes codes can include: - - left_hand - - right_hand - - hands - - feet - - rest - - left_hand_right_foot - - right_hand_left_foot - - tongue - - navigation - - subtraction - - word_ass (for word association) - - code: string - Unique identifier for dataset, used in all plots - - interval: list with 2 entries - Imagery interval as defined in the dataset description - - paradigm: ['p300','imagery'] - Defines what sort of dataset this is (currently only imagery is implemented) - - doi: DOI for dataset, optional (for now) - """ if not isinstance(subjects, list): raise(ValueError("subjects must be a list")) @@ -134,8 +136,7 @@ def download(self, path=None, force_update=False, @abc.abstractmethod def _get_single_subject_data(self, subject): - """ - Return the data of a single subject + """Return the data of a single subject. The returned data is a dictionary with the folowing structure diff --git a/moabb/datasets/bbci_eeg_fnirs.py b/moabb/datasets/bbci_eeg_fnirs.py index 540fd32dc..40f72e098 100644 --- a/moabb/datasets/bbci_eeg_fnirs.py +++ b/moabb/datasets/bbci_eeg_fnirs.py @@ -9,14 +9,13 @@ from mne import create_info from mne.io import RawArray from mne.channels import read_montage -from . 
import download as dl import os.path as op import os import zipfile as z from mne.datasets.utils import _get_path, _do_path_update -from mne.utils import _fetch_file, _url_to_local_path +from mne.utils import _fetch_file -BBCIFNIRS_URL = 'http://doc.ml.tu-berlin.de/hBCI/' +SHIN_URL = 'http://doc.ml.tu-berlin.de/hBCI' def eeg_data_path(base_path, subject): @@ -29,9 +28,11 @@ def eeg_data_path(base_path, subject): for low, high in intervals: if subject >= low and subject <= high: if not op.isfile(op.join(base_path, 'EEG.zip')): - _fetch_file('http://doc.ml.tu-berlin.de/hBCI/EEG/EEG_{:02d}-{:02d}.zip'.format(low, - high), - op.join(base_path, 'EEG.zip'), print_destination=False) + _fetch_file('{}/EEG/EEG_{:02d}-{:02d}.zip'.format(SHIN_URL, + low, + high), + op.join(base_path, 'EEG.zip'), + print_destination=False) with z.ZipFile(op.join(base_path, 'EEG.zip'), 'r') as f: f.extractall(op.join(base_path, 'EEG')) os.remove(op.join(base_path, 'EEG.zip')) @@ -44,7 +45,6 @@ def eeg_data_path(base_path, subject): def fnirs_data_path(path, subject): datapath = op.join(path, 'NIRS', 'subject {:02d}'.format(subject)) if not op.isfile(op.join(datapath, 'mrk.mat')): - print('No fNIRS files for subject, suggesting dataset not yet downloaded. All subjects must now be downloaded') # fNIRS if not op.isfile(op.join(path, 'fNIRS.zip')): _fetch_file('http://doc.ml.tu-berlin.de/hBCI/NIRS/NIRS_01-29.zip', @@ -57,106 +57,9 @@ def fnirs_data_path(path, subject): return [op.join(datapath, fn) for fn in ['cnt.mat', 'mrk.mat']] -class BBCIEEGfNIRS(BaseDataset): - """BBCI EEG fNIRS Motor Imagery dataset - - Data Acquisition - ---------------------------------------- - - EEG and NIRS data was collected in an ordinary bright room. EEG data was - recorded by a multichannel BrainAmp EEG amplifier with thirty active - electrodes (Brain Products GmbH, Gilching, Germany) with linked mastoids - reference at 1000 Hz sampling rate. 
The EEG amplifier was also used to - measure the electrooculogram (EOG), electrocardiogram (ECG) and respiration - with a piezo based breathing belt. Thirty EEG electrodes were placed on a - custom-made stretchy fabric cap (EASYCAP GmbH, Herrsching am Ammersee, - Germany) and placed according to the international 10-5 system (AFp1, AFp2, - AFF1h, AFF2h, AFF5h, AFF6h, F3, F4, F7, F8, FCC3h, FCC4h, FCC5h, FCC6h, T7, - T8, Cz, CCP3h, CCP4h, CCP5h, CCP6h, Pz, P3, P4, P7, P8, PPO1h, PPO2h, POO1, - POO2 and Fz for ground electrode). - - NIRS data was collected by NIRScout (NIRx GmbH, Berlin, Germany) at 12.5 Hz - sampling rate. Each adjacent source-detector pair creates one physiological - NIRS channel. Fourteen sources and sixteen detectors resulting in thirty-six - physiological channels were placed at frontal (nine channels around Fp1, - Fp2, and Fpz), motor (twelve channels around C3 and C4, respectively) and - visual areas (three channels around Oz). The inter-optode distance was 30 - mm. NIRS optodes were fixed on the same cap as the EEG electrodes. Ambient - lights were sufficiently blocked by a firm contact between NIRS optodes and - scalp and use of an opaque cap. - - EOG was recorded using two vertical (above and below left eye) and two - horizontal (outer canthus of each eye) electrodes. ECG was recorded based on - Einthoven triangle derivations I and II, and respiration was measured using - a respiration belt on the lower chest. EOG, ECG and respiration were sampled - at the same sampling rate of the EEG. ECG and respiration data were not - analyzed in this study, but are provided along with the other signals. - - Experimental Procedure - ---------------------------------------- - - The subjects sat on a comfortable armchair in front of a 50-inch white - screen. The distance between their heads and the screen was 1.6 m. They were - asked not to move any part of the body during the data recording. 
The - experiment consisted of three sessions of left and right hand MI (dataset A) - and MA and baseline tasks (taking a rest without any thought) (dataset B) - each. Each session comprised a 1 min pre-experiment resting period, 20 - repetitions of the given task and a 1 min post-experiment resting - period. The task started with 2 s of a visual introduction of the task, - followed by 10 s of a task period and resting period which was given - randomly from 15 to 17 s. At the beginning and end of the task period, a - short beep (250 ms) was played. All instructions were displayed on the white - screen by a video projector. MI and MA tasks were performed in separate - sessions but in alternating order (i.e., sessions 1, 3 and 5 for MI (dataset - A) and sessions 2, 4 and 6 for MA (dataset B)). Fig. 2 shows the schematic - diagram of the experimental paradigm. Five sorts of motion artifacts induced - by eye and head movements (dataset C) were measured. The motion artifacts - were recorded after all MI and MA task recordings. The experiment did not - include the pre- and post-experiment resting state periods. - - Motor Imagery (Dataset A) - ---------------------------- - - For motor imagery, subjects were instructed to perform haptic motor imagery - (i.e. to imagine the feeling of opening and closing their hands as they were - grabbing a ball) to ensure that actual motor imagery, not visual imagery, - was performed. All subjects were naive to the MI experiment. For the visual - instruction, a black arrow pointing to either the left or right side - appeared at the center of the screen for 2 s. The arrow disappeared with a - short beep sound and then a black fixation cross was displayed during the - task period. The subjects were asked to imagine hand gripping (opening and - closing their hands) in a 1 Hz pace. This pace was shown to and repeated by - the subjects by performing real hand gripping before the experiment. 
Motor - imagery was performed continuously over the task period. The task period was - finished with a short beep sound and a 'STOP' displayed for 1s on the - screen. The fixation cross was displayed again during the rest period and - the subjects were asked to gaze at it to minimize their eye movements. This - process was repeated twenty times in a single session (ten trials per - condition in a single session; thirty trials in the whole sessions). In a - single session, motor imagery tasks were performed on the basis of ten - subsequent blocks randomly consisting of one of two conditions: Either first - left and then right hand motor imagery or vice versa. - - Mental Arithmetic (Dataset B) - ---------------------------------------- - - For the visual instruction of the MA task, an initial subtraction such as - 'three-digit number minus one-digit number' (e.g., 384-8) appeared at the - center of the screen for 2 s. The subjects were instructed to memorize the - numbers while the initial subtraction was displayed on the screen. The - initial subtraction disappeared with a short beep sound and a black fixation - cross was displayed during the task period in which the subjects were asked - to repeatedly perform to subtract the one-digit number from the result of - the previous subtraction. For the baseline task, no specific sign but the - black fixation cross was displayed on the screen, and the subjects were - instructed to take a rest. Note that there were other rest periods between - the MA and baseline task periods, as same with the MI paradigm. Both task - periods were finished with a short beep sound and a 'STOP' displayed for 1 s - on the screen. The fixation cross was displayed again during the rest - period. MA and baseline trials were randomized in the same way as MI. - +class Shin2017(BaseDataset): + """Not to be used. 
""" - def __init__(self, fnirs=False, motor_imagery=True, mental_arithmetic=False): if not any([motor_imagery, mental_arithmetic]): @@ -181,11 +84,14 @@ def __init__(self, fnirs=False, motor_imagery=True, super().__init__(subjects=list(range(1, 30)), sessions_per_subject=n_sessions, events=events, - code='BBCI EEG fNIRS', - interval=[0, 10], # marker is for *task* start not cue start + code='Shin2017', + # marker is for *task* start not cue start + interval=[0, 10], paradigm=('/').join(paradigms), doi='10.1109/TNSRE.2016.2628057') + if fnirs: + raise(NotImplementedError("Fnirs not implemented.")) self.fnirs = fnirs # TODO: actually incorporate fNIRS somehow def _get_single_subject_data(self, subject): @@ -242,3 +148,222 @@ def data_path(self, subject, path=None, force_update=False, return fnirs_data_path(op.join(path, 'MNE-eegfnirs-data'), subject) else: return eeg_data_path(op.join(path, 'MNE-eegfnirs-data'), subject) + + +class Shin2017A(Shin2017): + """Motor Imagey Dataset from Shin et al 2017. + + Dataset A from [1]_. + + **Data Acquisition** + + EEG and NIRS data was collected in an ordinary bright room. EEG data was + recorded by a multichannel BrainAmp EEG amplifier with thirty active + electrodes (Brain Products GmbH, Gilching, Germany) with linked mastoids + reference at 1000 Hz sampling rate. The EEG amplifier was also used to + measure the electrooculogram (EOG), electrocardiogram (ECG) and respiration + with a piezo based breathing belt. Thirty EEG electrodes were placed on a + custom-made stretchy fabric cap (EASYCAP GmbH, Herrsching am Ammersee, + Germany) and placed according to the international 10-5 system (AFp1, AFp2, + AFF1h, AFF2h, AFF5h, AFF6h, F3, F4, F7, F8, FCC3h, FCC4h, FCC5h, FCC6h, T7, + T8, Cz, CCP3h, CCP4h, CCP5h, CCP6h, Pz, P3, P4, P7, P8, PPO1h, PPO2h, POO1, + POO2 and Fz for ground electrode). + + NIRS data was collected by NIRScout (NIRx GmbH, Berlin, Germany) at 12.5 Hz + sampling rate. 
Each adjacent source-detector pair creates one physiological + NIRS channel. Fourteen sources and sixteen detectors resulting in + thirty-six + physiological channels were placed at frontal (nine channels around Fp1, + Fp2, and Fpz), motor (twelve channels around C3 and C4, respectively) and + visual areas (three channels around Oz). The inter-optode distance was 30 + mm. NIRS optodes were fixed on the same cap as the EEG electrodes. Ambient + lights were sufficiently blocked by a firm contact between NIRS optodes and + scalp and use of an opaque cap. + + EOG was recorded using two vertical (above and below left eye) and two + horizontal (outer canthus of each eye) electrodes. ECG was recorded based + on + Einthoven triangle derivations I and II, and respiration was measured using + a respiration belt on the lower chest. EOG, ECG and respiration were + sampled + at the same sampling rate of the EEG. ECG and respiration data were not + analyzed in this study, but are provided along with the other signals. + + **Experimental Procedure** + + The subjects sat on a comfortable armchair in front of a 50-inch white + screen. The distance between their heads and the screen was 1.6 m. They + were + asked not to move any part of the body during the data recording. The + experiment consisted of three sessions of left and right hand MI (dataset + A)and MA and baseline tasks (taking a rest without any thought) (dataset B) + each. Each session comprised a 1 min pre-experiment resting period, 20 + repetitions of the given task and a 1 min post-experiment resting + period. The task started with 2 s of a visual introduction of the task, + followed by 10 s of a task period and resting period which was given + randomly from 15 to 17 s. At the beginning and end of the task period, a + short beep (250 ms) was played. All instructions were displayed on the + white + screen by a video projector. 
MI and MA tasks were performed in separate + sessions but in alternating order (i.e., sessions 1, 3 and 5 for MI + (dataset + A) and sessions 2, 4 and 6 for MA (dataset B)). Fig. 2 shows the schematic + diagram of the experimental paradigm. Five sorts of motion artifacts + induced + by eye and head movements (dataset C) were measured. The motion artifacts + were recorded after all MI and MA task recordings. The experiment did not + include the pre- and post-experiment resting state periods. + + **Motor Imagery (Dataset A)** + + For motor imagery, subjects were instructed to perform haptic motor imagery + (i.e. to imagine the feeling of opening and closing their hands as they + were + grabbing a ball) to ensure that actual motor imagery, not visual imagery, + was performed. All subjects were naive to the MI experiment. For the visual + instruction, a black arrow pointing to either the left or right side + appeared at the center of the screen for 2 s. The arrow disappeared with a + short beep sound and then a black fixation cross was displayed during the + task period. The subjects were asked to imagine hand gripping (opening and + closing their hands) in a 1 Hz pace. This pace was shown to and repeated by + the subjects by performing real hand gripping before the experiment. Motor + imagery was performed continuously over the task period. The task period + was finished with a short beep sound and a 'STOP' displayed for 1s on the + screen. The fixation cross was displayed again during the rest period and + the subjects were asked to gaze at it to minimize their eye movements. This + process was repeated twenty times in a single session (ten trials per + condition in a single session; thirty trials in the whole sessions). In a + single session, motor imagery tasks were performed on the basis of ten + subsequent blocks randomly consisting of one of two conditions: Either + first left and then right hand motor imagery or vice versa. 
+ + **Mental Arithmetic (Dataset B)** + + For the visual instruction of the MA task, an initial subtraction such as + 'three-digit number minus one-digit number' (e.g., 384-8) appeared at the + center of the screen for 2 s. The subjects were instructed to memorize the + numbers while the initial subtraction was displayed on the screen. The + initial subtraction disappeared with a short beep sound and a black + fixation cross was displayed during the task period in which the subjects + were asked + to repeatedly perform to subtract the one-digit number from the result of + the previous subtraction. For the baseline task, no specific sign but the + black fixation cross was displayed on the screen, and the subjects were + instructed to take a rest. Note that there were other rest periods between + the MA and baseline task periods, as same with the MI paradigm. Both task + periods were finished with a short beep sound and a 'STOP' displayed for + 1 s on the screen. The fixation cross was displayed again during the rest + period. MA and baseline trials were randomized in the same way as MI. + + references + ---------- + + .. [1] Shin, J., von Lühmann, A., Blankertz, B., Kim, D.W., Jeong, J., + Hwang, H.J. and Müller, K.R., 2017. Open access dataset for EEG+NIRS + single-trial classification. IEEE Transactions on Neural Systems + and Rehabilitation Engineering, 25(10), pp.1735-1745. + + """ + + def __init__(self): + super().__init__(fnirs=False, motor_imagery=True, + mental_arithmetic=False) + self.code = 'Shin2017A' + + +class Shin2017B(Shin2017): + """Mental Arithmetic Dataset from Shin et al 2017. + + Dataset B from [1]_. + + **Data Acquisition** + + EEG and NIRS data was collected in an ordinary bright room. EEG data was + recorded by a multichannel BrainAmp EEG amplifier with thirty active + electrodes (Brain Products GmbH, Gilching, Germany) with linked mastoids + reference at 1000 Hz sampling rate. 
The EEG amplifier was also used to + measure the electrooculogram (EOG), electrocardiogram (ECG) and respiration + with a piezo based breathing belt. Thirty EEG electrodes were placed on a + custom-made stretchy fabric cap (EASYCAP GmbH, Herrsching am Ammersee, + Germany) and placed according to the international 10-5 system (AFp1, AFp2, + AFF1h, AFF2h, AFF5h, AFF6h, F3, F4, F7, F8, FCC3h, FCC4h, FCC5h, FCC6h, T7, + T8, Cz, CCP3h, CCP4h, CCP5h, CCP6h, Pz, P3, P4, P7, P8, PPO1h, PPO2h, POO1, + POO2 and Fz for ground electrode). + + NIRS data was collected by NIRScout (NIRx GmbH, Berlin, Germany) at 12.5 Hz + sampling rate. Each adjacent source-detector pair creates one physiological + NIRS channel. Fourteen sources and sixteen detectors resulting in + thirty-six + physiological channels were placed at frontal (nine channels around Fp1, + Fp2, and Fpz), motor (twelve channels around C3 and C4, respectively) and + visual areas (three channels around Oz). The inter-optode distance was 30 + mm. NIRS optodes were fixed on the same cap as the EEG electrodes. Ambient + lights were sufficiently blocked by a firm contact between NIRS optodes and + scalp and use of an opaque cap. + + EOG was recorded using two vertical (above and below left eye) and two + horizontal (outer canthus of each eye) electrodes. ECG was recorded based + on + Einthoven triangle derivations I and II, and respiration was measured using + a respiration belt on the lower chest. EOG, ECG and respiration were + sampled + at the same sampling rate of the EEG. ECG and respiration data were not + analyzed in this study, but are provided along with the other signals. + + **Experimental Procedure** + + The subjects sat on a comfortable armchair in front of a 50-inch white + screen. The distance between their heads and the screen was 1.6 m. They + were + asked not to move any part of the body during the data recording. 
The + experiment consisted of three sessions of left and right hand MI (dataset + A)and MA and baseline tasks (taking a rest without any thought) (dataset B) + each. Each session comprised a 1 min pre-experiment resting period, 20 + repetitions of the given task and a 1 min post-experiment resting + period. The task started with 2 s of a visual introduction of the task, + followed by 10 s of a task period and resting period which was given + randomly from 15 to 17 s. At the beginning and end of the task period, a + short beep (250 ms) was played. All instructions were displayed on the + white + screen by a video projector. MI and MA tasks were performed in separate + sessions but in alternating order (i.e., sessions 1, 3 and 5 for MI + (dataset + A) and sessions 2, 4 and 6 for MA (dataset B)). Fig. 2 shows the schematic + diagram of the experimental paradigm. Five sorts of motion artifacts + induced + by eye and head movements (dataset C) were measured. The motion artifacts + were recorded after all MI and MA task recordings. The experiment did not + include the pre- and post-experiment resting state periods. + + **Mental Arithmetic (Dataset B)** + + For the visual instruction of the MA task, an initial subtraction such as + 'three-digit number minus one-digit number' (e.g., 384-8) appeared at the + center of the screen for 2 s. The subjects were instructed to memorize the + numbers while the initial subtraction was displayed on the screen. The + initial subtraction disappeared with a short beep sound and a black + fixation cross was displayed during the task period in which the subjects + were asked + to repeatedly perform to subtract the one-digit number from the result of + the previous subtraction. For the baseline task, no specific sign but the + black fixation cross was displayed on the screen, and the subjects were + instructed to take a rest. Note that there were other rest periods between + the MA and baseline task periods, as same with the MI paradigm. 
Both task + periods were finished with a short beep sound and a 'STOP' displayed for + 1 s on the screen. The fixation cross was displayed again during the rest + period. MA and baseline trials were randomized in the same way as MI. + + references + ---------- + + .. [1] Shin, J., von Lühmann, A., Blankertz, B., Kim, D.W., Jeong, J., + Hwang, H.J. and Müller, K.R., 2017. Open access dataset for EEG+NIRS + single-trial classification. IEEE Transactions on Neural Systems + and Rehabilitation Engineering, 25(10), pp.1735-1745. + + """ + + def __init__(self): + super().__init__(fnirs=False, motor_imagery=False, + mental_arithmetic=True) + self.code = 'Shin2017B' diff --git a/moabb/datasets/bnci.py b/moabb/datasets/bnci.py index 5657cc0e3..1e833c54e 100644 --- a/moabb/datasets/bnci.py +++ b/moabb/datasets/bnci.py @@ -640,20 +640,24 @@ def data_path(self, subject, path=None, force_update=False, class BNCI2014001(MNEBNCI): - """BNCI 2014-001 Motor Imagery dataset + """BNCI 2014-001 Motor Imagery dataset. + + Dataset IIa from BCI Competition 4 [1]_. + + **Dataset Description** This data set consists of EEG data from 9 subjects. The cue-based BCI paradigm consisted of four different motor imagery tasks, namely the imag- ination of movement of the left hand (class 1), right hand (class 2), both feet (class 3), and tongue (class 4). Two sessions on different days were - recorded for each subject. Each session is comprised of 6 runs separated by - short breaks. One run consists of 48 trials (12 for each of the four + recorded for each subject. Each session is comprised of 6 runs separated + by short breaks. One run consists of 48 trials (12 for each of the four possible classes), yielding a total of 288 trials per session. - The subjects were sitting in a comfortable armchair in front of a com- puter + The subjects were sitting in a comfortable armchair in front of a computer screen. At the beginning of a trial ( t = 0 s), a fixation cross appeared - on the black screen. 
In addition, a short acoustic warning tone was pre- - sented. After two seconds ( t = 2 s), a cue in the form of an arrow + on the black screen. In addition, a short acoustic warning tone was + presented. After two seconds ( t = 2 s), a cue in the form of an arrow pointing either to the left, right, down or up (corresponding to one of the four classes left hand, right hand, foot or tongue) appeared and stayed on the screen for 1.25 s. This prompted the subjects to perform the desired @@ -663,16 +667,19 @@ class BNCI2014001(MNEBNCI): Twenty-two Ag/AgCl electrodes (with inter-electrode distances of 3.5 cm) were used to record the EEG; the montage is shown in Figure 3 left. All - signals were recorded monopolarly with the left mastoid serving as reference - and the right mastoid as ground. The signals were sampled with 250 Hz and - bandpass-filtered between 0.5 Hz and 100 Hz. The sensitivity of the - amplifier was set to 100 μV . An additional 50 Hz notch filter was enabled - to suppress line noise + signals were recorded monopolarly with the left mastoid serving as + reference and the right mastoid as ground. The signals were sampled with. + 250 Hz and bandpass-filtered between 0.5 Hz and 100 Hz. The sensitivity of + the amplifier was set to 100 μV . An additional 50 Hz notch filter was + enabled to suppress line noise References ---------- - .. [1] doi.org/10.3389/fnins.2012.00055 + .. [1] Tangermann, M., Müller, K.R., Aertsen, A., Birbaumer, N., Braun, C., + Brunner, C., Leeb, R., Mehring, C., Miller, K.J., Mueller-Putz, G. + and Nolte, G., 2012. Review of the BCI competition IV. + Frontiers in neuroscience, 6, p.55. """ def __init__(self): @@ -687,8 +694,12 @@ def __init__(self): class BNCI2014002(MNEBNCI): - """BNCI 2014-002 Motor Imagery dataset - + """BNCI 2014-002 Motor Imagery dataset. + + Motor Imagery Dataset from [1]_. + + **Dataset description** + The session consisted of eight runs, five of them for training and three with feedback for validation. 
One run was composed of 20 trials. Taken together, we recorded 50 trials per class for training and 30 trials per @@ -700,7 +711,8 @@ class for validation. Participants had the task of performing sustained (5 with MI as soon as they recognized the cue and to perform the indicated MI until the cross disappeared at 8 s. A rest period with a random length between 2 s and 3 s was presented between trials. Participants did not - receive feedback during training. Feedback was presented in form of a white + receive feedback during training. Feedback was presented in form of a + white coloured bar-graph. The length of the bar-graph reflected the amount of correct classifications over the last second. EEG was measured with a biosignal amplifier and active Ag/AgCl electrodes (g.USBamp, g.LADYbird, @@ -712,12 +724,15 @@ class for validation. Participants had the task of performing sustained (5 mastoid and the ground electrode on the right mastoid. The 13 participants were aged between 20 and 30 years, 8 naive to the task, and had no known medical or neurological diseases. - + References ----------- - .. [1] doi.org/10.1515/bmt-2014-0117 - + .. [1] Steyrl, D., Scherer, R., Faller, J. and Müller-Putz, G.R., 2016. + Random forests in non-invasive sensorimotor rhythm brain-computer + interfaces: a practical and convenient non-linear classifier. + Biomedical Engineering/Biomedizinische Technik, 61(1), pp.77-86. + """ def __init__(self): @@ -732,34 +747,41 @@ def __init__(self): class BNCI2014004(MNEBNCI): - """BNCI 2014-004 Motor Imagery dataset:BCI Competition 2008 – Graz data set B - - This data set consists of EEG data from 9 subjects of a study published in [1]. - The subjects were right-handed, had normal or corrected-to-normal vision and - were paid for participating in the experiments. All volunteers were sitting in - an armchair, watching a flat screen monitor placed approximately 1 m away at eye - level. 
For each subject 5 sessions are provided, whereby the first two sessions - contain training data without feedback (screening), and the last three sessions - were recorded with feedback. + """BNCI 2014-004 Motor Imagery dataset. + + Dataset B from BCI Competition 2008 [1]_. + + **Dataset description** + + This data set consists of EEG data from 9 subjects of a study published in + [1]_. The subjects were right-handed, had normal or corrected-to-normal + vision and were paid for participating in the experiments. + All volunteers were sitting in an armchair, watching a flat screen monitor + placed approximately 1 m away at eye level. For each subject 5 sessions + are provided, whereby the first two sessions contain training data without + feedback (screening), and the last three sessions were recorded with + feedback. Three bipolar recordings (C3, Cz, and C4) were recorded with a sampling - frequency of 250 Hz.They were bandpass- filtered between 0.5 Hz and 100 Hz, and - a notch filter at 50 Hz was enabled. The placement of the three bipolar - recordings (large or small distances, more anterior or posterior) were slightly - different for each subject (for more details see [1]). The electrode position - Fz served as EEG ground. In addition to the EEG channels, the electrooculogram - (EOG) was recorded with three monopolar electrodes. + frequency of 250 Hz.They were bandpass- filtered between 0.5 Hz and 100 Hz, + and a notch filter at 50 Hz was enabled. The placement of the three + bipolar recordings (large or small distances, more anterior or posterior) + were slightly different for each subject (for more details see [1]). + The electrode position Fz served as EEG ground. In addition to the EEG + channels, the electrooculogram (EOG) was recorded with three monopolar + electrodes. The cue-based screening paradigm consisted of two classes, - namely the motor imagery (MI) of left hand (class 1) and right hand (class 2). 
- Each subject participated in two screening sessions without feedback recorded on - two different days within two weeks. Each session consisted of six runs with - ten trials each and two classes of imagery. This resulted in 20 trials per run - and 120 trials per session. Data of 120 repetitions of each MI class were - available for each person in total. Prior to the first motor im- agery training - the subject executed and imagined different movements for each body part and - selected the one which they could imagine best (e. g., squeezing a ball or - pulling a brake). + namely the motor imagery (MI) of left hand (class 1) and right hand + (class 2). + Each subject participated in two screening sessions without feedback + recorded on two different days within two weeks. + Each session consisted of six runs with ten trials each and two classes of + imagery. This resulted in 20 trials per run and 120 trials per session. + Data of 120 repetitions of each MI class were available for each person in + total. Prior to the first motor im- agery training the subject executed + and imagined different movements for each body part and selected the one + which they could imagine best (e. g., squeezing a ball or pulling a brake). Each trial started with a fixation cross and an additional short acoustic warning tone (1 kHz, 70 ms). Some seconds later a visual cue was presented @@ -768,7 +790,7 @@ class BNCI2014004(MNEBNCI): short break of at least 1.5 seconds. A randomized time of up to 1 second was added to the break to avoid adaptation - For the three online feedback sessions four runs with smiley feedback + For the three online feedback sessions four runs with smiley feedback were recorded, whereby each run consisted of twenty trials for each type of motor imagery. At the beginning of each trial (second 0) the feedback (a gray smiley) was centered on the screen. 
At second 2, a short warning beep @@ -776,10 +798,14 @@ class BNCI2014004(MNEBNCI): second 7.5 the screen went blank and a random interval between 1.0 and 2.0 seconds was added to the trial. - [1] R. Leeb, F. Lee, C. Keinrath, R. Scherer, H. Bischof, G. Pfurtscheller. - Brain-computer communication: motivation, aim, and impact of ex- - ploring a virtual apartment. IEEE Transactions on Neural Systems and - Rehabilitation Engineering 15, 473–482, 2007 + References + ---------- + + .. [1] R. Leeb, F. Lee, C. Keinrath, R. Scherer, H. Bischof, + G. Pfurtscheller. Brain-computer communication: motivation, aim, + and impact of exploring a virtual apartment. IEEE Transactions on + Neural Systems and Rehabilitation Engineering 15, 473–482, 2007 + """ def __init__(self): @@ -794,7 +820,12 @@ def __init__(self): class BNCI2015001(MNEBNCI): - """BNCI 2015-001 Motor Imagery dataset + """BNCI 2015-001 Motor Imagery dataset. + + Dataset from [1]_. + + **Dataset description** + We acquired the EEG from three Laplacian derivations, 3.5 cm (center-to- center) around the electrode positions (according to International 10-20 System of Electrode Placement) C3 (FC3, C5, CP3 and C1), Cz (FCz, C1, CPz @@ -807,18 +838,21 @@ class BNCI2015001(MNEBNCI): The task for the user was to perform sustained right hand versus both feet movement imagery starting from the cue (second 3) to the end of the cross - period (sec- ond 8). A trial started with 3 s of reference period, followed - by a brisk audible cue and a visual cue (arrow right for right hand, arrow - down for both feet) from second 3 to 4.25. The activity period, where the - users received feedback, lasted from second 4 to 8. There was a random 2 to - 3 s pause between the trials. + period (sec- ond 8). A trial started with 3 s of reference period, + followed by a brisk audible cue and a visual cue (arrow right for right + hand, arrow down for both feet) from second 3 to 4.25. 
+ The activity period, where the users received feedback, lasted from + second 4 to 8. There was a random 2 to 3 s pause between the trials. + References + ---------- + + .. [1] J. Faller, C. Vidaurre, T. Solis-Escalante, C. Neuper and R. + Scherer (2012). Autocalibration and recurrent adaptation: Towards a + plug and play online ERD- BCI. IEEE Transactions on Neural Systems + and Rehabilitation Engineering, 20(3), 313-319. - [1] J. Faller, C. Vidaurre, T. Solis-Escalante, C. Neuper and R. - Scherer (2012) Autocalibration and recurrent adaptation: Towards a plug and - play online ERD- BCI. IEEE Transactions on Neural Systems and - Rehabilitation Engineering, 20(3), 313-319 . Doi: 10.1109/tnsre.2012.2189584. """ def __init__(self): @@ -834,44 +868,56 @@ def __init__(self): class BNCI2015004(MNEBNCI): - """BNCI 2015-004 Motor Imagery dataset + """BNCI 2015-004 Motor Imagery dataset. + + Dataset from [1]_. + + **Dataset description** + We provide EEG data recorded from nine users with disability (spinal cord injury and stroke) on two different days (sessions). Users performed, follow- ing a cue-guided experimental paradigm, five distinct mental tasks - (MT). MTs include mental word association (condition WORD), mental subtrac- - tion (SUB), spatial navigation (NAV), right hand motor imagery (HAND) and + (MT). MTs include mental word association (condition WORD), mental + subtraction (SUB), spatial navigation (NAV), right hand motor imagery + (HAND) and feet motor imagery (FEET). Details on the experimental paradigm are summarized in Figure 1. The session for a single subject consisted of 8 runs resulting in 40 trials of each class for each day. One single experimental run consisted of 25 cues, with 5 of each mental task. Cues were presented in random order. - EEG was recorded from 30 electrode channels placed on the scalp accord- ing + EEG was recorded from 30 electrode channels placed on the scalp according to the international 10-20 system. 
Electrode positions included channels AFz, F7, F3, Fz, F4, F8, FC3, FCz, FC4, T3, C3, Cz, C4, T4, CP3, CPz,CP4, - P7, P5, P3, P1, Pz, P2, P4, P6, P8, PO3, PO4, O1, and O2. Ref- erence and + P7, P5, P3, P1, Pz, P2, P4, P6, P8, PO3, PO4, O1, and O2. Reference and ground were placed at the left and right mastoid, respectively. The g.tec - GAMMAsys system with g.LADYbird active electrodes and two g.USBamp biosignal + GAMMAsys system with g.LADYbird active electrodes and two g.USBamp + biosignal amplifiers (Guger Technolgies, Graz, Austria) was used for recording. EEG was band pass filtered 0.5-100 Hz (notch filter at 50 Hz) and sampled at a rate of 256 Hz. The duration of a single imagery trials is 10 s. At t = 0 s, a cross was - presented in the middle of the screen. Participants were asked to relax and + presented in the middle of the screen. Participants were asked to relax + and fixate the cross to avoid eye movements. At t = 3 s, a beep was sounded to get the participant’s attention. The cue indicating the requested imagery task, one out of five graphical symbols, was presented from t = 3 s to t = 4.25 s. At t = 10 s, a second beep was sounded and the fixation-cross disappeared, which indicated the end of the trial. A variable break - (inter-trial- interval, ITI) lasting between 2.5 s and 3.5 s occurred before + (inter-trial-interval, ITI) lasting between 2.5 s and 3.5 s occurred + before the start of the next trial. Participants were asked to avoid movements during the imagery period, and to move and blink during the ITI. Experimental runs began and ended with a blank screen (duration 4 s) - [1] Scherer R, Faller J, Friedrich EVC, Opisso E, Costa U, Kübler A, et - al. (2015) Individually Adapted Imagery Improves Brain-Computer Interface - Performance in End-Users with Disability. PLoS ONE 10(5): - e0123727. https://doi.org/10.1371/journal.pone.0123727 + References + ---------- + + .. [1] Scherer R, Faller J, Friedrich EVC, Opisso E, Costa U, Kübler A, et + al. 
(2015) Individually Adapted Imagery Improves Brain-Computer + Interface Performance in End-Users with Disability. PLoS ONE 10(5). + https://doi.org/10.1371/journal.pone.0123727 """ def __init__(self): diff --git a/moabb/datasets/download.py b/moabb/datasets/download.py index 107765d60..c093a4370 100644 --- a/moabb/datasets/download.py +++ b/moabb/datasets/download.py @@ -46,9 +46,9 @@ def data_path(url, sign, path=None, force_update=False, update_path=True, """ # noqa: E501 sign = sign.upper() key = 'MNE_DATASETS_{:s}_PATH'.format(sign) + key_dest = 'MNE-{:s}-data'.format(sign.lower()) path = _get_path(path, key, sign) - destination = _url_to_local_path(url, op.join(path, - 'MNE-{:s}-data'.format(sign.lower()))) + destination = _url_to_local_path(url, op.join(path, key_dest)) # Fetch the file if not op.isfile(destination) or force_update: if op.isfile(destination): diff --git a/moabb/datasets/fake.py b/moabb/datasets/fake.py index 6c33b154b..067c51201 100644 --- a/moabb/datasets/fake.py +++ b/moabb/datasets/fake.py @@ -13,7 +13,7 @@ class FakeDataset(BaseDataset): """ - def __init__(self, event_list=['fake_c1', 'fake_c2', 'fake_c3'], + def __init__(self, event_list=('fake_c1', 'fake_c2', 'fake_c3'), n_sessions=2, n_runs=2, n_subjects=10): self.n_runs = n_runs event_id = {ev: ii + 1 for ii, ev in enumerate(event_list)} diff --git a/moabb/datasets/gigadb.py b/moabb/datasets/gigadb.py index 94a3cd4c6..c9b7a4d3e 100644 --- a/moabb/datasets/gigadb.py +++ b/moabb/datasets/gigadb.py @@ -15,18 +15,46 @@ log = logging.getLogger() -GIGA_URL = 'ftp://penguin.genomics.cn/pub/10.5524/100001_101000/100295/mat_data/' - - -class GigaDbMI(BaseDataset): - """GigaDb Motor Imagery dataset - We conducted a BCI experiment for motor imagery movement (MI movement) of the left and right hands with 52 subjects (19 females, mean age ± SD age = 24.8 ± 3.86 years); the experiment was approved by the Institutional Review Board of Gwangju Institute of Science and Technology. 
Each subject took part in the same experiment, and subject ID was denoted and indexed as s1, s2, …, s52. Subjects s20 and s33 were both-handed, and the other 50 subjects were right-handed. - - EEG data were collected using 64 Ag/AgCl active electrodes. As shown in Fig. 1, a 64-channel montage based on the international 10-10 system was used to record the EEG signals with 512 Hz sampling rates. The EEG device used in this experiment was the Biosemi ActiveTwo system. The BCI2000 system 3.0.2 was used to collect EEG data and present instructions (left hand or right hand MI). Furthermore, we recorded EMG as well as EEG simultaneously with the same system and sampling rate to check actual hand movements. Two EMG electrodes were attached to the flexor digitorum profundus and extensor digitorum on each arm. - - Subjects were asked to imagine the hand movement depending on the instruction given. Five or six runs were performed during the MI experiment. After each run, we calculated the classification accuracy over one run and gave the subject feedback to increase motivation. Between each run, a maximum 4-minute break was given depending on the subject's demands. - - [1] Hohyun Cho, Minkyu Ahn, Sangtae Ahn, Moonyoung Kwon, Sung Chan Jun; EEG datasets for motor imagery brain–computer interface, GigaScience, Volume 6, Issue 7, 1 July 2017, Pages 1–8, https://doi.org/10.1093/gigascience/gix034 +GIGA_URL = 'ftp://penguin.genomics.cn/pub/10.5524/100001_101000/100295/mat_data/' # noqa + + +class Cho2017(BaseDataset): + """Motor Imagery dataset from Cho et al 2017. + + Dataset from the paper [1]_. + + **Dataset Description** + + We conducted a BCI experiment for motor imagery movement (MI movement) + of the left and right hands with 52 subjects (19 females, mean age ± SD + age = 24.8 ± 3.86 years); Each subject took part in the same experiment, + and subject ID was denoted and indexed as s1, s2, …, s52. 
+ Subjects s20 and s33 were both-handed, and the other 50 subjects + were right-handed. + + EEG data were collected using 64 Ag/AgCl active electrodes. + A 64-channel montage based on the international 10-10 system was used to + record the EEG signals with 512 Hz sampling rates. + The EEG device used in this experiment was the Biosemi ActiveTwo system. + The BCI2000 system 3.0.2 was used to collect EEG data and present + instructions (left hand or right hand MI). Furthermore, we recorded + EMG as well as EEG simultaneously with the same system and sampling rate + to check actual hand movements. Two EMG electrodes were attached to the + flexor digitorum profundus and extensor digitorum on each arm. + + Subjects were asked to imagine the hand movement depending on the + instruction given. Five or six runs were performed during the MI + experiment. After each run, we calculated the classification + accuracy over one run and gave the subject feedback to increase motivation. + Between each run, a maximum 4-minute break was given depending on + the subject's demands. + + References + ---------- + + .. [1] Cho, H., Ahn, M., Ahn, S., Kwon, M. and Jun, S.C., 2017. + EEG datasets for motor imagery brain computer interface. + GigaScience. https://doi.org/10.1093/gigascience/gix034 """ def __init__(self): @@ -34,10 +62,11 @@ def __init__(self): subjects=list(range(1, 53)), sessions_per_subject=1, events=dict(left_hand=1, right_hand=2), - code='GigaDb Motor Imagery', + code='Cho2017', interval=[0, 3], # full trial is 0-3s, but edge effects paradigm='imagery', doi='10.5524/100295') + for ii in [32, 46, 49]: self.subject_list.remove(ii) @@ -71,9 +100,10 @@ def _get_single_subject_data(self, subject): # trials are already non continuous. 
edge artifact can appears but # are likely to be present during rest / inter-trial activity - eeg_data = np.hstack([eeg_data_l, np.zeros((eeg_data_l.shape[0], 500)),eeg_data_r]) - log.warning( - 'Trials demeaned and stacked with zero buffer to create continuous data -- edge effects present') + eeg_data = np.hstack([eeg_data_l, np.zeros((eeg_data_l.shape[0], 500)), + eeg_data_r]) + log.warning("Trials demeaned and stacked with zero buffer to create " + "continuous data -- edge effects present") info = create_info(ch_names=ch_names, ch_types=ch_types, sfreq=data.srate, montage=montage) diff --git a/moabb/datasets/openvibe_mi.py b/moabb/datasets/openvibe_mi.py index 1fd1bbbbf..a6c5485ca 100644 --- a/moabb/datasets/openvibe_mi.py +++ b/moabb/datasets/openvibe_mi.py @@ -37,30 +37,70 @@ def convert_inria_csv_to_mne(path): class OpenvibeMI(BaseDataset): - """Openvibe Motor Imagery dataset""" + """Openvibe Motor Imagery dataset. + + This datasets includes 14 records of left and right hand motor imagery from + a single subject. They include 11 channels : C3, C4, Nz, FC3, FC4, C5, C1, + C2, C6, CP3 and CP4. The channels are recorded in common average mode and + Nz can be used as a reference if needed. The signal is sampled at 512 Hz + and was recorded with our Mindmedia NeXus32B amplifier. + + Each file consists in 40 trials where the subject was requested to imagine + either left or right hand movements (20 each). The experiment followed the + Graz University protocol [1]_. + + The files were recorded on three different days of the same month. + + The data set has been used in the paper [2]_. + + references + ---------- + + .. [1] Pfurtscheller, G. & Neuper, C. Motor Imagery and Direct + Brain-Computer Communication. Proceedings of the IEEE, 89, + 1123-1134, 2001. + + .. [2] N. Brodu, F. Lotte, A. Lécuyer. Exploring Two Novel Features for + EEG-based Brain-Computer Interfaces: Multifractal Cumulants and + Predictive Complexity. Neurocomputing 79: 87-94, 2012. 
+ + + """ def __init__(self): super().__init__( subjects=[1], - sessions_per_subject=14, + sessions_per_subject=3, events=dict(right_hand=1, left_hand=2), code='Openvibe Motor Imagery', - interval=[0, 3], + # 5 second is the duration of the feedback in the OV protocol. + interval=[0, 5], paradigm='imagery') def _get_single_subject_data(self, subject): """return data for subject""" data = {} - for ii in range(1, 15): - raw = self._get_single_session_data(ii) - data["session_%d" % ii] = {'run_0': raw} + + # data are recorded on 3 different day (session). it's not specified + # wich run is wich session, but by looking at the data, we can identify + # the 3 sessions. + + sessions = [[1, 2, 3, 4], + [5, 6, 7, 8, 9], + [10, 11, 12, 13, 14]] + + for jj, session in enumerate(sessions): + for ii, run in enumerate(session): + raw = self._get_single_run_data(run) + data["session_%d" % jj] = {'run_%d' % ii: raw} + return data - def _get_single_session_data(self, session): + def _get_single_run_data(self, run): """return data for a single recording session""" - csv_path = self.data_path(1)[session - 1] + csv_path = self.data_path(1)[run - 1] fif_path = os.path.join(os.path.dirname(csv_path), - 'raw_{:d}.fif'.format(session)) + 'raw_{:d}.fif'.format(run)) if not os.path.isfile(fif_path): print('Resaving .csv file as .fif for ease of future loading') raw = convert_inria_csv_to_mne(csv_path) diff --git a/moabb/datasets/physionet_mi.py b/moabb/datasets/physionet_mi.py index 248ef5a88..85b690d54 100644 --- a/moabb/datasets/physionet_mi.py +++ b/moabb/datasets/physionet_mi.py @@ -11,45 +11,105 @@ class PhysionetMI(BaseDataset): - """Physionet Motor Imagery dataset [1] + """Physionet Motor Imagery dataset. - [1]https://physionet.org/pn4/eegmmidb/ + Physionet MI dataset: https://physionet.org/pn4/eegmmidb/ + + This data set consists of over 1500 one- and two-minute EEG recordings, + obtained from 109 volunteers. 
+ + Subjects performed different motor/imagery tasks while 64-channel EEG were + recorded using the BCI2000 system (http://www.bci2000.org). + Each subject performed 14 experimental runs: two one-minute baseline runs + (one with eyes open, one with eyes closed), and three two-minute runs of + each of the four following tasks: + + 1. A target appears on either the left or the right side of the screen. + The subject opens and closes the corresponding fist until the target + disappears. Then the subject relaxes. + + 2. A target appears on either the left or the right side of the screen. + The subject imagines opening and closing the corresponding fist until + the target disappears. Then the subject relaxes. + + 3. A target appears on either the top or the bottom of the screen. + The subject opens and closes either both fists (if the target is on top) + or both feet (if the target is on the bottom) until the target + disappears. Then the subject relaxes. + + 4. A target appears on either the top or the bottom of the screen. + The subject imagines opening and closing either both fists + (if the target is on top) or both feet (if the target is on the bottom) + until the target disappears. Then the subject relaxes. + + parameters + ---------- + + imagined: bool (default True) + if True, return runs corresponding to motor imagination. + + executed: bool (default False) + if True, return runs corresponding to motor execution. + + references + ---------- + + .. [1] Schalk, G., McFarland, D.J., Hinterberger, T., Birbaumer, N. and + Wolpaw, J.R., 2004. BCI2000: a general-purpose brain-computer + interface (BCI) system. IEEE Transactions on biomedical engineering, + 51(6), pp.1034-1043. + + .. [2] Goldberger, A.L., Amaral, L.A., Glass, L., Hausdorff, J.M., Ivanov, + P.C., Mark, R.G., Mietus, J.E., Moody, G.B., Peng, C.K., Stanley, + H.E. 
and PhysioBank, P., PhysioNet: components of a new research + resource for complex physiologic signals Circulation 2000 Volume + 101 Issue 23 pp. E215–E220. """ - def __init__(self, imagined=True): + def __init__(self, imagined=True, executed=False): super().__init__( subjects=list(range(1, 110)), sessions_per_subject=1, events=dict(left_hand=2, right_hand=3, feet=5, hands=4, rest=1), code='Physionet Motor Imagery', - interval=[0, 3], # website does not specify how long the trials are... + # website does not specify how long the trials are, but the + # interval between 2 trial is 4 second. + interval=[0, 3], paradigm='imagery', - doi='10.1109/TBME.2004.827072' - ) + doi='10.1109/TBME.2004.827072') + + self.feet_runs = [] + self.hand_runs = [] if imagined: - self.feet_runs = [6, 10, 14] - self.hand_runs = [4, 8, 12] - else: - self.feet_runs = [5, 9, 13] - self.hand_runs = [3, 7, 11] + self.feet_runs += [6, 10, 14] + self.hand_runs += [4, 8, 12] + + if executed: + self.feet_runs += [5, 9, 13] + self.hand_runs += [3, 7, 11] def _load_one_run(self, subject, run, preload=True): raw_fname = eegbci.load_data(subject, runs=[run], verbose='ERROR', base_url=BASE_URL)[0] raw = read_raw_edf(raw_fname, preload=preload, verbose='ERROR') raw.rename_channels(lambda x: x.strip('.')) + raw.set_montage(mne.channels.read_montage('standard_1005')) return raw def _get_single_subject_data(self, subject): """return data for a single subject""" data = {} + # baseline runs + data['baseline_eye_open'] = self._load_one_run(subject, 1) + data['baseline_eye_closed'] = self._load_one_run(subject, 2) + # hand runs for run in self.hand_runs: data['run_%d' % run] = self._load_one_run(subject, run) - # feet_runs runs + # feet runs for run in self.feet_runs: raw = self._load_one_run(subject, run) @@ -67,9 +127,7 @@ def data_path(self, subject, path=None, force_update=False, if subject not in self.subject_list: raise(ValueError("Invalid subject number")) - paths = [] - paths += 
eegbci.load_data(subject, runs=self.feet_runs,
-                                  verbose=verbose)
-        paths += eegbci.load_data(subject, runs=self.hand_runs,
-                                  verbose=verbose)
+        paths = eegbci.load_data(subject,
+                                 runs=[1, 2] + self.hand_runs + self.feet_runs,
+                                 verbose=verbose)
         return paths
diff --git a/moabb/datasets/upper_limb.py b/moabb/datasets/upper_limb.py
index a0c0dd48d..72a247a7a 100644
--- a/moabb/datasets/upper_limb.py
+++ b/moabb/datasets/upper_limb.py
@@ -9,14 +9,49 @@
 
 UPPER_LIMB_URL = 'https://zenodo.org/record/834976/files/'
 
 
-class UpperLimb(BaseDataset):
-    """Upper Limb motor dataset.
-
-    Upper limb dataset :
-    http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0182578
-
-    Consist in 6 upper limb movement, recoded over 2 sessions.
-    The first session is motor execution, the second session is imagination.
+class Ofner2017(BaseDataset):
+    """Motor Imagery dataset from Ofner et al 2017.
+
+    Upper limb Motor imagery dataset from the paper [1]_.
+
+    **Dataset description**
+
+    We recruited 15 healthy subjects aged between 22 and 40 years with a mean
+    age of 27 years (standard deviation 5 years). Nine subjects were female,
+    and all the subjects except s1 were right-handed.
+
+    We measured each subject in two sessions on two different days, which were
+    not separated by more than one week. In the first session the subjects
+    performed ME, and MI in the second session. The subjects performed six
+    movement types which were the same in both sessions and comprised of
+    elbow flexion/extension, forearm supination/pronation and hand open/close;
+    all with the right upper limb. All movements started at a
+    neutral position: the hand half open, the lower arm extended to 120
+    degree and in a neutral rotation, i.e. thumb on the inner side.
+    Additionally to the movement classes, a rest class was recorded in which
+    subjects were instructed to avoid any movement and to stay in the starting
+    position. In the ME session, we instructed subjects to execute sustained
+    movements. 
In the MI session, we asked subjects to perform kinesthetic MI + of the movements done in the ME session (subjects performed one ME run + immediately before the MI session to support kinesthetic MI). + + The paradigm was trial-based and cues were displayed on a computer screen + in front of the subjects, Fig 2 shows the sequence of the paradigm. + At second 0, a beep sounded and a cross popped up on the computer screen + (subjects were instructed to fixate their gaze on the cross). Afterwards, + at second 2, a cue was presented on the computer screen, indicating the + required task (one out of six movements or rest) to the subjects. At the + end of the trial, subjects moved back to the starting position. In every + session, we recorded 10 runs with 42 trials per run. We presented 6 + movement classes and a rest class and recorded 60 trials per class in a + session. + + References + ---------- + .. [1] Ofner, P., Schwarz, A., Pereira, J. and Müller-Putz, G.R., 2017. + Upper limb movements can be decoded from the time-domain of + low-frequency EEG. PloS one, 12(8), p.e0182578. + https://doi.org/10.1371/journal.pone.0182578 """ @@ -36,8 +71,8 @@ def __init__(self, imagined=True, executed=False): subjects=list(range(1, 16)), sessions_per_subject=n_sessions, events=event_id, - code='Upper Limb Imagery', - interval=[2, 5], # according to paper 2-5 + code='Ofner2017', + interval=[2, 5], # according to paper 2-5 paradigm='imagery', doi='10.1371/journal.pone.0182578') @@ -86,9 +121,11 @@ def data_path(self, subject, path=None, force_update=False, else: sessions = [session] + # FIXME check the value are in V and not uV. 
for session in sessions: for run in range(1, 11): - url = f"{UPPER_LIMB_URL}motor{session}_subject{subject}_run{run}.gdf" + url = (f"{UPPER_LIMB_URL}motor{session}_subject{subject}" + + f"_run{run}.gdf") p = dl.data_path(url, 'UPPERLIMB', path, force_update, update_path, verbose) paths.append(p) diff --git a/moabb/datasets/utils.py b/moabb/datasets/utils.py index e30af833a..5c018ec41 100644 --- a/moabb/datasets/utils.py +++ b/moabb/datasets/utils.py @@ -3,7 +3,6 @@ ''' import inspect -import numpy as np import moabb.datasets as db from moabb.datasets.base import BaseDataset @@ -15,21 +14,39 @@ def dataset_search(paradigm, multi_session=False, events=None, has_all_events=False, total_classes=None, interval=None, - min_subjects=1, channels=[]): + min_subjects=1, channels=()): ''' Function that returns a list of datasets that match given criteria. Valid criteria are: + Parameters + ---------- + paradigm: str + 'imagery','p300',(more to come) + + multi_session: bool + if True only returns datasets with more than one session per subject. + If False return all + events: list of strings - total_classes: int or None, total number of classes (returns all if None), will either truncate or choose - from events. Defaults to 100 to keep all classes. - has_all_events: bool, skip datasets that don't have all events in events - multi_session: bool, if True only returns datasets with more than one - session per subject. If False return all - paradigm: 'imagery','p300',(more to come) - interval: Length of motor imagery interval, in seconds. Only used in imagery paradigm - min_subjects: int, minimum subjects in dataset - channels: list or set of channels + events to select + + has_all_events: bool + skip datasets that don't have all events in events + + total_classes: int or None + total number of classes (returns all if None) + will either truncate or choose rom events. + + interval: + Length of motor imagery interval, in seconds. 
Only used in imagery + paradigm + + min_subjects: int, + minimum subjects in dataset + + channels: list of str + list or set of channels ''' channels = set(channels) @@ -76,7 +93,8 @@ def dataset_search(paradigm, multi_session=False, events=None, else: if has_all_events: skip_dataset = True - # don't want to use datasets with less than total number of labels + # don't want to use datasets with less than total number of + # labels if n_classes is not None: if n_events < n_classes: skip_dataset = True @@ -118,7 +136,7 @@ def find_intersecting_channels(datasets, verbose=False): dset_chans.append(processed) keep_datasets.append(d) else: - print('Dataset {:s} has no recognizable EEG channels'.format(type(d).__name__)) + print('Dataset {:s} has no recognizable EEG channels'.format(type(d).__name__)) # noqa for d in dset_chans: allchans.intersection_update(d) allchans = [s.replace('Z', 'z') for s in allchans] diff --git a/moabb/tests/download.py b/moabb/tests/download.py index b37ef54e8..e680af162 100644 --- a/moabb/tests/download.py +++ b/moabb/tests/download.py @@ -1,13 +1,14 @@ ''' Tests to ensure that datasets download correctly ''' -from moabb.datasets.gigadb import GigaDbMI +from moabb.datasets.gigadb import Cho2017 from moabb.datasets.alex_mi import AlexMI from moabb.datasets.physionet_mi import PhysionetMI -from moabb.datasets.bnci import BNCI2014001, BNCI2014002, BNCI2014004, BNCI2015001, BNCI2015004 +from moabb.datasets.bnci import (BNCI2014001, BNCI2014002, BNCI2014004, + BNCI2015001, BNCI2015004) from moabb.datasets.openvibe_mi import OpenvibeMI -from moabb.datasets.bbci_eeg_fnirs import BBCIEEGfNIRS -from moabb.datasets.upper_limb import UpperLimb +from moabb.datasets.bbci_eeg_fnirs import Shin2017A, Shin2017B +from moabb.datasets.upper_limb import Ofner2017 import unittest import mne @@ -37,8 +38,8 @@ def run_dataset(self, dataset): for _, raw in runs.items(): self.assertTrue(isinstance(raw, mne.io.BaseRaw)) - def test_gigadb(self): - 
self.run_dataset(GigaDbMI) + def test_cho2017(self): + self.run_dataset(Cho2017) def test_bnci_1401(self): self.run_dataset(BNCI2014001) @@ -65,10 +66,11 @@ def test_physionet(self): self.run_dataset(PhysionetMI) def test_eegfnirs(self): - self.run_dataset(BBCIEEGfNIRS) + self.run_dataset(Shin2017A) + self.run_dataset(Shin2017B) def test_upper_limb(self): - self.run_dataset(UpperLimb) + self.run_dataset(Ofner2017) if __name__ == '__main__': From 737e744e0a48d9c203e06d636689986b1bc0ad59 Mon Sep 17 00:00:00 2001 From: Alexandre Barachant Date: Sun, 8 Apr 2018 17:50:15 -0400 Subject: [PATCH 20/21] small fix --- moabb/datasets/bnci.py | 4 ++-- moabb/paradigms/motor_imagery.py | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/moabb/datasets/bnci.py b/moabb/datasets/bnci.py index 1e833c54e..5a1c6b426 100644 --- a/moabb/datasets/bnci.py +++ b/moabb/datasets/bnci.py @@ -749,7 +749,7 @@ def __init__(self): class BNCI2014004(MNEBNCI): """BNCI 2014-004 Motor Imagery dataset. - Dataset B from BCI Competition 2008 [1]_. + Dataset B from BCI Competition 2008. **Dataset description** @@ -844,7 +844,6 @@ class BNCI2015001(MNEBNCI): The activity period, where the users received feedback, lasted from second 4 to 8. There was a random 2 to 3 s pause between the trials. - References ---------- @@ -918,6 +917,7 @@ class BNCI2015004(MNEBNCI): al. (2015) Individually Adapted Imagery Improves Brain-Computer Interface Performance in End-Users with Disability. PLoS ONE 10(5). https://doi.org/10.1371/journal.pone.0123727 + """ def __init__(self): diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 5285809ae..42672fce5 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -47,7 +47,7 @@ class BaseMotorImagery(BaseParadigm): If not None, resample the eeg data with the sampling rate provided. 
""" - def __init__(self, filters=[[7, 35]], events=None, tmin=0.0, tmax=None, + def __init__(self, filters=((7, 35)), events=None, tmin=0.0, tmax=None, channels=None, resample=None): super().__init__() self.filters = filters @@ -185,15 +185,15 @@ class SinglePass(BaseMotorImagery): """ def __init__(self, fmin=8, fmax=32, **kwargs): if 'filters' in kwargs.keys(): - raise(ValueError('MotorImagery does not take argument \'filters\'')) + raise(ValueError("MotorImagery does not take argument filters")) super().__init__(filters=[[fmin, fmax]], **kwargs) class FilterBank(BaseMotorImagery): """Filter Bank MI.""" - def __init__(self, filters=[[8, 12], [12, 16], [16, 20], [20, 24], - [24, 28], [28, 32]], **kwargs): + def __init__(self, filters=([8, 12], [12, 16], [16, 20], [20, 24], + [24, 28], [28, 32]), **kwargs): """init""" super().__init__(filters=filters, **kwargs) @@ -290,7 +290,7 @@ def used_events(self, dataset): break if len(out) < self.n_classes: raise ValueError("Dataset {} did not have enough events in {} to run analysis".format( - dataset.code, self.events)) + dataset.code, self.events)) return out @property From 05347fe6ea76fbcef7150283faead2315f479f54 Mon Sep 17 00:00:00 2001 From: Alexandre Barachant Date: Sun, 8 Apr 2018 17:56:10 -0400 Subject: [PATCH 21/21] fix tests --- moabb/paradigms/motor_imagery.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/moabb/paradigms/motor_imagery.py b/moabb/paradigms/motor_imagery.py index 42672fce5..88a123b99 100644 --- a/moabb/paradigms/motor_imagery.py +++ b/moabb/paradigms/motor_imagery.py @@ -47,7 +47,7 @@ class BaseMotorImagery(BaseParadigm): If not None, resample the eeg data with the sampling rate provided. """ - def __init__(self, filters=((7, 35)), events=None, tmin=0.0, tmax=None, + def __init__(self, filters=([7, 35],), events=None, tmin=0.0, tmax=None, channels=None, resample=None): super().__init__() self.filters = filters