forked from mne-tools/mne-python
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathlinear_model_patterns.py
More file actions
158 lines (133 loc) · 4.52 KB
/
linear_model_patterns.py
File metadata and controls
158 lines (133 loc) · 4.52 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
"""
.. _ex-linear-patterns:
===============================================================
Linear classifier on sensor data with plot patterns and filters
===============================================================
Here decoding, a.k.a. MVPA or supervised machine learning, is applied to M/EEG
data in sensor space. Fit a linear classifier with the LinearModel object
providing topographical patterns which are more neurophysiologically
interpretable :footcite:`HaufeEtAl2014` than the classifier filters (weight
vectors). The patterns explain how the MEG and EEG data were generated from
the discriminant neural sources which are extracted by the filters.
Note that patterns/filters in MEG data are more similar than in EEG data
because the noise is less spatially correlated in MEG than in EEG.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Romain Trachel <trachelr@gmail.com>
# Jean-Rémi King <jeanremi.king@gmail.com>
#
# License: BSD-3-Clause
# Copyright the MNE-Python contributors.
# %%
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
import mne
from mne import io
from mne.datasets import sample
# import a linear classifier from mne.decoding
from mne.decoding import (
LinearModel,
SpatialFilter,
Vectorizer,
get_spatial_filter_from_estimator,
)
print(__doc__)

data_path = sample.data_path()
sample_path = data_path / "MEG" / "sample"

# %%
# Set parameters

# Filtered sample recording and its companion event file.
raw_fname = sample_path / "sample_audvis_filt-0-40_raw.fif"
event_fname = sample_path / "sample_audvis_filt-0-40_raw-eve.fif"

# Epoch window (in seconds) and the two conditions we discriminate.
tmin, tmax = -0.1, 0.4
event_id = {"aud_l": 1, "vis_l": 3}

events = mne.read_events(event_fname)

# Load the continuous recording and band-pass it to the band of interest.
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(0.5, 25, fir_design="firwin")

# Cut the recording into epochs around each event.
epochs = mne.Epochs(
    raw, events, event_id, tmin, tmax, proj=True, decim=2, baseline=None, preload=True
)
del raw  # the continuous data are no longer needed

# One class label per epoch (last column of the events array).
labels = epochs.events[:, -1]

# Keep only the (good) MEG channels and flatten each epoch into one
# feature vector spanning all channels and time points.
meg_epochs = epochs.copy().pick(picks="meg", exclude="bads")
meg_data = meg_epochs.get_data(copy=False).reshape(len(labels), -1)
# %%
# Decoding in sensor space using a LogisticRegression classifier
# --------------------------------------------------------------
# Z-score the vectorized MEG data, then fit a logistic regression wrapped in
# ``LinearModel`` so that, besides the classifier filters (weight vectors),
# the neurophysiologically interpretable patterns are computed during the fit.
scaler = StandardScaler()
X = scaler.fit_transform(meg_data)

# liblinear is faster than lbfgs
model = LinearModel(LogisticRegression(solver="liblinear"))
model.fit(X, labels)

# The model was fit on Z-scored features: invert that normalization so the
# coefficients live in the original physical units, then undo the
# vectorization by reshaping the flat vector back to (times, channels).
n_meg_channels = len(meg_epochs.ch_names)
coefs = {}
for kind, flat_coef in (("patterns", model.patterns_), ("filters", model.filters_)):
    unscaled = scaler.inverse_transform([flat_coef])[0]
    coefs[kind] = unscaled.reshape(n_meg_channels, -1).T

# Now we can instantiate the visualization container
spf = SpatialFilter(info=meg_epochs.info, **coefs)

# Plot patterns and filters with the same settings, titling each figure.
for plot, title in ((spf.plot_patterns, "MEG patterns"), (spf.plot_filters, "MEG filters")):
    fig = plot(
        # automatically select which components to show
        components="auto",
        # filters and patterns correspond to actual times, so align them
        tmin=epochs.tmin,
        units="fT",  # physical units — we inverted the scaling above
        show=False,  # deferred so we can set the title below
        name_format=None,  # label topomaps with actual times
    )
    fig.suptitle(title)
# %%
# Let's do the same on EEG data using a scikit-learn pipeline
# NOTE: ``pick`` operates in place, so ``epochs`` holds only EEG channels
# from here on (and ``epochs.info`` below is EEG-only).
X = epochs.pick(picks="eeg", exclude="bads")
y = epochs.events[:, 2]

# One pipeline that sequentially:
clf = make_pipeline(
    Vectorizer(),  # 1) vectorizes across time and channels
    StandardScaler(),  # 2) normalizes features across trials
    LinearModel(  # 3) fits a logistic regression
        LogisticRegression(solver="liblinear")
    ),
)
clf.fit(X, y)

# Extract patterns/filters from the fitted pipeline, undoing the scaling.
spf = get_spatial_filter_from_estimator(
    clf, info=epochs.info, inverse_transform=True, step_name="linearmodel"
)

# Plot patterns and filters with the same settings, titling each figure.
for plot, title in ((spf.plot_patterns, "EEG patterns"), (spf.plot_filters, "EEG filters")):
    fig = plot(
        components="auto",
        tmin=epochs.tmin,
        units="uV",
        show=False,
        name_format=None,
    )
    fig.suptitle(title)
# %%
# References
# ----------
# .. footbibliography::