From 97b38c9a75c06ffaecf5eef588a689899f86ded2 Mon Sep 17 00:00:00 2001 From: Eric Larson Date: Tue, 14 Jun 2016 04:05:45 -0400 Subject: [PATCH] MRG: Revise filtering (#3277) * ENH: Add generic filter function * DOC: Minor fix [ci skip] * DOC: Add some tutorial materials [circle full] * FIX: Fix tests * STY: Fix plots [ci skip] * FIX: Minor fixes * FIX: Spelling * FIX: Minor fixes * FIX: Typos * FIX: Fix name --- Makefile | 3 +- circle.yml | 6 +- doc/conf.py | 7 +- doc/faq.rst | 3 +- doc/manual/cookbook.rst | 2 - doc/manual/io.rst | 2 +- doc/manual/source_localization/inverse.rst | 2 +- doc/python_reference.rst | 1 - doc/tutorials.rst | 11 + doc/whats_new.rst | 2 + mne/epochs.py | 47 ++- mne/filter.py | 131 +++++- mne/io/base.py | 104 ++--- mne/io/fiff/tests/test_raw_fiff.py | 121 +++--- mne/io/meas_info.py | 10 +- mne/stats/parametric.py | 12 +- .../plot_artifacts_correction_filtering.py | 8 +- tutorials/plot_artifacts_correction_ica.py | 3 +- tutorials/plot_background_filtering.py | 396 ++++++++++++++++++ tutorials/plot_brainstorm_auditory.py | 2 + tutorials/plot_eeg_erp.py | 1 + tutorials/plot_epochs_to_data_frame.py | 2 +- tutorials/plot_forward.py | 4 +- tutorials/plot_sensors_time_frequency.py | 2 +- ..._time_frequency_repeated_measures_anova.py | 29 +- 25 files changed, 713 insertions(+), 198 deletions(-) create mode 100644 tutorials/plot_background_filtering.py diff --git a/Makefile b/Makefile index 109cd03ccbe..9353cf1097c 100755 --- a/Makefile +++ b/Makefile @@ -5,8 +5,7 @@ PYTHON ?= python NOSETESTS ?= nosetests CTAGS ?= ctags -# The *.fif had to be there twice to be properly ignored (!) -CODESPELL_SKIPS ?= "*.fif,*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig" +CODESPELL_SKIPS ?= "*.fif,*.eve,*.gz,*.tgz,*.zip,*.mat,*.stc,*.label,*.w,*.bz2,*.annot,*.sulc,*.log,*.local-copy,*.orig_avg,*.inflated_avg,*.gii,*.pyc,*.doctree,*.pickle,*.inv,*.png,*.edf,*.touch,*.thickness,*.nofix,*.volume,*.defect_borders,*.mgh,lh.*,rh.*,COR-*,FreeSurferColorLUT.txt,*.examples,.xdebug_mris_calc,bad.segments,BadChannels,*.hist,empty_file,*.orig,*.js,*.map,*.ipynb" CODESPELL_DIRS ?= mne/ doc/ tutorials/ examples/ all: clean inplace test test-doc diff --git a/circle.yml b/circle.yml index 4d75e4605fb..1ecdf48db66 100644 --- a/circle.yml +++ b/circle.yml @@ -44,7 +44,7 @@ dependencies: override: - cd /home/ubuntu/mne-python && python setup.py develop; - - if [ "$CIRCLE_BRANCH" == "master" ] || [ "$CIRCLE_BRANCH" == "maint/0.12" ]; then + - if [ "$CIRCLE_BRANCH" == "master" ] || [ "$CIRCLE_BRANCH" == "maint/0.12" ] || [[ `git log -1 --pretty=%B` == *"[circle full]"* ]]; then mkdir -p ~/mne_data; python -c "import mne; mne.datasets._download_all_example_data()"; fi @@ -57,12 +57,12 @@ dependencies: test: override: - - if [ "$CIRCLE_BRANCH" == "master" ] || [ "$CIRCLE_BRANCH" == "maint/0.12" ]; then + - if [ "$CIRCLE_BRANCH" == "master" ] || [ "$CIRCLE_BRANCH" == "maint/0.12" ] || [[ `git log -1 --pretty=%B` == *"[circle full]"* ]]; then make test-doc; else cd doc && make html_dev-noplot; fi - - if [ "$CIRCLE_BRANCH" == "master" ]; then cd doc && make html_dev; fi: + - if [ "$CIRCLE_BRANCH" == "master" ] || [[ `git log -1 --pretty=%B` == *"[circle full]"* ]]; then cd doc && make html_dev; fi: timeout: 1500 - if [ 
"$CIRCLE_BRANCH" == "maint/0.12" ]; then cd doc && make html_stable; fi: timeout: 1500 diff --git a/doc/conf.py b/doc/conf.py index 481f1a0eb04..e5438cf4291 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -264,7 +264,11 @@ trim_doctests_flags = True # Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'http://docs.python.org/': None} +intersphinx_mapping = { + 'python': ('http://docs.python.org/', None), + 'numpy': ('http://docs.scipy.org/doc/numpy-dev/', None), + 'scipy': ('http://scipy.github.io/devdocs/', None), +} examples_dirs = ['../examples', '../tutorials'] gallery_dirs = ['auto_examples', 'auto_tutorials'] @@ -290,4 +294,5 @@ 'gallery_dirs': gallery_dirs, 'find_mayavi_figures': find_mayavi_figures, 'default_thumb_file': os.path.join('_static', 'mne_helmet.png'), + 'mod_example_dir': 'generated', } diff --git a/doc/faq.rst b/doc/faq.rst index 9424b1c17b8..469e669ceb1 100644 --- a/doc/faq.rst +++ b/doc/faq.rst @@ -84,8 +84,7 @@ If you want to write your own data to disk (e.g., subject behavioral scores), we strongly recommend using `h5io `_, which is based on the `HDF5 format `_ and -`h5py `_, -to save data in a fast, future-compatible, standard format. +h5py_, to save data in a fast, future-compatible, standard format. Resampling and decimating data diff --git a/doc/manual/cookbook.rst b/doc/manual/cookbook.rst index aca913189a7..fab76e1a745 100644 --- a/doc/manual/cookbook.rst +++ b/doc/manual/cookbook.rst @@ -384,8 +384,6 @@ ways: stationary with respect to background brain activity. This can also use :func:`mne.compute_raw_covariance`. -See :ref:`covariance` for more information. - .. _CIHCFJEI: Calculating the inverse operator diff --git a/doc/manual/io.rst b/doc/manual/io.rst index 16d61419bd6..3bfc8487e98 100644 --- a/doc/manual/io.rst +++ b/doc/manual/io.rst @@ -373,7 +373,7 @@ Arbitrary (e.g., simulated or manually read in) raw data can be constructed from memory by making use of :class:`mne.io.RawArray`, :class:`mne.EpochsArray` or :class:`mne.EvokedArray` in combination with :func:`mne.create_info`. -This functionality is illustrated in :ref:`example_io_plot_objects_from_arrays.py` . +This functionality is illustrated in :ref:`sphx_glr_auto_examples_io_plot_objects_from_arrays.py`. Using 3rd party libraries such as NEO (https://pythonhosted.org/neo/) in combination with these functions abundant electrophysiological file formats can be easily loaded into MNE. diff --git a/doc/manual/source_localization/inverse.rst b/doc/manual/source_localization/inverse.rst index 6e2d866f6dd..6e383e06177 100644 --- a/doc/manual/source_localization/inverse.rst +++ b/doc/manual/source_localization/inverse.rst @@ -184,7 +184,7 @@ Using the UNIX tools :ref:`mne_inverse_operator`, the values :math:`\varepsilon_k` can be adjusted with the regularization options ``--magreg`` , ``--gradreg`` , and ``--eegreg`` specified at the time of the inverse operator decomposition, see :ref:`inverse_operator`. The convenience script -:ref:`mne_do_inverse_solution` has the ``--magreg`` and ``--gradreg`` combined to +:ref:`mne_do_inverse_operator` has the ``--magreg`` and ``--gradreg`` combined to a single option, ``--megreg`` , see :ref:`CIHCFJEI`. Suggested range of values for :math:`\varepsilon_k` is :math:`0.05 \dotso 0.2`. 
diff --git a/doc/python_reference.rst b/doc/python_reference.rst index 649d69dc0be..c1cbea5ee3a 100644 --- a/doc/python_reference.rst +++ b/doc/python_reference.rst @@ -819,7 +819,6 @@ Functions that operate on ``np.ndarray`` objects: cwt_morlet dpss_windows morlet - multitaper_psd single_trial_power stft istft diff --git a/doc/tutorials.rst b/doc/tutorials.rst index e9c1e6dbca1..8805476c72e 100644 --- a/doc/tutorials.rst +++ b/doc/tutorials.rst @@ -50,6 +50,17 @@ For further reading: auto_tutorials/plot_python_intro.rst tutorials/seven_stories_about_mne.rst +.. container:: span box + + .. raw:: html + +

+    <h2>Background information</h2>

+ + .. toctree:: + :maxdepth: 1 + + auto_tutorials/plot_background_filtering.rst + .. container:: span box .. raw:: html diff --git a/doc/whats_new.rst b/doc/whats_new.rst index 375a425d9f3..affa65ff128 100644 --- a/doc/whats_new.rst +++ b/doc/whats_new.rst @@ -17,6 +17,8 @@ Changelog - Added the ability to decimate :class:`mne.Evoked` objects with :func:`mne.Evoked.decimate` by `Eric Larson`_ + - Add generic array-filtering function :func:`mne.filter.filter_` by `Eric Larson`_ + BUG ~~~ diff --git a/mne/epochs.py b/mne/epochs.py index 992b30972dc..9589f1ede20 100644 --- a/mne/epochs.py +++ b/mne/epochs.py @@ -1538,29 +1538,30 @@ def __getitem__(self, item): ----- Epochs can be accessed as ``epochs[...]`` in several ways: - 1. ``epochs[idx]``: Return ``Epochs`` object with a subset of - epochs (supports single index and python-style slicing). - - 2. ``epochs['name']``: Return ``Epochs`` object with a copy of the - subset of epochs corresponding to an experimental condition as - specified by 'name'. - - If conditions are tagged by names separated by '/' (e.g. - 'audio/left', 'audio/right'), and 'name' is not in itself an - event key, this selects every event whose condition contains - the 'name' tag (e.g., 'left' matches 'audio/left' and - 'visual/left'; but not 'audio_left'). Note that tags like - 'auditory/left' and 'left/auditory' will be treated the - same way when accessed using tags. - - 3. ``epochs[['name_1', 'name_2', ... ]]``: Return ``Epochs`` object - with a copy of the subset of epochs corresponding to multiple - experimental conditions as specified by - ``'name_1', 'name_2', ...`` . - - If conditions are separated by '/', selects every item containing - every list tag (e.g. ['audio', 'left'] selects 'audio/left' and - 'audio/center/left', but not 'audio/right'). + 1. ``epochs[idx]``: Return ``Epochs`` object with a subset of + epochs (supports single index and python-style slicing). + + 2. ``epochs['name']``: Return ``Epochs`` object with a copy of the + subset of epochs corresponding to an experimental condition as + specified by 'name'. + + If conditions are tagged by names separated by '/' (e.g. + 'audio/left', 'audio/right'), and 'name' is not in itself an + event key, this selects every event whose condition contains + the 'name' tag (e.g., 'left' matches 'audio/left' and + 'visual/left'; but not 'audio_left'). Note that tags like + 'auditory/left' and 'left/auditory' will be treated the + same way when accessed using tags. + + 3. ``epochs[['name_1', 'name_2', ... ]]``: Return ``Epochs`` object + with a copy of the subset of epochs corresponding to multiple + experimental conditions as specified by + ``'name_1', 'name_2', ...`` . + + If conditions are separated by '/', selects every item + containing every list tag (e.g. ['audio', 'left'] selects + 'audio/left' and 'audio/center/left', but not 'audio/right'). + """ data = self._data del self._data diff --git a/mne/filter.py b/mne/filter.py index 2401f85022b..1bb3c7a60ac 100644 --- a/mne/filter.py +++ b/mne/filter.py @@ -278,11 +278,10 @@ def _filter(x, Fs, freq, gain, filter_length='10s', picks=None, n_jobs=1, gain : 1d array Filter gain at frequency sampling points. filter_length : str (Default: '10s') | int | None - Length of the filter to use. If None or "len(x) < filter_length", - the filter length used is len(x). Otherwise, if int, overlap-add - filtering with a filter of the specified length in samples) is - used (faster for long signals). 
If str, a human-readable time in - units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted + Length of the filter to use. If None or ``len(x) < filter_length``, + the filter length used is ``len(x)``. If int, a filter of the + specified length in samples is used. If str, a human-readable time + in units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted to the shortest power-of-two length at least that duration. picks : array-like of int | None Indices to filter. If None all indices will be filtered. @@ -409,6 +408,7 @@ def _filtfilt(x, b, a, padlen, picks, n_jobs, copy): def _estimate_ringing_samples(b, a): """Helper function for determining IIR padding""" + # XXX Need to extend this to more than 1000 samples for long IIR filters! from scipy.signal import lfilter x = np.zeros(1000) x[0] = 1 @@ -572,6 +572,127 @@ def _check_method(method, iir_params, extra_types): return iir_params +@verbose +def filter_data(data, sfreq, l_freq, h_freq, picks=None, filter_length='10s', + l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, n_jobs=1, + method='fft', iir_params=None, copy=True, verbose=None): + """Filter a subset of channels. + + Applies a zero-phase low-pass, high-pass, band-pass, or band-stop + filter to the channels selected by ``picks``. + + ``l_freq`` and ``h_freq`` are the frequencies below which and above + which, respectively, to filter out of the data. Thus the uses are: + + * ``l_freq < h_freq``: band-pass filter + * ``l_freq > h_freq``: band-stop filter + * ``l_freq is not None and h_freq is None``: high-pass filter + * ``l_freq is None and h_freq is not None``: low-pass filter + + .. note:: If n_jobs > 1, more memory is required as + ``len(picks) * n_times`` additional time points need to + be temporaily stored in memory. + + Parameters + ---------- + data : ndarray, shape (n_channels, n_times) + The data to filter. + sfreq : float + The sample frequency in Hz. + l_freq : float | None + Low cut-off frequency in Hz. If None the data are only low-passed. + h_freq : float | None + High cut-off frequency in Hz. If None the data are only + high-passed. + picks : array-like of int | None + Indices of channels to filter. If None all channels will be + filtered. + filter_length : str (Default: '10s') | int | None + Length of the filter to use. If None or ``len(x) < filter_length``, + the filter length used is ``len(x)``. If int, a filter of the + specified length in samples is used. If str, a human-readable time + in units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted + to the shortest power-of-two length at least that duration. + Not used for 'iir' filters. + l_trans_bandwidth : float + Width of the transition band at the low cut-off frequency in Hz + (high pass or cutoff 1 in bandpass). Not used for 'iir' filters. + h_trans_bandwidth : float + Width of the transition band at the high cut-off frequency in Hz + (low pass or cutoff 2 in bandpass). Not used for 'iir' filters. + n_jobs : int | str + Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda + is installed properly, CUDA is initialized, and method='fft'. + method : str + 'fft' will use overlap-add FIR filtering, 'iir' will use IIR + forward-backward filtering (via filtfilt). + iir_params : dict | None + Dictionary of parameters to use for IIR filtering. + See mne.filter.construct_iir_filter for details. If iir_params + is None and method="iir", 4th order Butterworth will be used. + copy : bool + If True, a copy of x, filtered, is returned. Otherwise, it operates + on x in place. 
+ verbose : bool, str, int, or None + If not None, override default verbose level (see mne.verbose). + Defaults to self.verbose. + + Returns + ------- + data : ndarray, shape (n_channels, n_times) + The filtered data. + """ + if not isinstance(data, np.ndarray) or data.ndim != 2: + raise ValueError('data must be an array with two dimensions') + sfreq = float(sfreq) + if sfreq < 0: + raise ValueError('sfreq must be positive') + if h_freq is not None: + h_freq = float(h_freq) + if h_freq > (sfreq / 2.): + raise ValueError('h_freq (%s) must be less than the Nyquist ' + 'frequency %s' % (h_freq, sfreq / 2.)) + if l_freq is not None: + l_freq = float(l_freq) + if l_freq == 0: + l_freq = None + if l_freq is None and h_freq is not None: + logger.info('Low-pass filtering at %0.2g Hz' % h_freq) + low_pass_filter(data, sfreq, h_freq, + filter_length=filter_length, + trans_bandwidth=h_trans_bandwidth, method=method, + iir_params=iir_params, picks=picks, n_jobs=n_jobs, + copy=copy) + if l_freq is not None and h_freq is None: + logger.info('High-pass filtering at %0.2g Hz' % l_freq) + high_pass_filter( + data, sfreq, l_freq, filter_length=filter_length, + trans_bandwidth=l_trans_bandwidth, method=method, + iir_params=iir_params, picks=picks, n_jobs=n_jobs, copy=copy) + if l_freq is not None and h_freq is not None: + if l_freq < h_freq: + logger.info('Band-pass filtering from %0.2g - %0.2g Hz' + % (l_freq, h_freq)) + data = band_pass_filter( + data, sfreq, l_freq, h_freq, + filter_length=filter_length, + l_trans_bandwidth=l_trans_bandwidth, + h_trans_bandwidth=h_trans_bandwidth, + method=method, iir_params=iir_params, picks=picks, + n_jobs=n_jobs, copy=copy) + else: + logger.info('Band-stop filtering from %0.2g - %0.2g Hz' + % (h_freq, l_freq)) + data = band_stop_filter( + data, sfreq, h_freq, l_freq, + filter_length=filter_length, + l_trans_bandwidth=h_trans_bandwidth, + h_trans_bandwidth=l_trans_bandwidth, method=method, + iir_params=iir_params, picks=picks, n_jobs=n_jobs, + copy=copy) + return data + + @verbose def band_pass_filter(x, Fs, Fp1, Fp2, filter_length='10s', l_trans_bandwidth=0.5, h_trans_bandwidth=0.5, diff --git a/mne/io/base.py b/mne/io/base.py index 7bb8696e4b2..0610b99286b 100644 --- a/mne/io/base.py +++ b/mne/io/base.py @@ -29,8 +29,7 @@ write_complex64, write_complex128, write_int, write_id, write_string, write_name_list, _get_split_size) -from ..filter import (low_pass_filter, high_pass_filter, band_pass_filter, - notch_filter, band_stop_filter, resample, +from ..filter import (filter_data, notch_filter, resample, _resample_stim_channels) from ..fixes import in1d from ..parallel import parallel_func @@ -857,21 +856,18 @@ def filter(self, l_freq, h_freq, picks=None, filter_length='10s', Indices of channels to filter. If None only the data (MEG/EEG) channels will be filtered. filter_length : str (Default: '10s') | int | None - Length of the filter to use. If None or "len(x) < filter_length", - the filter length used is len(x). Otherwise, if int, overlap-add - filtering with a filter of the specified length in samples) is - used (faster for long signals). If str, a human-readable time in - units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted + Length of the filter to use. If None or ``len(x) < filter_length``, + the filter length used is ``len(x)``. If int, a filter of the + specified length in samples is used. If str, a human-readable time + in units of "s" or "ms" (e.g., "10s" or "5500ms") will be converted to the shortest power-of-two length at least that duration. 
Not used for 'iir' filters. l_trans_bandwidth : float Width of the transition band at the low cut-off frequency in Hz - (high pass or cutoff 1 in bandpass). Not used if 'order' is - specified in iir_params. + (high pass or cutoff 1 in bandpass). Not used for 'iir' filters. h_trans_bandwidth : float Width of the transition band at the high cut-off frequency in Hz - (low pass or cutoff 2 in bandpass). Not used if 'order' is - specified in iir_params. + (low pass or cutoff 2 in bandpass). Not used for 'iir' filters. n_jobs : int | str Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda is installed properly, CUDA is initialized, and method='fft'. @@ -896,78 +892,40 @@ def filter(self, l_freq, h_freq, picks=None, filter_length='10s', mne.Epochs.savgol_filter mne.io.Raw.notch_filter mne.io.Raw.resample + mne.filter.filter_data """ - fs = float(self.info['sfreq']) - if l_freq == 0: - l_freq = None - if h_freq is not None and h_freq > (fs / 2.): - h_freq = None - if l_freq is not None and not isinstance(l_freq, float): - l_freq = float(l_freq) - if h_freq is not None and not isinstance(h_freq, float): - h_freq = float(h_freq) _check_preload(self, 'raw.filter') + data_picks = _pick_data_or_ica(self.info) + update_info = False if picks is None: - picks = _pick_data_or_ica(self.info) + picks = data_picks + update_info = True # let's be safe. - if len(picks) < 1: + if len(picks) == 0: raise RuntimeError('Could not find any valid channels for ' 'your Raw object. Please contact the ' 'MNE-Python developers.') - - # update info if filter is applied to all data channels, - # and it's not a band-stop filter - if h_freq is not None: - if (l_freq is None or l_freq < h_freq) and \ - (self.info["lowpass"] is None or - h_freq < self.info['lowpass']): - self.info['lowpass'] = h_freq - if l_freq is not None: - if (h_freq is None or l_freq < h_freq) and \ - (self.info["highpass"] is None or - l_freq > self.info['highpass']): - self.info['highpass'] = l_freq - else: - if h_freq is not None or l_freq is not None: + elif h_freq is not None or l_freq is not None: + if in1d(data_picks, picks).all(): + update_info = True + else: logger.info('Filtering a subset of channels. 
The highpass and ' 'lowpass values in the measurement info will not ' 'be updated.') - - if l_freq is None and h_freq is not None: - logger.info('Low-pass filtering at %0.2g Hz' % h_freq) - low_pass_filter(self._data, fs, h_freq, - filter_length=filter_length, - trans_bandwidth=h_trans_bandwidth, method=method, - iir_params=iir_params, picks=picks, n_jobs=n_jobs, - copy=False) - if l_freq is not None and h_freq is None: - logger.info('High-pass filtering at %0.2g Hz' % l_freq) - high_pass_filter(self._data, fs, l_freq, - filter_length=filter_length, - trans_bandwidth=l_trans_bandwidth, method=method, - iir_params=iir_params, picks=picks, n_jobs=n_jobs, - copy=False) - if l_freq is not None and h_freq is not None: - if l_freq < h_freq: - logger.info('Band-pass filtering from %0.2g - %0.2g Hz' - % (l_freq, h_freq)) - self._data = band_pass_filter( - self._data, fs, l_freq, h_freq, - filter_length=filter_length, - l_trans_bandwidth=l_trans_bandwidth, - h_trans_bandwidth=h_trans_bandwidth, - method=method, iir_params=iir_params, picks=picks, - n_jobs=n_jobs, copy=False) - else: - logger.info('Band-stop filtering from %0.2g - %0.2g Hz' - % (h_freq, l_freq)) - self._data = band_stop_filter( - self._data, fs, h_freq, l_freq, - filter_length=filter_length, - l_trans_bandwidth=h_trans_bandwidth, - h_trans_bandwidth=l_trans_bandwidth, method=method, - iir_params=iir_params, picks=picks, n_jobs=n_jobs, - copy=False) + filter_data(self._data, self.info['sfreq'], l_freq, h_freq, picks, + filter_length, l_trans_bandwidth, h_trans_bandwidth, + n_jobs, method, iir_params, copy=False) + # update info if filter is applied to all data channels, + # and it's not a band-stop filter + if update_info: + if h_freq is not None and (l_freq is None or l_freq < h_freq) and \ + (self.info["lowpass"] is None or + h_freq < self.info['lowpass']): + self.info['lowpass'] = h_freq + if l_freq is not None and (h_freq is None or l_freq < h_freq) and \ + (self.info["highpass"] is None or + l_freq > self.info['highpass']): + self.info['highpass'] = l_freq return self @verbose diff --git a/mne/io/fiff/tests/test_raw_fiff.py b/mne/io/fiff/tests/test_raw_fiff.py index e37c40b7307..2578046bf5b 100644 --- a/mne/io/fiff/tests/test_raw_fiff.py +++ b/mne/io/fiff/tests/test_raw_fiff.py @@ -46,12 +46,10 @@ bad_file_wrong = op.join(base_dir, 'test_wrong_bads.txt') hp_fname = op.join(base_dir, 'test_chpi_raw_hp.txt') hp_fif_fname = op.join(base_dir, 'test_chpi_raw_sss.fif') -rng = np.random.RandomState(0) def test_fix_types(): - """Test fixing of channel types - """ + """Test fixing of channel types""" for fname, change in ((hp_fif_fname, True), (test_fif_fname, False), (ctf_fname, False)): raw = Raw(fname) @@ -75,8 +73,7 @@ def test_fix_types(): def test_concat(): - """Test RawFIF concatenation - """ + """Test RawFIF concatenation""" # we trim the file to save lots of memory and some time tempdir = _TempDir() raw = read_raw_fif(test_fif_fname) @@ -89,8 +86,7 @@ def test_concat(): @testing.requires_testing_data def test_hash_raw(): - """Test hashing raw objects - """ + """Test hashing raw objects""" raw = read_raw_fif(fif_fname) assert_raises(RuntimeError, raw.__hash__) raw = Raw(fif_fname).crop(0, 0.5, copy=False) @@ -107,8 +103,7 @@ def test_hash_raw(): @testing.requires_testing_data def test_maxshield(): - """Test maxshield warning - """ + """Test maxshield warning""" with warnings.catch_warnings(record=True) as w: warnings.simplefilter('always') Raw(ms_fname, allow_maxshield=True) @@ -118,8 +113,7 @@ def test_maxshield(): 
@testing.requires_testing_data def test_subject_info(): - """Test reading subject information - """ + """Test reading subject information""" tempdir = _TempDir() raw = Raw(fif_fname).crop(0, 1, copy=False) assert_true(raw.info['subject_info'] is None) @@ -141,8 +135,7 @@ def test_subject_info(): @testing.requires_testing_data def test_copy_append(): - """Test raw copying and appending combinations - """ + """Test raw copying and appending combinations""" raw = Raw(fif_fname, preload=True).copy() raw_full = Raw(fif_fname) raw_full.append(raw) @@ -153,8 +146,7 @@ def test_copy_append(): @slow_test @testing.requires_testing_data def test_rank_estimation(): - """Test raw rank estimation - """ + """Test raw rank estimation""" iter_tests = itt.product( [fif_fname, hp_fif_fname], # sss ['norm', dict(mag=1e11, grad=1e9, eeg=1e5)] @@ -196,8 +188,7 @@ def test_rank_estimation(): @testing.requires_testing_data def test_output_formats(): - """Test saving and loading raw data using multiple formats - """ + """Test saving and loading raw data using multiple formats""" tempdir = _TempDir() formats = ['short', 'int', 'single', 'double'] tols = [1e-4, 1e-7, 1e-7, 1e-15] @@ -227,8 +218,7 @@ def _compare_combo(raw, new, times, n_times): @slow_test @testing.requires_testing_data def test_multiple_files(): - """Test loading multiple files simultaneously - """ + """Test loading multiple files simultaneously""" # split file tempdir = _TempDir() raw = Raw(fif_fname).crop(0, 10, copy=False) @@ -354,8 +344,7 @@ def test_multiple_files(): @testing.requires_testing_data def test_split_files(): - """Test writing and reading of split raw files - """ + """Test writing and reading of split raw files""" tempdir = _TempDir() raw_1 = Raw(fif_fname, preload=True) # Test a very close corner case @@ -442,8 +431,7 @@ def test_split_files(): def test_load_bad_channels(): - """Test reading/writing of bad channels - """ + """Test reading/writing of bad channels""" tempdir = _TempDir() # Load correctly marked file (manually done in mne_process_raw) raw_marked = Raw(fif_bad_marked_fname) @@ -485,8 +473,8 @@ def test_load_bad_channels(): @slow_test @testing.requires_testing_data def test_io_raw(): - """Test IO for raw data (Neuromag + CTF + gz) - """ + """Test IO for raw data (Neuromag + CTF + gz)""" + rng = np.random.RandomState(0) tempdir = _TempDir() # test unicode io for chars in [b'\xc3\xa4\xc3\xb6\xc3\xa9', b'a']: @@ -600,8 +588,7 @@ def test_io_raw(): @testing.requires_testing_data def test_io_complex(): - """Test IO with complex data types - """ + """Test IO with complex data types""" rng = np.random.RandomState(0) tempdir = _TempDir() dtypes = [np.complex64, np.complex128] @@ -640,8 +627,7 @@ def test_io_complex(): @testing.requires_testing_data def test_getitem(): - """Test getitem/indexing of Raw - """ + """Test getitem/indexing of Raw""" for preload in [False, True, 'memmap.dat']: raw = Raw(fif_fname, preload=preload) data, times = raw[0, :] @@ -663,8 +649,7 @@ def test_getitem(): @testing.requires_testing_data def test_proj(): - """Test SSP proj operations - """ + """Test SSP proj operations""" tempdir = _TempDir() for proj in [True, False]: raw = Raw(fif_fname, preload=False, proj=proj) @@ -734,9 +719,9 @@ def test_proj(): @testing.requires_testing_data def test_preload_modify(): - """Test preloading and modifying data - """ + """Test preloading and modifying data""" tempdir = _TempDir() + rng = np.random.RandomState(0) for preload in [False, True, 'memmap.dat']: raw = Raw(fif_fname, preload=preload) @@ -765,8 +750,7 @@ 
def test_preload_modify(): @slow_test @testing.requires_testing_data def test_filter(): - """Test filtering (FIR and IIR) and Raw.apply_function interface - """ + """Test filtering (FIR and IIR) and Raw.apply_function interface""" raw = Raw(fif_fname).crop(0, 7, copy=False) raw.load_data() sig_dec = 11 @@ -834,6 +818,38 @@ def test_filter(): data, _ = raw[picks, :] assert_array_almost_equal(data, data_notch, sig_dec_notch_fit) + # filter should set the "lowpass" and "highpass" parameters + raw = RawArray(np.random.randn(3, 1000), + create_info(3, 1000., ['eeg'] * 2 + ['stim'])) + raw.info['lowpass'] = raw.info['highpass'] = None + for kind in ('none', 'lowpass', 'highpass', 'bandpass', 'bandstop'): + print(kind) + h_freq = l_freq = None + if kind in ('lowpass', 'bandpass'): + h_freq = 70 + if kind in ('highpass', 'bandpass'): + l_freq = 30 + if kind == 'bandstop': + l_freq, h_freq = 70, 30 + assert_true(raw.info['lowpass'] is None) + assert_true(raw.info['highpass'] is None) + kwargs = dict(l_trans_bandwidth=20, h_trans_bandwidth=20, + filter_length=200) + raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(1), + **kwargs) + assert_true(raw.info['lowpass'] is None) + assert_true(raw.info['highpass'] is None) + raw_filt = raw.copy().filter(l_freq, h_freq, **kwargs) + wanted_h = h_freq if kind != 'bandstop' else None + wanted_l = l_freq if kind != 'bandstop' else None + assert_equal(raw_filt.info['lowpass'], wanted_h) + assert_equal(raw_filt.info['highpass'], wanted_l) + # Using all data channels should still set the params (GH#3259) + raw_filt = raw.copy().filter(l_freq, h_freq, picks=np.arange(2), + **kwargs) + assert_equal(raw_filt.info['lowpass'], wanted_h) + assert_equal(raw_filt.info['highpass'], wanted_l) + def test_filter_picks(): """Test filtering default channel picks""" @@ -864,8 +880,7 @@ def test_filter_picks(): @testing.requires_testing_data def test_crop(): - """Test cropping raw files - """ + """Test cropping raw files""" # split a concatenated file to test a difficult case raw = concatenate_raws([Raw(f) for f in [fif_fname, fif_fname]]) split_size = 10. 
# in seconds @@ -915,8 +930,7 @@ def test_crop(): @testing.requires_testing_data def test_resample(): - """Test resample (with I/O and multiple files) - """ + """Test resample (with I/O and multiple files)""" tempdir = _TempDir() raw = Raw(fif_fname).crop(0, 3, copy=False) raw.load_data() @@ -1029,8 +1043,7 @@ def test_resample(): @testing.requires_testing_data def test_hilbert(): - """Test computation of analytic signal using hilbert - """ + """Test computation of analytic signal using hilbert""" raw = Raw(fif_fname, preload=True) picks_meg = pick_types(raw.info, meg=True, exclude='bads') picks = picks_meg[:4] @@ -1059,8 +1072,7 @@ def test_hilbert(): @testing.requires_testing_data def test_raw_copy(): - """Test Raw copy - """ + """Test Raw copy""" raw = Raw(fif_fname, preload=True) data, _ = raw[:, :] copied = raw.copy() @@ -1093,8 +1105,8 @@ def test_to_data_frame(): def test_add_channels(): - """Test raw splitting / re-appending channel types - """ + """Test raw splitting / re-appending channel types""" + rng = np.random.RandomState(0) raw = Raw(test_fif_fname).crop(0, 1, copy=False).load_data() raw_nopre = Raw(test_fif_fname, preload=False) raw_eeg_meg = raw.copy().pick_types(meg=True, eeg=True) @@ -1116,7 +1128,7 @@ def test_add_channels(): # Testing force updates raw_arr_info = create_info(['1', '2'], raw_meg.info['sfreq'], 'eeg') orig_head_t = raw_arr_info['dev_head_t'] - raw_arr = np.random.randn(2, raw_eeg.n_times) + raw_arr = rng.randn(2, raw_eeg.n_times) raw_arr = RawArray(raw_arr, raw_arr_info) # This should error because of conflicts in Info assert_raises(ValueError, raw_meg.copy().add_channels, [raw_arr]) @@ -1166,15 +1178,14 @@ def test_save(): @testing.requires_testing_data def test_with_statement(): - """ Test with statement """ + """Test with statement""" for preload in [True, False]: with Raw(fif_fname, preload=preload) as raw_: print(raw_) def test_compensation_raw(): - """Test Raw compensation - """ + """Test Raw compensation""" tempdir = _TempDir() raw1 = Raw(ctf_comp_fname, compensation=None) assert_true(raw1.comp is None) @@ -1213,8 +1224,7 @@ def test_compensation_raw(): @requires_mne def test_compensation_raw_mne(): - """Test Raw compensation by comparing with MNE - """ + """Test Raw compensation by comparing with MNE""" tempdir = _TempDir() def compensate_mne(fname, grad): @@ -1239,8 +1249,7 @@ def compensate_mne(fname, grad): @testing.requires_testing_data def test_drop_channels_mixin(): - """Test channels-dropping functionality - """ + """Test channels-dropping functionality""" raw = Raw(fif_fname, preload=True) drop_ch = raw.ch_names[:3] ch_names = raw.ch_names[3:] @@ -1259,8 +1268,7 @@ def test_drop_channels_mixin(): @testing.requires_testing_data def test_pick_channels_mixin(): - """Test channel-picking functionality - """ + """Test channel-picking functionality""" # preload is True raw = Raw(fif_fname, preload=True) @@ -1285,8 +1293,7 @@ def test_pick_channels_mixin(): @testing.requires_testing_data def test_equalize_channels(): - """Test equalization of channels - """ + """Test equalization of channels""" raw1 = Raw(fif_fname, preload=True) raw2 = raw1.copy() diff --git a/mne/io/meas_info.py b/mne/io/meas_info.py index f0a6d50bd04..b29f80aa851 100644 --- a/mne/io/meas_info.py +++ b/mne/io/meas_info.py @@ -1363,7 +1363,7 @@ def create_info(ch_names, sfreq, ch_types=None, montage=None): ---------- ch_names : list of str | int Channel names. If an int, a list of channel names will be created - from range(ch_names) + from :func:`range(ch_names) `. 
sfreq : float Sample rate of the data. ch_types : list of str | str @@ -1379,6 +1379,11 @@ def create_info(ch_names, sfreq, ch_types=None, montage=None): can be specifed and applied to the info. See also the documentation of :func:`mne.channels.read_montage` for more information. + Returns + ------- + info : instance of Info + The measurement info. + Notes ----- The info dictionary will be sparsely populated to enable functionality @@ -1402,7 +1407,8 @@ def create_info(ch_names, sfreq, ch_types=None, montage=None): if isinstance(ch_types, string_types): ch_types = [ch_types] * nchan if len(ch_types) != nchan: - raise ValueError('ch_types and ch_names must be the same length') + raise ValueError('ch_types and ch_names must be the same length ' + '(%s != %s)' % (len(ch_types), nchan)) info = _empty_info(sfreq) info['meas_date'] = np.array([0, 0], np.int32) loc = np.concatenate((np.zeros(3), np.eye(3).ravel())).astype(np.float32) diff --git a/mne/stats/parametric.py b/mne/stats/parametric.py index 37a8e5bf761..49a6bab7ddd 100644 --- a/mne/stats/parametric.py +++ b/mne/stats/parametric.py @@ -198,11 +198,13 @@ def f_threshold_mway_rm(n_subjects, factor_levels, effects='A*B', effects : str A string denoting the effect to be returned. The following mapping is currently supported: - 'A': main effect of A - 'B': main effect of B - 'A:B': interaction effect - 'A+B': both main effects - 'A*B': all three effects + + * ``'A'``: main effect of A + * ``'B'``: main effect of B + * ``'A:B'``: interaction effect + * ``'A+B'``: both main effects + * ``'A*B'``: all three effects + pvalue : float The p-value to be thresholded. diff --git a/tutorials/plot_artifacts_correction_filtering.py b/tutorials/plot_artifacts_correction_filtering.py index af7dda7e568..49f30eb414a 100644 --- a/tutorials/plot_artifacts_correction_filtering.py +++ b/tutorials/plot_artifacts_correction_filtering.py @@ -1,8 +1,8 @@ """ .. _tut_artifacts_filter: -Filtering and Resampling -======================== +Filtering and resampling data +============================= Certain artifacts are restricted to certain frequencies and can therefore be fixed by filtering. An artifact that typically affects only some @@ -13,6 +13,10 @@ geographical location). Some peaks may also be present at the harmonic frequencies, i.e. the integer multiples of the power-line frequency, e.g. 100Hz, 150Hz, ... (or 120Hz, 180Hz, ...). + +This tutorial covers some basics of how to filter data in MNE-Python. +For more in-depth information about filter design in general and in +MNE-Python in particular, check out _tut_background_filtering_. """ import numpy as np diff --git a/tutorials/plot_artifacts_correction_ica.py b/tutorials/plot_artifacts_correction_ica.py index 03c9bf95084..8fe0078d597 100644 --- a/tutorials/plot_artifacts_correction_ica.py +++ b/tutorials/plot_artifacts_correction_ica.py @@ -91,7 +91,8 @@ # That component is also showing a prototypical average vertical EOG time # course. # -# Pay attention to the labels, a customized read-out of the ica.labels_ +# Pay attention to the labels, a customized read-out of the +# :attr:`ica.labels_ ` print(ica.labels_) ############################################################################### diff --git a/tutorials/plot_background_filtering.py b/tutorials/plot_background_filtering.py new file mode 100644 index 00000000000..146a9113ae9 --- /dev/null +++ b/tutorials/plot_background_filtering.py @@ -0,0 +1,396 @@ +# -*- coding: utf-8 -*- +r""" +.. 
_tut_background_filtering: + +Background information on filtering +=================================== + +Here we give some background information on filtering in general, +and how it is done in MNE-Python in particular. +Recommended reading for practical applications of digital +filter design can be found in [1]_. To see how to use the default filters +in MNE-Python on actual data, see the :ref:`tut_artifacts_filter` tutorial. + +.. contents:: + +Filtering basics +---------------- +Let's get some of the basic math down. In the frequency domain, digital +filters have a transfer function that is given by: + +.. math:: + + H(z) &= \frac{b_0 + b_1 z^{-1} + b_2 z^{-2} + ... + b_M z^{-M}} + {1 + a_1 z^{-1} + a_2 z^{-2} + ... + a_N z^{-M}} \\ + &= \frac{\sum_0^Mb_kz^{-k}}{\sum_1^Na_kz^{-k}} + +In the time domain, the numerator coefficients :math:`b_k` and denominator +coefficients :math:`a_k` can be used to obtain our output data +:math:`y(n)` in terms of our input data :math:`x(n)` as: + +.. math:: + :label: summations + + y(n) &= b_0 x(n) + b_1 x(n-1) + ... + b_M x(n-M) + - a_1 y(n-1) - a_2 y(n - 2) - ... - a_N y(n - N)\\ + &= \sum_0^M b_k x(n-k) - \sum_1^N a_k y(n-k) + +In other words, the output at time :math:`n` is determined by a sum over: + + 1. The numerator coefficients :math:`b_k`, which get multiplied by + the previous input :math:`x(n-k)` values, and + 2. The denominator coefficients :math:`a_k`, which get multiplied by + the previous output :math:`y(n-k)` values. + +Note that these summations in :eq:`summations` correspond nicely to +(1) a weighted `moving average`_ and (2) an autoregression_. + +Filters are broken into two classes: FIR_ (finite impulse response) and +IIR_ (infinite impulse response) based on these coefficients. +FIR filters use a finite number of numerator +coefficients :math:`b_k` (:math:`\forall k, a_k=0`), and thus each output +value of :math:`y(n)` depends only on the :math:`M` previous input values. +IIR filters depend on the previous input and output values, and thus can have +effectively infinite impulse responses. + +As outlined in [1]_, FIR and IIR have different tradeoffs: + + * A causal FIR filter can be linear-phase -- i.e., the same time delay + across all frequencies -- whereas a causal IIR filter cannot. The phase + and group delay characteristics are also usually better for FIR filters. + * IIR filters can generally have a steeper cutoff than an FIR filter of + equivalent order. + * IIR filters are generally less numerically stable, in part due to + accumulating error (due to its recursive calculations). + +When designing a filter (FIR or IIR), there are always tradeoffs that +need to be considered, including but not limited to: + + 1. Ripple in the pass-band + 2. Attenuation of the stop-band + 3. Steepness of roll-off + 4. Filter order (i.e., length for FIR filters) + 5. Time-domain ringing + +In general, the sharper something is in frequency, the broader it is in time, +and vice-versa. This is a fundamental time-frequency tradeoff, and it will +show up below. + +Here we will focus first on FIR filters, which are the default filters used by +MNE-Python. +""" + +############################################################################### +# Designing FIR filters +# --------------------- +# Here we'll try designing a low-pass filter, and look at trade-offs in terms +# of time- and frequency-domain filter characteristics. Later, in +# :ref:`effect_on_signals`, we'll look at how such filters can affect +# signals when they are used. 
+# +# First let's import some useful tools for filtering, and set some default +# values for our data that are reasonable for M/EEG data. + +import numpy as np +from scipy import signal, fftpack +import matplotlib.pyplot as plt + +from mne.time_frequency.tfr import morlet + +import mne + +sfreq = 1000. +f_p = 40. +ylim = [-60, 10] # for dB plots +xlim = [2, sfreq / 2.] +blue = '#1f77b4' + +############################################################################### +# Take for example an ideal low-pass filter, which would give a value of 1 in +# the pass-band (up to frequency :math:`f_p`) and a value of 0 in the stop-band +# (down to frequency :math:`f_s`) such that :math:`f_p=f_s=40` Hz here +# (shown to a lower limit of -60 dB for simplicity): + +nyq = sfreq / 2. # the Nyquist frequency is half our sample rate +freq = [0, f_p, f_p, nyq] +gain = [1, 1, 0, 0] + + +def box_off(ax): + ax.grid(zorder=0) + for key in ('top', 'right'): + ax.spines[key].set_visible(False) + + +def plot_ideal(freq, gain, ax): + freq = np.maximum(freq, xlim[0]) + xs, ys = list(), list() + for ii in range(len(freq)): + xs.append(freq[ii]) + ys.append(ylim[0]) + if ii < len(freq) - 1 and gain[ii] != gain[ii + 1]: + xs += [freq[ii], freq[ii + 1]] + ys += [ylim[1]] * 2 + gain = 10 * np.log10(np.maximum(gain, 10 ** (ylim[0] / 10.))) + ax.fill_between(xs, ylim[0], ys, color='r', alpha=0.1) + ax.semilogx(freq, gain, 'r--', alpha=0.5, linewidth=4, zorder=3) + xticks = [1, 2, 4, 10, 20, 40, 100, 200, 400] + ax.set(xlim=xlim, ylim=ylim, xticks=xticks, xlabel='Frequency (Hz)', + ylabel='Amplitude (dB)') + ax.set(xticklabels=xticks) + box_off(ax) + +half_height = np.array(plt.rcParams['figure.figsize']) * [1, 0.5] +ax = plt.subplots(1, figsize=half_height)[1] +plot_ideal(freq, gain, ax) +ax.set(title='Ideal %s Hz lowpass' % f_p) +mne.viz.tight_layout() +plt.show() + +############################################################################### +# This filter hypothetically achieves zero ripple in the frequency domain, +# perfect attenuation, and perfect steepness. However, due to the discontunity +# in the frequency response, the filter would require infinite ringing in the +# time domain (i.e., infinite order) to be realized. Another way to think of +# this is that a rectangular window in frequency is actually sinc_ function +# in time, which requires an infinite number of samples, and thus infinite +# time, to represent. So although this filter has ideal frequency suppression, +# it has poor time-domain characteristics. +# +# Let's try to naïvely make a brick-wall filter of length 0.1 sec, and look +# at the filter itself in the time domain and the frequency domain: + +n = int(round(0.1 * sfreq)) + 1 +t = np.arange(-n // 2, n // 2) / sfreq # center our sinc +h = np.sinc(2 * f_p * t) / (4 * np.pi) + + +def plot_filter(h, title, freq, gain, show=True): + fig, axs = plt.subplots(2) + t = np.arange(len(h)) / sfreq + axs[0].plot(t, h, color=blue) + axs[0].set(xlim=t[[0, -1]], xlabel='Time (sec)', + ylabel='Amplitude h(n)', title=title) + box_off(axs[0]) + f, H = signal.freqz(h) + f *= sfreq / (2 * np.pi) + axs[1].semilogx(f, 10 * np.log10((H * H.conj()).real), color=blue, + linewidth=2, zorder=4) + plot_ideal(freq, gain, axs[1]) + mne.viz.tight_layout() + if show: + plt.show() + +plot_filter(h, 'Sinc (0.1 sec)', freq, gain) + +############################################################################### +# This is not so good! 
Making the filter 10 times longer (1 sec) gets us a +# bit better stop-band suppression, but still has a lot of ringing in +# the time domain. Note the x-axis is an order of magnitude longer here: + +n = int(round(1. * sfreq)) + 1 +t = np.arange(-n // 2, n // 2) / sfreq +h = np.sinc(2 * f_p * t) / (4 * np.pi) +plot_filter(h, 'Sinc (1.0 sec)', freq, gain) + +############################################################################### +# Let's make the stop-band tighter still with a longer filter (10 sec), +# with a resulting larger x-axis: + +n = int(round(10. * sfreq)) + 1 +t = np.arange(-n // 2, n // 2) / sfreq +h = np.sinc(2 * f_p * t) / (4 * np.pi) +plot_filter(h, 'Sinc (10.0 sec)', freq, gain) + +############################################################################### +# Now we have very sharp frequency suppression, but our filter rings for the +# entire second. So this naïve method is probably not a good way to build +# our low-pass filter. +# +# Fortunately, there are multiple established methods to design FIR filters +# based on desired response characteristics. These include: +# +# 1. The Remez_ algorithm (`scipy remez`_, `MATLAB firpm`_) +# 2. Windowed FIR design (`scipy firwin2`_, `MATLAB fir2`_) +# 3. Least squares designs (`MATLAB firls`_; coming to scipy 0.18) +# +# If we relax our frequency-domain filter requirements a little bit, we can +# use these functions to construct a lowpass filter that instead has a +# *transition band*, or a region between the pass frequency :math:`f_p` +# and stop frequency :math:`f_s`, e.g.: + +trans_bandwidth = 10 # 10 Hz transition band +f_s = f_p + trans_bandwidth # = 50 Hz + +freq = [0., f_p, f_s, nyq] +gain = [1., 1., 0., 0.] +ax = plt.subplots(1, figsize=half_height)[1] +plot_ideal(freq, gain, ax) +ax.set(title='%s Hz lowpass with a %s Hz transition' % (f_p, trans_bandwidth)) +mne.viz.tight_layout() +plt.show() + +############################################################################### +# Accepting a shallower roll-off of the filter in the frequency domain makes +# our time-domain response potentially much better. We end up with a +# smoother slope through the transition region, but a *much* cleaner time +# domain signal. 
Here again for the 1 sec filter: + +h = signal.firwin2(n, freq, gain, nyq=nyq) +plot_filter(h, 'Windowed 10-Hz transition (1.0 sec)', freq, gain) + +############################################################################### +# Since our lowpass is around 40 Hz with a 10 Hz transition, we can actually +# use a shorter filter (5 cycles at 10 Hz = 0.5 sec) and still get okay +# stop-band attenuation: + +n = int(round(sfreq * 0.5)) + 1 +h = signal.firwin2(n, freq, gain, nyq=nyq) +plot_filter(h, 'Windowed 10-Hz transition (0.5 sec)', freq, gain) + +############################################################################### +# But then if we shorten the filter too much (2 cycles of 10 Hz = 0.2 sec), +# our effective stop frequency gets pushed out past 60 Hz: + +n = int(round(sfreq * 0.2)) + 1 +h = signal.firwin2(n, freq, gain, nyq=nyq) +plot_filter(h, 'Windowed 10-Hz transition (0.2 sec)', freq, gain) + +############################################################################### +# If we want a filter that is only 0.1 seconds long, we should probably use +# something more like a 25 Hz transition band (0.2 sec = 5 cycles @ 25 Hz): + +trans_bandwidth = 25 +f_s = f_p + trans_bandwidth +freq = [0, f_p, f_s, nyq] +h = signal.firwin2(n, freq, gain, nyq=nyq) +plot_filter(h, 'Windowed 50-Hz transition (0.2 sec)', freq, gain) + +############################################################################### +# .. _effect_on_signals: +# +# Applying FIR filters +# -------------------- +# Now lets look at some practical effects of these filters by applying +# them to some data. +# +# Let's construct a Gaussian-windowed sinusoid (i.e., Morlet imaginary part) +# plus noise (random + line). Note that the original, clean signal contains +# frequency content in both the pass band and transition bands of our +# low-pass filter. + +dur = 10. +center = 2. +morlet_freq = f_p +tlim = [center - 0.2, center + 0.2] +tticks = [tlim[0], center, tlim[1]] +flim = [20, 70] + +x = np.zeros(int(sfreq * dur)) +blip = morlet(sfreq, [morlet_freq], n_cycles=7)[0].imag / 20. +n_onset = int(center * sfreq) - len(blip) // 2 +x[n_onset:n_onset + len(blip)] += blip +x_orig = x.copy() + +rng = np.random.RandomState(0) +x += rng.randn(len(x)) / 1000. +x += np.sin(2. * np.pi * 60. * np.arange(len(x)) / sfreq) / 2000. + +############################################################################### +# Filter it with a shallow cutoff, linear-phase FIR and compensate for +# the delay: + +transition_band = 0.25 * f_p +f_s = f_p + transition_band +filter_dur = 5. / transition_band # sec +n = int(sfreq * filter_dur) +freq = [0., f_p, f_s, sfreq / 2.] +gain = [1., 1., 0., 0.] +h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.) +x_shallow = np.convolve(h, x)[len(h) // 2:] + +############################################################################### +# Now let's filter it with the MNE-Python 0.12 defaults, which is a +# long-duration, steep cutoff FIR: + +transition_band = 0.5 # Hz +f_s = f_p + transition_band +filter_dur = 10. # sec +n = int(sfreq * filter_dur) +freq = [0., f_p, f_s, sfreq / 2.] +gain = [1., 1., 0., 0.] +h = signal.firwin2(n, freq, gain, nyq=sfreq / 2.) +x_steep = np.convolve(h, x)[len(h) // 2:] + +plot_filter(h, 'MNE-Python 0.12 default', freq, gain) + +############################################################################### +# It has excellent frequency attenuation, but this comes at a cost of potential +# ringing (long-lasting ripples) in the time domain. 
Ripple can occur with +# steep filters, especially on signals with frequency content around the +# transition band. Our Morlet wavelet signal has power in our transition band, +# and the time-domain ringing is thus more pronounced for the steep-slope, +# long-duration filter than the shorter, shallower-slope filter: + +axs = plt.subplots(2)[1] + + +def plot_signal(x, offset): + t = np.arange(len(x)) / sfreq + axs[0].plot(t, x + offset) + axs[0].set(xlabel='Time (sec)', xlim=t[[0, -1]]) + box_off(axs[0]) + X = fftpack.fft(x) + freqs = fftpack.fftfreq(len(x), 1. / sfreq) + mask = freqs >= 0 + X = X[mask] + freqs = freqs[mask] + axs[1].plot(freqs, 20 * np.log10(np.abs(X))) + axs[1].set(xlim=xlim) + +yticks = np.arange(4) / -30. +yticklabels = ['Original', 'Noisy', 'FIR-shallow', 'FIR-steep'] +plot_signal(x_orig, offset=yticks[0]) +plot_signal(x, offset=yticks[1]) +plot_signal(x_shallow, offset=yticks[2]) +plot_signal(x_steep, offset=yticks[3]) +axs[0].set(xlim=tlim, title='Lowpass=%d Hz' % f_p, xticks=tticks, + ylim=[-0.125, 0.025], yticks=yticks, yticklabels=yticklabels,) +for text in axs[0].get_yticklabels(): + text.set(rotation=45, size=8) +axs[1].set(xlim=flim, ylim=ylim, xlabel='Frequency (Hz)', + ylabel='Magnitude (dB)') +box_off(axs[0]) +box_off(axs[1]) +mne.viz.tight_layout() +plt.show() + +############################################################################### +# Summary +# ------- +# When filtering, there are always tradeoffs that should be considered. +# One important tradeoff is between time-domain characteristics (like ringing) +# and frequency-domain attenuation characteristics (like effective transition +# bandwidth). Filters with sharp frequency cutoffs can produce outputs that +# ring for a long time when they operate on signals with frequency content +# in the transition band. In general, therefore, the wider a transition band +# that can be tolerated, the better behaved the filter will be in the time +# domain. + +############################################################################### +# References +# ---------- +# .. [1] Parks TW, Burrus CS. Digital Filter Design. +# New York: Wiley-Interscience, 1987. +# +# .. _FIR: https://en.wikipedia.org/wiki/Finite_impulse_response +# .. _IIR: https://en.wikipedia.org/wiki/Infinite_impulse_response +# .. _sinc: https://en.wikipedia.org/wiki/Sinc_function +# .. _moving average: https://en.wikipedia.org/wiki/Moving_average +# .. _autoregression: https://en.wikipedia.org/wiki/Autoregressive_model +# .. _Remez: https://en.wikipedia.org/wiki/Remez_algorithm +# .. _scipy remez: http://scipy.github.io/devdocs/generated/scipy.signal.remez.html # noqa +# .. _matlab firpm: http://www.mathworks.com/help/signal/ref/firpm.html +# .. _scipy firwin2: http://scipy.github.io/devdocs/generated/scipy.signal.firwin2.html # noqa +# .. _matlab fir2: http://www.mathworks.com/help/signal/ref/fir2.html +# .. _matlab firls: http://www.mathworks.com/help/signal/ref/firls.html diff --git a/tutorials/plot_brainstorm_auditory.py b/tutorials/plot_brainstorm_auditory.py index a2adb75a9ce..b996571dc30 100644 --- a/tutorials/plot_brainstorm_auditory.py +++ b/tutorials/plot_brainstorm_auditory.py @@ -82,6 +82,7 @@ # Data channel array consisted of 274 MEG axial gradiometers, 26 MEG reference # sensors and 2 EEG electrodes (Cz and Pz). 
# In addition: +# # - 1 stim channel for marking presentation times for the stimuli # - 1 audio channel for the sent signal # - 1 response channel for recording the button presses @@ -89,6 +90,7 @@ # - 2 EOG bipolar (vertical and horizontal) # - 12 head tracking channels # - 20 unused channels +# # The head tracking channels and the unused channels are marked as misc # channels. Here we define the EOG and ECG channels. raw.set_channel_types({'HEOG': 'eog', 'VEOG': 'eog', 'ECG': 'ecg'}) diff --git a/tutorials/plot_eeg_erp.py b/tutorials/plot_eeg_erp.py index c22e1684ac1..64fbaad8116 100644 --- a/tutorials/plot_eeg_erp.py +++ b/tutorials/plot_eeg_erp.py @@ -7,6 +7,7 @@ For a generic introduction to the computation of ERP and ERF see :ref:`tut_epoching_and_averaging`. Here we cover the specifics of EEG, namely: + - setting the reference - using standard montages :func:`mne.channels.Montage` - Evoked arithmetic (e.g. differences) diff --git a/tutorials/plot_epochs_to_data_frame.py b/tutorials/plot_epochs_to_data_frame.py index 0c8beca545d..1359a96053e 100644 --- a/tutorials/plot_epochs_to_data_frame.py +++ b/tutorials/plot_epochs_to_data_frame.py @@ -203,7 +203,7 @@ class provides many useful methods for restructuring, reshaping and visualizing df.condition = df.condition.apply(lambda name: name + ' ') plt.figure() -max_latency.plot(kind='barh', title='Latency of Maximum Reponse', +max_latency.plot(kind='barh', title='Latency of Maximum Response', color=['steelblue']) mne.viz.tight_layout() diff --git a/tutorials/plot_forward.py b/tutorials/plot_forward.py index 3f542b05965..73e7b4f4129 100644 --- a/tutorials/plot_forward.py +++ b/tutorials/plot_forward.py @@ -27,7 +27,8 @@ # ------------------------------ # # To compute a forward operator we need: -# - a -trans.fif file that contains the coregistration info. +# +# - a ``-trans.fif`` file that contains the coregistration info. # - a source space # - the BEM surfaces @@ -171,6 +172,7 @@ # By looking at :ref:`sphx_glr_auto_examples_forward_plot_read_forward.py` # plot the sensitivity maps for EEG and compare it with the MEG, can you # justify the claims that: +# # - MEG is not sensitive to radial sources # - EEG is more sensitive to deep sources # diff --git a/tutorials/plot_sensors_time_frequency.py b/tutorials/plot_sensors_time_frequency.py index d685257bb50..c685519bf7e 100644 --- a/tutorials/plot_sensors_time_frequency.py +++ b/tutorials/plot_sensors_time_frequency.py @@ -59,7 +59,7 @@ ############################################################################### # Alternatively, you can also create PSDs from Epochs objects with functions -# that start with psd_ such as +# that start with ``psd_`` such as # :func:`mne.time_frequency.psd_multitaper` and # :func:`mne.time_frequency.psd_welch`. diff --git a/tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py b/tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py index ad1b7a2dda6..6ddb5783f1d 100644 --- a/tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py +++ b/tutorials/plot_stats_cluster_time_frequency_repeated_measures_anova.py @@ -132,15 +132,15 @@ # makes sure the first two dimensions are organized as expected (with A = # modality and B = location): # -# .. table:: +# .. table:: Sample data layout # -# ===== ==== ==== ==== ==== -# trial A1B1 A1B2 A2B1 B2B2 -# ===== ==== ==== ==== ==== -# 1 1.34 2.53 0.97 1.74 -# ... .... .... .... .... 
-# 56 2.45 7.90 3.09 4.76 -# ===== ==== ==== ==== ==== +# ===== ==== ==== ==== ==== +# trial A1B1 A1B2 A2B1 B2B2 +# ===== ==== ==== ==== ==== +# 1 1.34 2.53 0.97 1.74 +# ... ... ... ... ... +# 56 2.45 7.90 3.09 4.76 +# ===== ==== ==== ==== ==== # # Now we're ready to run our repeated measures ANOVA. # @@ -192,8 +192,8 @@ def stat_fun(*args): return f_mway_rm(np.swapaxes(args, 1, 0), factor_levels=factor_levels, effects=effects, return_pvals=False)[0] - # The ANOVA returns a tuple f-values and p-values, we will pick the former. +# The ANOVA returns a tuple f-values and p-values, we will pick the former. pthresh = 0.00001 # set threshold rather high to save some time f_thresh = f_threshold_mway_rm(n_replications, factor_levels, effects, pthresh) @@ -204,8 +204,8 @@ def stat_fun(*args): n_permutations=n_permutations, buffer_size=None) ############################################################################### -# Create new stats image with only significant clusters -# ----------------------------------------------------- +# Create new stats image with only significant clusters: + good_clusers = np.where(cluster_p_values < .05)[0] T_obs_plot = np.ma.masked_array(T_obs, np.invert(clusters[np.squeeze(good_clusers)])) @@ -222,8 +222,8 @@ def stat_fun(*args): plt.show() ############################################################################### -# Now using FDR -# ------------- +# Now using FDR: + mask, _ = fdr_correction(pvals[2]) T_obs_plot2 = np.ma.masked_array(T_obs, np.invert(mask)) @@ -239,5 +239,6 @@ def stat_fun(*args): ' FDR corrected (p <= 0.05)' % ch_name) plt.show() -# Both, cluster level and FDR correction help getting rid of +############################################################################### +# Both cluster level and FDR correction help get rid of # putatively spots we saw in the naive f-images.