allow download of single subject
JoseAlanis committed May 17, 2019
1 parent b32c3b6 commit 85220c3
Showing 3 changed files with 47 additions and 62 deletions.
4 changes: 4 additions & 0 deletions .circleci/config.yml
@@ -168,6 +168,9 @@ jobs:
            if [[ $(cat $FNAME | grep -x ".*datasets.*phantom_4dbti.*" | wc -l) -gt 0 ]]; then
              python -c "import mne; print(mne.datasets.phantom_4dbti.data_path(update_path=True))";
            fi;
+           if [[ $(cat $FNAME | grep -x ".*datasets.*limo.*" | wc -l) -gt 0 ]]; then
+             python -c "import mne; print(mne.datasets.limo.data_path(update_path=True))";
+           fi;
          fi;
        done;
echo PATTERN="$PATTERN";
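The CI step above downloads a dataset only when one of the changed files actually references it. A minimal Python sketch of that gating logic, assuming fnames is a list of changed-file paths (the regex mirrors the grep -x whole-line pattern; note that with this commit data_path() takes a subject argument, so the sketch passes one explicitly, subject=1 being purely illustrative):

import re

def references_limo(fnames):
    """Return True if any changed file mentions the LIMO dataset."""
    pattern = re.compile(r'.*datasets.*limo.*')
    for fname in fnames:
        with open(fname) as fid:
            # grep -x matches whole lines, hence fullmatch
            if any(pattern.fullmatch(line.rstrip('\n')) for line in fid):
                return True
    return False

if references_limo(['changed_tutorial.py']):  # hypothetical file list
    import mne
    print(mne.datasets.limo.data_path(subject=1, update_path=True))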
@@ -271,6 +274,7 @@ jobs:
            key: data-cache-8
            paths:
              - ~/mne_data/MNE-visual_92_categories-data
+             - ~/mne_data/MNE-limo-data
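For this cache entry to hit, the path must match where the fetcher actually writes. A quick sanity check, assuming the default MNE data root (the MNE_DATA config key, falling back to ~/mne_data):

import os.path as op
import mne

# data_path() stores the LIMO files under <data root>/MNE-limo-data,
# which is what the CircleCI cache entry above must point at
root = mne.get_config('MNE_DATA', op.expanduser('~/mne_data'))
print(op.join(root, 'MNE-limo-data'))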


linkcheck:
20 changes: 11 additions & 9 deletions (LIMO dataset example script)
@@ -23,8 +23,6 @@
epochs structure in MNE-Python.
Missing channels can be interpolated if desired.
.. note:: Downloading the LIMO dataset for the first time can take some time (8 GB).
References
----------
@@ -37,14 +35,16 @@
#
# License: BSD (3-clause)

import numpy as np

import mne
from mne.datasets import limo
from mne.stats import linear_regression

print(__doc__)

-# fetch data from subject 2
-limo_epochs = load_data(subject=2, interpolate=True)
+# fetch data from subject 2 and interpolate missing channels
+limo_epochs = limo.load_data(subject=2, interpolate=True)

# check distribution of events (should be ordered)
mne.viz.plot_events(limo_epochs.events)
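The collapsed lines at this point of the example build the design matrix passed to linear_regression below. A hedged sketch of that step (the column names 'Face_Effect' and 'Noise' are taken from the calls further down; the regressor values here are placeholders, not the example's actual construction):

import numpy as np
import pandas as pd

n_trials = len(limo_epochs)
design = pd.DataFrame({
    'intercept': np.ones(n_trials),                       # constant term
    'Face_Effect': np.random.choice([0., 1.], n_trials),  # placeholder face A/B coding
    'Noise': np.random.uniform(0., 1., n_trials),         # placeholder phase-coherence values
})
names = ['intercept', 'Face_Effect', 'Noise']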
@@ -67,12 +67,14 @@
# fit linear model
reg = linear_regression(limo_epochs, design[names], names=names)

-reg['Face_Effect'].beta.plot_joint(title='Face_Effect',
-                                   ts_args=dict(time_unit='s'),
-                                   topomap_args=dict(time_unit='s'),
-                                   times=[.16])
-
# plot effect of noise variable
reg['Noise'].beta.plot_joint(title='Effect of Noise',
                             ts_args=dict(time_unit='s'),
                             topomap_args=dict(time_unit='s'),
                             times=[.125, .225])

+# plot effect of condition
+reg['Face_Effect'].beta.plot_joint(title='Face_Effect',
+                                   ts_args=dict(time_unit='s'),
+                                   topomap_args=dict(time_unit='s'),
+                                   times=[.16])
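A usage note: linear_regression returns one result per regressor, each holding Evoked-like quantities (beta, t_val, p_val, and so on), so any Evoked visualization applies, not just plot_joint. A small sketch:

# inspect the noise regressor's coefficients as topographies
noise_beta = reg['Noise'].beta  # an mne.Evoked of beta values
noise_beta.plot_topomap(times=[.125, .225], time_unit='s')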
85 changes: 32 additions & 53 deletions mne/datasets/limo/limo.py
@@ -20,19 +20,27 @@
from ...utils import _fetch_file, _url_to_local_path, verbose
from ..utils import _get_path, _do_path_update

+# root url for LIMO files
+root_url = 'https://files.de-1.osf.io/v1/resources/52rea/providers/osfstorage/'
+
+# subject identifiers
+subject_ids = {'S1': '5cde823c8d6e050018595862',
+               'S2': '5cde825e23fec40017e0561a'}


@verbose
-def data_path(url, path=None, force_update=False, update_path=None,
+def data_path(subject, path=None, force_update=False, update_path=None,
              verbose=None):
"""Get path to local copy of LIMO dataset URL.
This is a low-level function useful for getting a local copy of the
-remote LIMO dataset [1]_ which is available at datashare.is.ed.ac.uk/ [2]_.
+remote LIMO dataset [1]_. The complete dataset is available at
+datashare.is.ed.ac.uk/ [2]_.
Parameters
----------
-url : str
-    The location from where the dataset should be downloaded.
+subject : int
+    Subject to download. Must be an int in the range from 1 to 18.
path : None | str
    Location of where to look for the LIMO data storing directory.
    If None, the environment variable or config parameter
@@ -50,23 +58,21 @@ def data_path(url, path=None, force_update=False, update_path=None,
Returns
-------
path : str
-    Local path to the given data file. This path is contained inside a list
-    of length one, for compatibility.
+    Local path to the given data file.
Notes
-----
For example, one could do:
>>> from mne.datasets import limo
->>> url = 'http://datashare.is.ed.ac.uk/download/DS_10283_2189.zip'
->>> limo.data_path(url, os.getenv('HOME') + '/datasets')  # doctest:+SKIP
+>>> limo.data_path(subject=1, path=os.getenv('HOME') + '/datasets')  # doctest:+SKIP
This would download the LIMO data file to the 'datasets' folder,
and prompt the user to save the 'datasets' path to the mne-python config,
if it isn't there already.
References
----------
.. [1] Guillaume, Rousselet. (2016). LIMO EEG Dataset, [dataset].
University of Edinburgh, Centre for Clinical Brain Sciences.
https://doi.org/10.7488/ds/1556.
@@ -77,58 +83,37 @@ def data_path(url, path=None, force_update=False, update_path=None,
    name = 'LIMO'
    path = _get_path(path, key, name)
    limo_dir = op.join(path, 'MNE-limo-data')
-    destination = re.sub('/download', '', _url_to_local_path(url, limo_dir))
+    subject_id = 'S%s' % subject
+    destination = op.join(limo_dir, '%s.zip' % subject_id)
+
+    # url for subject in question (plain string concatenation: this is a
+    # URL, not a filesystem path)
+    url = root_url + subject_ids[subject_id] + '/?zip='

-    # fetch data from online repository if required
+    # check if LIMO directory exists; update if desired
    if not op.isdir(limo_dir) or force_update:
        if op.isdir(limo_dir):
            shutil.rmtree(limo_dir)
        if not op.isdir(limo_dir):
            os.makedirs(limo_dir)

+    # check if subject in question exists
+    if not op.isdir(op.join(limo_dir, subject_id)):
+        os.makedirs(op.join(limo_dir, subject_id))
        _fetch_file(url, destination, print_destination=False)

        # check if download is a zip-folder
        if any(group.endswith(".zip") for group in op.splitext(destination)):
+            if not op.isdir(op.join(limo_dir, subject_id)):
+                os.makedirs(op.join(limo_dir, subject_id))
            with zipfile.ZipFile(destination) as z1:
                files = [op.join(limo_dir, file) for file in z1.namelist()]
                stdout.write('Decompressing %g files from\n'
                             '"%s" ...' % (len(files), destination))
-                z1.extractall(limo_dir)
+                z1.extractall(op.join(limo_dir, subject_id))
                stdout.write(' [done]\n')
                z1.close()
            os.remove(destination)

-    # check if further .zip-folders are contained
-    zips = [file for file in files if file.endswith('.zip')]
-    if zips:
-        for file in zips:
-            with zipfile.ZipFile(file) as z2:
-                in_zip = z2.namelist()
-                stdout.write('Decompressing .zip-files ...\n')
-                z2.extractall(limo_dir)
-                z2.close()
-            os.remove(file)
-
-            # continue decompressing if necessary
-            zfiles = [file for file in in_zip if file.endswith('.zip')]
-            if zfiles:
-                for zfile in zfiles:
-                    zfile = op.join(limo_dir, zfile)
-                    with zipfile.ZipFile(zfile) as z3:
-                        z3.extractall(op.split(zfile)[0])
-                        z3.close()
-                    os.remove(zfile)
-
-    # check if .tar-folders are contained
-    tars = [file for file in files if file.endswith('.tar')]
-    if tars:
-        for file in tars:
-            with tarfile.open(op.join(limo_dir, file)) as tar:
-                stdout.write('Decompressing .tar-files ...\n')
-                tar.extractall(limo_dir)
-                tar.close()
-            os.remove(op.join(limo_dir, file))
-    stdout.write(' [done]\n')

    # update path if desired
    _do_path_update(path, update_path, key, name)

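A hedged usage sketch for the reworked fetcher (subject 2 is one of the two subjects mapped in subject_ids above; force_update=True would wipe and re-create the whole MNE-limo-data directory first):

from mne.datasets import limo

# download subject 2 if needed and return the local dataset path
path = limo.data_path(subject=2)
print(path)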
@@ -137,7 +122,7 @@ def data_path(url, path=None, force_update=False, update_path=None,

@verbose
def load_data(subject, path=None, interpolate=False, force_update=False,
-              update_path=None, url=None, verbose=None):  # noqa: D301
+              update_path=None, verbose=None):
"""Fetch subjects epochs data for the LIMO data set.
Parameters
@@ -156,28 +141,22 @@ def load_data(subject, path=None, interpolate=False, force_update=False,
update_path : bool | None
    If True, set the MNE_DATASETS_LIMO_PATH in mne-python
    config to the given path. If None, the user is prompted.
-url : str
-    The location from where the dataset should be downloaded, if not
-    found on drive.
%(verbose)s
Returns
-------
epochs : MNE Epochs data structure
    The epochs.
""" # noqa: E501
-    if url is None:
-        url = 'http://datashare.is.ed.ac.uk/download/DS_10283_2189.zip'
-
-    # set limo path, download and decompress files if not found
-    limo_path = data_path(url, path, force_update, update_path)

    # subject in question
    if isinstance(subject, int) and 1 <= subject <= 18:
        subj = 'S%i' % subject
    else:
        raise ValueError('subject must be an int in the range from 1 to 18')

+    # set limo path, download and decompress files if not found
+    limo_path = data_path(subject, path, force_update, update_path)

    # -- 1) import .mat files
    # epochs info
    fname_info = op.join(limo_path, subj, 'LIMO.mat')
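Finally, a hedged usage sketch of the single-subject entry point this commit enables (per the docstring, the return value is an epochs object; printing it is just a sanity check):

from mne.datasets import limo

# fetch (if needed) and load subject 1, interpolating missing channels
epochs = limo.load_data(subject=1, interpolate=True)
print(epochs)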
