Skip to content

Commit

Permalink
v0.2.1 support binocular files + other assorted fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
szorowi1 committed Aug 24, 2019
1 parent 12a547d commit e7893fd
Show file tree
Hide file tree
Showing 6 changed files with 156 additions and 77 deletions.
2 changes: 1 addition & 1 deletion README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -41,6 +41,6 @@ The minimum required dependencies to run NivLink are:
- NumPy
- SciPy
- Pandas
- Optional: Matplotlib, Bokeh
- Optional: Matplotlib

**Note:** NivLink has not been thoroughly tested under varying package versions.
2 changes: 1 addition & 1 deletion nivlink/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Niv Lab software for preprocessing eyelink eyetracking data."""

__version__ = '0.2'
__version__ = '0.2.1'

from .raw import (Raw)
from .epochs import (Epochs)
Expand Down
13 changes: 8 additions & 5 deletions nivlink/edf/edfread.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,11 +141,14 @@ def edf_read(fname):
## Extract data.
samples = array(samples, dtype=float64)
if info['eye'] == 'LEFT':
data = samples[:,1::2]
data = np.expand_dims(samples[:,1::2], 1)
eye_names = ('LEFT')
elif info['eye'] == 'RIGHT':
data = samples[:,2::2]
data = np.expand_dims(samples[:,2::2], 1)
eye_names = ('RIGHT')
else:
raise ValueError('Binocular data not supported.')
data = samples[:,1:].reshape(-1, 2, 3, order='F')
eye_names = ('LEFT', 'RIGHT')

## Format time.
times = samples[:,0].astype(int)
Expand All @@ -166,6 +169,6 @@ def edf_read(fname):
messages['sample'] = searchsorted(times, messages['sample'])

## Define channel names.
ch_names = ['gx','gy','pupil']
ch_names = ('gx','gy','pupil')

return info, data, blinks, saccades, messages, ch_names
return info, data, blinks, saccades, messages, ch_names, eye_names
61 changes: 42 additions & 19 deletions nivlink/epochs.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,39 +18,61 @@ class Epochs(object):
time relative to event onset.
picks : 'gaze' | 'pupil' | None
Data types to include (if None, all data are used).
eyes : 'LEFT' | 'RIGHT' | None
Eye recordings to include (if None, all data are used).
blinks : True | False
Include blinks and re-reference to epochs.
saccades : True | False
    Include saccades and re-reference to epochs.
Attributes
----------
info : dict
Recording metadata.
data : array, shape (n_trials, n_times, n_channels)
data : array, shape (n_trials, n_eyes, n_channels, n_times)
Recording samples.
times : array, shape (n_times,)
Time vector in seconds. Goes from `tmin` to `tmax`. Time interval
between consecutive time samples is equal to the inverse of the
sampling frequency.
events : array, shape (n_trials, 2)
extents : array, shape (n_trials, 2)
Onset and offset of trials.
ch_names : list
Names of data channels.
ch_names : list, shape (n_channels)
Names of data channels.
eye_names : list, shape (n_eyes)
Order of data channels (by eye).
blinks : array, shape (i, 3)
(If included) Detected blinks detailed by their trial, start, and end.
saccades : array, shape (j, 3)
(If included) Detected saccades detailed by their trial, start, and end.
"""

def __init__(self, raw, events, tmin=0, tmax=1, picks=None, blinks=False, saccades=False):
def __init__(self, raw, events, tmin=0, tmax=1, picks=None, eyes=None,
blinks=True, saccades=True):

## Define metadata.
self.info = deepcopy(raw.info)

## Define channels.
if picks is None: ch_names = ['gx','gy','pupil']
elif picks.lower().startswith('g'): ch_names = ['gx','gy']
elif picks.lower().startswith('p'): ch_names = ['pupil']
self.ch_names = np.intersect1d(ch_names, raw.ch_names)
if picks is None: ch_names = ('gx','gy','pupil')
elif picks.lower().startswith('g'): ch_names = ('gx','gy')
elif picks.lower().startswith('p'): ch_names = ('pupil')
else: raise ValueError(f'"{picks}" not valid input for picks.')
self.ch_names = tuple(np.intersect1d(ch_names, raw.ch_names))
ch_ix = np.in1d(raw.ch_names,self.ch_names)

## Define eyes.
if eyes is None: eye_names = deepcopy(raw.eye_names)
elif eyes.lower().startswith('l'): eye_names = ('LEFT')
elif eyes.lower().startswith('r'): eye_names = ('RIGHT')
else: raise ValueError(f'"{eyes}" not valid input for eyes.')
self.eye_names = tuple(np.intersect1d(eye_names, raw.eye_names))
eye_ix = np.in1d(raw.eye_names,self.eye_names)

## Define events.
assert np.ndim(events) == 1
if isinstance(tmin, (int, float)): tmin = np.repeat(tmin, events.size)
if isinstance(tmax, (int, float)): tmax = np.repeat(tmax, events.size)
if isinstance(tmin, (int, float)): tmin = np.repeat(float(tmin), events.size)
if isinstance(tmax, (int, float)): tmax = np.repeat(float(tmax), events.size)
assert np.size(events) == np.size(tmin) == np.size(tmax)
self.extents = np.column_stack([tmin, tmax])

Expand All @@ -62,18 +84,19 @@ def __init__(self, raw, events, tmin=0, tmax=1, picks=None, blinks=False, saccad

## Define indices of data relative to raw.
raw_ix = np.column_stack([events + tmin * sfreq, events + tmax * sfreq])

## Define indices of data relative to epochs.
epoch_ix = (np.column_stack([tmin,tmax]) - tmin.min()) * sfreq
self._ix = epoch_ix.astype(int)

## Make epochs.
self.data = np.ones((events.shape[0], self.times.size, len(self.ch_names))) * np.nan
self.data = np.ones((events.shape[0], self.times.size, len(self.eye_names), len(self.ch_names))) * np.nan
index = np.column_stack((raw_ix, epoch_ix)).astype(int)
for i, (r1, r2, e1, e2) in enumerate(index):
self.data[i,e1:e2,:] = deepcopy(raw.data[r1:r2,ch_ix])
self.data = self.data.swapaxes(1,2)

for i, (r1, r2, e1, e2) in enumerate(index):
# TODO: This ugly syntax should be replaced in time (numpy issues 13255)
self.data[i,e1:e2,...] = deepcopy(raw.data[r1:r2,eye_ix][...,ch_ix])
self.data = np.moveaxis(self.data,1,-1)

## Re-reference artifacts to epochs.
if blinks: self.blinks = self._align_artifacts(raw.blinks, raw_ix)
if saccades: self.saccades = self._align_artifacts(raw.saccades, raw_ix)
Expand All @@ -97,7 +120,7 @@ def _align_artifacts(self, artifacts, raw_ix):
"""

## Broadcast trial onsets/offsets to number of blinks.
n_events, _, n_times = self.data.shape
n_events, _, _, n_times = self.data.shape
onsets = np.broadcast_to(raw_ix[:,0], (artifacts.shape[0], n_events)).T
offsets = np.broadcast_to(raw_ix[:,1], (artifacts.shape[0], n_events)).T

Expand All @@ -117,7 +140,7 @@ def _align_artifacts(self, artifacts, raw_ix):
return artifacts

def __repr__(self):
return '<Epochs | {0} trials, {2} samples>'.format(*self.data.shape)
return '<Epochs | {0} trials, {3} samples>'.format(*self.data.shape)

def copy(self):
"""Return copy of Raw instance."""
Expand Down
125 changes: 79 additions & 46 deletions nivlink/gaze.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,21 +4,22 @@
from .raw import Raw
from .epochs import Epochs

def align_to_aoi(data, screen, screenidx):
"""Align eyetracking data to areas of interest. Please see notes.
def align_to_aoi(data, screen, mapping=None):
"""Align eyetracking data to areas of interest.
Parameters
----------
data : Raw | Epochs | array, shape=(n_trials, n_times, 2)
data : Raw | Epochs | array, shape=(n_trials, n_eyes, n_channels, n_times)
Trials to be aligned.
screen : instance of Screen
screen : nivlink.Screen
Eyetracking acquisition information.
screenidx : array, shape (n_trials,)
Mapping of trial to screen index.
mapping : array, shape (n_trials,)
Mapping of trials to screens. If None, all trials mapped to
first Screen. Should be zero-indexed.
Returns
-------
aligned : array, shape (n_trials, n_times)
aligned : array, shape (n_trials, n_eyes, n_times)
Eyetracking timeseries aligned to areas of interest.
Notes
Expand All @@ -28,60 +29,82 @@ def align_to_aoi(data, screen, screenidx):
1. Eyetracking positions are rounded down to the nearest pixel.
2. Eyetracking positions outside (xdim, ydim) are set to NaN.
"""

## Data handling (Raw, Epochs).
if isinstance(data, (Raw, Epochs)):

if isinstance(data, Raw):

## Error-catching: force gx and gy to be present.
if not np.all(np.in1d(['gx','gy'], data.ch_names)):
raise ValueError('Both gaze channels (gx, gy) must be present.')


## Copy data.
gaze_ix = np.in1d(data.ch_names, ['gx','gy'])
data = data.data[..., gaze_ix].copy()
data = np.expand_dims(data, 0)
data = data.swapaxes(2,3)

if isinstance(data, Epochs):

## Error-catching: force gx and gy to be present.
if not np.all(np.in1d(['gx','gy'], data.ch_names)):
raise ValueError('Both gaze channels (gx, gy) must be present.')

## Copy data.
data = data.data[:,np.in1d(data.ch_names, ['gx','gy'])].copy()
if data.ndim == 2: data = np.expand_dims(data, 0)
else: data = data.swapaxes(1,2)
gaze_ix = np.in1d(data.ch_names, ['gx','gy'])
data = data.data[..., gaze_ix, :].copy()
data = data.swapaxes(2,3)

## Data handling (all else).
else:

## Error-catching: force gx and gy to be present.
if np.shape(data)[-1] != 2:
raise ValueError('data last dimension must be length 2, i.e. (xdim, ydim)')
if np.ndim(data) != 4:
raise ValueError('data must be shape (..., 2, n_trials)')
elif np.shape(data)[-2] != 2:
raise ValueError('data must be shape (..., 2, n_trials)')

## Copy data.
data = np.array(data.copy())
if data.ndim == 2: data = np.expand_dims(data, 0)
data = data.swapaxes(2,3)

## Collect metadata. Preallocate space.
n_trials, n_times, n_dim = data.shape
n_trials, n_eyes, n_times, n_dim = data.shape
xd, yd, n_screens = screen.indices.shape
aligned = np.zeros(n_trials * n_times)

## Unfold screen index variable into the events timeline.
trials_long = np.repeat(np.arange(1,n_trials+1),n_times)
screenidx_long = np.squeeze(screenidx[trials_long-1])

## Extract row (xdim) and col (ydim) info.
row, col = np.floor(data.reshape(n_trials*n_times,n_dim)).T


## Round gaze data to nearest pixel.
data = np.floor(data).astype(int)

## Identify missing data.
row[np.logical_or(row < 0, row >= screen.xdim)] = np.nan # Eyefix outside screen x-bound.
col[np.logical_or(col < 0, col >= screen.ydim)] = np.nan # Eyefix outside screen y-bound.
missing = np.logical_or(np.isnan(row), np.isnan(col))

## Align fixations for each screen.
for i in range(n_screens):

## Identify events associated with this screen.
this_screen = (screenidx_long == i+1)

## Combine with info about missing data.
x = np.logical_and(~missing, this_screen)

## Align eyefix with screen labels.
aligned[x] = screen.indices[row[x].astype(int), col[x].astype(int), i]

return aligned.reshape(n_trials, n_times)
missing_x = np.logical_or(data[...,0] < 0, data[...,0] >= screen.xdim )
missing_y = np.logical_or(data[...,1] < 0, data[...,1] >= screen.ydim )
missing = np.logical_or(missing_x, missing_y)

## Mask missing data.
data[missing] = 0

## Preallocate space.
aligned = np.zeros_like(missing, dtype=int)

## Define screen indices.
if mapping is None: mapping = np.zeros(n_trials, dtype=int)
assert np.size(mapping) == n_trials

## Main loop.
for ix in np.unique(mapping):

## Extract current screen.
current_screen = screen.indices[...,ix]

## Define row and column indices.
row = data[mapping == ix, :, :, 0].flatten()
col = data[mapping == ix, :, :, 1].flatten()

## Align data and reshape.
t = np.sum(mapping == ix)
aligned[mapping == ix] = current_screen[row, col].reshape(t, n_eyes, n_times)

## Mask missing data.
aligned[missing] = 0

return aligned

def compute_fixations(aligned, times, labels=None):
"""Compute fixations from aligned timeseries. Fixations are defined
Expand All @@ -102,8 +125,18 @@ def compute_fixations(aligned, times, labels=None):
fixations : pd.DataFrame
Pandas DataFrame where each row details the (Trial, AoI,
Onset, Offset, Duration) of the fixation.
Notes
-----
Currently supports only monocular data. In the case of binocular
data, the user can simply pass each eye's aligned data separately
(i.e. call this function once per eye).
"""


## Error-catching.
assert np.ndim(aligned) == 2
assert np.shape(aligned)[-1] == np.size(times)

## Define labels list.
if labels is None: labels = [i for i in np.unique(aligned) if i]

Expand Down
Loading

0 comments on commit e7893fd

Please sign in to comment.