
Commit f559527

mock 0 score in train_test_overlap
avoids an error in Futrell cross-subject averaging
mschrimpf committed Mar 8, 2021
1 parent 93cb490 commit f559527
Showing 1 changed file with 3 additions and 2 deletions.
neural_nlp/analyze/data/__init__.py (5 changes: 3 additions & 2 deletions)
@@ -59,7 +59,7 @@ def output_apply(*args):
             presentation_values = {key: str(value) for key, value in presentation_values.items()}
 
             result.append(presentation_values)
-        return Score([np.nan],
+        return Score([0],
                      coords={**{"neuroid_id": ("neuroid", [0])},
                              **{subject_column: ("neuroid", [0]) for subject_column in set(subject_columns.values())}},
                      dims=["neuroid"])
@@ -76,7 +76,8 @@ def output_apply(*args):
     result = pd.DataFrame(result)
 
     # plot -- we're relying on the implicit ordering of train followed by test
-    stimuli_key = 'sentence' if not benchmark_identifier.startswith('Fedorenko2016') else 'word'
+    stimuli_key = 'sentence' if not any(benchmark_identifier.startswith(word_benchmark)
+                                        for word_benchmark in ['Fedorenko2016', 'Futrell2018']) else 'word'
     train_stimuli = result[stimuli_key][result['type'] == 'source_train'].values
     test_stimuli = result[stimuli_key][result['type'] == 'source_test'].values
     assert len(train_stimuli) == len(test_stimuli)
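
The second hunk generalizes the single `startswith('Fedorenko2016')` check so that Futrell2018 benchmarks, like Fedorenko2016 ones, compare train/test overlap at the word level rather than the sentence level. Restated outside the diff as a small helper for clarity (the helper name and the example identifiers are illustrative, not from the repository):

```python
# Illustrative restatement of the updated stimuli_key selection.
WORD_LEVEL_PREFIXES = ['Fedorenko2016', 'Futrell2018']

def stimuli_key_for(benchmark_identifier: str) -> str:
    # Word-level stimuli for Fedorenko2016*/Futrell2018* benchmarks, sentence-level otherwise.
    return 'word' if any(benchmark_identifier.startswith(prefix)
                         for prefix in WORD_LEVEL_PREFIXES) else 'sentence'

print(stimuli_key_for('Futrell2018-encoding'))  # word
print(stimuli_key_for('Pereira2018-encoding'))  # sentence
```
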
