bem.py
# Authors: Alexandre Gramfort <[email protected]>
# Matti Hamalainen <[email protected]>
# Eric Larson <[email protected]>
# Lorenzo De Santis <[email protected]>
#
# License: BSD (3-clause)
from functools import partial
import glob
import os
import os.path as op
import shutil
from copy import deepcopy
import numpy as np
from scipy import linalg
from .io.constants import FIFF, FWD
from .digitization.base import _dig_kind_dict, _dig_kind_rev, _dig_kind_ints
from .io.write import (start_file, start_block, write_float, write_int,
write_float_matrix, write_int_matrix, end_block,
end_file)
from .io.tag import find_tag
from .io.tree import dir_tree_find
from .io.open import fiff_open
from .surface import (read_surface, write_surface, complete_surface_info,
_compute_nearest, _get_ico_surface, read_tri,
_fast_cross_nd_sum, _get_solids)
from .transforms import _ensure_trans, apply_trans
from .utils import (verbose, logger, run_subprocess, get_subjects_dir, warn,
_pl, _validate_type, _TempDir)
from .fixes import einsum
# ############################################################################
# Compute BEM solution
# The following approach is based on:
#
# de Munck JC: "A linear discretization of the volume conductor boundary
# integral equation using analytically integrated elements",
# IEEE Trans Biomed Eng. 1992 39(9) : 986 - 990
#
class ConductorModel(dict):
"""BEM or sphere model."""
def __repr__(self): # noqa: D105
if self['is_sphere']:
center = ', '.join('%0.1f' % (x * 1000.) for x in self['r0'])
rad = self.radius
if rad is None: # no radius / MEG only
extra = 'Sphere (no layers): r0=[%s] mm' % center
else:
extra = ('Sphere (%s layer%s): r0=[%s] R=%1.f mm'
% (len(self['layers']) - 1, _pl(self['layers']),
center, rad * 1000.))
else:
extra = ('BEM (%s layer%s)' % (len(self['surfs']),
_pl(self['surfs'])))
return '<ConductorModel | %s>' % extra
def copy(self):
"""Return copy of ConductorModel instance."""
return deepcopy(self)
@property
def radius(self):
"""Sphere radius if an EEG sphere model."""
if not self['is_sphere']:
raise RuntimeError('radius undefined for BEM')
return None if len(self['layers']) == 0 else self['layers'][-1]['rad']
def _calc_beta(rk, rk_norm, rk1, rk1_norm):
"""Compute coefficients for calculating the magic vector omega."""
rkk1 = rk1[0] - rk[0]
size = np.linalg.norm(rkk1)
rkk1 /= size
num = rk_norm + np.dot(rk, rkk1)
den = rk1_norm + np.dot(rk1, rkk1)
res = np.log(num / den) / size
return res
def _lin_pot_coeff(fros, tri_rr, tri_nn, tri_area):
"""Compute the linear potential matrix element computations."""
omega = np.zeros((len(fros), 3))
# we replicate a little bit of the _get_solids code here for speed
# (we need some of the intermediate values later)
v1 = tri_rr[np.newaxis, 0, :] - fros
v2 = tri_rr[np.newaxis, 1, :] - fros
v3 = tri_rr[np.newaxis, 2, :] - fros
triples = _fast_cross_nd_sum(v1, v2, v3)
l1 = np.linalg.norm(v1, axis=1)
l2 = np.linalg.norm(v2, axis=1)
l3 = np.linalg.norm(v3, axis=1)
ss = l1 * l2 * l3
ss += einsum('ij,ij,i->i', v1, v2, l3)
ss += einsum('ij,ij,i->i', v1, v3, l2)
ss += einsum('ij,ij,i->i', v2, v3, l1)
solids = np.arctan2(triples, ss)
# We *could* subselect the good points from v1, v2, v3, triples, solids,
# l1, l2, and l3, but there are *very* few bad points. So instead we do
# some unnecessary calculations, and then omit them from the final
# solution. These three lines ensure we don't get invalid values in
# _calc_beta.
bad_mask = np.abs(solids) < np.pi / 1e6
l1[bad_mask] = 1.
l2[bad_mask] = 1.
l3[bad_mask] = 1.
# Calculate the magic vector vec_omega
beta = [_calc_beta(v1, l1, v2, l2)[:, np.newaxis],
_calc_beta(v2, l2, v3, l3)[:, np.newaxis],
_calc_beta(v3, l3, v1, l1)[:, np.newaxis]]
vec_omega = (beta[2] - beta[0]) * v1
vec_omega += (beta[0] - beta[1]) * v2
vec_omega += (beta[1] - beta[2]) * v3
area2 = 2.0 * tri_area
n2 = 1.0 / (area2 * area2)
# leave omega = 0 otherwise
# Put it all together...
yys = [v1, v2, v3]
idx = [0, 1, 2, 0, 2]
for k in range(3):
diff = yys[idx[k - 1]] - yys[idx[k + 1]]
zdots = _fast_cross_nd_sum(yys[idx[k + 1]], yys[idx[k - 1]], tri_nn)
omega[:, k] = -n2 * (area2 * zdots * 2. * solids -
triples * (diff * vec_omega).sum(axis=-1))
# omit the bad points from the solution
omega[bad_mask] = 0.
return omega
def _correct_auto_elements(surf, mat):
"""Improve auto-element approximation."""
pi2 = 2.0 * np.pi
tris_flat = surf['tris'].ravel()
misses = pi2 - mat.sum(axis=1)
for j, miss in enumerate(misses):
# How much is missing?
n_memb = len(surf['neighbor_tri'][j])
assert n_memb > 0 # should be guaranteed by our surface checks
# The node itself receives one half
mat[j, j] = miss / 2.0
# The rest is divided evenly among the member nodes...
miss /= (4.0 * n_memb)
members = np.where(j == tris_flat)[0]
mods = members % 3
offsets = np.array([[1, 2], [-1, 1], [-1, -2]])
tri_1 = members + offsets[mods, 0]
tri_2 = members + offsets[mods, 1]
for t1, t2 in zip(tri_1, tri_2):
mat[j, tris_flat[t1]] += miss
mat[j, tris_flat[t2]] += miss
return
def _fwd_bem_lin_pot_coeff(surfs):
"""Calculate the coefficients for linear collocation approach."""
# taken from fwd_bem_linear_collocation.c
nps = [surf['np'] for surf in surfs]
np_tot = sum(nps)
coeff = np.zeros((np_tot, np_tot))
offsets = np.cumsum(np.concatenate(([0], nps)))
for si_1, surf1 in enumerate(surfs):
rr_ord = np.arange(nps[si_1])
for si_2, surf2 in enumerate(surfs):
logger.info(" %s (%d) -> %s (%d) ..." %
(_surf_name[surf1['id']], nps[si_1],
_surf_name[surf2['id']], nps[si_2]))
tri_rr = surf2['rr'][surf2['tris']]
tri_nn = surf2['tri_nn']
tri_area = surf2['tri_area']
submat = coeff[offsets[si_1]:offsets[si_1 + 1],
offsets[si_2]:offsets[si_2 + 1]] # view
for k in range(surf2['ntri']):
tri = surf2['tris'][k]
if si_1 == si_2:
skip_idx = ((rr_ord == tri[0]) |
(rr_ord == tri[1]) |
(rr_ord == tri[2]))
else:
skip_idx = list()
# No contribution from a triangle that
# this vertex belongs to
# if sidx1 == sidx2 and (tri == j).any():
# continue
# Otherwise do the hard job
coeffs = _lin_pot_coeff(surf1['rr'], tri_rr[k], tri_nn[k],
tri_area[k])
coeffs[skip_idx] = 0.
submat[:, tri] -= coeffs
if si_1 == si_2:
_correct_auto_elements(surf1, submat)
return coeff
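# In matrix form, the multi-surface solution below assembles and inverts
#     I + (1/N) * ones((N, N)) - (1 / (2 * pi)) * C
# where C is the coefficient matrix from _fwd_bem_lin_pot_coeff with each
# (surface i, surface j) block scaled by gamma[i, j], and the constant 1/N
# "deflation" term accounts for the potential being defined only up to an
# additive constant.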
def _fwd_bem_multi_solution(solids, gamma, nps):
"""Do multi surface solution.
* Invert I - solids/(2*M_PI)
* Take deflation into account
* The matrix is destroyed after inversion
* This is the general multilayer case
"""
pi2 = 1.0 / (2 * np.pi)
n_tot = np.sum(nps)
assert solids.shape == (n_tot, n_tot)
nsurf = len(nps)
defl = 1.0 / n_tot
# Modify the matrix
offsets = np.cumsum(np.concatenate(([0], nps)))
for si_1 in range(nsurf):
for si_2 in range(nsurf):
mult = pi2 if gamma is None else pi2 * gamma[si_1, si_2]
slice_j = slice(offsets[si_1], offsets[si_1 + 1])
slice_k = slice(offsets[si_2], offsets[si_2 + 1])
solids[slice_j, slice_k] = defl - solids[slice_j, slice_k] * mult
solids += np.eye(n_tot)
return linalg.inv(solids, overwrite_a=True)
def _fwd_bem_homog_solution(solids, nps):
"""Make a homogeneous solution."""
return _fwd_bem_multi_solution(solids, None, nps)
def _fwd_bem_ip_modify_solution(solution, ip_solution, ip_mult, n_tri):
"""Modify the solution according to the IP approach."""
n_last = n_tri[-1]
mult = (1.0 + ip_mult) / ip_mult
logger.info(' Combining...')
offsets = np.cumsum(np.concatenate(([0], n_tri)))
for si in range(len(n_tri)):
# Pick the correct submatrix (right column) and multiply
sub = solution[offsets[si]:offsets[si + 1], np.sum(n_tri[:-1]):]
# Multiply
sub -= 2 * np.dot(sub, ip_solution)
# The lower right corner is a special case
sub[-n_last:, -n_last:] += mult * ip_solution
# Final scaling
logger.info(' Scaling...')
solution *= ip_mult
return
def _check_complete_surface(surf, copy=False, incomplete='raise'):
surf = complete_surface_info(surf, copy=copy, verbose=False)
fewer = np.where([len(t) < 3 for t in surf['neighbor_tri']])[0]
if len(fewer) > 0:
msg = ('Surface %s has topological defects: %d / %d '
'vertices have fewer than three neighboring '
'triangles [%s]' % (_surf_name[surf['id']],
len(fewer), surf['ntri'],
', '.join(str(f) for f in fewer)))
if incomplete == 'raise':
raise RuntimeError(msg)
else:
warn(msg)
return surf
def _fwd_bem_linear_collocation_solution(m):
"""Compute the linear collocation potential solution."""
# first, add surface geometries
for surf in m['surfs']:
_check_complete_surface(surf)
logger.info('Computing the linear collocation solution...')
logger.info(' Matrix coefficients...')
coeff = _fwd_bem_lin_pot_coeff(m['surfs'])
m['nsol'] = len(coeff)
logger.info(" Inverting the coefficient matrix...")
nps = [surf['np'] for surf in m['surfs']]
m['solution'] = _fwd_bem_multi_solution(coeff, m['gamma'], nps)
if len(m['surfs']) == 3:
ip_mult = m['sigma'][1] / m['sigma'][2]
if ip_mult <= FWD.BEM_IP_APPROACH_LIMIT:
logger.info('IP approach required...')
logger.info(' Matrix coefficients (homog)...')
coeff = _fwd_bem_lin_pot_coeff([m['surfs'][-1]])
logger.info(' Inverting the coefficient matrix (homog)...')
ip_solution = _fwd_bem_homog_solution(coeff,
[m['surfs'][-1]['np']])
logger.info(' Modify the original solution to incorporate '
'IP approach...')
_fwd_bem_ip_modify_solution(m['solution'], ip_solution, ip_mult,
nps)
m['bem_method'] = FWD.BEM_LINEAR_COLL
logger.info("Solution ready.")
@verbose
def make_bem_solution(surfs, verbose=None):
"""Create a BEM solution using the linear collocation approach.
Parameters
----------
surfs : list of dict
The BEM surfaces to use (from `make_bem_model`).
%(verbose)s
Returns
-------
bem : instance of ConductorModel
The BEM solution.
Notes
-----
.. versionadded:: 0.10.0
See Also
--------
make_bem_model
read_bem_surfaces
write_bem_surfaces
read_bem_solution
write_bem_solution
"""
logger.info('Approximation method : Linear collocation\n')
if isinstance(surfs, str):
# Load the surfaces
logger.info('Loading surfaces...')
surfs = read_bem_surfaces(surfs)
bem = ConductorModel(is_sphere=False, surfs=surfs)
_add_gamma_multipliers(bem)
if len(bem['surfs']) == 3:
logger.info('Three-layer model surfaces loaded.')
elif len(bem['surfs']) == 1:
logger.info('Homogeneous model surface loaded.')
else:
raise RuntimeError('Only 1- or 3-layer BEM computations supported')
_check_bem_size(bem['surfs'])
_fwd_bem_linear_collocation_solution(bem)
logger.info('BEM geometry computations complete.')
return bem
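# Usage sketch: ``surfs`` may also be a path to a BEM surfaces file, which is
# loaded via read_bem_surfaces above. The file names here are hypothetical,
# following the usual MNE naming convention:
#
#     import mne
#     bem = mne.make_bem_solution('sample-5120-5120-5120-bem.fif')
#     mne.write_bem_solution('sample-5120-5120-5120-bem-sol.fif', bem)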
# ############################################################################
# Make BEM model
def _ico_downsample(surf, dest_grade):
"""Downsample the surface if isomorphic to a subdivided icosahedron."""
n_tri = len(surf['tris'])
bad_msg = ("Cannot decimate to requested ico grade %d. The provided "
"BEM surface has %d triangles, which cannot be isomorphic with "
"a subdivided icosahedron. Consider manually decimating the "
"surface to a suitable density and then use ico=None in "
"make_bem_model." % (dest_grade, n_tri))
if n_tri % 20 != 0:
raise RuntimeError(bad_msg)
n_tri = n_tri // 20
found = int(round(np.log(n_tri) / np.log(4)))
if n_tri != 4 ** found:
raise RuntimeError(bad_msg)
del n_tri
if dest_grade > found:
raise RuntimeError('For this surface, decimation grade should be %d '
'or less, not %s.' % (found, dest_grade))
source = _get_ico_surface(found)
dest = _get_ico_surface(dest_grade, patch_stats=True)
del dest['tri_cent']
del dest['tri_nn']
del dest['neighbor_tri']
del dest['tri_area']
if not np.array_equal(source['tris'], surf['tris']):
raise RuntimeError('The source surface has a matching number of '
'triangles but ordering is wrong')
logger.info('Going from %dth to %dth subdivision of an icosahedron '
'(n_tri: %d -> %d)' % (found, dest_grade, len(surf['tris']),
len(dest['tris'])))
# Find the mapping
dest['rr'] = surf['rr'][_get_ico_map(source, dest)]
return dest
def _get_ico_map(fro, to):
"""Get a mapping between ico surfaces."""
nearest, dists = _compute_nearest(fro['rr'], to['rr'], return_dists=True)
n_bads = (dists > 5e-3).sum()
if n_bads > 0:
raise RuntimeError('No matching vertex for %d destination vertices'
% (n_bads))
return nearest
def _order_surfaces(surfs):
"""Reorder the surfaces."""
if len(surfs) != 3:
return surfs
# we have three surfaces
surf_order = [FIFF.FIFFV_BEM_SURF_ID_HEAD,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_BRAIN]
ids = np.array([surf['id'] for surf in surfs])
if set(ids) != set(surf_order):
raise RuntimeError('bad surface ids: %s' % ids)
order = [np.where(ids == id_)[0][0] for id_ in surf_order]
surfs = [surfs[idx] for idx in order]
return surfs
def _assert_complete_surface(surf, incomplete='raise'):
"""Check the sum of solid angles as seen from inside."""
# from surface_checks.c
# Center of mass....
cm = surf['rr'].mean(axis=0)
logger.info('%s CM is %6.2f %6.2f %6.2f mm' %
(_surf_name[surf['id']],
1000 * cm[0], 1000 * cm[1], 1000 * cm[2]))
tot_angle = _get_solids(surf['rr'][surf['tris']], cm[np.newaxis, :])[0]
prop = tot_angle / (2 * np.pi)
if np.abs(prop - 1.0) > 1e-5:
msg = ('Surface %s is not complete (sum of solid angles '
'yielded %g, should be 1.)'
% (_surf_name[surf['id']], prop))
if incomplete == 'raise':
raise RuntimeError(msg)
else:
warn(msg)
_surf_name = {
FIFF.FIFFV_BEM_SURF_ID_HEAD: 'outer skin ',
FIFF.FIFFV_BEM_SURF_ID_SKULL: 'outer skull',
FIFF.FIFFV_BEM_SURF_ID_BRAIN: 'inner skull',
FIFF.FIFFV_BEM_SURF_ID_UNKNOWN: 'unknown ',
}
def _assert_inside(fro, to):
"""Check one set of points is inside a surface."""
# this is "is_inside" in surface_checks.c
tot_angle = _get_solids(to['rr'][to['tris']], fro['rr'])
if (np.abs(tot_angle / (2 * np.pi) - 1.0) > 1e-5).any():
raise RuntimeError('Surface %s is not completely inside surface %s'
% (_surf_name[fro['id']], _surf_name[to['id']]))
def _check_surfaces(surfs, incomplete='raise'):
"""Check that the surfaces are complete and non-intersecting."""
for surf in surfs:
_assert_complete_surface(surf, incomplete=incomplete)
# Then check the topology
for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
logger.info('Checking that %s surface is inside %s surface...' %
(_surf_name[surf_2['id']], _surf_name[surf_1['id']]))
_assert_inside(surf_2, surf_1)
def _check_surface_size(surf):
"""Check that the coordinate limits are reasonable."""
sizes = surf['rr'].max(axis=0) - surf['rr'].min(axis=0)
if (sizes < 0.05).any():
raise RuntimeError('Dimensions of the surface %s seem too small '
'(%9.5f mm). Maybe the unit of measure is '
'meters instead of mm' %
(_surf_name[surf['id']], 1000 * sizes.min()))
def _check_thicknesses(surfs):
"""Compute how close we are."""
for surf_1, surf_2 in zip(surfs[:-1], surfs[1:]):
min_dist = _compute_nearest(surf_1['rr'], surf_2['rr'],
return_dists=True)[1]
min_dist = min_dist.min()
logger.info('Checking distance between %s and %s surfaces...' %
(_surf_name[surf_1['id']], _surf_name[surf_2['id']]))
logger.info('Minimum distance between the %s and %s surfaces is '
'approximately %6.1f mm' %
(_surf_name[surf_1['id']], _surf_name[surf_2['id']],
1000 * min_dist))
def _surfaces_to_bem(surfs, ids, sigmas, ico=None, rescale=True,
incomplete='raise'):
"""Convert surfaces to a BEM."""
# equivalent of mne_surf2bem
# surfs can be strings (filenames) or surface dicts
if len(surfs) not in (1, 3) or not (len(surfs) == len(ids) ==
len(sigmas)):
raise ValueError('surfs, ids, and sigmas must all have the same '
'number of elements (1 or 3)')
for si, surf in enumerate(surfs):
if isinstance(surf, str):
surfs[si] = read_surface(surf, return_dict=True)[-1]
# Downsampling if the surface is isomorphic with a subdivided icosahedron
if ico is not None:
for si, surf in enumerate(surfs):
surfs[si] = _ico_downsample(surf, ico)
for surf, id_ in zip(surfs, ids):
# Do topology checks (but don't save data) to fail early
surf['id'] = id_
_check_complete_surface(surf, copy=True, incomplete=incomplete)
surf['coord_frame'] = surf.get('coord_frame', FIFF.FIFFV_COORD_MRI)
surf.update(np=len(surf['rr']), ntri=len(surf['tris']))
if rescale:
surf['rr'] /= 1000. # convert to meters
# Shifting surfaces is not implemented here...
# Order the surfaces for the benefit of the topology checks
for surf, sigma in zip(surfs, sigmas):
surf['sigma'] = sigma
surfs = _order_surfaces(surfs)
# Check topology as best we can
_check_surfaces(surfs, incomplete=incomplete)
for surf in surfs:
_check_surface_size(surf)
_check_thicknesses(surfs)
logger.info('Surfaces passed the basic topology checks.')
return surfs
@verbose
def make_bem_model(subject, ico=4, conductivity=(0.3, 0.006, 0.3),
subjects_dir=None, verbose=None):
"""Create a BEM model for a subject.
.. note:: To get a single-layer BEM corresponding to the --homog flag in
the command line tool, set the ``conductivity`` parameter
to a list/tuple with a single value (e.g. [0.3]).
Parameters
----------
subject : str
The subject.
ico : int | None
The surface ico downsampling to use, e.g. 5=20484, 4=5120, 3=1280.
If None, no subsampling is applied.
conductivity : array of float, shape (3,) or (1,)
The conductivities to use for each shell. Should be a single element
for a one-layer model, or three elements for a three-layer model.
Defaults to ``[0.3, 0.006, 0.3]``. The MNE-C default for a
single-layer model would be ``[0.3]``.
subjects_dir : string, or None
Path to SUBJECTS_DIR if it is not set in the environment.
%(verbose)s
Returns
-------
surfaces : list of dict
The BEM surfaces. Use `make_bem_solution` to turn these into a
`ConductorModel` suitable for forward calculation.
Notes
-----
.. versionadded:: 0.10.0
See Also
--------
make_bem_solution
make_sphere_model
read_bem_surfaces
write_bem_surfaces
"""
conductivity = np.array(conductivity, float)
if conductivity.ndim != 1 or conductivity.size not in (1, 3):
raise ValueError('conductivity must be 1D array-like with 1 or 3 '
'elements')
subjects_dir = get_subjects_dir(subjects_dir, raise_error=True)
subject_dir = op.join(subjects_dir, subject)
bem_dir = op.join(subject_dir, 'bem')
inner_skull = op.join(bem_dir, 'inner_skull.surf')
outer_skull = op.join(bem_dir, 'outer_skull.surf')
outer_skin = op.join(bem_dir, 'outer_skin.surf')
surfaces = [inner_skull, outer_skull, outer_skin]
ids = [FIFF.FIFFV_BEM_SURF_ID_BRAIN,
FIFF.FIFFV_BEM_SURF_ID_SKULL,
FIFF.FIFFV_BEM_SURF_ID_HEAD]
logger.info('Creating the BEM geometry...')
if len(conductivity) == 1:
surfaces = surfaces[:1]
ids = ids[:1]
surfaces = _surfaces_to_bem(surfaces, ids, conductivity, ico)
_check_bem_size(surfaces)
logger.info('Complete.\n')
return surfaces
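# Usage sketch: the single-layer variant mentioned in the note above,
# suitable for MEG-only forward modeling. 'sample' is a hypothetical
# FreeSurfer subject with BEM surfaces already created under subjects_dir:
#
#     import mne
#     surfs = mne.make_bem_model('sample', ico=4, conductivity=[0.3],
#                                subjects_dir=subjects_dir)
#     bem = mne.make_bem_solution(surfs)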
# ############################################################################
# Compute EEG sphere model
def _fwd_eeg_get_multi_sphere_model_coeffs(m, n_terms):
"""Get the model depended weighting factor for n."""
nlayer = len(m['layers'])
if nlayer in (0, 1):
return 1.
# Initialize the arrays
c1 = np.zeros(nlayer - 1)
c2 = np.zeros(nlayer - 1)
cr = np.zeros(nlayer - 1)
cr_mult = np.zeros(nlayer - 1)
for k in range(nlayer - 1):
c1[k] = m['layers'][k]['sigma'] / m['layers'][k + 1]['sigma']
c2[k] = c1[k] - 1.0
cr_mult[k] = m['layers'][k]['rel_rad']
cr[k] = cr_mult[k]
cr_mult[k] *= cr_mult[k]
coeffs = np.zeros(n_terms - 1)
for n in range(1, n_terms):
# Increment the radius coefficients
for k in range(nlayer - 1):
cr[k] *= cr_mult[k]
# Multiply the matrices
M = np.eye(2)
n1 = n + 1.0
for k in range(nlayer - 2, -1, -1):
M = np.dot([[n + n1 * c1[k], n1 * c2[k] / cr[k]],
[n * c2[k] * cr[k], n1 + n * c1[k]]], M)
num = n * (2.0 * n + 1.0) ** (nlayer - 1)
coeffs[n - 1] = num / (n * M[1, 1] + n1 * M[1, 0])
return coeffs
def _compose_linear_fitting_data(mu, u):
"""Get the linear fitting data."""
k1 = np.arange(1, u['nterms'])
mu1ns = mu[0] ** k1
# data to be fitted
y = u['w'][:-1] * (u['fn'][1:] - mu1ns * u['fn'][0])
# model matrix
M = u['w'][:-1, np.newaxis] * (mu[1:] ** k1[:, np.newaxis] -
mu1ns[:, np.newaxis])
uu, sing, vv = linalg.svd(M, full_matrices=False)
ncomp = u['nfit'] - 1
uu, sing, vv = uu[:, :ncomp], sing[:ncomp], vv[:ncomp]
return y, uu, sing, vv
def _compute_linear_parameters(mu, u):
"""Compute the best-fitting linear parameters."""
y, uu, sing, vv = _compose_linear_fitting_data(mu, u)
# Compute the residuals
vec = np.dot(y, uu)
resi = y - np.dot(uu, vec)
vec /= sing
lambda_ = np.zeros(u['nfit'])
lambda_[1:] = np.dot(vec, vv)
lambda_[0] = u['fn'][0] - np.sum(lambda_[1:])
rv = np.dot(resi, resi) / np.dot(y, y)
return rv, lambda_
def _one_step(mu, u):
"""Evaluate the residual sum of squares fit for one set of mu values."""
if np.abs(mu).max() > 1.0:
return 1.0
# Compose the data for the linear fitting, compute SVD, then residuals
y, uu, sing, vv = _compose_linear_fitting_data(mu, u)
resi = y - np.dot(uu, np.dot(y, uu))
return np.dot(resi, resi)
def _fwd_eeg_fit_berg_scherg(m, nterms, nfit):
"""Fit the Berg-Scherg equivalent spherical model dipole parameters."""
from scipy.optimize import fmin_cobyla
assert nfit >= 2
u = dict(nfit=nfit, nterms=nterms)
# (1) Calculate the coefficients of the true expansion
u['fn'] = _fwd_eeg_get_multi_sphere_model_coeffs(m, nterms + 1)
# (2) Calculate the weighting
f = (min([layer['rad'] for layer in m['layers']]) /
max([layer['rad'] for layer in m['layers']]))
# correct weighting
k = np.arange(1, nterms + 1)
u['w'] = np.sqrt((2.0 * k + 1) * (3.0 * k + 1.0) /
k) * np.power(f, (k - 1.0))
u['w'][-1] = 0
# Do the nonlinear minimization, constraining mu to the interval [-1, +1]
mu_0 = np.zeros(3)
fun = partial(_one_step, u=u)
max_ = 1. - 2e-4 # adjust for fmin_cobyla "catol", which not all scipy versions have
cons = list()
for ii in range(nfit):
def mycon(x, ii=ii):
return max_ - np.abs(x[ii])
cons.append(mycon)
mu = fmin_cobyla(fun, mu_0, cons, rhobeg=0.5, rhoend=1e-5, disp=0)
# (6) Do the final step: calculation of the linear parameters
rv, lambda_ = _compute_linear_parameters(mu, u)
order = np.argsort(mu)[::-1]
mu, lambda_ = mu[order], lambda_[order] # sort: largest mu first
m['mu'] = mu
# This division takes into account the actual conductivities
m['lambda'] = lambda_ / m['layers'][-1]['sigma']
m['nfit'] = nfit
return rv
@verbose
def make_sphere_model(r0=(0., 0., 0.04), head_radius=0.09, info=None,
relative_radii=(0.90, 0.92, 0.97, 1.0),
sigmas=(0.33, 1.0, 0.004, 0.33), verbose=None):
"""Create a spherical model for forward solution calculation.
Parameters
----------
r0 : array-like | str
Head center to use (in head coordinates). If 'auto', the head
center will be calculated from the digitization points in info.
head_radius : float | str | None
If float, compute spherical shells for EEG using the given radius.
If 'auto', estimate an appropriate radius from the dig points in Info.
If None, exclude shells (single layer sphere model).
info : instance of Info | None
Measurement info. Only needed if ``r0`` or ``head_radius`` are
``'auto'``.
relative_radii : array-like
Relative radii for the spherical shells.
sigmas : array-like
Sigma values for the spherical shells.
%(verbose)s
Returns
-------
sphere : instance of ConductorModel
The resulting spherical conductor model.
Notes
-----
.. versionadded:: 0.9.0
See Also
--------
make_bem_model
make_bem_solution
"""
for name in ('r0', 'head_radius'):
param = locals()[name]
if isinstance(param, str):
if param != 'auto':
raise ValueError('%s, if str, must be "auto" not "%s"'
% (name, param))
relative_radii = np.array(relative_radii, float).ravel()
sigmas = np.array(sigmas, float).ravel()
if len(relative_radii) != len(sigmas):
raise ValueError('relative_radii length (%s) must match that of '
'sigmas (%s)' % (len(relative_radii),
len(sigmas)))
if len(sigmas) <= 1 and head_radius is not None:
raise ValueError('at least 2 sigmas must be supplied if '
'head_radius is not None, got %s' % (len(sigmas),))
if (isinstance(r0, str) and r0 == 'auto') or \
(isinstance(head_radius, str) and head_radius == 'auto'):
if info is None:
raise ValueError('Info must not be None for auto mode')
head_radius_fit, r0_fit = fit_sphere_to_headshape(info, units='m')[:2]
if isinstance(r0, str):
r0 = r0_fit
if isinstance(head_radius, str):
head_radius = head_radius_fit
sphere = ConductorModel(is_sphere=True, r0=np.array(r0),
coord_frame=FIFF.FIFFV_COORD_HEAD)
sphere['layers'] = list()
if head_radius is not None:
# Eventually these could be configurable...
relative_radii = np.array(relative_radii, float)
sigmas = np.array(sigmas, float)
order = np.argsort(relative_radii)
relative_radii = relative_radii[order]
sigmas = sigmas[order]
for rel_rad, sig in zip(relative_radii, sigmas):
# sort layers by (relative) radius, and scale radii
layer = dict(rad=rel_rad, sigma=sig)
layer['rel_rad'] = layer['rad'] = rel_rad
sphere['layers'].append(layer)
# scale the radii
R = sphere['layers'][-1]['rad']
rR = sphere['layers'][-1]['rel_rad']
for layer in sphere['layers']:
layer['rad'] /= R
layer['rel_rad'] /= rR
#
# Setup the EEG sphere model calculations
#
# Scale the relative radii
for k in range(len(relative_radii)):
sphere['layers'][k]['rad'] = (head_radius *
sphere['layers'][k]['rel_rad'])
rv = _fwd_eeg_fit_berg_scherg(sphere, 200, 3)
logger.info('\nEquiv. model fitting -> RV = %g %%' % (100 * rv))
for k in range(3):
logger.info('mu%d = %g lambda%d = %g'
% (k + 1, sphere['mu'][k], k + 1,
sphere['layers'][-1]['sigma'] *
sphere['lambda'][k]))
logger.info('Set up EEG sphere model with scalp radius %7.1f mm\n'
% (1000 * head_radius,))
return sphere
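# Usage sketch: head_radius=None drops the EEG shells (MEG-only sphere),
# while 'auto' fits the center and radius to the digitization points.
# ``raw`` is a hypothetical, already-loaded recording:
#
#     import mne
#     sphere_meg = mne.make_sphere_model(r0=(0., 0., 0.04), head_radius=None)
#     sphere_eeg = mne.make_sphere_model(r0='auto', head_radius='auto',
#                                        info=raw.info)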
# #############################################################################
# Sphere fitting
@verbose
def fit_sphere_to_headshape(info, dig_kinds='auto', units='m', verbose=None):
"""Fit a sphere to the headshape points to determine head center.
Parameters
----------
info : instance of Info
Measurement info.
dig_kinds : list of str | str
Kind of digitization points to use in the fitting. These can be any
combination of ('cardinal', 'hpi', 'eeg', 'extra'). Can also
be 'auto' (default), which will use only the 'extra' points if
enough (more than 10) are available, and if not, uses 'extra' and
'eeg' points.
units : str
Can be "m" (default) or "mm".
.. versionadded:: 0.12
%(verbose)s
Returns
-------
radius : float
Sphere radius.
origin_head : ndarray, shape (3,)
Head center in head coordinates.
origin_device : ndarray, shape (3,)
Head center in device coordinates.
Notes
-----
This function excludes any points that are low and frontal
(``z < 0 and y > 0``) to improve the fit.
"""
if not isinstance(units, str) or units not in ('m', 'mm'):
raise ValueError('units must be "m" or "mm"')
radius, origin_head, origin_device = _fit_sphere_to_headshape(
info, dig_kinds)
if units == 'mm':
radius *= 1e3
origin_head *= 1e3
origin_device *= 1e3
return radius, origin_head, origin_device
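# Usage sketch: fit the sphere and report it in millimeters. ``raw`` is a
# hypothetical, already-loaded recording with digitization points:
#
#     import mne
#     radius, origin_head, origin_device = mne.fit_sphere_to_headshape(
#         raw.info, dig_kinds='auto', units='mm')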
@verbose
def get_fitting_dig(info, dig_kinds='auto', verbose=None):
"""Get digitization points suitable for sphere fitting.
Parameters
----------
info : instance of Info
The measurement info.
dig_kinds : list of str | str
Kind of digitization points to use in the fitting. These can be any
combination of ('cardinal', 'hpi', 'eeg', 'extra'). Can also
be 'auto' (default), which will use only the 'extra' points if
enough (more than 10) are available, and if not, uses 'extra' and
'eeg' points.
%(verbose)s
Returns
-------
dig : array, shape (n_pts, 3)
The digitization points (in head coordinates) to use for fitting.
Notes
-----
This will exclude digitization locations that have ``z < 0 and y > 0``,
i.e. points on the nose and below the nose on the face.
.. versionadded:: 0.14
"""
_validate_type(info, "info")
if info['dig'] is None:
raise RuntimeError('Cannot fit headshape without digitization'
', info["dig"] is None')
if isinstance(dig_kinds, str):
if dig_kinds == 'auto':
# try "extra" first
try:
return get_fitting_dig(info, 'extra')
except ValueError:
pass
return get_fitting_dig(info, ('extra', 'eeg'))
else:
dig_kinds = (dig_kinds,)
# convert string args to ints (first make dig_kinds mutable in case tuple)
dig_kinds = list(dig_kinds)
for di, d in enumerate(dig_kinds):
dig_kinds[di] = _dig_kind_dict.get(d, d)
if dig_kinds[di] not in _dig_kind_ints:
raise ValueError('dig_kinds[#%d] (%s) must be one of %s'
% (di, d, sorted(list(_dig_kind_dict.keys()))))
# get head digitization points of the specified kind(s)
hsp = [p['r'] for p in info['dig'] if p['kind'] in dig_kinds]
if any(p['coord_frame'] != FIFF.FIFFV_COORD_HEAD for p in info['dig']):
raise RuntimeError('Digitization points not in head coordinates, '
'contact mne-python developers')
# exclude some frontal points (nose etc.)
hsp = np.array([p for p in hsp if not (p[2] < -1e-6 and p[1] > 1e-6)])
if len(hsp) <= 10:
kinds_str = ', '.join(['"%s"' % _dig_kind_rev[d]
for d in sorted(dig_kinds)])
msg = ('Only %s head digitization points of the specified kind%s (%s,)'
% (len(hsp), _pl(dig_kinds), kinds_str))
if len(hsp) < 4:
raise ValueError(msg + ', at least 4 required')
else:
warn(msg + ', fitting may be inaccurate')
return hsp
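# Usage sketch: restrict the sphere fit to EEG electrode positions only.
# ``raw`` is a hypothetical, already-loaded recording:
#
#     import mne
#     dig = mne.bem.get_fitting_dig(raw.info, dig_kinds='eeg')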
@verbose
def _fit_sphere_to_headshape(info, dig_kinds, verbose=None):
"""Fit a sphere to the given head shape."""
hsp = get_fitting_dig(info, dig_kinds)
radius, origin_head = _fit_sphere(np.array(hsp), disp=False)
# compute origin in device coordinates
head_to_dev = _ensure_trans(info['dev_head_t'], 'head', 'meg')
origin_device = apply_trans(head_to_dev, origin_head)
logger.info('Fitted sphere radius:'.ljust(30) + '%0.1f mm'
% (radius * 1e3,))
# 99th percentile on Wikipedia for glabella to back of head is 21.7cm,
# i.e. 108mm "radius", so let's go with 110mm
# en.wikipedia.org/wiki/Human_head#/media/File:HeadAnthropometry.JPG
if radius > 0.110:
warn('Estimated head size (%0.1f mm) exceeded 99th '
'percentile for adult head size' % (1e3 * radius,))
# > 2 cm away from head center in X or Y is strange
if np.linalg.norm(origin_head[:2]) > 0.02:
warn('(X, Y) fit (%0.1f, %0.1f) more than 20 mm from '
'head frame origin' % tuple(1e3 * origin_head[:2]))
logger.info('Origin head coordinates:'.ljust(30) +
'%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_head))
logger.info('Origin device coordinates:'.ljust(30) +
'%0.1f %0.1f %0.1f mm' % tuple(1e3 * origin_device))
return radius, origin_head, origin_device
def _fit_sphere(points, disp='auto'):
"""Fit a sphere to an arbitrary set of points."""
from scipy.optimize import fmin_cobyla
if isinstance(disp, str) and disp == 'auto':
disp = True if logger.level <= 20 else False
# initial guess for center and radius
radii = (np.max(points, axis=1) - np.min(points, axis=1)) / 2.
radius_init = radii.mean()
center_init = np.median(points, axis=0)
# optimization
x0 = np.concatenate([center_init, [radius_init]])
def cost_fun(center_rad):
d = np.linalg.norm(points - center_rad[:3], axis=1) - center_rad[3]
d *= d
return d.sum()
def constraint(center_rad):
return center_rad[3] # radius must be >= 0
x_opt = fmin_cobyla(cost_fun, x0, constraint, rhobeg=radius_init,
rhoend=radius_init * 1e-6, disp=disp)
origin = x_opt[:3]
radius = x_opt[3]
return radius, origin
def _check_origin(origin, info, coord_frame='head', disp=False):
"""Check or auto-determine the origin."""
if isinstance(origin, str):
if origin != 'auto':
raise ValueError('origin must be a numerical array, or "auto", '