# This file is a part of Julia. License is MIT: http://julialang.org/license

import .Serializer: reset_state

# todo:
#  * fetch/wait latency seems to be excessive
#  * message aggregation
#  * timer events
#     - send pings at some interval to detect failed/hung machines
#  * integrate event loop with other kinds of i/o (non-messages)
#  * serializing closures
#  * recover from i/o errors
#  * handle remote execution errors
#  * all-to-all communication
#  * distributed GC
#  * call&wait and call&fetch combined messages
#  * aggregate GC messages
#  * dynamically adding nodes (then always start with 1 and grow)

## workers and message i/o ##

# Messages
abstract AbstractMsg

let REF_ID::Int = 1
    global next_ref_id
    next_ref_id() = (id = REF_ID; REF_ID += 1; id)
end
type RRID
    whence
    id

    RRID() = RRID(myid(), next_ref_id())
    RRID(whence, id) = new(whence, id)
end
hash(r::RRID, h::UInt) = hash(r.whence, hash(r.id, h))
==(r::RRID, s::RRID) = (r.whence == s.whence && r.id == s.id)

type CallMsg{Mode} <: AbstractMsg
    f::Function
    args::Tuple
    kwargs::Array
    response_oid::RRID
end
type CallWaitMsg <: AbstractMsg
    f::Function
    args::Tuple
    kwargs::Array
    response_oid::RRID
    notify_oid::RRID
end
type RemoteDoMsg <: AbstractMsg
    f::Function
    args::Tuple
    kwargs::Array
end
type ResultMsg <: AbstractMsg
    response_oid::RRID
    value::Any
end

# Worker initialization messages
type IdentifySocketMsg <: AbstractMsg
    from_pid::Int
end
type IdentifySocketAckMsg <: AbstractMsg
end
type JoinPGRPMsg <: AbstractMsg
    self_pid::Int
    other_workers::Array
    notify_oid::RRID
    topology::Symbol
    worker_pool
end
type JoinCompleteMsg <: AbstractMsg
    notify_oid::RRID
    cpu_cores::Int
    ospid::Int
end
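
# Roughly how these messages map to the API defined below:
#   CallMsg{:call}       - sent by remotecall(); schedules f(args...) on the peer
#   CallMsg{:call_fetch} - sent by remotecall_fetch(); the result comes back as a ResultMsg
#   CallWaitMsg          - sent by remotecall_wait(); completion is signalled via notify_oid
#   RemoteDoMsg          - sent by remote_do(); fire-and-forget, no reply
#   ResultMsg            - sent by deliver_result(); carries a value (or :OK) for response_oid
#   IdentifySocketMsg / IdentifySocketAckMsg / JoinPGRPMsg / JoinCompleteMsg -
#                          worker setup handshake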

function send_msg_unknown(s::IO, msg)
    error("attempt to send to unknown socket")
end

function send_msg(s::IO, msg)
    id = worker_id_from_socket(s)
    if id > -1
        return send_msg(worker_from_id(id), msg)
    end
    send_msg_unknown(s, msg)
end

function send_msg_now(s::IO, msg::AbstractMsg)
    id = worker_id_from_socket(s)
    if id > -1
        return send_msg_now(worker_from_id(id), msg)
    end
    send_msg_unknown(s, msg)
end
abstract ClusterManager

type WorkerConfig
    # Common fields relevant to all cluster managers
    io::Nullable{IO}
    host::Nullable{AbstractString}
    port::Nullable{Integer}

    # Used when launching additional workers at a host
    count::Nullable{Union{Int, Symbol}}
    exename::Nullable{AbstractString}
    exeflags::Nullable{Cmd}

    # External cluster managers can use this to store information at a per-worker level.
    # Can be a dict if multiple fields need to be stored.
    userdata::Nullable{Any}

    # SSHManager / SSH tunnel connections to workers
    tunnel::Nullable{Bool}
    bind_addr::Nullable{AbstractString}
    sshflags::Nullable{Cmd}
    max_parallel::Nullable{Integer}

    # Used by Local/SSH managers
    connect_at::Nullable{Any}

    process::Nullable{Process}
    ospid::Nullable{Integer}

    # Private dictionary used to store temporary information by Local/SSH managers.
    environ::Nullable{Dict}

    # Connections to be set up depending on the network topology requested
    ident::Nullable{Any}            # Worker as identified by the cluster manager
    # List of other worker idents this worker must connect with. Used with topology T_CUSTOM.
    connect_idents::Nullable{Array}

    function WorkerConfig()
        wc = new()
        for n in 1:length(WorkerConfig.types)
            T = eltype(fieldtype(WorkerConfig, n))
            setfield!(wc, n, Nullable{T}())
        end
        wc
    end
end
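
# A minimal sketch (hypothetical values, not from this file) of how a custom
# ClusterManager's launch() might fill in a WorkerConfig:
#
#     wc = WorkerConfig()
#     wc.io = Nullable{IO}(worker_stdout)       # stream the manager reads the worker's address from
#     wc.exename = Nullable("/path/to/julia")   # placeholder path
#     wc.exeflags = Nullable(`--worker`)
#     wc.userdata = Nullable(Dict(:slot => 1))  # manager-private bookkeeping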

@enum WorkerState W_CREATED W_CONNECTED W_TERMINATING W_TERMINATED
type Worker
    id::Int
    del_msgs::Array{Any,1}
    add_msgs::Array{Any,1}
    gcflag::Bool
    state::WorkerState
    c_state::Condition      # wait for state changes
    ct_time::Float64        # creation time

    r_stream::IO
    w_stream::IO
    w_serializer::ClusterSerializer     # writes can happen from any task, hence store the
                                        # serializer as part of the Worker object
    manager::ClusterManager
    config::WorkerConfig
    version::Nullable{VersionNumber}    # Julia version of the remote process

    function Worker(id::Int, r_stream::IO, w_stream::IO, manager::ClusterManager;
                    version=Nullable{VersionNumber}(), config=WorkerConfig())
        w = Worker(id)
        w.r_stream = r_stream
        w.w_stream = buffer_writes(w_stream)
        w.w_serializer = ClusterSerializer(w.w_stream)
        w.manager = manager
        w.config = config
        w.version = version
        set_worker_state(w, W_CONNECTED)
        register_worker_streams(w)
        w
    end

    function Worker(id::Int)
        @assert id > 0
        if haskey(map_pid_wrkr, id)
            return map_pid_wrkr[id]
        end
        w = new(id, [], [], false, W_CREATED, Condition(), time())
        register_worker(w)
        w
    end

    Worker() = Worker(get_next_pid())
end
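
# Worker lifecycle, as driven by this file: a Worker starts as W_CREATED when
# only its pid is known, becomes W_CONNECTED once streams are attached in the
# inner constructor above, is marked W_TERMINATING by rmprocs(), and is
# expected to end up W_TERMINATED once the process is gone (rmprocs waits on
# this). Tasks block on c_state to observe transitions (see check_worker_state).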

function set_worker_state(w, state)
    w.state = state
    notify(w.c_state; all=true)
end

function send_msg_now(w::Worker, msg)
    send_msg_(w, msg, true)
end

function send_msg(w::Worker, msg)
    send_msg_(w, msg, false)
end

function flush_gc_msgs(w::Worker)
    if !isdefined(w, :w_stream)
        return
    end
    w.gcflag = false
    msgs = copy(w.add_msgs)
    if !isempty(msgs)
        empty!(w.add_msgs)
        remote_do(add_clients, w, msgs)
    end

    msgs = copy(w.del_msgs)
    if !isempty(msgs)
        empty!(w.del_msgs)
        #print("sending delete of $msgs\n")
        remote_do(del_clients, w, msgs)
    end
end

function check_worker_state(w::Worker)
    if w.state == W_CREATED
        if PGRP.topology == :all_to_all
            # Since higher pids connect with lower pids, the remote worker
            # may not have connected to us yet. Wait for some time.
            timeout = worker_timeout() - (time() - w.ct_time)
            timeout <= 0 && error("peer $(w.id) has not connected to $(myid())")

            @schedule (sleep(timeout); notify(w.c_state; all=true))
            wait(w.c_state)
            w.state == W_CREATED && error("peer $(w.id) didn't connect to $(myid()) within $timeout seconds")
        else
            error("peer $(w.id) is not connected to $(myid()). Topology : " * string(PGRP.topology))
        end
    end
end

function send_msg_(w::Worker, msg, now::Bool)
    check_worker_state(w)
    io = w.w_stream
    lock(io.lock)
    try
        reset_state(w.w_serializer)
        serialize(w.w_serializer, msg)  # io is wrapped in w_serializer
        if !now && w.gcflag
            flush_gc_msgs(w)
        else
            flush(io)
        end
    finally
        unlock(io.lock)
    end
end

function flush_gc_msgs()
    try
        for w in (PGRP::ProcessGroup).workers
            if isa(w, Worker) && w.gcflag && (w.state == W_CONNECTED)
                flush_gc_msgs(w)
            end
        end
    catch e
        bt = catch_backtrace()
        @schedule showerror(STDERR, e, bt)
    end
end

function send_connection_hdr(w::Worker, cookie=true)
    # For a connection initiated from the remote side to us, we send only the version.
    # When we initiate a connection, we first send the cookie followed by our version.
    # The remote side validates the cookie.
    if cookie
        write(w.w_stream, LPROC.cookie)
    end
    write(w.w_stream, rpad(VERSION_STRING, HDR_VERSION_LEN)[1:HDR_VERSION_LEN])
end

## process group creation ##

type LocalProcess
    id::Int
    bind_addr::AbstractString
    bind_port::UInt16
    cookie::AbstractString
    LocalProcess() = new(1)
end

const LPROC = LocalProcess()

const HDR_VERSION_LEN = 16
const HDR_COOKIE_LEN = 16

cluster_cookie() = LPROC.cookie
function cluster_cookie(cookie)
    # The cookie must be an ASCII string with length <= HDR_COOKIE_LEN
    assert(isascii(cookie))
    assert(length(cookie) <= HDR_COOKIE_LEN)
    cookie = rpad(cookie, HDR_COOKIE_LEN)
    LPROC.cookie = cookie
    cookie
end
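
# Header layout sketch: with cluster_cookie("abc"), a connection we initiate
# starts with 32 bytes before any message traffic -
#     rpad("abc", 16)                     # HDR_COOKIE_LEN bytes, space padded
#     rpad(VERSION_STRING, 16)[1:16]      # HDR_VERSION_LEN bytes
# (see send_connection_hdr above; incoming connections receive only the version).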

const map_pid_wrkr = Dict{Int, Union{Worker, LocalProcess}}()
const map_sock_wrkr = ObjectIdDict()
const map_del_wrkr = Set{Int}()

let next_pid = 2    # 1 is reserved for the client (always)
    global get_next_pid
    function get_next_pid()
        retval = next_pid
        next_pid += 1
        retval
    end
end

type ProcessGroup
    name::AbstractString
    workers::Array{Any,1}
    refs::Dict                  # global references
    topology::Symbol

    ProcessGroup(w::Array{Any,1}) = new("pg-default", w, Dict(), :all_to_all)
end
const PGRP = ProcessGroup([])

function topology(t)
    assert(t in [:all_to_all, :master_slave, :custom])
    if (PGRP.topology == t) || ((myid() == 1) && (nprocs() == 1)) || (myid() > 1)
        PGRP.topology = t
    else
        error("workers with topology $(PGRP.topology) already exist; requested topology $(t) cannot be set")
    end
    t
end
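
# topology() must be called on pid 1 before any workers are added, e.g.
#     topology(:master_slave)    # only pid 1 connects to the workers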

get_bind_addr(pid::Integer) = get_bind_addr(worker_from_id(pid))
get_bind_addr(w::LocalProcess) = LPROC.bind_addr
function get_bind_addr(w::Worker)
    if isnull(w.config.bind_addr)
        if w.id != myid()
            w.config.bind_addr = remotecall_fetch(get_bind_addr, w.id, w.id)
        end
    end
    get(w.config.bind_addr)
end

myid() = LPROC.id

nprocs() = length(PGRP.workers)
function nworkers()
    n = nprocs()
    n == 1 ? 1 : n-1
end

procs() = Int[x.id for x in PGRP.workers]

function procs(pid::Integer)
    if myid() == 1
        if (pid == 1) || (isa(map_pid_wrkr[pid].manager, LocalManager))
            Int[x.id for x in filter(w -> (w.id == 1) || (isa(w.manager, LocalManager)), PGRP.workers)]
        else
            ipatpid = get_bind_addr(pid)
            Int[x.id for x in filter(w -> get_bind_addr(w) == ipatpid, PGRP.workers)]
        end
    else
        remotecall_fetch(procs, 1, pid)
    end
end

function workers()
    allp = procs()
    if nprocs() == 1
        allp
    else
        filter(x -> x != 1, allp)
    end
end

function rmprocs(args...; waitfor = 0.0)
    # Only pid 1 can add and remove processes
    if myid() != 1
        error("only process 1 can add and remove processes")
    end

    lock(worker_lock)
    try
        rmprocset = []
        for i in vcat(args...)
            if i == 1
                warn("rmprocs: process 1 not removed")
            else
                if haskey(map_pid_wrkr, i)
                    w = map_pid_wrkr[i]
                    set_worker_state(w, W_TERMINATING)
                    kill(w.manager, i, w.config)
                    push!(rmprocset, w)
                end
            end
        end

        start = time()
        while (time() - start) < waitfor
            if all(w -> w.state == W_TERMINATED, rmprocset)
                break
            else
                sleep(0.1)
            end
        end

        ((waitfor > 0) && any(w -> w.state != W_TERMINATED, rmprocset)) ? :timed_out : :ok
    finally
        unlock(worker_lock)
    end
end
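
# Usage sketch: from pid 1, rmprocs(2, 3; waitfor=5.0) asks the manager to kill
# workers 2 and 3, then polls their state every 0.1s, returning :ok if both
# reach W_TERMINATED within 5 seconds and :timed_out otherwise.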

type ProcessExitedException <: Exception end

worker_from_id(i) = worker_from_id(PGRP, i)
function worker_from_id(pg::ProcessGroup, i)
    if in(i, map_del_wrkr)
        throw(ProcessExitedException())
    end
    if !haskey(map_pid_wrkr, i)
        if myid() == 1
            error("no process with id $i exists")
        end
        w = Worker(i)
        map_pid_wrkr[i] = w
    else
        w = map_pid_wrkr[i]
    end
    w
end

function worker_id_from_socket(s)
    w = get(map_sock_wrkr, s, nothing)
    if isa(w, Worker)
        if is(s, w.r_stream) || is(s, w.w_stream)
            return w.id
        end
    end
    if isa(s, IOStream) && fd(s) == -1
        # serializing to a local buffer
        return myid()
    end
    return -1
end

register_worker(w) = register_worker(PGRP, w)
function register_worker(pg, w)
    push!(pg.workers, w)
    map_pid_wrkr[w.id] = w
end

function register_worker_streams(w)
    map_sock_wrkr[w.r_stream] = w
    map_sock_wrkr[w.w_stream] = w
end

deregister_worker(pid) = deregister_worker(PGRP, pid)
function deregister_worker(pg, pid)
    pg.workers = filter(x -> !(x.id == pid), pg.workers)
    w = pop!(map_pid_wrkr, pid, nothing)
    if isa(w, Worker)
        if isdefined(w, :r_stream)
            pop!(map_sock_wrkr, w.r_stream, nothing)
            if w.r_stream != w.w_stream
                pop!(map_sock_wrkr, w.w_stream, nothing)
            end
        end

        if myid() == 1
            # Notify the cluster manager of this worker's death
            manage(w.manager, w.id, w.config, :deregister)
            if PGRP.topology != :all_to_all
                for rpid in workers()
                    try
                        remote_do(deregister_worker, rpid, pid)
                    catch
                    end
                end
            end
        end
    end
    push!(map_del_wrkr, pid)

    # delete this worker from our remote reference client sets
    ids = []
    tonotify = []
    for (id, rv) in pg.refs
        if in(pid, rv.clientset)
            push!(ids, id)
        end
        if rv.waitingfor == pid
            push!(tonotify, (id, rv))
        end
    end
    for id in ids
        del_client(pg, id, pid)
    end

    # throw exception to tasks waiting for this pid
    for (id, rv) in tonotify
        notify_error(rv.c, ProcessExitedException())
        delete!(pg.refs, id)
    end
end

## remote refs ##

const client_refs = WeakKeyDict()

abstract AbstractRemoteRef

type Future <: AbstractRemoteRef
    where::Int
    whence::Int
    id::Int
    v::Nullable{Any}

    Future(w::Int, rrid::RRID) = Future(w, rrid, Nullable{Any}())
    Future(w::Int, rrid::RRID, v) = (r = new(w, rrid.whence, rrid.id, v); return test_existing_ref(r))
end

type RemoteChannel{T<:AbstractChannel} <: AbstractRemoteRef
    where::Int
    whence::Int
    id::Int

    RemoteChannel(w::Int, rrid::RRID) = (r = new(w, rrid.whence, rrid.id); return test_existing_ref(r))
end

function test_existing_ref(r::AbstractRemoteRef)
    found = getkey(client_refs, r, false)
    if found !== false
        if client_refs[r] == true
            @assert r.where > 0
            if isa(r, Future) && isnull(found.v) && !isnull(r.v)
                # we have received the value from another source, probably a
                # deserialized ref; send a del_client message
                send_del_client(r)
                found.v = r.v
            end
            return found
        else
            # just delete the entry.
            delete!(client_refs, found)
        end
    end

    client_refs[r] = true
    finalizer(r, finalize_ref)
    return r
end

function finalize_ref(r::AbstractRemoteRef)
    if r.where > 0      # Handle the case of the finalizer having been called manually
        if haskey(client_refs, r)
            # NOTE: Change the line below to delete the entry once issue
            # https://github.com/JuliaLang/julia/issues/14445 is fixed.
            client_refs[r] = false
        end
        if isa(r, RemoteChannel)
            send_del_client(r)
        else
            # send_del_client only if the reference has not been set
            isnull(r.v) && send_del_client(r)
            r.v = Nullable{Any}()
        end
        r.where = 0
    end
    return r
end

Future(w::LocalProcess) = Future(w.id)
Future(w::Worker) = Future(w.id)
Future(pid::Integer=myid()) = Future(pid, RRID())

RemoteChannel(pid::Integer=myid()) = RemoteChannel{Channel{Any}}(pid, RRID())

function RemoteChannel(f::Function, pid::Integer=myid())
    remotecall_fetch(pid, f, RRID()) do f, rrid
        rv = lookup_ref(rrid, f)
        RemoteChannel{typeof(rv.c)}(myid(), rrid)
    end
end
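
# Usage sketch: create a remotely hosted typed channel on worker 2 -
#     rc = RemoteChannel(() -> Channel{Int}(10), 2)
# The closure runs on pid 2 to create the backing channel, and the returned
# RemoteChannel{Channel{Int}} can be put!/take!'d from any process.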

hash(r::AbstractRemoteRef, h::UInt) = hash(r.whence, hash(r.id, h))
==(r::AbstractRemoteRef, s::AbstractRemoteRef) = (r.whence == s.whence && r.id == s.id)

remoteref_id(r::AbstractRemoteRef) = RRID(r.whence, r.id)

function channel_from_id(id)
    rv = get(PGRP.refs, id, false)
    if rv === false
        throw(ErrorException("Local instance of remote reference not found"))
    end
    rv.c
end

lookup_ref(rrid::RRID, f=def_rv_channel) = lookup_ref(PGRP, rrid, f)
function lookup_ref(pg, rrid, f)
    rv = get(pg.refs, rrid, false)
    if rv === false
        # first we've heard of this ref
        rv = RemoteValue(f())
        pg.refs[rrid] = rv
        push!(rv.clientset, rrid.whence)
    end
    rv
end

function isready(rr::Future)
    !isnull(rr.v) && return true

    rid = remoteref_id(rr)
    if rr.where == myid()
        isready(lookup_ref(rid).c)
    else
        remotecall_fetch(rid -> isready(lookup_ref(rid).c), rr.where, rid)
    end
end

function isready(rr::RemoteChannel, args...)
    rid = remoteref_id(rr)
    if rr.where == myid()
        isready(lookup_ref(rid).c, args...)
    else
        remotecall_fetch(rid -> isready(lookup_ref(rid).c, args...), rr.where, rid)
    end
end

del_client(rr::AbstractRemoteRef) = del_client(remoteref_id(rr), myid())

del_client(id, client) = del_client(PGRP, id, client)
function del_client(pg, id, client)
    # As a workaround to issue https://github.com/JuliaLang/julia/issues/14445
    # the dict/set updates are executed asynchronously so that they do
    # not occur in the midst of a gc. The `@async` prefix must be removed once
    # 14445 is fixed.
    @async begin
        rv = get(pg.refs, id, false)
        if rv !== false
            delete!(rv.clientset, client)
            if isempty(rv.clientset)
                delete!(pg.refs, id)
                #print("$(myid()) collected $id\n")
            end
        end
    end
    nothing
end

function del_clients(pairs::Vector)
    for p in pairs
        del_client(p[1], p[2])
    end
end

any_gc_flag = Condition()
function start_gc_msgs_task()
    @schedule while true
        wait(any_gc_flag)
        flush_gc_msgs()
    end
end

function send_del_client(rr)
    if rr.where == myid()
        del_client(rr)
    elseif rr.where in procs()  # process only if a valid worker
        w = worker_from_id(rr.where)
        push!(w.del_msgs, (remoteref_id(rr), myid()))
        w.gcflag = true
        notify(any_gc_flag)
    end
end

function add_client(id, client)
    rv = lookup_ref(id)
    push!(rv.clientset, client)
    nothing
end

function add_clients(pairs::Vector)
    for p in pairs
        add_client(p[1], p[2]...)
    end
end

function send_add_client(rr::AbstractRemoteRef, i)
    if rr.where == myid()
        add_client(remoteref_id(rr), i)
    elseif (i != rr.where) && (rr.where in procs())
        # don't need to send add_client if the message is already going
        # to the processor that owns the remote ref. it will add_client
        # itself inside deserialize().
        w = worker_from_id(rr.where)
        push!(w.add_msgs, (remoteref_id(rr), i))
        w.gcflag = true
        notify(any_gc_flag)
    end
end

channel_type{T}(rr::RemoteChannel{T}) = T

serialize(s::AbstractSerializer, f::Future) = serialize(s, f, isnull(f.v))
serialize(s::AbstractSerializer, rr::RemoteChannel) = serialize(s, rr, true)
function serialize(s::AbstractSerializer, rr::AbstractRemoteRef, addclient)
    if addclient
        p = worker_id_from_socket(s.io)
        (p !== rr.where) && send_add_client(rr, p)
    end
    invoke(serialize, Tuple{AbstractSerializer, Any}, s, rr)
end

function deserialize{T<:Future}(s::AbstractSerializer, t::Type{T})
    f = deserialize_rr(s, t)
    Future(f.where, RRID(f.whence, f.id), f.v)  # ctor adds to client_refs table
end

function deserialize{T<:RemoteChannel}(s::AbstractSerializer, t::Type{T})
    rr = deserialize_rr(s, t)
    # call ctor to make sure this rr gets added to the client_refs table
    RemoteChannel{channel_type(rr)}(rr.where, RRID(rr.whence, rr.id))
end

function deserialize_rr(s, t)
    rr = invoke(deserialize, Tuple{AbstractSerializer, DataType}, s, t)
    if rr.where == myid()
        # send_add_client() is not executed when the ref is being
        # serialized to where it exists
        add_client(remoteref_id(rr), myid())
    end
    rr
end

# data stored by the owner of a remote reference
def_rv_channel() = Channel(1)
type RemoteValue
    c::AbstractChannel
    clientset::IntSet   # Set of worker ids that have a reference to this channel.
                        # Keeping ids instead of a count aids in cleaning up upon
                        # a worker exit.

    waitingfor::Int     # processor we need to hear from to fill this, or 0

    RemoteValue(c) = new(c, IntSet(), 0)
end

wait(rv::RemoteValue) = wait(rv.c)

## core messages: do, call, fetch, wait, ref, put! ##
type RemoteException <: Exception
    pid::Int
    captured::CapturedException
end

RemoteException(captured) = RemoteException(myid(), captured)
function showerror(io::IO, re::RemoteException)
    (re.pid != myid()) && print(io, "On worker ", re.pid, ":\n")
    showerror(io, re.captured)
end

function run_work_thunk(thunk, print_error)
    local result
    try
        result = thunk()
    catch err
        ce = CapturedException(err, catch_backtrace())
        result = RemoteException(ce)
        print_error && showerror(STDERR, ce)
    end
    result
end
function run_work_thunk(rv::RemoteValue, thunk)
    put!(rv, run_work_thunk(thunk, false))
    nothing
end

function schedule_call(rid, thunk)
    rv = RemoteValue(def_rv_channel())
    (PGRP::ProcessGroup).refs[rid] = rv
    push!(rv.clientset, rid.whence)
    schedule(@task(run_work_thunk(rv, thunk)))
    rv
end

# make a thunk to call f on args in a way that simulates what would happen if
# the function were sent elsewhere
function local_remotecall_thunk(f, args, kwargs)
    if isempty(args) && isempty(kwargs)
        return f
    end
    return () -> f(args...; kwargs...)
end

function remotecall(f, w::LocalProcess, args...; kwargs...)
    rr = Future(w)
    schedule_call(remoteref_id(rr), local_remotecall_thunk(f, args, kwargs))
    rr
end

function remotecall(f, w::Worker, args...; kwargs...)
    rr = Future(w)
    #println("$(myid()) asking for $rr")
    send_msg(w, CallMsg{:call}(f, args, kwargs, remoteref_id(rr)))
    rr
end

remotecall(f, id::Integer, args...; kwargs...) = remotecall(f, worker_from_id(id), args...; kwargs...)
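
# Usage sketch for the call family (pid 2 assumed to exist):
#     r = remotecall(+, 2, 1, 2)      # async; returns a Future immediately
#     fetch(r)                        # 3, fetched (and cached) from worker 2
#     remotecall_fetch(+, 2, 1, 2)    # same result in one round trip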

# faster version of fetch(remotecall(...))
function remotecall_fetch(f, w::LocalProcess, args...; kwargs...)
    v = run_work_thunk(local_remotecall_thunk(f, args, kwargs), false)
    isa(v, RemoteException) ? throw(v) : v
end

function remotecall_fetch(f, w::Worker, args...; kwargs...)
    # can be weak, because the program will have no way to refer to the Ref
    # itself, it only gets the result.
    oid = RRID()
    rv = lookup_ref(oid)
    rv.waitingfor = w.id
    send_msg(w, CallMsg{:call_fetch}(f, args, kwargs, oid))
    v = take!(rv)
    delete!(PGRP.refs, oid)
    isa(v, RemoteException) ? throw(v) : v
end

remotecall_fetch(f, id::Integer, args...; kwargs...) =
    remotecall_fetch(f, worker_from_id(id), args...; kwargs...)

# faster version of wait(remotecall(...))
remotecall_wait(f, w::LocalProcess, args...; kwargs...) = wait(remotecall(f, w, args...; kwargs...))

function remotecall_wait(f, w::Worker, args...; kwargs...)
    prid = RRID()
    rv = lookup_ref(prid)
    rv.waitingfor = w.id
    rr = Future(w)
    send_msg(w, CallWaitMsg(f, args, kwargs, remoteref_id(rr), prid))
    v = fetch(rv.c)
    delete!(PGRP.refs, prid)
    isa(v, RemoteException) && throw(v)
    rr
end

remotecall_wait(f, id::Integer, args...; kwargs...) =
    remotecall_wait(f, worker_from_id(id), args...; kwargs...)

function remote_do(f, w::LocalProcess, args...; kwargs...)
    # the LocalProcess version just performs in local memory what a worker
    # does when it gets a :do message.
    # same for other messages on LocalProcess.
    thk = local_remotecall_thunk(f, args, kwargs)
    schedule(Task(thk))
    nothing
end

function remote_do(f, w::Worker, args...; kwargs...)
    send_msg(w, RemoteDoMsg(f, args, kwargs))
    nothing
end

remote_do(f, id::Integer, args...; kwargs...) = remote_do(f, worker_from_id(id), args...; kwargs...)
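
# remote_do is fire-and-forget: no Future is created, and results (or errors)
# are not reported back, e.g.
#     remote_do(println, 2, "hello from pid 1")   # prints on worker 2's STDOUT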

# have the owner of rr call f on it
function call_on_owner(f, rr::AbstractRemoteRef, args...)
    rid = remoteref_id(rr)
    if rr.where == myid()
        f(rid, args...)
    else
        remotecall_fetch(f, rr.where, rid, args...)
    end
end

function wait_ref(rid, callee, args...)
    v = fetch_ref(rid, args...)
    if isa(v, RemoteException)
        if myid() == callee
            throw(v)
        else
            return v
        end
    end
    nothing
end
wait(r::Future) = (!isnull(r.v) && return r; call_on_owner(wait_ref, r, myid()); r)
wait(r::RemoteChannel, args...) = (call_on_owner(wait_ref, r, myid(), args...); r)

function fetch_future(rid, callee)
    rv = lookup_ref(rid)
    v = fetch(rv.c)
    del_client(rid, callee)
    v
end
function fetch(r::Future)
    !isnull(r.v) && return get(r.v)
    v = call_on_owner(fetch_future, r, myid())
    r.v = v
    v
end

fetch_ref(rid, args...) = fetch(lookup_ref(rid).c, args...)
fetch(r::RemoteChannel, args...) = call_on_owner(fetch_ref, r, args...)
fetch(x::ANY) = x

isready(rv::RemoteValue, args...) = isready(rv.c, args...)

function put!(rr::Future, v)
    !isnull(rr.v) && error("Future can be set only once")
    call_on_owner(put_future, rr, v, myid())
    rr.v = v
    rr
end
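
# A Future is write-once: the second put! below raises an error, and the first
# fetch caches the value locally in rr.v so later fetches skip the owner.
#     rr = Future(2)
#     put!(rr, 42)
#     fetch(rr)       # 42
#     put!(rr, 43)    # errors: "Future can be set only once"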

function put_future(rid, v, callee)
    rv = lookup_ref(rid)
    isready(rv) && error("Future can be set only once")
    put!(rv, v)
    # The callee has the value and hence can be removed from the remote store.
    del_client(rid, callee)
    nothing
end

put!(rv::RemoteValue, args...) = put!(rv.c, args...)
put_ref(rid, args...) = (put!(lookup_ref(rid), args...); nothing)
put!(rr::RemoteChannel, args...) = (call_on_owner(put_ref, rr, args...); rr)

# take! is not supported on Future
take!(rv::RemoteValue, args...) = take!(rv.c, args...)
function take_ref(rid, callee, args...)
    v = take!(lookup_ref(rid), args...)
    isa(v, RemoteException) && (myid() == callee) && throw(v)
    v
end
take!(rr::RemoteChannel, args...) = call_on_owner(take_ref, rr, myid(), args...)

# close is not supported on Future
close_ref(rid) = (close(lookup_ref(rid).c); nothing)
close(rr::RemoteChannel) = call_on_owner(close_ref, rr)
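
# In contrast to a Future, a RemoteChannel supports repeated put!/take! and
# close, all forwarded to the backing channel on the owning process:
#     rc = RemoteChannel(() -> Channel{Int}(2), 2)
#     put!(rc, 1); put!(rc, 2)
#     take!(rc)   # 1
#     close(rc)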

function deliver_result(sock::IO, msg, oid, value)
    #print("$(myid()) sending result $oid\n")
    if is(msg, :call_fetch) || isa(value, RemoteException)
        val = value
    else
        val = :OK
    end
    try
        send_msg_now(sock, ResultMsg(oid, val))
    catch e
        # terminate connection in case of serialization error
        # otherwise the reading end would hang
        print(STDERR, "fatal error on ", myid(), ": ")
        display_error(e, catch_backtrace())
        wid = worker_id_from_socket(sock)
        close(sock)
        if myid() == 1
            rmprocs(wid)
        elseif wid == 1
            exit(1)
        else
            remote_do(rmprocs, 1, wid)
        end
    end
end

## message event handlers ##
function process_messages(r_stream::TCPSocket, w_stream::TCPSocket, incoming=true)
    @schedule process_tcp_streams(r_stream, w_stream, incoming)
end

function process_tcp_streams(r_stream::TCPSocket, w_stream::TCPSocket, incoming)
    disable_nagle(r_stream)
    wait_connected(r_stream)
    if r_stream != w_stream
        disable_nagle(w_stream)
        wait_connected(w_stream)
    end
    message_handler_loop(r_stream, w_stream, incoming)
end

function process_messages(r_stream::IO, w_stream::IO, incoming=true)
    @schedule message_handler_loop(r_stream, w_stream, incoming)
end

function message_handler_loop(r_stream::IO, w_stream::IO, incoming::Bool)
    try
        version = process_hdr(r_stream, incoming)