## multi.jl - multiprocessing
##
## julia starts with one process; additional worker processes can be added using:
## addprocs(n) - launch n local workers via exec
## addprocs({"host1","host2",...}) - launch workers via remote (ssh) execution
##
## remotecall(w, func, args...) -
## tell a worker to call a function on the given arguments.
## returns a RemoteRef to the result.
##
## remote_do(w, f, args...) - remote function call with no result
##
## wait(rr) - wait for a RemoteRef to be finished computing
##
## fetch(rr) - wait for and get the value of a RemoteRef
##
## remotecall_fetch(w, func, args...) - faster fetch(remotecall(...))
##
## pmap(func, lst) -
## call a function on each element of lst (any 1-d collection), in
## parallel.
##
## RemoteRef() - create an uninitialized RemoteRef on the local processor
##
## RemoteRef(p) - ...or on a particular processor
##
## put(r, val) - store a value to an uninitialized RemoteRef
##
## @spawn expr -
## evaluate expr somewhere. returns a RemoteRef. all variables in expr
## are copied to the remote processor.
##
## @spawnat p expr - @spawn specifying where to run
##
## @async expr -
## run expr as an asynchronous task on the local processor
##
## @parallel (r) for i=1:n ... end -
## parallel loop. the results from each iteration are reduced using (r).
##
## @everywhere expr - run expr everywhere.
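##
## A minimal usage sketch (assumes workers are already available, e.g. a
## session started with `julia -p 2` or after addprocs(2)):
##
##     r = remotecall(2, rand, 10)      # start rand(10) on worker 2
##     fetch(r)                         # wait for and return the result
##     remotecall_fetch(2, +, 1, 2)     # call and fetch in one round trip
##
##     s = @spawn sum(rand(1000))       # run somewhere, returns a RemoteRef
##     fetch(s)
##
##     total = @parallel (+) for i=1:100
##         i^2
##     end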
# todo:
# - more indexing
# * take() to empty a Ref (full/empty variables)
# * have put() wait on non-empty Refs
# - removing nodes
# - more dynamic scheduling
# * fetch/wait latency seems to be excessive
# * message aggregation
# - timer events
# - send pings at some interval to detect failed/hung machines
# - integrate event loop with other kinds of i/o (non-messages)
# ? method_missing for waiting (getindex/setindex!/localdata seems to cover a lot)
# * serializing closures
# * recover from i/o errors
# * handle remote execution errors
# * all-to-all communication
# * distributed GC
# * call&wait and call&fetch combined messages
# * aggregate GC messages
# * dynamically adding nodes (then always start with 1 and grow)
# * add readline to event loop
# * GOs/darrays on a subset of nodes
## workers and message i/o ##
function send_msg_unknown(s::IO, kind, args)
error("attempt to send to unknown socket")
end
function send_msg(s::IO, kind, args...)
id = worker_id_from_socket(s)
if id > -1
return send_msg(worker_from_id(id), kind, args...)
end
send_msg_unknown(s, kind, args)
end
function send_msg_now(s::IO, kind, args...)
id = worker_id_from_socket(s)
if id > -1
return send_msg_now(worker_from_id(id), kind, args...)
end
send_msg_unknown(s, kind, args)
end
type Worker
host::ByteString
port::Uint16
socket::TcpSocket
sendbuf::IOBuffer
del_msgs::Array{Any,1}
add_msgs::Array{Any,1}
id::Int
gcflag::Bool
Worker(host::String, port::Integer, sock::TcpSocket, id::Int) =
new(bytestring(host), uint16(port), sock, IOBuffer(), {}, {}, id, false)
end
Worker(host::String, port::Integer, sock::TcpSocket) =
Worker(host, port, sock, 0)
Worker(host::String, port::Integer) =
Worker(host, port, connect(host,uint16(port)))
Worker(host::String, privhost::String, port::Integer, tunnel_user::String, sshflags) =
Worker(host, port, connect("localhost",
ssh_tunnel(tunnel_user, host, privhost, uint16(port), sshflags)))
function send_msg_now(w::Worker, kind, args...)
send_msg_(w, kind, args, true)
end
function send_msg(w::Worker, kind, args...)
send_msg_(w, kind, args, false)
end
function flush_gc_msgs(w::Worker)
w.gcflag = false
msgs = w.add_msgs
if !isempty(msgs)
empty!(w.add_msgs)
remote_do(w, add_clients, msgs...)
end
msgs = w.del_msgs
if !isempty(msgs)
empty!(w.del_msgs)
#print("sending delete of $msgs\n")
remote_do(w, del_clients, msgs...)
end
end
#TODO: Move to different Thread
function enq_send_req(sock::TcpSocket,buf,now::Bool)
arr=takebuf_array(buf)
write(sock,arr)
#TODO implement "now"
end
function send_msg_(w::Worker, kind, args, now::Bool)
#println("Sending msg $kind")
buf = w.sendbuf
serialize(buf, kind)
for arg in args
serialize(buf, arg)
end
if !now && w.gcflag
flush_gc_msgs(w)
else
enq_send_req(w.socket,buf,now)
end
end
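# A message on the wire is just the serialized `kind` symbol followed by each
# serialized argument: send_msg(w, :call, rid, f, args) writes :call, rid, f
# and args in that order, and the peer's message handler loop (see
# create_message_handler_loop below) deserializes them back in the same order.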
function flush_gc_msgs()
for w in (PGRP::ProcessGroup).workers
if isa(w,Worker)
k = w::Worker
if k.gcflag
flush_gc_msgs(k)
end
end
end
end
## process group creation ##
type LocalProcess
id::Int
end
const LPROC = LocalProcess(0)
const map_pid_wrkr = Dict{Int, Union(Worker, LocalProcess)}()
const map_sock_wrkr = Dict{Socket, Union(Worker, LocalProcess)}()
const map_del_wrkr = Set{Int}()
let next_pid = 2 # 1 is reserved for the client (always)
global get_next_pid
function get_next_pid()
retval = next_pid
next_pid += 1
retval
end
end
type ProcessGroup
name::String
workers::Array{Any,1}
# global references
refs::Dict
ProcessGroup(w::Array{Any,1}) = new("pg-default", w, Dict())
end
const PGRP = ProcessGroup({})
function add_workers(pg::ProcessGroup, w::Array{Any,1})
# NOTE: currently only node 1 can add new nodes, since nobody else
# has the full list of address:port
assert(LPROC.id == 1)
for i=1:length(w)
w[i].id = get_next_pid()
register_worker(w[i])
create_message_handler_loop(w[i].socket)
end
all_locs = map(x -> isa(x, Worker) ? (x.host,x.port, x.id) : ("", 0, x.id), pg.workers)
for i=1:length(w)
send_msg_now(w[i], :join_pgrp, w[i].id, all_locs)
end
[w[i].id for i in 1:length(w)]
end
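# Join protocol in brief: node 1 assigns the new worker a pid and sends it a
# :join_pgrp message with that id plus the (host, port, id) of every current
# worker. On receipt the worker registers node 1 and itself, connects to each
# existing worker with a smaller pid (announcing itself via :identify_socket),
# and waits for workers with larger pids to connect to it.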
myid() = LPROC.id
nprocs() = length(PGRP.workers)
function nworkers()
n = nprocs()
n == 1 ? 1 : n-1
end
procs() = Int[x.id for x in PGRP.workers]
function workers()
allp = procs()
if nprocs() == 1
allp
else
filter(x -> x != 1, allp)
end
end
rmprocset = Set()
function rmprocs(args...; waitfor = 0.0)
# Only pid 1 can add and remove processes
if myid() != 1
error("only process 1 can add and remove processes")
end
global rmprocset
empty!(rmprocset)
for i in [args...]
if haskey(map_pid_wrkr, i)
add!(rmprocset, i)
remote_do(i, exit)
end
end
start = time()
while (time() - start) < waitfor
if length(rmprocset) == 0
break;
else
sleep(0.1)
end
end
((waitfor > 0) && (length(rmprocset) > 0)) ? :timed_out : :ok
end
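# For example, rmprocs(2, 3; waitfor=0.5) asks workers 2 and 3 to exit and
# waits up to half a second for them to go away, returning :ok or :timed_out.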
type ProcessExitedException <: Exception end
worker_from_id(i) = worker_from_id(PGRP, i)
function worker_from_id(pg::ProcessGroup, i)
# Processes with pids greater than ours have to connect to us; that may not have happened yet, so wait for a while.
if contains(map_del_wrkr, i)
throw(ProcessExitedException())
end
if myid()==1 && !haskey(map_pid_wrkr,i)
error("no process with id $i exists")
end
start = time()
while (!haskey(map_pid_wrkr, i) && ((time() - start) < 60.0))
sleep(0.1)
yield()
end
map_pid_wrkr[i]
end
function worker_id_from_socket(s)
w = get(map_sock_wrkr, s, nothing)
if isa(w,Worker)
if is(s, w.socket) || is(s, w.sendbuf)
return w.id
end
end
if isa(s,IOStream) && fd(s)==-1
# serializing to a local buffer
return myid()
end
return -1
end
register_worker(w) = register_worker(PGRP, w)
function register_worker(pg, w)
push!(pg.workers, w)
map_pid_wrkr[w.id] = w
if isa(w, Worker) map_sock_wrkr[w.socket] = w end
end
deregister_worker(pid) = deregister_worker(PGRP, pid)
function deregister_worker(pg, pid)
pg.workers = filter(x -> !(x.id == pid), pg.workers)
w = delete!(map_pid_wrkr, pid, nothing)
if isa(w, Worker) delete!(map_sock_wrkr, w.socket) end
add!(map_del_wrkr, pid)
# delete this worker from our RemoteRef client sets
ids = {}
tonotify = {}
for (id,rv) in pg.refs
if contains(rv.clientset,pid)
push!(ids, id)
end
if rv.waitingfor == pid
push!(tonotify, (id,rv))
end
end
for id in ids
del_client(pg, id, pid)
end
# throw exception to tasks waiting for this pid
for (id,rv) in tonotify
notify_error(rv.full, ProcessExitedException())
delete!(pg.refs, id, nothing)
end
end
## remote refs ##
const client_refs = WeakKeyDict()
type RemoteRef
where::Int
whence::Int
id::Int
# TODO: cache value if it's fetched, but don't serialize the cached value
function RemoteRef(w, wh, id)
r = new(w,wh,id)
found = getkey(client_refs, r, false)
if !is(found,false)
return found
end
client_refs[r] = true
finalizer(r, send_del_client)
r
end
REQ_ID::Int = 0
function RemoteRef(pid::Integer)
rr = RemoteRef(pid, myid(), REQ_ID)
REQ_ID += 1
if mod(REQ_ID,200) == 0
# force gc after making a lot of refs since they take up
# space on the machine where they're stored, yet the client
# is responsible for freeing them.
gc()
end
rr
end
RemoteRef(w::LocalProcess) = RemoteRef(w.id)
RemoteRef(w::Worker) = RemoteRef(w.id)
RemoteRef() = RemoteRef(myid())
global WeakRemoteRef
function WeakRemoteRef(w, wh, id)
return new(w, wh, id)
end
function WeakRemoteRef(pid::Integer)
rr = WeakRemoteRef(pid, myid(), REQ_ID)
REQ_ID += 1
if mod(REQ_ID,200) == 0
gc()
end
rr
end
WeakRemoteRef(w::LocalProcess) = WeakRemoteRef(myid())
WeakRemoteRef(w::Worker) = WeakRemoteRef(w.id)
WeakRemoteRef() = WeakRemoteRef(myid())
global next_id
next_id() = (id=(myid(),REQ_ID); REQ_ID+=1; id)
end
hash(r::RemoteRef) = hash(r.whence)+3*hash(r.id)
isequal(r::RemoteRef, s::RemoteRef) = (r.whence==s.whence && r.id==s.id)
rr2id(r::RemoteRef) = (r.whence, r.id)
lookup_ref(id) = lookup_ref(PGRP, id)
function lookup_ref(pg, id)
rv = get(pg.refs, id, false)
if rv === false
# first we've heard of this ref
rv = RemoteValue()
pg.refs[id] = rv
add!(rv.clientset, id[1])
end
rv
end
# is a ref uninitialized? (for locally-owned refs only)
#function ref_uninitialized(id)
# wi = lookup_ref(id)
# !wi.done && is(wi.thunk,bottom_func)
#end
#ref_uninitialized(r::RemoteRef) = (assert(r.where==myid());
# ref_uninitialized(rr2id(r)))
function isready(rr::RemoteRef)
rid = rr2id(rr)
if rr.where == myid()
lookup_ref(rid).done
else
remotecall_fetch(rr.where, id->lookup_ref(id).done, rid)
end
end
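# isready allows non-blocking polling of a ref; a hypothetical example:
#     rr = @spawn some_long_computation()   # some_long_computation is a placeholder
#     isready(rr) ? fetch(rr) : nothing     # only fetch if already computed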
del_client(id, client) = del_client(PGRP, id, client)
function del_client(pg, id, client)
rv = lookup_ref(id)
delete!(rv.clientset, client)
if isempty(rv.clientset)
delete!(pg.refs, id)
#print("$(myid()) collected $id\n")
end
nothing
end
function del_clients(pairs::(Any,Any)...)
for p in pairs
del_client(p[1], p[2])
end
end
any_gc_flag = false
function send_del_client(rr::RemoteRef)
if rr.where == myid()
del_client(rr2id(rr), myid())
else
if contains(map_del_wrkr, rr.where)
# for a removed worker, don't bother
return
end
w = worker_from_id(rr.where)
push!(w.del_msgs, (rr2id(rr), myid()))
w.gcflag = true
global any_gc_flag = true
end
end
function add_client(id, client)
rv = lookup_ref(id)
add!(rv.clientset, client)
nothing
end
function add_clients(pairs::(Any,Any)...)
for p in pairs
add_client(p[1], p[2])
end
end
function send_add_client(rr::RemoteRef, i)
if rr.where == myid()
add_client(rr2id(rr), i)
elseif i != rr.where
# don't need to send add_client if the message is already going
# to the processor that owns the remote ref. it will add_client
# itself inside deserialize().
w = worker_from_id(rr.where)
push!(w.add_msgs, (rr2id(rr), i))
w.gcflag = true
global any_gc_flag = true
end
end
function serialize(s, rr::RemoteRef)
i = worker_id_from_socket(s)
if i != -1
send_add_client(rr, i)
end
invoke(serialize, (Any, Any), s, rr)
end
function deserialize(s, t::Type{RemoteRef})
rr = invoke(deserialize, (Any, DataType), s, t)
where = rr.where
if where == myid()
add_client(rr2id(rr), myid())
end
# call ctor to make sure this rr gets added to the client_refs table
RemoteRef(where, rr.whence, rr.id)
end
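# Client tracking in brief: the owner of a ref keeps a clientset of pids that
# may still hold it. Serializing a RemoteRef to another node adds that node as
# a client (send_add_client above), and finalizing a local RemoteRef queues a
# del_client message (send_del_client). Both kinds of messages are batched per
# Worker and flushed by flush_gc_msgs once gcflag is set.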
# data stored by the owner of a RemoteRef
type RemoteValue
done::Bool
result
full::Condition # waiting for a value
empty::Condition # waiting for value to be removed
clientset::IntSet
waitingfor::Int # processor we need to hear from to fill this, or 0
RemoteValue() = new(false, nothing, Condition(), Condition(), IntSet(), 0)
end
function work_result(rv::RemoteValue)
v = rv.result
if isa(v,WeakRef)
v = v.value
end
v
end
function wait_full(rv::RemoteValue)
while !rv.done
wait(rv.full)
end
return work_result(rv)
end
function wait_empty(rv::RemoteValue)
while rv.done
wait(rv.empty)
end
return nothing
end
## core messages: do, call, fetch, wait, ref, put ##
function run_work_thunk(thunk)
local result
try
result = thunk()
catch err
print(STDERR, "exception on ", myid(), ": ")
display_error(err,catch_backtrace())
result = err
end
result
end
function run_work_thunk(rv::RemoteValue, thunk)
put(rv, run_work_thunk(thunk))
end
function schedule_call(rid, thunk)
rv = RemoteValue()
(PGRP::ProcessGroup).refs[rid] = rv
add!(rv.clientset, rid[1])
enq_work(@task(run_work_thunk(rv,thunk)))
rv
end
#localize_ref(b::Box) = Box(localize_ref(b.contents))
#function localize_ref(r::RemoteRef)
# if r.where == myid()
# fetch(r)
# else
# r
# end
#end
#localize_ref(x) = x
# make a thunk to call f on args in a way that simulates what would happen if
# the function were sent elsewhere
function local_remotecall_thunk(f, args)
if isempty(args)
return f
end
return ()->f(args...)
# TODO: this seems to be capable of causing deadlocks by waiting on
# Refs buried inside the closure that we don't want to wait on yet.
# linfo = ccall(:jl_closure_linfo, Any, (Any,), f)
# if isa(linfo,LambdaStaticData)
# env = ccall(:jl_closure_env, Any, (Any,), f)
# buf = memio()
# serialize(buf, env)
# seek(buf, 0)
# env = deserialize(buf)
# f = ccall(:jl_new_closure, Any, (Ptr{Void}, Any, Any),
# C_NULL, env, linfo)::Function
# end
# f(map(localize_ref,args)...)
end
function remotecall(w::LocalProcess, f, args...)
rr = RemoteRef(w)
schedule_call(rr2id(rr), local_remotecall_thunk(f,args))
rr
end
function remotecall(w::Worker, f, args...)
rr = RemoteRef(w)
#println("$(myid()) asking for $rr")
send_msg(w, :call, rr2id(rr), f, args)
rr
end
remotecall(id::Integer, f, args...) = remotecall(worker_from_id(id), f, args...)
# faster version of fetch(remotecall(...))
function remotecall_fetch(w::LocalProcess, f, args...)
run_work_thunk(local_remotecall_thunk(f,args))
end
function remotecall_fetch(w::Worker, f, args...)
# can be weak, because the program will have no way to refer to the Ref
# itself; it only gets the result.
oid = next_id()
rv = lookup_ref(oid)
rv.waitingfor = w.id
send_msg(w, :call_fetch, oid, f, args)
v = wait_full(rv)
delete!(PGRP.refs, oid)
v
end
remotecall_fetch(id::Integer, f, args...) =
remotecall_fetch(worker_from_id(id), f, args...)
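# Compared with fetch(remotecall(w, f, args...)), remotecall_fetch avoids a
# client-visible RemoteRef and the extra messages that keeping one alive would
# require: the worker runs the thunk and sends the result straight back to the
# task waiting on the local RemoteValue.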
# faster version of wait(remotecall(...))
remotecall_wait(w::LocalProcess, f, args...) = wait(remotecall(w,f,args...))
function remotecall_wait(w::Worker, f, args...)
prid = next_id()
rv = lookup_ref(prid)
rv.waitingfor = w.id
rr = RemoteRef(w)
send_msg(w, :call_wait, rr2id(rr), prid, f, args)
wait_full(rv)
delete!(PGRP.refs, prid)
rr
end
remotecall_wait(id::Integer, f, args...) =
remotecall_wait(worker_from_id(id), f, args...)
function remote_do(w::LocalProcess, f, args...)
# the LocalProcess version just performs in local memory what a worker
# does when it gets a :do message.
# same for other messages on LocalProcess.
thk = local_remotecall_thunk(f, args)
enq_work(Task(thk))
nothing
end
function remote_do(w::Worker, f, args...)
send_msg(w, :do, f, args)
nothing
end
remote_do(id::Integer, f, args...) = remote_do(worker_from_id(id), f, args...)
# have the owner of rr call f on it
function call_on_owner(f, rr::RemoteRef, args...)
rid = rr2id(rr)
if rr.where == myid()
f(rid, args...)
else
remotecall_fetch(rr.where, f, rid, args...)
end
end
wait_ref(rid) = (wait_full(lookup_ref(rid)); nothing)
wait(r::RemoteRef) = (call_on_owner(wait_ref, r); r)
fetch_ref(rid) = wait_full(lookup_ref(rid))
fetch(r::RemoteRef) = call_on_owner(fetch_ref, r)
fetch(x::ANY) = x
# storing a value to a Ref
function put(rv::RemoteValue, val::ANY)
wait_empty(rv)
rv.result = val
rv.done = true
notify_full(rv)
end
put_ref(rid, v) = put(lookup_ref(rid), v)
put(rr::RemoteRef, val::ANY) = (call_on_owner(put_ref, rr, val); val)
function take(rv::RemoteValue)
wait_full(rv)
val = rv.result
rv.done = false
notify_empty(rv)
val
end
take_ref(rid) = take(lookup_ref(rid))
take(rr::RemoteRef) = call_on_owner(take_ref, rr)
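# put/take make a ref behave like a one-slot container; a small local sketch:
#     cell = RemoteRef()    # uninitialized ref owned by this process
#     put(cell, 42)         # fill it (put waits if the ref is already full)
#     take(cell)            # returns 42 and leaves the ref empty again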
## work queue ##
function enq_work(t::Task)
ccall(:uv_stop,Void,(Ptr{Void},),eventloop())
unshift!(Workqueue, t)
end
function perform_work()
perform_work(pop!(Workqueue))
end
function perform_work(t::Task)
if !isdefined(t, :result)
# starting new task
yieldto(t)
else
# continuing interrupted work item
arg = t.result
t.result = nothing
t.runnable = true
yieldto(t, arg)
end
t = current_task().last
if istaskdone(t)
if isa(t.donenotify, Condition)
notify(t.donenotify, t.result)
end
elseif t.runnable
# still runnable; return to queue
enq_work(t)
end
end
function deliver_result(sock::IO, msg, oid, value)
#print("$(myid()) sending result $oid\n")
if is(msg,:call_fetch)
val = value
else
val = oid
end
try
send_msg_now(sock, :result, oid, val)
catch err
# send the exception in case of a serialization error; otherwise
# the requesting side would hang.
send_msg_now(sock, :result, oid, err)
end
end
# notify waiters that a certain job has finished or Ref has been emptied
notify_full (rv::RemoteValue) = notify(rv.full, work_result(rv))
notify_empty(rv::RemoteValue) = notify(rv.empty)
## message event handlers ##
# activity on accept fd
function accept_handler(server::TcpServer, status::Int32)
if status == -1
error("An error occurred during the creation of the server")
end
client = accept_nonblock(server)
create_message_handler_loop(client)
end
function create_message_handler_loop(sock::AsyncStream) #returns immediately
enq_work(@task begin
global PGRP
#println("message_handler_loop")
start_reading(sock)
wait_connected(sock)
try
while true
msg = deserialize(sock)
#println("got msg: ",msg)
# handle message
if is(msg, :call)
id = deserialize(sock)
#print("$(myid()) got id $id\n")
f0 = deserialize(sock)
#print("$(myid()) got call $f0\n")
args0 = deserialize(sock)
#print("$(myid()) got args $args0\n")
let f=f0, args=args0
schedule_call(id, ()->f(args...))
end
elseif is(msg, :call_fetch)
id = deserialize(sock)
f = deserialize(sock)
args = deserialize(sock)
@schedule begin
v = run_work_thunk(()->f(args...))
deliver_result(sock, msg, id, v)
v
end
elseif is(msg, :call_wait)
id = deserialize(sock)
notify_id = deserialize(sock)
f = deserialize(sock)
args = deserialize(sock)
@schedule begin
rv = schedule_call(id, ()->f(args...))
deliver_result(sock, msg, notify_id, wait_full(rv))
end
elseif is(msg, :do)
f = deserialize(sock)
args = deserialize(sock)
#print("got args: $args\n")
@schedule begin
run_work_thunk(RemoteValue(), ()->f(args...))
end
elseif is(msg, :result)
# used to deliver result of wait or fetch
oid = deserialize(sock)
#print("$(myid()) got $msg $oid\n")
val = deserialize(sock)
put(lookup_ref(oid), val)
elseif is(msg, :identify_socket)
otherid = deserialize(sock)
register_worker(Worker("", 0, sock, otherid))
elseif is(msg, :join_pgrp)
# first connection; get process group info from client
self_pid = LPROC.id = deserialize(sock)
locs = deserialize(sock)
#print("\nLocation: ",locs,"\nId:",myid(),"\n")
# joining existing process group
register_worker(Worker("", 0, sock, 1))
register_worker(LPROC)
for (rhost, rport, rpid) in locs
if (rpid < self_pid) && (!(rpid == 1))
# Connect to them
w = Worker(rhost, rport)
w.id = rpid
register_worker(w)
create_message_handler_loop(w.socket)
send_msg_now(w, :identify_socket, self_pid)
else
# Others will connect to us. Don't do anything just yet
continue
end
end
end
end # end of while
catch e
iderr = worker_id_from_socket(sock)
# If the error occurred while talking to pid 1, commit harakiri
if iderr == 1
if isopen(sock)
print(STDERR, "exception on ", myid(), ": ")
display_error(e, catch_backtrace())
end
exit(1)
end
# Treat any other exception as death of the node and clean up,
# since currently we do not have a mechanism for workers to reconnect
# to each other on unhandled errors.
deregister_worker(iderr)
if isopen(sock) close(sock) end
if (myid() == 1)
global rmprocset
if contains(rmprocset, iderr)
delete!(rmprocset, iderr)
else
println("Worker $iderr terminated.")
rethrow(e)
end
end
return nothing
end
end)
end
function disable_threaded_libs()
blas_set_num_threads(1)
end
## worker creation and setup ##
# the entry point for julia worker processes. does not return.
# the argument is the stream to which the listening port number is written.
start_worker() = start_worker(STDOUT)
function start_worker(out::IO)
global bind_addr
if !isdefined(Base,:bind_addr)
bind_addr = getipaddr()
end
default_port = uint16(9009)
(actual_port,sock) = open_any_tcp_port(accept_handler,default_port)
print(out, "julia_worker:") # print header
print(out, "$(dec(actual_port))#") # print port
print(out, bind_addr) #TODO: print hostname
print(out, '\n')
# close STDIN; workers will not use it
#close(STDIN)
disable_threaded_libs()
ccall(:jl_install_sigint_handler, Void, ())
global const Scheduler = current_task()
try
check_master_connect(60.0)
event_loop(false)
catch err
print(STDERR, "unhandled exception on $(myid()): $(err)\nexiting.\n")
end
close(sock)
exit(0)
end
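# A freshly started worker announces itself on `out` with a single line of the
# form "julia_worker:9009#192.168.1.100" (header, listening port, '#', bind
# address; the values here are only illustrative), which the master parses
# back out via read_worker_host_port/parse_connection_info.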
function start_cluster_workers(n, config)
w = cell(n)
cman = config[:cman]
# Get the cluster manager to launch the instances
(insttype, instances) = cman.launch_cb(n, config)
if insttype == :io_only
read_cb_response(inst) =
begin
(host, port) = read_worker_host_port(inst)
inst, host, port, host
end
elseif insttype == :io_host
read_cb_response(inst) =
begin
io = inst[1]
(priv_hostname, port) = read_worker_host_port(io)
io, priv_hostname, port, inst[2]
end
elseif insttype == :io_host_port
read_cb_response(inst) = (inst[1], inst[2], inst[3], inst[2])
elseif insttype == :host_port
read_cb_response(inst) = (nothing, inst[1], inst[2], inst[1])
elseif insttype == :cmd
read_cb_response(inst) =
begin
io,_ = readsfrom(detach(inst))
(host, port) = read_worker_host_port(io)
io, host, port, host
end
else
error("Unsupported format from Cluster Manager callback")
end
for i=1:n
(io, privhost, port, pubhost) = read_cb_response(instances[i])
w[i] = create_worker(privhost, port, pubhost, io, config)
end
w
end
function read_worker_host_port(io::IO)
io.line_buffered = true
while true
conninfo = readline(io)
private_hostname, port = parse_connection_info(conninfo)
if private_hostname != ""
return private_hostname, port
end
end
end
function create_worker(privhost, port, pubhost, stream, config)
tunnel = config[:tunnel]
s = split(pubhost,'@')
if length(s) > 1
user = s[1]
pubhost = s[2]
else
if haskey(ENV, "USER")
user = ENV["USER"]
elseif tunnel
error("USER must be specified either in the environment or as part of the hostname when the tunnel option is used")
end
end
if tunnel
sshflags = config[:sshflags]
w = Worker(pubhost, privhost, port, user, sshflags)
else
w = Worker(pubhost, port)
end